/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>

#include <linux/if_vlan.h>

#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
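
/* Illustrative note: tg3_flag(tp, JUMBO_CAPABLE) expands to
 * _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags), so every flag
 * name used with these wrappers must be a real enum TG3_FLAGS value;
 * a misspelled flag name fails to compile instead of silently testing
 * the wrong bit.
 */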
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
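
/* Worked example (illustrative): with TG3_TX_RING_SIZE == 512,
 * NEXT_TX(510) == 511 and NEXT_TX(511) == 0. Because the ring size is
 * a power of two, the AND with (TG3_TX_RING_SIZE - 1) gives the same
 * result as % TG3_TX_RING_SIZE but compiles to a simple mask.
 */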
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },

	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },

	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
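
/* Usage sketch (illustrative, not a verbatim call site): GPIO power
 * switching goes through GRC_LOCAL_CTRL and needs the settle time
 * defined earlier, so such writes use the waiting flavor:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */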
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off, ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
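
/* Note: on success tg3_ape_event_lock() returns with TG3_APE_LOCK_MEM
 * still held. Callers fill in their event descriptor and then drop the
 * lock themselves with tg3_ape_unlock(tp, TG3_APE_LOCK_MEM), as the
 * scratchpad read and send-event paths below do.
 */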
826 #ifdef CONFIG_TIGON3_HWMON
827 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
831 for (i = 0; i < timeout_us / 10; i++) {
832 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
834 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
840 return i == timeout_us / 10;
843 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
847 u32 i, bufoff, msgoff, maxlen, apedata;
849 if (!tg3_flag(tp, APE_HAS_NCSI))
852 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
853 if (apedata != APE_SEG_SIG_MAGIC)
856 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
857 if (!(apedata & APE_FW_STATUS_READY))
860 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
862 msgoff = bufoff + 2 * sizeof(u32);
863 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
868 /* Cap xfer sizes to scratchpad limits. */
869 length = (len > maxlen) ? maxlen : len;
872 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
873 if (!(apedata & APE_FW_STATUS_READY))
876 /* Wait for up to 1 msec for APE to service previous event. */
877 err = tg3_ape_event_lock(tp, 1000);
881 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
882 APE_EVENT_STATUS_SCRTCHPD_READ |
883 APE_EVENT_STATUS_EVENT_PENDING;
884 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
886 tg3_ape_write32(tp, bufoff, base_off);
887 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
889 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
890 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
894 if (tg3_ape_wait_for_event(tp, 30000))
897 for (i = 0; length; i += 4, length -= 4) {
898 u32 val = tg3_ape_read32(tp, msgoff + i);
899 memcpy(data, &val, sizeof(u32));
908 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
913 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
914 if (apedata != APE_SEG_SIG_MAGIC)
917 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
918 if (!(apedata & APE_FW_STATUS_READY))
921 /* Wait for up to 20 millisecond for APE to service previous event. */
922 err = tg3_ape_event_lock(tp, 20000);
926 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
927 event | APE_EVENT_STATUS_EVENT_PENDING);
929 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
930 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
935 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
940 if (!tg3_flag(tp, ENABLE_APE))
944 case RESET_KIND_INIT:
945 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
946 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
947 APE_HOST_SEG_SIG_MAGIC);
948 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
949 APE_HOST_SEG_LEN_MAGIC);
950 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
951 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
952 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
953 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
954 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
955 APE_HOST_BEHAV_NO_PHYLOCK);
956 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
957 TG3_APE_HOST_DRVR_STATE_START);
959 event = APE_EVENT_STATUS_STATE_START;
961 case RESET_KIND_SHUTDOWN:
962 if (device_may_wakeup(&tp->pdev->dev) &&
963 tg3_flag(tp, WOL_ENABLE)) {
964 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
965 TG3_APE_HOST_WOL_SPEED_AUTO);
966 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
968 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
970 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
972 event = APE_EVENT_STATUS_STATE_UNLOAD;
978 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
980 tg3_ape_send_event(tp, event);
983 static void tg3_send_ape_heartbeat(struct tg3 *tp,
984 unsigned long interval)
986 /* Check if hb interval has exceeded */
987 if (!tg3_flag(tp, ENABLE_APE) ||
988 time_before(jiffies, tp->ape_hb_jiffies + interval))
991 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
992 tp->ape_hb_jiffies = jiffies;
995 static void tg3_disable_ints(struct tg3 *tp)
999 tw32(TG3PCI_MISC_HOST_CTRL,
1000 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1001 for (i = 0; i < tp->irq_max; i++)
1002 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1005 static void tg3_enable_ints(struct tg3 *tp)
1012 tw32(TG3PCI_MISC_HOST_CTRL,
1013 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1015 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1016 for (i = 0; i < tp->irq_cnt; i++) {
1017 struct tg3_napi *tnapi = &tp->napi[i];
1019 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1020 if (tg3_flag(tp, 1SHOT_MSI))
1021 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1023 tp->coal_now |= tnapi->coal_now;
1026 /* Force an initial interrupt */
1027 if (!tg3_flag(tp, TAGGED_STATUS) &&
1028 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1029 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1031 tw32(HOSTCC_MODE, tp->coal_now);
1033 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1036 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1038 struct tg3 *tp = tnapi->tp;
1039 struct tg3_hw_status *sblk = tnapi->hw_status;
1040 unsigned int work_exists = 0;
1042 /* check for phy events */
1043 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1044 if (sblk->status & SD_STATUS_LINK_CHG)
1048 /* check for TX work to do */
1049 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1052 /* check for RX work to do */
1053 if (tnapi->rx_rcb_prod_idx &&
1054 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1061 * similar to tg3_enable_ints, but it accurately determines whether there
1062 * is new work pending and can return without flushing the PIO write
1063 * which reenables interrupts
1065 static void tg3_int_reenable(struct tg3_napi *tnapi)
1067 struct tg3 *tp = tnapi->tp;
1069 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1071 /* When doing tagged status, this work check is unnecessary.
1072 * The last_tag we write above tells the chip which piece of
1073 * work we've completed.
1075 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1076 tw32(HOSTCC_MODE, tp->coalesce_mode |
1077 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1080 static void tg3_switch_clocks(struct tg3 *tp)
1083 u32 orig_clock_ctrl;
1085 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1088 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1090 orig_clock_ctrl = clock_ctrl;
1091 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1092 CLOCK_CTRL_CLKRUN_OENABLE |
1094 tp->pci_clock_ctrl = clock_ctrl;
1096 if (tg3_flag(tp, 5705_PLUS)) {
1097 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1098 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1099 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1101 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1102 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1104 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1106 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1107 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1110 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1113 #define PHY_BUSY_LOOPS 5000
1115 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1122 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1124 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1128 tg3_ape_lock(tp, tp->phy_ape_lock);
1132 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1133 MI_COM_PHY_ADDR_MASK);
1134 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1135 MI_COM_REG_ADDR_MASK);
1136 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1138 tw32_f(MAC_MI_COM, frame_val);
1140 loops = PHY_BUSY_LOOPS;
1141 while (loops != 0) {
1143 frame_val = tr32(MAC_MI_COM);
1145 if ((frame_val & MI_COM_BUSY) == 0) {
1147 frame_val = tr32(MAC_MI_COM);
1155 *val = frame_val & MI_COM_DATA_MASK;
1159 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1160 tw32_f(MAC_MI_MODE, tp->mi_mode);
1164 tg3_ape_unlock(tp, tp->phy_ape_lock);
1169 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1171 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1174 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1181 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1182 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1185 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1187 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1191 tg3_ape_lock(tp, tp->phy_ape_lock);
1193 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1194 MI_COM_PHY_ADDR_MASK);
1195 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1196 MI_COM_REG_ADDR_MASK);
1197 frame_val |= (val & MI_COM_DATA_MASK);
1198 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1200 tw32_f(MAC_MI_COM, frame_val);
1202 loops = PHY_BUSY_LOOPS;
1203 while (loops != 0) {
1205 frame_val = tr32(MAC_MI_COM);
1206 if ((frame_val & MI_COM_BUSY) == 0) {
1208 frame_val = tr32(MAC_MI_COM);
1218 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1219 tw32_f(MAC_MI_MODE, tp->mi_mode);
1223 tg3_ape_unlock(tp, tp->phy_ape_lock);
1228 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1230 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1233 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1237 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1241 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1245 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1246 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1256 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1260 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1264 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1268 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1269 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1273 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1279 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1283 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1285 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1290 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1294 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1296 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1301 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1305 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1306 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1307 MII_TG3_AUXCTL_SHDWSEL_MISC);
1309 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1314 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1316 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1317 set |= MII_TG3_AUXCTL_MISC_WREN;
1319 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1322 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1327 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1333 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1335 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1337 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1338 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1343 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1345 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1346 reg | val | MII_TG3_MISC_SHDW_WREN);
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
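
/* Per IEEE 802.3 clause 22 the BMCR reset bit is self-clearing and a
 * PHY may take up to 0.5s to finish, which is why the loop above polls
 * BMCR_RESET instead of sleeping for a fixed interval; most PHYs clear
 * it almost immediately.
 */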
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
1410 static void tg3_mdio_config_5785(struct tg3 *tp)
1413 struct phy_device *phydev;
1415 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1416 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1417 case PHY_ID_BCM50610:
1418 case PHY_ID_BCM50610M:
1419 val = MAC_PHYCFG2_50610_LED_MODES;
1421 case PHY_ID_BCMAC131:
1422 val = MAC_PHYCFG2_AC131_LED_MODES;
1424 case PHY_ID_RTL8211C:
1425 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1427 case PHY_ID_RTL8201E:
1428 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1434 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1435 tw32(MAC_PHYCFG2, val);
1437 val = tr32(MAC_PHYCFG1);
1438 val &= ~(MAC_PHYCFG1_RGMII_INT |
1439 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1440 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1441 tw32(MAC_PHYCFG1, val);
1446 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1447 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1448 MAC_PHYCFG2_FMODE_MASK_MASK |
1449 MAC_PHYCFG2_GMODE_MASK_MASK |
1450 MAC_PHYCFG2_ACT_MASK_MASK |
1451 MAC_PHYCFG2_QUAL_MASK_MASK |
1452 MAC_PHYCFG2_INBAND_ENABLE;
1454 tw32(MAC_PHYCFG2, val);
1456 val = tr32(MAC_PHYCFG1);
1457 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1458 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1459 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1462 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1463 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1465 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1466 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1467 tw32(MAC_PHYCFG1, val);
1469 val = tr32(MAC_EXT_RGMII_MODE);
1470 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1471 MAC_RGMII_MODE_RX_QUALITY |
1472 MAC_RGMII_MODE_RX_ACTIVITY |
1473 MAC_RGMII_MODE_RX_ENG_DET |
1474 MAC_RGMII_MODE_TX_ENABLE |
1475 MAC_RGMII_MODE_TX_LOWPWR |
1476 MAC_RGMII_MODE_TX_RESET);
1477 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1478 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1479 val |= MAC_RGMII_MODE_RX_INT_B |
1480 MAC_RGMII_MODE_RX_QUALITY |
1481 MAC_RGMII_MODE_RX_ACTIVITY |
1482 MAC_RGMII_MODE_RX_ENG_DET;
1483 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1484 val |= MAC_RGMII_MODE_TX_ENABLE |
1485 MAC_RGMII_MODE_TX_LOWPWR |
1486 MAC_RGMII_MODE_TX_RESET;
1488 tw32(MAC_EXT_RGMII_MODE, val);
1491 static void tg3_mdio_start(struct tg3 *tp)
1493 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1494 tw32_f(MAC_MI_MODE, tp->mi_mode);
1497 if (tg3_flag(tp, MDIOBUS_INITED) &&
1498 tg3_asic_rev(tp) == ASIC_REV_5785)
1499 tg3_mdio_config_5785(tp);
1502 static int tg3_mdio_init(struct tg3 *tp)
1506 struct phy_device *phydev;
1508 if (tg3_flag(tp, 5717_PLUS)) {
1511 tp->phy_addr = tp->pci_fn + 1;
1513 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1514 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1516 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1517 TG3_CPMU_PHY_STRAP_IS_SERDES;
1520 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1523 addr = ssb_gige_get_phyaddr(tp->pdev);
1526 tp->phy_addr = addr;
1528 tp->phy_addr = TG3_PHY_MII_ADDR;
1532 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1535 tp->mdio_bus = mdiobus_alloc();
1536 if (tp->mdio_bus == NULL)
1539 tp->mdio_bus->name = "tg3 mdio bus";
1540 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1541 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1542 tp->mdio_bus->priv = tp;
1543 tp->mdio_bus->parent = &tp->pdev->dev;
1544 tp->mdio_bus->read = &tg3_mdio_read;
1545 tp->mdio_bus->write = &tg3_mdio_write;
1546 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}
1563 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1565 if (!phydev || !phydev->drv) {
1566 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1567 mdiobus_unregister(tp->mdio_bus);
1568 mdiobus_free(tp->mdio_bus);
1572 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1573 case PHY_ID_BCM57780:
1574 phydev->interface = PHY_INTERFACE_MODE_GMII;
1575 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 case PHY_ID_BCM50610:
1578 case PHY_ID_BCM50610M:
1579 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1580 PHY_BRCM_RX_REFCLK_UNUSED |
1581 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1582 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584 case PHY_ID_RTL8211C:
1585 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1587 case PHY_ID_RTL8201E:
1588 case PHY_ID_BCMAC131:
1589 phydev->interface = PHY_INTERFACE_MODE_MII;
1590 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1591 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1595 tg3_flag_set(tp, MDIOBUS_INITED);
1597 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1598 tg3_mdio_config_5785(tp);
1603 static void tg3_mdio_fini(struct tg3 *tp)
1605 if (tg3_flag(tp, MDIOBUS_INITED)) {
1606 tg3_flag_clear(tp, MDIOBUS_INITED);
1607 mdiobus_unregister(tp->mdio_bus);
1608 mdiobus_free(tp->mdio_bus);
1612 /* tp->lock is held. */
1613 static inline void tg3_generate_fw_event(struct tg3 *tp)
1617 val = tr32(GRC_RX_CPU_EVENT);
1618 val |= GRC_RX_CPU_DRIVER_EVENT;
1619 tw32_f(GRC_RX_CPU_EVENT, val);
1621 tp->last_event_jiffies = jiffies;
1624 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1626 /* tp->lock is held. */
1627 static void tg3_wait_for_event_ack(struct tg3 *tp)
1630 unsigned int delay_cnt;
1633 /* If enough time has passed, no wait is necessary. */
1634 time_remain = (long)(tp->last_event_jiffies + 1 +
1635 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1637 if (time_remain < 0)
1640 /* Check if we can shorten the wait time. */
1641 delay_cnt = jiffies_to_usecs(time_remain);
1642 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1643 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1644 delay_cnt = (delay_cnt >> 3) + 1;
1646 for (i = 0; i < delay_cnt; i++) {
1647 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1649 if (pci_channel_offline(tp->pdev))
1656 /* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1691 /* tp->lock is held. */
1692 static void tg3_ump_link_report(struct tg3 *tp)
1696 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1699 tg3_phy_gather_ump_data(tp, data);
1701 tg3_wait_for_event_ack(tp);
1703 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1704 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710 tg3_generate_fw_event(tp);
1713 /* tp->lock is held. */
1714 static void tg3_stop_fw(struct tg3 *tp)
1716 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1717 /* Wait for RX cpu to ACK the previous event. */
1718 tg3_wait_for_event_ack(tp);
1720 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722 tg3_generate_fw_event(tp);
1724 /* Wait for RX cpu to ACK this event. */
1725 tg3_wait_for_event_ack(tp);
1729 /* tp->lock is held. */
1730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1733 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737 case RESET_KIND_INIT:
1738 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1742 case RESET_KIND_SHUTDOWN:
1743 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1747 case RESET_KIND_SUSPEND:
1748 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1758 /* tp->lock is held. */
1759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763 case RESET_KIND_INIT:
1764 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765 DRV_STATE_START_DONE);
1768 case RESET_KIND_SHUTDOWN:
1769 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770 DRV_STATE_UNLOAD_DONE);
1779 /* tp->lock is held. */
1780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 if (tg3_flag(tp, ENABLE_ASF)) {
1784 case RESET_KIND_INIT:
1785 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789 case RESET_KIND_SHUTDOWN:
1790 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1794 case RESET_KIND_SUSPEND:
1795 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1805 static int tg3_poll_fw(struct tg3 *tp)
1810 if (tg3_flag(tp, NO_FWARE_REPORTED))
1813 if (tg3_flag(tp, IS_SSB_CORE)) {
1814 /* We don't use firmware. */
1818 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1819 /* Wait up to 20ms for init done. */
1820 for (i = 0; i < 200; i++) {
1821 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823 if (pci_channel_offline(tp->pdev))
1831 /* Wait for firmware initialization to complete. */
1832 for (i = 0; i < 100000; i++) {
1833 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1834 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836 if (pci_channel_offline(tp->pdev)) {
1837 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1838 tg3_flag_set(tp, NO_FWARE_REPORTED);
1839 netdev_info(tp->dev, "No firmware running\n");
1848 /* Chip might not be fitted with firmware. Some Sun onboard
1849 * parts are configured like that. So don't signal the timeout
1850 * of the above loop as an error, but do report the lack of
1851 * running firmware once.
1853 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1854 tg3_flag_set(tp, NO_FWARE_REPORTED);
1856 netdev_info(tp->dev, "No firmware running\n");
1859 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1860 /* The 57765 A0 needs a little more
1861 * time to do some important work.
1869 static void tg3_link_report(struct tg3 *tp)
1871 if (!netif_carrier_ok(tp->dev)) {
1872 netif_info(tp, link, tp->dev, "Link is down\n");
1873 tg3_ump_link_report(tp);
1874 } else if (netif_msg_link(tp)) {
1875 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1876 (tp->link_config.active_speed == SPEED_1000 ?
1878 (tp->link_config.active_speed == SPEED_100 ?
1880 (tp->link_config.active_duplex == DUPLEX_FULL ?
1883 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1884 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1889 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1890 netdev_info(tp->dev, "EEE is %s\n",
1891 tp->setlpicnt ? "enabled" : "disabled");
1893 tg3_ump_link_report(tp);
1896 tp->link_up = netif_carrier_ok(tp->dev);
1899 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1903 if (adv & ADVERTISE_PAUSE_CAP) {
1904 flowctrl |= FLOW_CTRL_RX;
1905 if (!(adv & ADVERTISE_PAUSE_ASYM))
1906 flowctrl |= FLOW_CTRL_TX;
1907 } else if (adv & ADVERTISE_PAUSE_ASYM)
1908 flowctrl |= FLOW_CTRL_TX;
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
1929 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1933 if (adv & ADVERTISE_1000XPAUSE) {
1934 flowctrl |= FLOW_CTRL_RX;
1935 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1936 flowctrl |= FLOW_CTRL_TX;
1937 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1938 flowctrl |= FLOW_CTRL_TX;
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
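
/* Worked example of the resolution above: if both ends advertise
 * ADVERTISE_1000XPSE_ASYM but only the link partner also advertises
 * ADVERTISE_1000XPAUSE, the result is FLOW_CTRL_TX (we may send pause
 * frames but ignore received ones). The symmetric FLOW_CTRL_TX |
 * FLOW_CTRL_RX result requires ADVERTISE_1000XPAUSE from both ends.
 */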
1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1963 u32 old_rx_mode = tp->rx_mode;
1964 u32 old_tx_mode = tp->tx_mode;
1966 if (tg3_flag(tp, USE_PHYLIB))
1967 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1969 autoneg = tp->link_config.autoneg;
1971 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1972 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1973 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1975 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1977 flowctrl = tp->link_config.flowctrl;
1979 tp->link_config.active_flowctrl = flowctrl;
1981 if (flowctrl & FLOW_CTRL_RX)
1982 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1984 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1986 if (old_rx_mode != tp->rx_mode)
1987 tw32_f(MAC_RX_MODE, tp->rx_mode);
1989 if (flowctrl & FLOW_CTRL_TX)
1990 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1992 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1994 if (old_tx_mode != tp->tx_mode)
1995 tw32_f(MAC_TX_MODE, tp->tx_mode);
1998 static void tg3_adjust_link(struct net_device *dev)
2000 u8 oldflowctrl, linkmesg = 0;
2001 u32 mac_mode, lcl_adv, rmt_adv;
2002 struct tg3 *tp = netdev_priv(dev);
2003 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2005 spin_lock_bh(&tp->lock);
2007 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2008 MAC_MODE_HALF_DUPLEX);
2010 oldflowctrl = tp->link_config.active_flowctrl;
2016 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2017 mac_mode |= MAC_MODE_PORT_MODE_MII;
2018 else if (phydev->speed == SPEED_1000 ||
2019 tg3_asic_rev(tp) != ASIC_REV_5785)
2020 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2022 mac_mode |= MAC_MODE_PORT_MODE_MII;
2024 if (phydev->duplex == DUPLEX_HALF)
2025 mac_mode |= MAC_MODE_HALF_DUPLEX;
2027 lcl_adv = mii_advertise_flowctrl(
2028 tp->link_config.flowctrl);
2031 rmt_adv = LPA_PAUSE_CAP;
2032 if (phydev->asym_pause)
2033 rmt_adv |= LPA_PAUSE_ASYM;
2036 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2038 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2040 if (mac_mode != tp->mac_mode) {
2041 tp->mac_mode = mac_mode;
2042 tw32_f(MAC_MODE, tp->mac_mode);
2046 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2047 if (phydev->speed == SPEED_10)
2049 MAC_MI_STAT_10MBPS_MODE |
2050 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2056 tw32(MAC_TX_LENGTHS,
2057 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2058 (6 << TX_LENGTHS_IPG_SHIFT) |
2059 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2061 tw32(MAC_TX_LENGTHS,
2062 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2063 (6 << TX_LENGTHS_IPG_SHIFT) |
2064 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066 if (phydev->link != tp->old_link ||
2067 phydev->speed != tp->link_config.active_speed ||
2068 phydev->duplex != tp->link_config.active_duplex ||
2069 oldflowctrl != tp->link_config.active_flowctrl)
2072 tp->old_link = phydev->link;
2073 tp->link_config.active_speed = phydev->speed;
2074 tp->link_config.active_duplex = phydev->duplex;
2076 spin_unlock_bh(&tp->lock);
2079 tg3_link_report(tp);
2082 static int tg3_phy_init(struct tg3 *tp)
2084 struct phy_device *phydev;
2086 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2089 /* Bring the PHY back to a known state. */
2092 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2094 /* Attach the MAC to the PHY. */
2095 phydev = phy_connect(tp->dev, phydev_name(phydev),
2096 tg3_adjust_link, phydev->interface);
2097 if (IS_ERR(phydev)) {
2098 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2099 return PTR_ERR(phydev);
2102 /* Mask with MAC supported features. */
2103 switch (phydev->interface) {
2104 case PHY_INTERFACE_MODE_GMII:
2105 case PHY_INTERFACE_MODE_RGMII:
2106 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2107 phy_set_max_speed(phydev, SPEED_1000);
2108 phy_support_asym_pause(phydev);
2112 case PHY_INTERFACE_MODE_MII:
2113 phy_set_max_speed(phydev, SPEED_100);
2114 phy_support_asym_pause(phydev);
2117 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2121 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123 phy_attached_info(phydev);
2128 static void tg3_phy_start(struct tg3 *tp)
2130 struct phy_device *phydev;
2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2137 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2138 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2139 phydev->speed = tp->link_config.speed;
2140 phydev->duplex = tp->link_config.duplex;
2141 phydev->autoneg = tp->link_config.autoneg;
2142 ethtool_convert_legacy_u32_to_link_mode(
2143 phydev->advertising, tp->link_config.advertising);
2148 phy_start_aneg(phydev);
2151 static void tg3_phy_stop(struct tg3 *tp)
2153 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2156 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2159 static void tg3_phy_fini(struct tg3 *tp)
2161 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2172 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2175 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176 /* Cannot do read-modify-write on 5401 */
2177 err = tg3_phy_auxctl_write(tp,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2184 err = tg3_phy_auxctl_read(tp,
2185 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2189 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190 err = tg3_phy_auxctl_write(tp,
2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2201 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2204 tg3_writephy(tp, MII_TG3_FET_TEST,
2205 phytest | MII_TG3_FET_SHADOW_EN);
2206 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2208 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2210 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2213 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2221 if (!tg3_flag(tp, 5705_PLUS) ||
2222 (tg3_flag(tp, 5717_PLUS) &&
2223 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2226 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227 tg3_phy_fet_toggle_apd(tp, enable);
2231 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2232 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2233 MII_TG3_MISC_SHDW_SCR5_SDTL |
2234 MII_TG3_MISC_SHDW_SCR5_C125OE;
2235 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2236 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2238 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2241 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2243 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2245 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2248 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2252 if (!tg3_flag(tp, 5705_PLUS) ||
2253 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2256 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2259 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2260 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2262 tg3_writephy(tp, MII_TG3_FET_TEST,
2263 ephy | MII_TG3_FET_SHADOW_EN);
2264 if (!tg3_readphy(tp, reg, &phy)) {
2265 if (enable)
2266 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2267 else
2268 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 tg3_writephy(tp, reg, phy);
2271 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2276 ret = tg3_phy_auxctl_read(tp,
2277 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2279 if (enable)
2280 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2281 else
2282 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 tg3_phy_auxctl_write(tp,
2284 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2289 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2294 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2295 return;
2297 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2298 if (!ret)
2299 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2300 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2303 static void tg3_phy_apply_otp(struct tg3 *tp)
2312 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2313 return;
2315 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2316 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2317 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2320 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2321 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2324 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2325 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2328 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2331 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2334 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2335 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337 tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2343 struct ethtool_eee *dest = &tp->eee;
2345 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2346 return;
2351 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2352 return;
2354 /* Pull eee_active */
2355 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2356 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2357 dest->eee_active = 1;
2358 } else
2359 dest->eee_active = 0;
2361 /* Pull lp advertised settings */
2362 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2363 return;
2364 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2366 /* Pull advertised and eee_enabled settings */
2367 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2368 return;
2369 dest->eee_enabled = !!val;
2370 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2372 /* Pull tx_lpi_enabled */
2373 val = tr32(TG3_CPMU_EEE_MODE);
2374 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376 /* Pull lpi timer value */
2377 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2380 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2384 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2385 return;
2389 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2390 current_link_up &&
2391 tp->link_config.active_duplex == DUPLEX_FULL &&
2392 (tp->link_config.active_speed == SPEED_100 ||
2393 tp->link_config.active_speed == SPEED_1000)) {
2396 if (tp->link_config.active_speed == SPEED_1000)
2397 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2398 else
2399 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403 tg3_eee_pull_config(tp, NULL);
2404 if (tp->eee.eee_active)
2408 if (!tp->setlpicnt) {
2409 if (current_link_up &&
2410 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2411 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2412 tg3_phy_toggle_auxctl_smdsp(tp, false);
2415 val = tr32(TG3_CPMU_EEE_MODE);
2416 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2420 static void tg3_phy_eee_enable(struct tg3 *tp)
2424 if (tp->link_config.active_speed == SPEED_1000 &&
2425 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2426 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2427 tg3_flag(tp, 57765_CLASS)) &&
2428 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2429 val = MII_TG3_DSP_TAP26_ALNOKO |
2430 MII_TG3_DSP_TAP26_RMRXSTO;
2431 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2432 tg3_phy_toggle_auxctl_smdsp(tp, false);
2435 val = tr32(TG3_CPMU_EEE_MODE);
2436 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 static int tg3_wait_macro_done(struct tg3 *tp)
2446 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2447 if ((tmp32 & 0x1000) == 0)
2457 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 static const u32 test_pat[4][6] = {
2460 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2461 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2462 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2463 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
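/* Illustrative note (not from the original source): each row above is a
 * per-channel DSP test pattern. The loop below writes a row into one of
 * the four channels, reads it back, and on any mismatch issues the
 * 0x4001/0x4005 recovery writes and fails the test so the caller can
 * reset the PHY and retry.
 */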
2467 for (chan = 0; chan < 4; chan++) {
2470 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2471 (chan * 0x2000) | 0x0200);
2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474 for (i = 0; i < 6; i++)
2475 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2479 if (tg3_wait_macro_done(tp)) {
2484 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485 (chan * 0x2000) | 0x0200);
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2487 if (tg3_wait_macro_done(tp)) {
2492 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2493 if (tg3_wait_macro_done(tp)) {
2498 for (i = 0; i < 6; i += 2) {
2501 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2502 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2503 tg3_wait_macro_done(tp)) {
2509 if (low != test_pat[chan][i] ||
2510 high != test_pat[chan][i+1]) {
2511 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2512 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2523 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2527 for (chan = 0; chan < 4; chan++) {
2530 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2531 (chan * 0x2000) | 0x0200);
2532 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2533 for (i = 0; i < 6; i++)
2534 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2535 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2536 if (tg3_wait_macro_done(tp))
2543 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 u32 reg32, phy9_orig;
2546 int retries, do_phy_reset, err;
2552 err = tg3_bmcr_reset(tp);
2558 /* Disable transmitter and interrupt. */
2559 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2563 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565 /* Set full-duplex, 1000 mbps. */
2566 tg3_writephy(tp, MII_BMCR,
2567 BMCR_FULLDPLX | BMCR_SPEED1000);
2569 /* Set to master mode. */
2570 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573 tg3_writephy(tp, MII_CTRL1000,
2574 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2580 /* Block the PHY control access. */
2581 tg3_phydsp_write(tp, 0x8005, 0x0800);
2583 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586 } while (--retries);
2588 err = tg3_phy_reset_chanpat(tp);
2592 tg3_phydsp_write(tp, 0x8005, 0x0000);
2594 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2595 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597 tg3_phy_toggle_auxctl_smdsp(tp, false);
2599 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2606 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2611 static void tg3_carrier_off(struct tg3 *tp)
2613 netif_carrier_off(tp->dev);
2614 tp->link_up = false;
2617 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 if (tg3_flag(tp, ENABLE_ASF))
2620 netdev_warn(tp->dev,
2621 "Management side-band traffic will be interrupted during phy settings change\n");
2624 /* This will reset the tigon3 PHY if there is no valid
2625 * link unless the FORCE argument is non-zero.
2626 */
2627 static int tg3_phy_reset(struct tg3 *tp)
2632 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2633 val = tr32(GRC_MISC_CFG);
2634 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637 err = tg3_readphy(tp, MII_BMSR, &val);
2638 err |= tg3_readphy(tp, MII_BMSR, &val);
2642 if (netif_running(tp->dev) && tp->link_up) {
2643 netif_carrier_off(tp->dev);
2644 tg3_link_report(tp);
2647 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2648 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2649 tg3_asic_rev(tp) == ASIC_REV_5705) {
2650 err = tg3_phy_reset_5703_4_5(tp);
2657 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2658 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2659 cpmuctrl = tr32(TG3_CPMU_CTRL);
2660 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2661 tw32(TG3_CPMU_CTRL,
2662 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665 err = tg3_bmcr_reset(tp);
2669 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2670 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2671 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673 tw32(TG3_CPMU_CTRL, cpmuctrl);
2676 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2677 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2678 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2679 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2680 CPMU_LSPD_1000MB_MACCLK_12_5) {
2681 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2687 if (tg3_flag(tp, 5717_PLUS) &&
2688 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691 tg3_phy_apply_otp(tp);
2693 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2694 tg3_phy_toggle_apd(tp, true);
2695 else
2696 tg3_phy_toggle_apd(tp, false);
2699 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2700 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2702 tg3_phydsp_write(tp, 0x000a, 0x0323);
2703 tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2707 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2712 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 tg3_phydsp_write(tp, 0x000a, 0x310b);
2714 tg3_phydsp_write(tp, 0x201f, 0x9506);
2715 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2716 tg3_phy_toggle_auxctl_smdsp(tp, false);
2718 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2719 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2721 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2722 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2723 tg3_writephy(tp, MII_TG3_TEST1,
2724 MII_TG3_TEST1_TRIM_EN | 0x4);
2726 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728 tg3_phy_toggle_auxctl_smdsp(tp, false);
2732 /* Set Extended packet length bit (bit 14) on all chips that
2733 * support jumbo frames. */
2734 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2735 /* Cannot do read-modify-write on 5401 */
2736 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2737 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738 /* Set bit 14 with read-modify-write to preserve other bits */
2739 err = tg3_phy_auxctl_read(tp,
2740 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2743 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2747 * jumbo frames transmission.
2748 */
2749 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2750 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2751 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2752 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2756 /* adjust output voltage */
2757 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2761 tg3_phydsp_write(tp, 0xffb, 0x4000);
2763 tg3_phy_toggle_automdix(tp, true);
2764 tg3_phy_set_wirespeed(tp);
2768 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2769 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2770 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2771 TG3_GPIO_MSG_NEED_VAUX)
2772 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2773 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2774 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2775 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 12))
2778 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2779 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2780 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2781 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 12))
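/* Illustrative note (not from the original source): the GPIO message word
 * carries one 4-bit nibble per PCI function, which is what the ALL_*_MASK
 * macros above expand to. For example, function 2 reporting driver
 * presence sets TG3_GPIO_MSG_DRVR_PRES << 8.
 */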
2784 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2788 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2789 tg3_asic_rev(tp) == ASIC_REV_5719)
2790 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2791 else
2792 status = tr32(TG3_CPMU_DRV_STATUS);
2794 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2795 status &= ~(TG3_GPIO_MSG_MASK << shift);
2796 status |= (newstat << shift);
2798 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2799 tg3_asic_rev(tp) == ASIC_REV_5719)
2800 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2801 else
2802 tw32(TG3_CPMU_DRV_STATUS, status);
2804 return status >> TG3_APE_GPIO_MSG_SHIFT;
2807 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 if (!tg3_flag(tp, IS_NIC))
2810 return 0;
2812 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2814 tg3_asic_rev(tp) == ASIC_REV_5720) {
2815 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2821 TG3_GRC_LCLCTL_PWRSW_DELAY);
2823 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826 TG3_GRC_LCLCTL_PWRSW_DELAY);
2832 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2836 if (!tg3_flag(tp, IS_NIC) ||
2837 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2838 tg3_asic_rev(tp) == ASIC_REV_5701)
2841 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843 tw32_wait_f(GRC_LOCAL_CTRL,
2844 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2845 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847 tw32_wait_f(GRC_LOCAL_CTRL,
2849 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851 tw32_wait_f(GRC_LOCAL_CTRL,
2852 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2853 TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 if (!tg3_flag(tp, IS_NIC))
2859 return;
2861 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2862 tg3_asic_rev(tp) == ASIC_REV_5701) {
2863 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2864 (GRC_LCLCTRL_GPIO_OE0 |
2865 GRC_LCLCTRL_GPIO_OE1 |
2866 GRC_LCLCTRL_GPIO_OE2 |
2867 GRC_LCLCTRL_GPIO_OUTPUT0 |
2868 GRC_LCLCTRL_GPIO_OUTPUT1),
2869 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2871 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2872 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2873 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2874 GRC_LCLCTRL_GPIO_OE1 |
2875 GRC_LCLCTRL_GPIO_OE2 |
2876 GRC_LCLCTRL_GPIO_OUTPUT0 |
2877 GRC_LCLCTRL_GPIO_OUTPUT1 |
2879 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2880 TG3_GRC_LCLCTL_PWRSW_DELAY);
2882 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2883 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2884 TG3_GRC_LCLCTL_PWRSW_DELAY);
2886 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2887 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 u32 grc_local_ctrl = 0;
2893 /* Workaround to prevent overdrawing Amps. */
2894 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2895 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2896 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2898 TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 /* On 5753 and variants, GPIO2 cannot be used. */
2902 no_gpio2 = tp->nic_sram_data_cfg &
2903 NIC_SRAM_DATA_CFG_NO_GPIO2;
2905 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2906 GRC_LCLCTRL_GPIO_OE1 |
2907 GRC_LCLCTRL_GPIO_OE2 |
2908 GRC_LCLCTRL_GPIO_OUTPUT1 |
2909 GRC_LCLCTRL_GPIO_OUTPUT2;
2911 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2912 GRC_LCLCTRL_GPIO_OUTPUT2);
2914 tw32_wait_f(GRC_LOCAL_CTRL,
2915 tp->grc_local_ctrl | grc_local_ctrl,
2916 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920 tw32_wait_f(GRC_LOCAL_CTRL,
2921 tp->grc_local_ctrl | grc_local_ctrl,
2922 TG3_GRC_LCLCTL_PWRSW_DELAY);
2925 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2926 tw32_wait_f(GRC_LOCAL_CTRL,
2927 tp->grc_local_ctrl | grc_local_ctrl,
2928 TG3_GRC_LCLCTL_PWRSW_DELAY);
2933 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2937 /* Serialize power state transitions */
2938 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2942 msg = TG3_GPIO_MSG_NEED_VAUX;
2944 msg = tg3_set_function_status(tp, msg);
2946 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2947 goto done;
2949 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2950 tg3_pwrsrc_switch_to_vaux(tp);
2951 else
2952 tg3_pwrsrc_die_with_vmain(tp);
2955 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 bool need_vaux = false;
2962 /* The GPIOs do something completely different on 57765. */
2963 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2964 return;
2966 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2967 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2968 tg3_asic_rev(tp) == ASIC_REV_5720) {
2969 tg3_frob_aux_power_5717(tp, include_wol ?
2970 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2974 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2975 struct net_device *dev_peer;
2977 dev_peer = pci_get_drvdata(tp->pdev_peer);
2979 /* remove_one() may have been run on the peer. */
2981 struct tg3 *tp_peer = netdev_priv(dev_peer);
2983 if (tg3_flag(tp_peer, INIT_COMPLETE))
2984 return;
2986 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2987 tg3_flag(tp_peer, ENABLE_ASF))
2988 need_vaux = true;
2992 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2993 tg3_flag(tp, ENABLE_ASF))
2994 need_vaux = true;
2996 if (need_vaux)
2997 tg3_pwrsrc_switch_to_vaux(tp);
2998 else
2999 tg3_pwrsrc_die_with_vmain(tp);
3002 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3007 if (speed != SPEED_10)
3009 } else if (speed == SPEED_10)
3015 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 switch (tg3_asic_rev(tp)) {
3022 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3031 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3040 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 switch (tg3_asic_rev(tp)) {
3045 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3054 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3058 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3059 return;
3061 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3062 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3063 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3064 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3066 sg_dig_ctrl |=
3067 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3068 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3069 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3074 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076 val = tr32(GRC_MISC_CFG);
3077 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085 tg3_writephy(tp, MII_ADVERTISE, 0);
3086 tg3_writephy(tp, MII_BMCR,
3087 BMCR_ANENABLE | BMCR_ANRESTART);
3089 tg3_writephy(tp, MII_TG3_FET_TEST,
3090 phytest | MII_TG3_FET_SHADOW_EN);
3091 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3092 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3093 tg3_writephy(tp,
3094 MII_TG3_FET_SHDW_AUXMODE4,
3095 phy);
3097 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100 } else if (do_low_power) {
3101 if (!tg3_phy_led_bug(tp))
3102 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3103 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3106 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3107 MII_TG3_AUXCTL_PCTL_VREG_11V;
3108 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111 /* The PHY should not be powered down on some chips because
3112 * of bugs.
3113 */
3114 if (tg3_phy_power_bug(tp))
3117 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3118 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3119 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3120 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3121 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3122 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 /* tp->lock is held. */
3129 static int tg3_nvram_lock(struct tg3 *tp)
3131 if (tg3_flag(tp, NVRAM)) {
3134 if (tp->nvram_lock_cnt == 0) {
3135 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3136 for (i = 0; i < 8000; i++) {
3137 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3142 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3146 tp->nvram_lock_cnt++;
3151 /* tp->lock is held. */
3152 static void tg3_nvram_unlock(struct tg3 *tp)
3154 if (tg3_flag(tp, NVRAM)) {
3155 if (tp->nvram_lock_cnt > 0)
3156 tp->nvram_lock_cnt--;
3157 if (tp->nvram_lock_cnt == 0)
3158 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3162 /* tp->lock is held. */
3163 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3166 u32 nvaccess = tr32(NVRAM_ACCESS);
3168 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3172 /* tp->lock is held. */
3173 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3176 u32 nvaccess = tr32(NVRAM_ACCESS);
3178 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3182 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3183 u32 offset, u32 *val)
3188 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3192 EEPROM_ADDR_DEVID_MASK |
3194 tw32(GRC_EEPROM_ADDR,
3196 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3197 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3198 EEPROM_ADDR_ADDR_MASK) |
3199 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201 for (i = 0; i < 1000; i++) {
3202 tmp = tr32(GRC_EEPROM_ADDR);
3204 if (tmp & EEPROM_ADDR_COMPLETE)
3208 if (!(tmp & EEPROM_ADDR_COMPLETE))
3211 tmp = tr32(GRC_EEPROM_DATA);
3213 /*
3214 * The data will always be opposite the native endian
3215 * format. Perform a blind byteswap to compensate.
3216 */
3217 *val = swab32(tmp);
3222 #define NVRAM_CMD_TIMEOUT 10000
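/* Illustrative note (not from the original source): combined with the
 * usleep_range(10, 40) per iteration below, this bounds
 * tg3_nvram_exec_cmd() to roughly 100-400 ms of polling before it gives
 * up.
 */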
3224 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3228 tw32(NVRAM_CMD, nvram_cmd);
3229 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3230 usleep_range(10, 40);
3231 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3237 if (i == NVRAM_CMD_TIMEOUT)
3243 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 if (tg3_flag(tp, NVRAM) &&
3246 tg3_flag(tp, NVRAM_BUFFERED) &&
3247 tg3_flag(tp, FLASH) &&
3248 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3249 (tp->nvram_jedecnum == JEDEC_ATMEL))
3251 addr = ((addr / tp->nvram_pagesize) <<
3252 ATMEL_AT45DB0X1B_PAGE_POS) +
3253 (addr % tp->nvram_pagesize);
3258 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 if (tg3_flag(tp, NVRAM) &&
3261 tg3_flag(tp, NVRAM_BUFFERED) &&
3262 tg3_flag(tp, FLASH) &&
3263 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3264 (tp->nvram_jedecnum == JEDEC_ATMEL))
3266 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3267 tp->nvram_pagesize) +
3268 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
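/* Worked example (illustrative; assumes a 264-byte-page Atmel part with
 * ATMEL_AT45DB0X1B_PAGE_POS == 9): linear offset 538 (= 2 * 264 + 10)
 * maps to physical address (2 << 9) + 10 = 0x40a in
 * tg3_nvram_phys_addr(), and tg3_nvram_logical_addr() above inverts that
 * mapping.
 */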
3273 /* NOTE: Data read in from NVRAM is byteswapped according to
3274 * the byteswapping settings for all other register accesses.
3275 * tg3 devices are BE devices, so on a BE machine, the data
3276 * returned will be exactly as it is seen in NVRAM. On a LE
3277 * machine, the 32-bit value will be byteswapped.
3278 */
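/* Worked example (not from the original comment): if NVRAM holds the
 * bytes 12 34 56 78 at a given offset, tg3_nvram_read() returns
 * 0x12345678 on a big-endian host but 0x78563412 on a little-endian one;
 * tg3_nvram_read_be32() below applies cpu_to_be32() so callers see the
 * same bytestream order on either host.
 */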
3279 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3283 if (!tg3_flag(tp, NVRAM))
3284 return tg3_nvram_read_using_eeprom(tp, offset, val);
3286 offset = tg3_nvram_phys_addr(tp, offset);
3288 if (offset > NVRAM_ADDR_MSK)
3291 ret = tg3_nvram_lock(tp);
3295 tg3_enable_nvram_access(tp);
3297 tw32(NVRAM_ADDR, offset);
3298 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3299 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302 *val = tr32(NVRAM_RDDATA);
3304 tg3_disable_nvram_access(tp);
3306 tg3_nvram_unlock(tp);
3311 /* Ensures NVRAM data is in bytestream format. */
3312 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 int res = tg3_nvram_read(tp, offset, &v);
3317 *val = cpu_to_be32(v);
3321 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3322 u32 offset, u32 len, u8 *buf)
3327 for (i = 0; i < len; i += 4) {
3333 memcpy(&data, buf + i, 4);
3335 /*
3336 * The SEEPROM interface expects the data to always be opposite
3337 * the native endian format. We accomplish this by reversing
3338 * all the operations that would have been performed on the
3339 * data from a call to tg3_nvram_read_be32().
3340 */
3341 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343 val = tr32(GRC_EEPROM_ADDR);
3344 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348 tw32(GRC_EEPROM_ADDR, val |
3349 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3350 (addr & EEPROM_ADDR_ADDR_MASK) |
3354 for (j = 0; j < 1000; j++) {
3355 val = tr32(GRC_EEPROM_ADDR);
3357 if (val & EEPROM_ADDR_COMPLETE)
3361 if (!(val & EEPROM_ADDR_COMPLETE)) {
3370 /* offset and length are dword aligned */
3371 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3375 u32 pagesize = tp->nvram_pagesize;
3376 u32 pagemask = pagesize - 1;
3380 tmp = kmalloc(pagesize, GFP_KERNEL);
3386 u32 phy_addr, page_off, size;
3388 phy_addr = offset & ~pagemask;
3390 for (j = 0; j < pagesize; j += 4) {
3391 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3392 (__be32 *) (tmp + j));
3399 page_off = offset & pagemask;
3406 memcpy(tmp + page_off, buf, size);
3408 offset = offset + (pagesize - page_off);
3410 tg3_enable_nvram_access(tp);
3412 /*
3413 * Before we can erase the flash page, we need
3414 * to issue a special "write enable" command.
3415 */
3416 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 /* Erase the target page */
3422 tw32(NVRAM_ADDR, phy_addr);
3424 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3425 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 /* Issue another write enable to start the write. */
3431 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 for (j = 0; j < pagesize; j += 4) {
3439 data = *((__be32 *) (tmp + j));
3441 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443 tw32(NVRAM_ADDR, phy_addr + j);
3445 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3449 nvram_cmd |= NVRAM_CMD_FIRST;
3450 else if (j == (pagesize - 4))
3451 nvram_cmd |= NVRAM_CMD_LAST;
3453 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3461 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3462 tg3_nvram_exec_cmd(tp, nvram_cmd);
3469 /* offset and length are dword aligned */
3470 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3475 for (i = 0; i < len; i += 4, offset += 4) {
3476 u32 page_off, phy_addr, nvram_cmd;
3479 memcpy(&data, buf + i, 4);
3480 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482 page_off = offset % tp->nvram_pagesize;
3484 phy_addr = tg3_nvram_phys_addr(tp, offset);
3486 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488 if (page_off == 0 || i == 0)
3489 nvram_cmd |= NVRAM_CMD_FIRST;
3490 if (page_off == (tp->nvram_pagesize - 4))
3491 nvram_cmd |= NVRAM_CMD_LAST;
3494 nvram_cmd |= NVRAM_CMD_LAST;
3496 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3497 !tg3_flag(tp, FLASH) ||
3498 !tg3_flag(tp, 57765_PLUS))
3499 tw32(NVRAM_ADDR, phy_addr);
3501 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3502 !tg3_flag(tp, 5755_PLUS) &&
3503 (tp->nvram_jedecnum == JEDEC_ST) &&
3504 (nvram_cmd & NVRAM_CMD_FIRST)) {
3507 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3508 ret = tg3_nvram_exec_cmd(tp, cmd);
3512 if (!tg3_flag(tp, FLASH)) {
3513 /* We always do complete word writes to eeprom. */
3514 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3524 /* offset and length are dword aligned */
3525 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3529 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3530 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3531 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3535 if (!tg3_flag(tp, NVRAM)) {
3536 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3540 ret = tg3_nvram_lock(tp);
3544 tg3_enable_nvram_access(tp);
3545 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3546 tw32(NVRAM_WRITE1, 0x406);
3548 grc_mode = tr32(GRC_MODE);
3549 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3552 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3559 grc_mode = tr32(GRC_MODE);
3560 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562 tg3_disable_nvram_access(tp);
3563 tg3_nvram_unlock(tp);
3566 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3567 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3574 #define RX_CPU_SCRATCH_BASE 0x30000
3575 #define RX_CPU_SCRATCH_SIZE 0x04000
3576 #define TX_CPU_SCRATCH_BASE 0x34000
3577 #define TX_CPU_SCRATCH_SIZE 0x04000
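/* Illustrative note (not from the original source): i.e. two 16 KiB
 * scratch windows, the RX CPU at 0x30000-0x33fff and the TX CPU at
 * 0x34000-0x37fff.
 */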
3579 /* tp->lock is held. */
3580 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 const int iters = 10000;
3585 for (i = 0; i < iters; i++) {
3586 tw32(cpu_base + CPU_STATE, 0xffffffff);
3587 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3588 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590 if (pci_channel_offline(tp->pdev))
3594 return (i == iters) ? -EBUSY : 0;
3597 /* tp->lock is held. */
3598 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3603 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3609 /* tp->lock is held. */
3610 static int tg3_txcpu_pause(struct tg3 *tp)
3612 return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 /* tp->lock is held. */
3616 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 tw32(cpu_base + CPU_STATE, 0xffffffff);
3619 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3622 /* tp->lock is held. */
3623 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 tg3_resume_cpu(tp, RX_CPU_BASE);
3628 /* tp->lock is held. */
3629 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3633 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3636 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641 if (cpu_base == RX_CPU_BASE) {
3642 rc = tg3_rxcpu_pause(tp);
3643 } else {
3644 /*
3645 * There is only an Rx CPU for the 5750 derivative in the
3646 * BCM4785.
3647 */
3648 if (tg3_flag(tp, IS_SSB_CORE))
3651 rc = tg3_txcpu_pause(tp);
3655 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3656 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3660 /* Clear firmware's nvram arbitration. */
3661 if (tg3_flag(tp, NVRAM))
3662 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3666 static int tg3_fw_data_len(struct tg3 *tp,
3667 const struct tg3_firmware_hdr *fw_hdr)
3671 /* Non-fragmented firmware has one firmware header followed by a
3672 * contiguous chunk of data to be written. The length field in that
3673 * header is not the length of data to be written but the complete
3674 * length of the bss. The data length is determined based on
3675 * tp->fw->size minus headers.
3676 *
3677 * Fragmented firmware has a main header followed by multiple
3678 * fragments. Each fragment is identical to non-fragmented firmware
3679 * with a firmware header followed by a contiguous chunk of data. In
3680 * the main header, the length field is unused and set to 0xffffffff.
3681 * In each fragment header the length is the entire size of that
3682 * fragment, i.e. fragment data + header length. The data length is
3683 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3684 */
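/* Illustrative sketch of the two layouts (not from the original source):
 *
 *   non-fragmented:  [hdr | data ..........................]
 *   fragmented:      [main hdr][hdr | data][hdr | data] ...
 *
 * Each hdr is TG3_FW_HDR_LEN bytes; for fragments, hdr->len counts the
 * header plus its own data.
 */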
3685 if (tp->fw_len == 0xffffffff)
3686 fw_len = be32_to_cpu(fw_hdr->len);
3687 else
3688 fw_len = tp->fw->size;
3690 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3693 /* tp->lock is held. */
3694 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3695 u32 cpu_scratch_base, int cpu_scratch_size,
3696 const struct tg3_firmware_hdr *fw_hdr)
3699 void (*write_op)(struct tg3 *, u32, u32);
3700 int total_len = tp->fw->size;
3702 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704 "%s: Trying to load TX cpu firmware which is 5705\n",
3709 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3710 write_op = tg3_write_mem;
3712 write_op = tg3_write_indirect_reg32;
3714 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3715 /* It is possible that bootcode is still loading at this point.
3716 * Get the nvram lock first before halting the cpu.
3717 */
3718 int lock_err = tg3_nvram_lock(tp);
3719 err = tg3_halt_cpu(tp, cpu_base);
3721 tg3_nvram_unlock(tp);
3725 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3726 write_op(tp, cpu_scratch_base + i, 0);
3727 tw32(cpu_base + CPU_STATE, 0xffffffff);
3728 tw32(cpu_base + CPU_MODE,
3729 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731 /* Subtract additional main header for fragmented firmware and
3732 * advance to the first fragment
3733 */
3734 total_len -= TG3_FW_HDR_LEN;
3739 u32 *fw_data = (u32 *)(fw_hdr + 1);
3740 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3741 write_op(tp, cpu_scratch_base +
3742 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 be32_to_cpu(fw_data[i]));
3746 total_len -= be32_to_cpu(fw_hdr->len);
3748 /* Advance to next fragment */
3749 fw_hdr = (struct tg3_firmware_hdr *)
3750 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3751 } while (total_len > 0);
3759 /* tp->lock is held. */
3760 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 const int iters = 5;
3765 tw32(cpu_base + CPU_STATE, 0xffffffff);
3766 tw32_f(cpu_base + CPU_PC, pc);
3768 for (i = 0; i < iters; i++) {
3769 if (tr32(cpu_base + CPU_PC) == pc)
3771 tw32(cpu_base + CPU_STATE, 0xffffffff);
3772 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3773 tw32_f(cpu_base + CPU_PC, pc);
3777 return (i == iters) ? -EBUSY : 0;
3780 /* tp->lock is held. */
3781 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 const struct tg3_firmware_hdr *fw_hdr;
3786 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788 /* Firmware blob starts with version numbers, followed by
3789 start address and length. We are setting complete length.
3790 length = end_address_of_bss - start_address_of_text.
3791 Remainder is the blob to be loaded contiguously
3792 from start address. */
3794 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3795 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3800 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3801 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3806 /* Now startup only the RX cpu. */
3807 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3808 be32_to_cpu(fw_hdr->base_addr));
3810 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3811 "should be %08x\n", __func__,
3812 tr32(RX_CPU_BASE + CPU_PC),
3813 be32_to_cpu(fw_hdr->base_addr));
3817 tg3_rxcpu_resume(tp);
3822 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 const int iters = 1000;
3828 /* Wait for boot code to complete initialization and enter service
3829 * loop. It is then safe to download service patches
3830 */
3831 for (i = 0; i < iters; i++) {
3832 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3839 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3843 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845 netdev_warn(tp->dev,
3846 "Other patches exist. Not downloading EEE patch\n");
3853 /* tp->lock is held. */
3854 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 struct tg3_firmware_hdr *fw_hdr;
3858 if (!tg3_flag(tp, NO_NVRAM))
3859 return;
3861 if (tg3_validate_rxcpu_state(tp))
3867 /* This firmware blob has a different format than older firmware
3868 * releases as given below. The main difference is we have fragmented
3869 * data to be written to non-contiguous locations.
3870 *
3871 * In the beginning we have a firmware header identical to other
3872 * firmware which consists of version, base addr and length. The length
3873 * here is unused and set to 0xffffffff.
3874 *
3875 * This is followed by a series of firmware fragments which are
3876 * individually identical to previous firmware, i.e. they have a
3877 * firmware header followed by the data for that fragment. The version
3878 * field of the individual fragment header is unused.
3879 */
3881 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3882 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885 if (tg3_rxcpu_pause(tp))
3888 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3889 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891 tg3_rxcpu_resume(tp);
3894 /* tp->lock is held. */
3895 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 const struct tg3_firmware_hdr *fw_hdr;
3898 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901 if (!tg3_flag(tp, FW_TSO))
3902 return 0;
3904 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906 /* Firmware blob starts with version numbers, followed by
3907 start address and length. We are setting complete length.
3908 length = end_address_of_bss - start_address_of_text.
3909 Remainder is the blob to be loaded contiguously
3910 from start address. */
3912 cpu_scratch_size = tp->fw_len;
3914 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3915 cpu_base = RX_CPU_BASE;
3916 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918 cpu_base = TX_CPU_BASE;
3919 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3920 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923 err = tg3_load_firmware_cpu(tp, cpu_base,
3924 cpu_scratch_base, cpu_scratch_size,
3929 /* Now startup the cpu. */
3930 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3931 be32_to_cpu(fw_hdr->base_addr));
3934 "%s fails to set CPU PC, is %08x should be %08x\n",
3935 __func__, tr32(cpu_base + CPU_PC),
3936 be32_to_cpu(fw_hdr->base_addr));
3940 tg3_resume_cpu(tp, cpu_base);
3944 /* tp->lock is held. */
3945 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3947 u32 addr_high, addr_low;
3949 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3950 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3951 (mac_addr[4] << 8) | mac_addr[5]);
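/* Illustrative example (not from the original source): for the MAC
 * address 00:10:18:aa:bb:cc this packs addr_high = 0x0010 (first two
 * octets) and addr_low = 0x18aabbcc (remaining four).
 */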
3954 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3955 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3958 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3959 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963 /* tp->lock is held. */
3964 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3969 for (i = 0; i < 4; i++) {
3970 if (i == 1 && skip_mac_1)
3972 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3976 tg3_asic_rev(tp) == ASIC_REV_5704) {
3977 for (i = 4; i < 16; i++)
3978 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3981 addr_high = (tp->dev->dev_addr[0] +
3982 tp->dev->dev_addr[1] +
3983 tp->dev->dev_addr[2] +
3984 tp->dev->dev_addr[3] +
3985 tp->dev->dev_addr[4] +
3986 tp->dev->dev_addr[5]) &
3987 TX_BACKOFF_SEED_MASK;
3988 tw32(MAC_TX_BACKOFF_SEED, addr_high);
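/* Illustrative example (not from the original source): for the MAC
 * address 00:10:18:aa:bb:cc the octet sum is 0x259; masking with
 * TX_BACKOFF_SEED_MASK keeps the low bits so that NICs with different
 * addresses seed their transmit backoff differently.
 */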
3991 static void tg3_enable_register_access(struct tg3 *tp)
3993 /*
3994 * Make sure register accesses (indirect or otherwise) will function
3995 * correctly.
3996 */
3997 pci_write_config_dword(tp->pdev,
3998 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4001 static int tg3_power_up(struct tg3 *tp)
4005 tg3_enable_register_access(tp);
4007 err = pci_set_power_state(tp->pdev, PCI_D0);
4009 /* Switch out of Vaux if it is a NIC */
4010 tg3_pwrsrc_switch_to_vmain(tp);
4012 netdev_err(tp->dev, "Transition to D0 failed\n");
4018 static int tg3_setup_phy(struct tg3 *, bool);
4020 static int tg3_power_down_prepare(struct tg3 *tp)
4023 bool device_should_wake, do_low_power;
4025 tg3_enable_register_access(tp);
4027 /* Restore the CLKREQ setting. */
4028 if (tg3_flag(tp, CLKREQ_BUG))
4029 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4030 PCI_EXP_LNKCTL_CLKREQ_EN);
4032 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4033 tw32(TG3PCI_MISC_HOST_CTRL,
4034 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4036 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4037 tg3_flag(tp, WOL_ENABLE);
4039 if (tg3_flag(tp, USE_PHYLIB)) {
4040 do_low_power = false;
4041 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4042 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4043 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4044 struct phy_device *phydev;
4047 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4049 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4051 tp->link_config.speed = phydev->speed;
4052 tp->link_config.duplex = phydev->duplex;
4053 tp->link_config.autoneg = phydev->autoneg;
4054 ethtool_convert_link_mode_to_legacy_u32(
4055 &tp->link_config.advertising,
4056 phydev->advertising);
4058 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4059 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4063 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4066 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4067 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4068 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4072 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4075 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4080 linkmode_copy(phydev->advertising, advertising);
4081 phy_start_aneg(phydev);
4083 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4084 if (phyid != PHY_ID_BCMAC131) {
4085 phyid &= PHY_BCM_OUI_MASK;
4086 if (phyid == PHY_BCM_OUI_1 ||
4087 phyid == PHY_BCM_OUI_2 ||
4088 phyid == PHY_BCM_OUI_3)
4089 do_low_power = true;
4093 do_low_power = true;
4095 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4096 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4098 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4099 tg3_setup_phy(tp, false);
4102 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105 val = tr32(GRC_VCPU_EXT_CTRL);
4106 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4107 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4111 for (i = 0; i < 200; i++) {
4112 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4113 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4118 if (tg3_flag(tp, WOL_CAP))
4119 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4120 WOL_DRV_STATE_SHUTDOWN |
4124 if (device_should_wake) {
4127 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4129 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4130 tg3_phy_auxctl_write(tp,
4131 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4132 MII_TG3_AUXCTL_PCTL_WOL_EN |
4133 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4134 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4138 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4139 mac_mode = MAC_MODE_PORT_MODE_GMII;
4140 else if (tp->phy_flags &
4141 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4142 if (tp->link_config.active_speed == SPEED_1000)
4143 mac_mode = MAC_MODE_PORT_MODE_GMII;
4145 mac_mode = MAC_MODE_PORT_MODE_MII;
4147 mac_mode = MAC_MODE_PORT_MODE_MII;
4149 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4150 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4151 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4152 SPEED_100 : SPEED_10;
4153 if (tg3_5700_link_polarity(tp, speed))
4154 mac_mode |= MAC_MODE_LINK_POLARITY;
4155 else
4156 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159 mac_mode = MAC_MODE_PORT_MODE_TBI;
4162 if (!tg3_flag(tp, 5750_PLUS))
4163 tw32(MAC_LED_CTRL, tp->led_ctrl);
4165 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4166 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4167 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4168 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4170 if (tg3_flag(tp, ENABLE_APE))
4171 mac_mode |= MAC_MODE_APE_TX_EN |
4172 MAC_MODE_APE_RX_EN |
4173 MAC_MODE_TDE_ENABLE;
4175 tw32_f(MAC_MODE, mac_mode);
4178 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4182 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4183 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4184 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187 base_val = tp->pci_clock_ctrl;
4188 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4189 CLOCK_CTRL_TXCLK_DISABLE);
4191 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4192 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4193 } else if (tg3_flag(tp, 5780_CLASS) ||
4194 tg3_flag(tp, CPMU_PRESENT) ||
4195 tg3_asic_rev(tp) == ASIC_REV_5906) {
4196 /* do nothing */
4197 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4198 u32 newbits1, newbits2;
4200 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4201 tg3_asic_rev(tp) == ASIC_REV_5701) {
4202 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4203 CLOCK_CTRL_TXCLK_DISABLE |
4205 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4206 } else if (tg3_flag(tp, 5705_PLUS)) {
4207 newbits1 = CLOCK_CTRL_625_CORE;
4208 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4210 newbits1 = CLOCK_CTRL_ALTCLK;
4211 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220 if (!tg3_flag(tp, 5705_PLUS)) {
4223 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4224 tg3_asic_rev(tp) == ASIC_REV_5701) {
4225 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4226 CLOCK_CTRL_TXCLK_DISABLE |
4227 CLOCK_CTRL_44MHZ_CORE);
4229 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4233 tp->pci_clock_ctrl | newbits3, 40);
4237 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4238 tg3_power_down_phy(tp, do_low_power);
4240 tg3_frob_aux_power(tp, true);
4242 /* Workaround for unstable PLL clock */
4243 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4244 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4245 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4246 u32 val = tr32(0x7d00);
4248 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4250 if (!tg3_flag(tp, ENABLE_ASF)) {
4253 err = tg3_nvram_lock(tp);
4254 tg3_halt_cpu(tp, RX_CPU_BASE);
4255 if (!err)
4256 tg3_nvram_unlock(tp);
4260 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4262 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4267 static void tg3_power_down(struct tg3 *tp)
4269 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4270 pci_set_power_state(tp->pdev, PCI_D3hot);
4273 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4275 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4276 case MII_TG3_AUX_STAT_10HALF:
4278 *duplex = DUPLEX_HALF;
4281 case MII_TG3_AUX_STAT_10FULL:
4283 *duplex = DUPLEX_FULL;
4286 case MII_TG3_AUX_STAT_100HALF:
4288 *duplex = DUPLEX_HALF;
4291 case MII_TG3_AUX_STAT_100FULL:
4293 *duplex = DUPLEX_FULL;
4296 case MII_TG3_AUX_STAT_1000HALF:
4297 *speed = SPEED_1000;
4298 *duplex = DUPLEX_HALF;
4301 case MII_TG3_AUX_STAT_1000FULL:
4302 *speed = SPEED_1000;
4303 *duplex = DUPLEX_FULL;
4307 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4308 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4309 SPEED_10;
4310 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4311 DUPLEX_HALF;
4314 *speed = SPEED_UNKNOWN;
4315 *duplex = DUPLEX_UNKNOWN;
4320 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4325 new_adv = ADVERTISE_CSMA;
4326 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4327 new_adv |= mii_advertise_flowctrl(flowctrl);
4329 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4333 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4334 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4336 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4337 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4338 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4340 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4345 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348 tw32(TG3_CPMU_EEE_MODE,
4349 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4351 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4356 /* Advertise 100-BaseTX EEE ability */
4357 if (advertise & ADVERTISED_100baseT_Full)
4358 val |= MDIO_AN_EEE_ADV_100TX;
4359 /* Advertise 1000-BaseT EEE ability */
4360 if (advertise & ADVERTISED_1000baseT_Full)
4361 val |= MDIO_AN_EEE_ADV_1000T;
4363 if (!tp->eee.eee_enabled) {
4364 val = 0;
4365 tp->eee.advertised = 0;
4366 } else {
4367 tp->eee.advertised = advertise &
4368 (ADVERTISED_100baseT_Full |
4369 ADVERTISED_1000baseT_Full);
4372 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4376 switch (tg3_asic_rev(tp)) {
4378 case ASIC_REV_57765:
4379 case ASIC_REV_57766:
4381 /* If we advertised any eee advertisements above... */
4383 val = MII_TG3_DSP_TAP26_ALNOKO |
4384 MII_TG3_DSP_TAP26_RMRXSTO |
4385 MII_TG3_DSP_TAP26_OPCSINPT;
4386 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4390 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4391 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4392 MII_TG3_DSP_CH34TP2_HIBW01);
4395 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4404 static void tg3_phy_copper_begin(struct tg3 *tp)
4406 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4407 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4410 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4411 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4412 adv = ADVERTISED_10baseT_Half |
4413 ADVERTISED_10baseT_Full;
4414 if (tg3_flag(tp, WOL_SPEED_100MB))
4415 adv |= ADVERTISED_100baseT_Half |
4416 ADVERTISED_100baseT_Full;
4417 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4418 if (!(tp->phy_flags &
4419 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4420 adv |= ADVERTISED_1000baseT_Half;
4421 adv |= ADVERTISED_1000baseT_Full;
4424 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4426 adv = tp->link_config.advertising;
4427 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4428 adv &= ~(ADVERTISED_1000baseT_Half |
4429 ADVERTISED_1000baseT_Full);
4431 fc = tp->link_config.flowctrl;
4434 tg3_phy_autoneg_cfg(tp, adv, fc);
4436 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4437 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4438 /* Normally during power down we want to autonegotiate
4439 * the lowest possible speed for WOL. However, to avoid
4440 * link flap, we leave it untouched.
4441 */
4445 tg3_writephy(tp, MII_BMCR,
4446 BMCR_ANENABLE | BMCR_ANRESTART);
4449 u32 bmcr, orig_bmcr;
4451 tp->link_config.active_speed = tp->link_config.speed;
4452 tp->link_config.active_duplex = tp->link_config.duplex;
4454 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4455 /* With autoneg disabled, 5715 only links up when the
4456 * advertisement register has the configured speed
4457 * enabled.
4458 */
4459 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4463 switch (tp->link_config.speed) {
4469 bmcr |= BMCR_SPEED100;
4473 bmcr |= BMCR_SPEED1000;
4477 if (tp->link_config.duplex == DUPLEX_FULL)
4478 bmcr |= BMCR_FULLDPLX;
4480 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4481 (bmcr != orig_bmcr)) {
4482 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4483 for (i = 0; i < 1500; i++) {
4487 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4488 tg3_readphy(tp, MII_BMSR, &tmp))
4490 if (!(tmp & BMSR_LSTATUS)) {
4495 tg3_writephy(tp, MII_BMCR, bmcr);
4501 static int tg3_phy_pull_config(struct tg3 *tp)
4506 err = tg3_readphy(tp, MII_BMCR, &val);
4510 if (!(val & BMCR_ANENABLE)) {
4511 tp->link_config.autoneg = AUTONEG_DISABLE;
4512 tp->link_config.advertising = 0;
4513 tg3_flag_clear(tp, PAUSE_AUTONEG);
4517 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4519 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522 tp->link_config.speed = SPEED_10;
4525 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4528 tp->link_config.speed = SPEED_100;
4530 case BMCR_SPEED1000:
4531 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4532 tp->link_config.speed = SPEED_1000;
4540 if (val & BMCR_FULLDPLX)
4541 tp->link_config.duplex = DUPLEX_FULL;
4543 tp->link_config.duplex = DUPLEX_HALF;
4545 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4551 tp->link_config.autoneg = AUTONEG_ENABLE;
4552 tp->link_config.advertising = ADVERTISED_Autoneg;
4553 tg3_flag_set(tp, PAUSE_AUTONEG);
4555 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4558 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4562 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4563 tp->link_config.advertising |= adv | ADVERTISED_TP;
4565 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4567 tp->link_config.advertising |= ADVERTISED_FIBRE;
4570 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4573 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4574 err = tg3_readphy(tp, MII_CTRL1000, &val);
4578 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4580 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4584 adv = tg3_decode_flowctrl_1000X(val);
4585 tp->link_config.flowctrl = adv;
4587 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4588 adv = mii_adv_to_ethtool_adv_x(val);
4591 tp->link_config.advertising |= adv;
4598 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4602 /* Turn off tap power management. */
4603 /* Set Extended packet length bit */
4604 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4606 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4607 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4608 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4609 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4610 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4617 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4619 struct ethtool_eee eee;
4621 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4624 tg3_eee_pull_config(tp, &eee);
4626 if (tp->eee.eee_enabled) {
4627 if (tp->eee.advertised != eee.advertised ||
4628 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4629 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4632 /* EEE is disabled but we're advertising */
4640 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4642 u32 advmsk, tgtadv, advertising;
4644 advertising = tp->link_config.advertising;
4645 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4647 advmsk = ADVERTISE_ALL;
4648 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4649 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4650 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4653 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4656 if ((*lcladv & advmsk) != tgtadv)
4659 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4662 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4664 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4668 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4669 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4670 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4671 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4672 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4674 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4677 if (tg3_ctrl != tgtadv)
4684 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4688 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4691 if (tg3_readphy(tp, MII_STAT1000, &val))
4694 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4697 if (tg3_readphy(tp, MII_LPA, rmtadv))
4700 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4701 tp->link_config.rmt_adv = lpeth;
4706 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4708 if (curr_link_up != tp->link_up) {
4710 netif_carrier_on(tp->dev);
4712 netif_carrier_off(tp->dev);
4713 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4714 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4717 tg3_link_report(tp);
4724 static void tg3_clear_mac_status(struct tg3 *tp)
4729 MAC_STATUS_SYNC_CHANGED |
4730 MAC_STATUS_CFG_CHANGED |
4731 MAC_STATUS_MI_COMPLETION |
4732 MAC_STATUS_LNKSTATE_CHANGED);
4736 static void tg3_setup_eee(struct tg3 *tp)
4740 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4741 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4742 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4743 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4745 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4747 tw32_f(TG3_CPMU_EEE_CTRL,
4748 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4750 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4751 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4752 TG3_CPMU_EEEMD_LPI_IN_RX |
4753 TG3_CPMU_EEEMD_EEE_ENABLE;
4755 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4756 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4758 if (tg3_flag(tp, ENABLE_APE))
4759 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4761 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4763 tw32_f(TG3_CPMU_EEE_DBTMR1,
4764 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4765 (tp->eee.tx_lpi_timer & 0xffff));
4767 tw32_f(TG3_CPMU_EEE_DBTMR2,
4768 TG3_CPMU_DBTMR2_APE_TX_2047US |
4769 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4772 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4774 bool current_link_up;
4776 u32 lcl_adv, rmt_adv;
4781 tg3_clear_mac_status(tp);
4783 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4785 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4789 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4791 /* Some third-party PHYs need to be reset on link going
4792 * down.
4793 */
4794 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4795 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4796 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4798 tg3_readphy(tp, MII_BMSR, &bmsr);
4799 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4800 !(bmsr & BMSR_LSTATUS))
4806 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4807 tg3_readphy(tp, MII_BMSR, &bmsr);
4808 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4809 !tg3_flag(tp, INIT_COMPLETE))
4812 if (!(bmsr & BMSR_LSTATUS)) {
4813 err = tg3_init_5401phy_dsp(tp);
4817 tg3_readphy(tp, MII_BMSR, &bmsr);
4818 for (i = 0; i < 1000; i++) {
4820 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4821 (bmsr & BMSR_LSTATUS)) {
4827 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4828 TG3_PHY_REV_BCM5401_B0 &&
4829 !(bmsr & BMSR_LSTATUS) &&
4830 tp->link_config.active_speed == SPEED_1000) {
4831 err = tg3_phy_reset(tp);
4833 err = tg3_init_5401phy_dsp(tp);
4838 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4839 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4840 /* 5701 {A0,B0} CRC bug workaround */
4841 tg3_writephy(tp, 0x15, 0x0a75);
4842 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4843 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4847 /* Clear pending interrupts... */
4848 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4849 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4852 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4853 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4854 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4856 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4857 tg3_asic_rev(tp) == ASIC_REV_5701) {
4858 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4859 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4860 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4862 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4865 current_link_up = false;
4866 current_speed = SPEED_UNKNOWN;
4867 current_duplex = DUPLEX_UNKNOWN;
4868 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4869 tp->link_config.rmt_adv = 0;
4871 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4872 err = tg3_phy_auxctl_read(tp,
4873 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4874 &val);
4875 if (!err && !(val & (1 << 10))) {
4876 tg3_phy_auxctl_write(tp,
4877 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4878 val | (1 << 10));
4884 for (i = 0; i < 100; i++) {
4885 tg3_readphy(tp, MII_BMSR, &bmsr);
4886 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4887 (bmsr & BMSR_LSTATUS))
4892 if (bmsr & BMSR_LSTATUS) {
4895 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4896 for (i = 0; i < 2000; i++) {
4898 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4899 aux_stat)
4900 break;
4903 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4904 &current_speed,
4905 &current_duplex);
4908 for (i = 0; i < 200; i++) {
4909 tg3_readphy(tp, MII_BMCR, &bmcr);
4910 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4912 if (bmcr && bmcr != 0x7fff)
4920 tp->link_config.active_speed = current_speed;
4921 tp->link_config.active_duplex = current_duplex;
4923 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4924 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4926 if ((bmcr & BMCR_ANENABLE) &&
4927 eee_config_ok &&
4928 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4929 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4930 current_link_up = true;
4932 /* EEE settings changes take effect only after a phy
4933 * reset. If we have skipped a reset due to Link Flap
4934 * Avoidance being enabled, do it now.
4936 if (!eee_config_ok &&
4937 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4938 !force_reset) {
4939 tg3_setup_eee(tp);
4940 tg3_phy_reset(tp);
4941 }
4943 if (!(bmcr & BMCR_ANENABLE) &&
4944 tp->link_config.speed == current_speed &&
4945 tp->link_config.duplex == current_duplex) {
4946 current_link_up = true;
4950 if (current_link_up &&
4951 tp->link_config.active_duplex == DUPLEX_FULL) {
4954 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4955 reg = MII_TG3_FET_GEN_STAT;
4956 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4958 reg = MII_TG3_EXT_STAT;
4959 bit = MII_TG3_EXT_STAT_MDIX;
4962 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4963 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4965 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4970 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4971 tg3_phy_copper_begin(tp);
4973 if (tg3_flag(tp, ROBOSWITCH)) {
4974 current_link_up = true;
4975 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4976 current_speed = SPEED_1000;
4977 current_duplex = DUPLEX_FULL;
4978 tp->link_config.active_speed = current_speed;
4979 tp->link_config.active_duplex = current_duplex;
4982 tg3_readphy(tp, MII_BMSR, &bmsr);
4983 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4984 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4985 current_link_up = true;
4988 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4989 if (current_link_up) {
4990 if (tp->link_config.active_speed == SPEED_100 ||
4991 tp->link_config.active_speed == SPEED_10)
4992 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4995 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4996 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4998 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5000 /* In order for the 5750 core in the BCM4785 chip to work properly
5001 * in RGMII mode, the LED Control Register must be set up.
5002 */
5003 if (tg3_flag(tp, RGMII_MODE)) {
5004 u32 led_ctrl = tr32(MAC_LED_CTRL);
5005 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5007 if (tp->link_config.active_speed == SPEED_10)
5008 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5009 else if (tp->link_config.active_speed == SPEED_100)
5010 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5011 LED_CTRL_100MBPS_ON);
5012 else if (tp->link_config.active_speed == SPEED_1000)
5013 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5014 LED_CTRL_1000MBPS_ON);
5016 tw32(MAC_LED_CTRL, led_ctrl);
5020 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5021 if (tp->link_config.active_duplex == DUPLEX_HALF)
5022 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5024 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5025 if (current_link_up &&
5026 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5027 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5029 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5032 /* ??? Without this setting Netgear GA302T PHY does not
5033 * ??? send/receive packets...
5035 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5036 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5037 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5038 tw32_f(MAC_MI_MODE, tp->mi_mode);
5042 tw32_f(MAC_MODE, tp->mac_mode);
5045 tg3_phy_eee_adjust(tp, current_link_up);
5047 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5048 /* Polled via timer. */
5049 tw32_f(MAC_EVENT, 0);
5051 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5055 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5056 current_link_up &&
5057 tp->link_config.active_speed == SPEED_1000 &&
5058 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5060 tw32_f(MAC_STATUS,
5061 (MAC_STATUS_SYNC_CHANGED |
5062 MAC_STATUS_CFG_CHANGED));
5064 tg3_write_mem(tp,
5065 NIC_SRAM_FIRMWARE_MBOX,
5066 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5069 /* Prevent send BD corruption. */
5070 if (tg3_flag(tp, CLKREQ_BUG)) {
5071 if (tp->link_config.active_speed == SPEED_100 ||
5072 tp->link_config.active_speed == SPEED_10)
5073 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5074 PCI_EXP_LNKCTL_CLKREQ_EN);
5076 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5077 PCI_EXP_LNKCTL_CLKREQ_EN);
5080 tg3_test_and_report_link_chg(tp, current_link_up);
5085 struct tg3_fiber_aneginfo {
5087 #define ANEG_STATE_UNKNOWN 0
5088 #define ANEG_STATE_AN_ENABLE 1
5089 #define ANEG_STATE_RESTART_INIT 2
5090 #define ANEG_STATE_RESTART 3
5091 #define ANEG_STATE_DISABLE_LINK_OK 4
5092 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5093 #define ANEG_STATE_ABILITY_DETECT 6
5094 #define ANEG_STATE_ACK_DETECT_INIT 7
5095 #define ANEG_STATE_ACK_DETECT 8
5096 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5097 #define ANEG_STATE_COMPLETE_ACK 10
5098 #define ANEG_STATE_IDLE_DETECT_INIT 11
5099 #define ANEG_STATE_IDLE_DETECT 12
5100 #define ANEG_STATE_LINK_OK 13
5101 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5102 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5105 #define MR_AN_ENABLE 0x00000001
5106 #define MR_RESTART_AN 0x00000002
5107 #define MR_AN_COMPLETE 0x00000004
5108 #define MR_PAGE_RX 0x00000008
5109 #define MR_NP_LOADED 0x00000010
5110 #define MR_TOGGLE_TX 0x00000020
5111 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5112 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5113 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5114 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5115 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5116 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5117 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5118 #define MR_TOGGLE_RX 0x00002000
5119 #define MR_NP_RX 0x00004000
5121 #define MR_LINK_OK 0x80000000
5123 unsigned long link_time, cur_time;
5125 u32 ability_match_cfg;
5126 int ability_match_count;
5128 char ability_match, idle_match, ack_match;
5130 u32 txconfig, rxconfig;
5131 #define ANEG_CFG_NP 0x00000080
5132 #define ANEG_CFG_ACK 0x00000040
5133 #define ANEG_CFG_RF2 0x00000020
5134 #define ANEG_CFG_RF1 0x00000010
5135 #define ANEG_CFG_PS2 0x00000001
5136 #define ANEG_CFG_PS1 0x00008000
5137 #define ANEG_CFG_HD 0x00004000
5138 #define ANEG_CFG_FD 0x00002000
5139 #define ANEG_CFG_INVAL 0x00001f06
5144 #define ANEG_TIMER_ENAB 2
5145 #define ANEG_FAILED -1
5147 #define ANEG_STATE_SETTLE_TIME 10000
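/* For orientation (derived from the switch below, not from the original
 * comments): with a cooperating link partner, the happy path through
 * the state machine is roughly
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * Timed states return ANEG_TIMER_ENAB and are re-entered until the
 * peer's config words have been stable for ANEG_STATE_SETTLE_TIME
 * ticks of ap->cur_time.
 */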
5149 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5150 struct tg3_fiber_aneginfo *ap)
5153 unsigned long delta;
5157 if (ap->state == ANEG_STATE_UNKNOWN) {
5161 ap->ability_match_cfg = 0;
5162 ap->ability_match_count = 0;
5163 ap->ability_match = 0;
5169 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5170 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5172 if (rx_cfg_reg != ap->ability_match_cfg) {
5173 ap->ability_match_cfg = rx_cfg_reg;
5174 ap->ability_match = 0;
5175 ap->ability_match_count = 0;
5177 if (++ap->ability_match_count > 1) {
5178 ap->ability_match = 1;
5179 ap->ability_match_cfg = rx_cfg_reg;
5182 if (rx_cfg_reg & ANEG_CFG_ACK)
5190 ap->ability_match_cfg = 0;
5191 ap->ability_match_count = 0;
5192 ap->ability_match = 0;
5198 ap->rxconfig = rx_cfg_reg;
5201 switch (ap->state) {
5202 case ANEG_STATE_UNKNOWN:
5203 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5204 ap->state = ANEG_STATE_AN_ENABLE;
5207 case ANEG_STATE_AN_ENABLE:
5208 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5209 if (ap->flags & MR_AN_ENABLE) {
5212 ap->ability_match_cfg = 0;
5213 ap->ability_match_count = 0;
5214 ap->ability_match = 0;
5218 ap->state = ANEG_STATE_RESTART_INIT;
5220 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5224 case ANEG_STATE_RESTART_INIT:
5225 ap->link_time = ap->cur_time;
5226 ap->flags &= ~(MR_NP_LOADED);
5228 tw32(MAC_TX_AUTO_NEG, 0);
5229 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5230 tw32_f(MAC_MODE, tp->mac_mode);
5233 ret = ANEG_TIMER_ENAB;
5234 ap->state = ANEG_STATE_RESTART;
5237 case ANEG_STATE_RESTART:
5238 delta = ap->cur_time - ap->link_time;
5239 if (delta > ANEG_STATE_SETTLE_TIME)
5240 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5242 ret = ANEG_TIMER_ENAB;
5245 case ANEG_STATE_DISABLE_LINK_OK:
5249 case ANEG_STATE_ABILITY_DETECT_INIT:
5250 ap->flags &= ~(MR_TOGGLE_TX);
5251 ap->txconfig = ANEG_CFG_FD;
5252 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5253 if (flowctrl & ADVERTISE_1000XPAUSE)
5254 ap->txconfig |= ANEG_CFG_PS1;
5255 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5256 ap->txconfig |= ANEG_CFG_PS2;
5257 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5258 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5259 tw32_f(MAC_MODE, tp->mac_mode);
5262 ap->state = ANEG_STATE_ABILITY_DETECT;
5265 case ANEG_STATE_ABILITY_DETECT:
5266 if (ap->ability_match != 0 && ap->rxconfig != 0)
5267 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5270 case ANEG_STATE_ACK_DETECT_INIT:
5271 ap->txconfig |= ANEG_CFG_ACK;
5272 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5273 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5274 tw32_f(MAC_MODE, tp->mac_mode);
5277 ap->state = ANEG_STATE_ACK_DETECT;
5280 case ANEG_STATE_ACK_DETECT:
5281 if (ap->ack_match != 0) {
5282 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5283 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5284 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5286 ap->state = ANEG_STATE_AN_ENABLE;
5288 } else if (ap->ability_match != 0 &&
5289 ap->rxconfig == 0) {
5290 ap->state = ANEG_STATE_AN_ENABLE;
5294 case ANEG_STATE_COMPLETE_ACK_INIT:
5295 if (ap->rxconfig & ANEG_CFG_INVAL) {
5299 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5300 MR_LP_ADV_HALF_DUPLEX |
5301 MR_LP_ADV_SYM_PAUSE |
5302 MR_LP_ADV_ASYM_PAUSE |
5303 MR_LP_ADV_REMOTE_FAULT1 |
5304 MR_LP_ADV_REMOTE_FAULT2 |
5305 MR_LP_ADV_NEXT_PAGE |
5306 MR_TOGGLE_RX |
5307 MR_NP_RX);
5308 if (ap->rxconfig & ANEG_CFG_FD)
5309 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5310 if (ap->rxconfig & ANEG_CFG_HD)
5311 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5312 if (ap->rxconfig & ANEG_CFG_PS1)
5313 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5314 if (ap->rxconfig & ANEG_CFG_PS2)
5315 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5316 if (ap->rxconfig & ANEG_CFG_RF1)
5317 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5318 if (ap->rxconfig & ANEG_CFG_RF2)
5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5320 if (ap->rxconfig & ANEG_CFG_NP)
5321 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5323 ap->link_time = ap->cur_time;
5325 ap->flags ^= (MR_TOGGLE_TX);
5326 if (ap->rxconfig & 0x0008)
5327 ap->flags |= MR_TOGGLE_RX;
5328 if (ap->rxconfig & ANEG_CFG_NP)
5329 ap->flags |= MR_NP_RX;
5330 ap->flags |= MR_PAGE_RX;
5332 ap->state = ANEG_STATE_COMPLETE_ACK;
5333 ret = ANEG_TIMER_ENAB;
5336 case ANEG_STATE_COMPLETE_ACK:
5337 if (ap->ability_match != 0 &&
5338 ap->rxconfig == 0) {
5339 ap->state = ANEG_STATE_AN_ENABLE;
5342 delta = ap->cur_time - ap->link_time;
5343 if (delta > ANEG_STATE_SETTLE_TIME) {
5344 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5345 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5347 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5348 !(ap->flags & MR_NP_RX)) {
5349 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5357 case ANEG_STATE_IDLE_DETECT_INIT:
5358 ap->link_time = ap->cur_time;
5359 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5360 tw32_f(MAC_MODE, tp->mac_mode);
5363 ap->state = ANEG_STATE_IDLE_DETECT;
5364 ret = ANEG_TIMER_ENAB;
5367 case ANEG_STATE_IDLE_DETECT:
5368 if (ap->ability_match != 0 &&
5369 ap->rxconfig == 0) {
5370 ap->state = ANEG_STATE_AN_ENABLE;
5373 delta = ap->cur_time - ap->link_time;
5374 if (delta > ANEG_STATE_SETTLE_TIME) {
5375 /* XXX another gem from the Broadcom driver :( */
5376 ap->state = ANEG_STATE_LINK_OK;
5380 case ANEG_STATE_LINK_OK:
5381 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5385 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5386 /* ??? unimplemented */
5389 case ANEG_STATE_NEXT_PAGE_WAIT:
5390 /* ??? unimplemented */
5401 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5404 struct tg3_fiber_aneginfo aninfo;
5405 int status = ANEG_FAILED;
5409 tw32_f(MAC_TX_AUTO_NEG, 0);
5411 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5412 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5415 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5418 memset(&aninfo, 0, sizeof(aninfo));
5419 aninfo.flags |= MR_AN_ENABLE;
5420 aninfo.state = ANEG_STATE_UNKNOWN;
5421 aninfo.cur_time = 0;
5423 while (++tick < 195000) {
5424 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5425 if (status == ANEG_DONE || status == ANEG_FAILED)
5431 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5432 tw32_f(MAC_MODE, tp->mac_mode);
5435 *txflags = aninfo.txconfig;
5436 *rxflags = aninfo.flags;
5438 if (status == ANEG_DONE &&
5439 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5440 MR_LP_ADV_FULL_DUPLEX)))
5446 static void tg3_init_bcm8002(struct tg3 *tp)
5448 u32 mac_status = tr32(MAC_STATUS);
5451 /* Reset when initializing for the first time or when we have a link. */
5452 if (tg3_flag(tp, INIT_COMPLETE) &&
5453 !(mac_status & MAC_STATUS_PCS_SYNCED))
5456 /* Set PLL lock range. */
5457 tg3_writephy(tp, 0x16, 0x8007);
5460 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5462 /* Wait for reset to complete. */
5463 /* XXX schedule_timeout() ... */
5464 for (i = 0; i < 500; i++)
5467 /* Config mode; select PMA/Ch 1 regs. */
5468 tg3_writephy(tp, 0x10, 0x8411);
5470 /* Enable auto-lock and comdet, select txclk for tx. */
5471 tg3_writephy(tp, 0x11, 0x0a10);
5473 tg3_writephy(tp, 0x18, 0x00a0);
5474 tg3_writephy(tp, 0x16, 0x41ff);
5476 /* Assert and deassert POR. */
5477 tg3_writephy(tp, 0x13, 0x0400);
5479 tg3_writephy(tp, 0x13, 0x0000);
5481 tg3_writephy(tp, 0x11, 0x0a50);
5483 tg3_writephy(tp, 0x11, 0x0a10);
5485 /* Wait for signal to stabilize */
5486 /* XXX schedule_timeout() ... */
5487 for (i = 0; i < 15000; i++)
5490 /* Deselect the channel register so we can read the PHYID
5491 * later.
5492 */
5493 tg3_writephy(tp, 0x10, 0x8011);
5496 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5499 bool current_link_up;
5500 u32 sg_dig_ctrl, sg_dig_status;
5501 u32 serdes_cfg, expected_sg_dig_ctrl;
5502 int workaround, port_a;
5505 expected_sg_dig_ctrl = 0;
5508 current_link_up = false;
5510 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5511 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5513 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5516 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5517 /* preserve bits 20-23 for voltage regulator */
5518 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
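	/* The 0x00f06fff mask decodes as bits 0-11 (0x00000fff) plus
	 * bits 13-14 (0x00006000) plus bits 20-23 (0x00f00000),
	 * matching the two "preserve" comments above.
	 */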
5521 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5523 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5524 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5526 u32 val = serdes_cfg;
5532 tw32_f(MAC_SERDES_CFG, val);
5535 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5537 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5538 tg3_setup_flow_control(tp, 0, 0);
5539 current_link_up = true;
5544 /* Want auto-negotiation. */
5545 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5547 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548 if (flowctrl & ADVERTISE_1000XPAUSE)
5549 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5550 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5551 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5553 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5554 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5555 tp->serdes_counter &&
5556 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5557 MAC_STATUS_RCVD_CFG)) ==
5558 MAC_STATUS_PCS_SYNCED)) {
5559 tp->serdes_counter--;
5560 current_link_up = true;
5565 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5566 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5568 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5570 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5571 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5572 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5573 MAC_STATUS_SIGNAL_DET)) {
5574 sg_dig_status = tr32(SG_DIG_STATUS);
5575 mac_status = tr32(MAC_STATUS);
5577 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5578 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5579 u32 local_adv = 0, remote_adv = 0;
5581 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5582 local_adv |= ADVERTISE_1000XPAUSE;
5583 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5584 local_adv |= ADVERTISE_1000XPSE_ASYM;
5586 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5587 remote_adv |= LPA_1000XPAUSE;
5588 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5589 remote_adv |= LPA_1000XPAUSE_ASYM;
5591 tp->link_config.rmt_adv =
5592 mii_adv_to_ethtool_adv_x(remote_adv);
5594 tg3_setup_flow_control(tp, local_adv, remote_adv);
5595 current_link_up = true;
5596 tp->serdes_counter = 0;
5597 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5598 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5599 if (tp->serdes_counter)
5600 tp->serdes_counter--;
5603 u32 val = serdes_cfg;
5610 tw32_f(MAC_SERDES_CFG, val);
5613 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5616 /* Link parallel detection - link is up */
5617 /* only if we have PCS_SYNC and not */
5618 /* receiving config code words */
5619 mac_status = tr32(MAC_STATUS);
5620 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5621 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5622 tg3_setup_flow_control(tp, 0, 0);
5623 current_link_up = true;
5624 tp->phy_flags |=
5625 TG3_PHYFLG_PARALLEL_DETECT;
5626 tp->serdes_counter =
5627 SERDES_PARALLEL_DET_TIMEOUT;
5629 goto restart_autoneg;
5633 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5634 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5638 return current_link_up;
5641 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5643 bool current_link_up = false;
5645 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5648 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5649 u32 txflags, rxflags;
5652 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5653 u32 local_adv = 0, remote_adv = 0;
5655 if (txflags & ANEG_CFG_PS1)
5656 local_adv |= ADVERTISE_1000XPAUSE;
5657 if (txflags & ANEG_CFG_PS2)
5658 local_adv |= ADVERTISE_1000XPSE_ASYM;
5660 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5661 remote_adv |= LPA_1000XPAUSE;
5662 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5663 remote_adv |= LPA_1000XPAUSE_ASYM;
5665 tp->link_config.rmt_adv =
5666 mii_adv_to_ethtool_adv_x(remote_adv);
5668 tg3_setup_flow_control(tp, local_adv, remote_adv);
5670 current_link_up = true;
5672 for (i = 0; i < 30; i++) {
5674 tw32_f(MAC_STATUS,
5675 (MAC_STATUS_SYNC_CHANGED |
5676 MAC_STATUS_CFG_CHANGED));
5678 if ((tr32(MAC_STATUS) &
5679 (MAC_STATUS_SYNC_CHANGED |
5680 MAC_STATUS_CFG_CHANGED)) == 0)
5684 mac_status = tr32(MAC_STATUS);
5685 if (!current_link_up &&
5686 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5687 !(mac_status & MAC_STATUS_RCVD_CFG))
5688 current_link_up = true;
5690 tg3_setup_flow_control(tp, 0, 0);
5692 /* Forcing 1000FD link up. */
5693 current_link_up = true;
5695 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5698 tw32_f(MAC_MODE, tp->mac_mode);
5703 return current_link_up;
5706 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5709 u32 orig_active_speed;
5710 u8 orig_active_duplex;
5712 bool current_link_up;
5715 orig_pause_cfg = tp->link_config.active_flowctrl;
5716 orig_active_speed = tp->link_config.active_speed;
5717 orig_active_duplex = tp->link_config.active_duplex;
5719 if (!tg3_flag(tp, HW_AUTONEG) &&
5720 tp->link_up &&
5721 tg3_flag(tp, INIT_COMPLETE)) {
5722 mac_status = tr32(MAC_STATUS);
5723 mac_status &= (MAC_STATUS_PCS_SYNCED |
5724 MAC_STATUS_SIGNAL_DET |
5725 MAC_STATUS_CFG_CHANGED |
5726 MAC_STATUS_RCVD_CFG);
5727 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5728 MAC_STATUS_SIGNAL_DET)) {
5729 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5730 MAC_STATUS_CFG_CHANGED));
5735 tw32_f(MAC_TX_AUTO_NEG, 0);
5737 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5738 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5739 tw32_f(MAC_MODE, tp->mac_mode);
5742 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5743 tg3_init_bcm8002(tp);
5745 /* Enable link change event even when serdes polling. */
5746 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5749 current_link_up = false;
5750 tp->link_config.rmt_adv = 0;
5751 mac_status = tr32(MAC_STATUS);
5753 if (tg3_flag(tp, HW_AUTONEG))
5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5758 tp->napi[0].hw_status->status =
5759 (SD_STATUS_UPDATED |
5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5762 for (i = 0; i < 100; i++) {
5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764 MAC_STATUS_CFG_CHANGED));
5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767 MAC_STATUS_CFG_CHANGED |
5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5772 mac_status = tr32(MAC_STATUS);
5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774 current_link_up = false;
5775 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776 tp->serdes_counter == 0) {
5777 tw32_f(MAC_MODE, (tp->mac_mode |
5778 MAC_MODE_SEND_CONFIGS));
5780 tw32_f(MAC_MODE, tp->mac_mode);
5784 if (current_link_up) {
5785 tp->link_config.active_speed = SPEED_1000;
5786 tp->link_config.active_duplex = DUPLEX_FULL;
5787 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788 LED_CTRL_LNKLED_OVERRIDE |
5789 LED_CTRL_1000MBPS_ON));
5791 tp->link_config.active_speed = SPEED_UNKNOWN;
5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794 LED_CTRL_LNKLED_OVERRIDE |
5795 LED_CTRL_TRAFFIC_OVERRIDE));
5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800 if (orig_pause_cfg != now_pause_cfg ||
5801 orig_active_speed != tp->link_config.active_speed ||
5802 orig_active_duplex != tp->link_config.active_duplex)
5803 tg3_link_report(tp);
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5813 u32 current_speed = SPEED_UNKNOWN;
5814 u8 current_duplex = DUPLEX_UNKNOWN;
5815 bool current_link_up = false;
5816 u32 local_adv, remote_adv, sgsr;
5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821 (sgsr & SERDES_TG3_SGMII_MODE)) {
5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5828 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831 current_link_up = true;
5832 if (sgsr & SERDES_TG3_SPEED_1000) {
5833 current_speed = SPEED_1000;
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835 } else if (sgsr & SERDES_TG3_SPEED_100) {
5836 current_speed = SPEED_100;
5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5839 current_speed = SPEED_10;
5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5843 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844 current_duplex = DUPLEX_FULL;
5846 current_duplex = DUPLEX_HALF;
5849 tw32_f(MAC_MODE, tp->mac_mode);
5852 tg3_clear_mac_status(tp);
5854 goto fiber_setup_done;
5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858 tw32_f(MAC_MODE, tp->mac_mode);
5861 tg3_clear_mac_status(tp);
5866 tp->link_config.rmt_adv = 0;
5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872 bmsr |= BMSR_LSTATUS;
5874 bmsr &= ~BMSR_LSTATUS;
5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881 /* do nothing, just check for link up at the end */
5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887 ADVERTISE_1000XPAUSE |
5888 ADVERTISE_1000XPSE_ASYM |
5889 ADVERTISE_SLCT);
5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895 tg3_writephy(tp, MII_ADVERTISE, newadv);
5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897 tg3_writephy(tp, MII_BMCR, bmcr);
5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5908 bmcr &= ~BMCR_SPEED1000;
5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5911 if (tp->link_config.duplex == DUPLEX_FULL)
5912 new_bmcr |= BMCR_FULLDPLX;
5914 if (new_bmcr != bmcr) {
5915 /* BMCR_SPEED1000 is a reserved bit that needs
5916 * to be set on write.
5918 new_bmcr |= BMCR_SPEED1000;
5920 /* Force a linkdown */
5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925 adv &= ~(ADVERTISE_1000XFULL |
5926 ADVERTISE_1000XHALF |
5927 ADVERTISE_SLCT);
5928 tg3_writephy(tp, MII_ADVERTISE, adv);
5929 tg3_writephy(tp, MII_BMCR, bmcr |
5930 BMCR_ANRESTART |
5931 BMCR_ANENABLE);
5933 tg3_carrier_off(tp);
5935 tg3_writephy(tp, MII_BMCR, new_bmcr);
5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941 bmsr |= BMSR_LSTATUS;
5943 bmsr &= ~BMSR_LSTATUS;
5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5949 if (bmsr & BMSR_LSTATUS) {
5950 current_speed = SPEED_1000;
5951 current_link_up = true;
5952 if (bmcr & BMCR_FULLDPLX)
5953 current_duplex = DUPLEX_FULL;
5955 current_duplex = DUPLEX_HALF;
5960 if (bmcr & BMCR_ANENABLE) {
5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965 common = local_adv & remote_adv;
5966 if (common & (ADVERTISE_1000XHALF |
5967 ADVERTISE_1000XFULL)) {
5968 if (common & ADVERTISE_1000XFULL)
5969 current_duplex = DUPLEX_FULL;
5971 current_duplex = DUPLEX_HALF;
5973 tp->link_config.rmt_adv =
5974 mii_adv_to_ethtool_adv_x(remote_adv);
5975 } else if (!tg3_flag(tp, 5780_CLASS)) {
5976 /* Link is up via parallel detect */
5978 current_link_up = false;
5984 if (current_link_up && current_duplex == DUPLEX_FULL)
5985 tg3_setup_flow_control(tp, local_adv, remote_adv);
5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988 if (tp->link_config.active_duplex == DUPLEX_HALF)
5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5991 tw32_f(MAC_MODE, tp->mac_mode);
5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5996 tp->link_config.active_speed = current_speed;
5997 tp->link_config.active_duplex = current_duplex;
5999 tg3_test_and_report_link_chg(tp, current_link_up);
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6005 if (tp->serdes_counter) {
6006 /* Give autoneg time to complete. */
6007 tp->serdes_counter--;
6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6015 tg3_readphy(tp, MII_BMCR, &bmcr);
6016 if (bmcr & BMCR_ANENABLE) {
6019 /* Select shadow register 0x1f */
6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6023 /* Select expansion interrupt status register */
6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025 MII_TG3_DSP_EXP1_INT_STAT);
6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030 /* We have signal detect and not receiving
6031 * config code words, link is up by parallel
6035 bmcr &= ~BMCR_ANENABLE;
6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037 tg3_writephy(tp, MII_BMCR, bmcr);
6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6041 } else if (tp->link_up &&
6042 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6046 /* Select expansion interrupt status register */
6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048 MII_TG3_DSP_EXP1_INT_STAT);
6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6053 /* Config code words received, turn on autoneg. */
6054 tg3_readphy(tp, MII_BMCR, &bmcr);
6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
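/* The logic above is effectively the parallel-detection fallback of
 * IEEE 802.3 clause 37 autoneg: signal present (shadow-register bit
 * 0x10) without config code words (expansion-status bit 0x20) means the
 * partner is not negotiating, so the first branch forces 1000/full and
 * records TG3_PHYFLG_PARALLEL_DETECT; the second branch re-enables
 * autoneg as soon as config words do appear.
 */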
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069 err = tg3_setup_fiber_phy(tp, force_reset);
6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6073 err = tg3_setup_copper_phy(tp, force_reset);
6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088 tw32(GRC_MISC_CFG, val);
6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092 (6 << TX_LENGTHS_IPG_SHIFT);
6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094 tg3_asic_rev(tp) == ASIC_REV_5762)
6095 val |= tr32(MAC_TX_LENGTHS) &
6096 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097 TX_LENGTHS_CNT_DWN_VAL_MSK);
6099 if (tp->link_config.active_speed == SPEED_1000 &&
6100 tp->link_config.active_duplex == DUPLEX_HALF)
6101 tw32(MAC_TX_LENGTHS, val |
6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6104 tw32(MAC_TX_LENGTHS, val |
6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
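	/* The 0xff slot time above is the extended slot time that
	 * half-duplex gigabit (carrier extension) requires; full duplex
	 * and lower speeds use the normal value of 32.
	 */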
6107 if (!tg3_flag(tp, 5705_PLUS)) {
6109 tw32(HOSTCC_STAT_COAL_TICKS,
6110 tp->coal.stats_block_coalesce_usecs);
6112 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6116 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117 val = tr32(PCIE_PWR_MGMT_THRESH);
6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6120 tp->pwrmgmt_thresh;
6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123 tw32(PCIE_PWR_MGMT_THRESH, val);
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6134 ptp_read_system_prets(sts);
6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6136 ptp_read_system_postts(sts);
6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
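	/* The prets/postts calls bracket only the LSB read, the point
	 * the driver treats as the instant the counter is sampled; the
	 * PTP core uses that narrow window to correlate system time
	 * with the PHC reading.
	 */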
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
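	/* The STOP/RESUME bracketing above keeps the counter from
	 * ticking between the two 32-bit halves of the 64-bit write, so
	 * the hardware never runs with a torn timestamp.
	 */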
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6157 struct tg3 *tp = netdev_priv(dev);
6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160 SOF_TIMESTAMPING_RX_SOFTWARE |
6161 SOF_TIMESTAMPING_SOFTWARE;
6163 if (tg3_flag(tp, PTP_CAPABLE)) {
6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165 SOF_TIMESTAMPING_RX_HARDWARE |
6166 SOF_TIMESTAMPING_RAW_HARDWARE;
6170 info->phc_index = ptp_clock_index(tp->ptp_clock);
6172 info->phc_index = -1;
6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6183 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186 bool neg_adj = false;
6194 /* Frequency adjustment is performed using hardware with a 24 bit
6195 * accumulator and a programmable correction value. On each clk, the
6196 * correction value gets added to the accumulator and when it
6197 * overflows, the time counter is incremented/decremented.
6199 * So conversion from ppb to correction value is
6200 * ppb * (1 << 24) / 1000000000
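 *
 * Worked example: for ppb = 1000000 (a 1000 ppm speed-up),
 * correction = 1000000 * 16777216 / 1000000000 = 16777, so the 24-bit
 * accumulator overflows about once every 2^24 / 16777 ~= 1000 clocks,
 * i.e. one extra count per thousand, which is the requested rate.
 */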
6202 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6203 TG3_EAV_REF_CLK_CORRECT_MASK;
6205 tg3_full_lock(tp, 0);
6208 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6209 TG3_EAV_REF_CLK_CORRECT_EN |
6210 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6212 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6214 tg3_full_unlock(tp);
6219 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6221 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6223 tg3_full_lock(tp, 0);
6224 tp->ptp_adjust += delta;
6225 tg3_full_unlock(tp);
6230 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6231 struct ptp_system_timestamp *sts)
6234 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6236 tg3_full_lock(tp, 0);
6237 ns = tg3_refclk_read(tp, sts);
6238 ns += tp->ptp_adjust;
6239 tg3_full_unlock(tp);
6241 *ts = ns_to_timespec64(ns);
6246 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6247 const struct timespec64 *ts)
6250 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6252 ns = timespec64_to_ns(ts);
6254 tg3_full_lock(tp, 0);
6255 tg3_refclk_write(tp, ns);
6257 tg3_full_unlock(tp);
6262 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6263 struct ptp_clock_request *rq, int on)
6265 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6270 case PTP_CLK_REQ_PEROUT:
6271 /* Reject requests with unsupported flags */
6272 if (rq->perout.flags)
6275 if (rq->perout.index != 0)
6278 tg3_full_lock(tp, 0);
6279 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6280 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6285 nsec = rq->perout.start.sec * 1000000000ULL +
6286 rq->perout.start.nsec;
6288 if (rq->perout.period.sec || rq->perout.period.nsec) {
6289 netdev_warn(tp->dev,
6290 "Device supports only a one-shot timesync output, period must be 0\n");
6295 if (nsec & (1ULL << 63)) {
6296 netdev_warn(tp->dev,
6297 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6302 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6303 tw32(TG3_EAV_WATCHDOG0_MSB,
6304 TG3_EAV_WATCHDOG0_EN |
6305 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6307 tw32(TG3_EAV_REF_CLCK_CTL,
6308 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6310 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6311 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6315 tg3_full_unlock(tp);
6325 static const struct ptp_clock_info tg3_ptp_caps = {
6326 .owner = THIS_MODULE,
6327 .name = "tg3 clock",
6328 .max_adj = 250000000,
6334 .adjfreq = tg3_ptp_adjfreq,
6335 .adjtime = tg3_ptp_adjtime,
6336 .gettimex64 = tg3_ptp_gettimex,
6337 .settime64 = tg3_ptp_settime,
6338 .enable = tg3_ptp_enable,
6341 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6342 struct skb_shared_hwtstamps *timestamp)
6344 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6345 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6346 tp->ptp_adjust);
6349 /* tp->lock must be held */
6350 static void tg3_ptp_init(struct tg3 *tp)
6352 if (!tg3_flag(tp, PTP_CAPABLE))
6355 /* Initialize the hardware clock to the system time. */
6356 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6358 tp->ptp_info = tg3_ptp_caps;
6361 /* tp->lock must be held */
6362 static void tg3_ptp_resume(struct tg3 *tp)
6364 if (!tg3_flag(tp, PTP_CAPABLE))
6367 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6371 static void tg3_ptp_fini(struct tg3 *tp)
6373 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6376 ptp_clock_unregister(tp->ptp_clock);
6377 tp->ptp_clock = NULL;
6381 static inline int tg3_irq_sync(struct tg3 *tp)
6383 return tp->irq_sync;
6386 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6390 dst = (u32 *)((u8 *)dst + off);
6391 for (i = 0; i < len; i += sizeof(u32))
6392 *dst++ = tr32(off + i);
6395 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6397 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6398 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6399 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6400 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6401 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6402 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6403 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6404 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6405 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6406 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6407 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6408 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6409 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6410 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6411 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6412 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6413 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6414 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6415 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6417 if (tg3_flag(tp, SUPPORT_MSIX))
6418 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6420 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6421 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6422 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6423 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6424 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6425 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6426 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6427 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6429 if (!tg3_flag(tp, 5705_PLUS)) {
6430 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6431 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6432 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6435 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6436 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6437 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6438 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6439 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6441 if (tg3_flag(tp, NVRAM))
6442 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6445 static void tg3_dump_state(struct tg3 *tp)
6450 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6454 if (tg3_flag(tp, PCI_EXPRESS)) {
6455 /* Read up to but not including private PCI registers */
6456 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6457 regs[i / sizeof(u32)] = tr32(i);
6459 tg3_dump_legacy_regs(tp, regs);
6461 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6462 if (!regs[i + 0] && !regs[i + 1] &&
6463 !regs[i + 2] && !regs[i + 3])
6466 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6468 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6473 for (i = 0; i < tp->irq_cnt; i++) {
6474 struct tg3_napi *tnapi = &tp->napi[i];
6476 /* SW status block */
6478 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6480 tnapi->hw_status->status,
6481 tnapi->hw_status->status_tag,
6482 tnapi->hw_status->rx_jumbo_consumer,
6483 tnapi->hw_status->rx_consumer,
6484 tnapi->hw_status->rx_mini_consumer,
6485 tnapi->hw_status->idx[0].rx_producer,
6486 tnapi->hw_status->idx[0].tx_consumer);
6489 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6491 tnapi->last_tag, tnapi->last_irq_tag,
6492 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6494 tnapi->prodring.rx_std_prod_idx,
6495 tnapi->prodring.rx_std_cons_idx,
6496 tnapi->prodring.rx_jmb_prod_idx,
6497 tnapi->prodring.rx_jmb_cons_idx);
6501 /* This is called whenever we suspect that the system chipset is re-
6502 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6503 * is bogus tx completions. We try to recover by setting the
6504 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6505 * (in tg3_restart_hw()).
6506 */
6507 static void tg3_tx_recover(struct tg3 *tp)
6509 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6510 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6512 netdev_warn(tp->dev,
6513 "The system may be re-ordering memory-mapped I/O "
6514 "cycles to the network device, attempting to recover. "
6515 "Please report the problem to the driver maintainer "
6516 "and include system chipset information.\n");
6518 tg3_flag_set(tp, TX_RECOVERY_PENDING);
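	/* TG3_FLAG_TX_RECOVERY_PENDING is consumed in tg3_poll_work()
	 * and tg3_poll_msix() below, which abort NAPI polling and call
	 * tg3_reset_task_schedule() so the chip is reset from process
	 * context.
	 */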
6521 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6523 /* Tell compiler to fetch tx indices from memory. */
6525 return tnapi->tx_pending -
6526 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
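/* Worked example for the arithmetic above, assuming the usual 512-entry
 * TX ring (mask 511) and the default tx_pending of 511: with
 * tx_prod = 10 and tx_cons = 500, (10 - 500) & 511 = 22 descriptors are
 * in flight, leaving 511 - 22 = 489 available.
 */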
6529 /* Tigon3 never reports partial packet sends. So we do not
6530 * need special logic to handle SKBs that have not had all
6531 * of their frags sent yet, like SunGEM does.
6533 static void tg3_tx(struct tg3_napi *tnapi)
6535 struct tg3 *tp = tnapi->tp;
6536 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6537 u32 sw_idx = tnapi->tx_cons;
6538 struct netdev_queue *txq;
6539 int index = tnapi - tp->napi;
6540 unsigned int pkts_compl = 0, bytes_compl = 0;
6542 if (tg3_flag(tp, ENABLE_TSS))
6545 txq = netdev_get_tx_queue(tp->dev, index);
6547 while (sw_idx != hw_idx) {
6548 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6549 struct sk_buff *skb = ri->skb;
6552 if (unlikely(skb == NULL)) {
6557 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6558 struct skb_shared_hwtstamps timestamp;
6559 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6560 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6562 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6564 skb_tstamp_tx(skb, &timestamp);
6567 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6568 skb_headlen(skb), DMA_TO_DEVICE);
6572 while (ri->fragmented) {
6573 ri->fragmented = false;
6574 sw_idx = NEXT_TX(sw_idx);
6575 ri = &tnapi->tx_buffers[sw_idx];
6578 sw_idx = NEXT_TX(sw_idx);
6580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6581 ri = &tnapi->tx_buffers[sw_idx];
6582 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6585 dma_unmap_page(&tp->pdev->dev,
6586 dma_unmap_addr(ri, mapping),
6587 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6588 DMA_TO_DEVICE);
6590 while (ri->fragmented) {
6591 ri->fragmented = false;
6592 sw_idx = NEXT_TX(sw_idx);
6593 ri = &tnapi->tx_buffers[sw_idx];
6596 sw_idx = NEXT_TX(sw_idx);
6599 pkts_compl++;
6600 bytes_compl += skb->len;
6602 dev_consume_skb_any(skb);
6604 if (unlikely(tx_bug)) {
6610 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6612 tnapi->tx_cons = sw_idx;
6614 /* Need to make the tx_cons update visible to tg3_start_xmit()
6615 * before checking for netif_queue_stopped(). Without the
6616 * memory barrier, there is a small possibility that tg3_start_xmit()
6617 * will miss it and cause the queue to be stopped forever.
6618 */
6620 smp_mb();
6621 if (unlikely(netif_tx_queue_stopped(txq) &&
6622 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6623 __netif_tx_lock(txq, smp_processor_id());
6624 if (netif_tx_queue_stopped(txq) &&
6625 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6626 netif_tx_wake_queue(txq);
6627 __netif_tx_unlock(txq);
6631 static void tg3_frag_free(bool is_frag, void *data)
6634 skb_free_frag(data);
6639 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6641 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6642 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6647 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6649 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6654 /* Returns size of skb allocated or < 0 on error.
6656 * We only need to fill in the address because the other members
6657 * of the RX descriptor are invariant, see tg3_init_rings.
6659 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6660 * posting buffers we only dirty the first cache line of the RX
6661 * descriptor (containing the address). Whereas for the RX status
6662 * buffers the cpu only reads the last cacheline of the RX descriptor
6663 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6665 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6666 u32 opaque_key, u32 dest_idx_unmasked,
6667 unsigned int *frag_size)
6669 struct tg3_rx_buffer_desc *desc;
6670 struct ring_info *map;
6673 int skb_size, data_size, dest_idx;
6675 switch (opaque_key) {
6676 case RXD_OPAQUE_RING_STD:
6677 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6678 desc = &tpr->rx_std[dest_idx];
6679 map = &tpr->rx_std_buffers[dest_idx];
6680 data_size = tp->rx_pkt_map_sz;
6683 case RXD_OPAQUE_RING_JUMBO:
6684 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6685 desc = &tpr->rx_jmb[dest_idx].std;
6686 map = &tpr->rx_jmb_buffers[dest_idx];
6687 data_size = TG3_RX_JMB_MAP_SZ;
6694 /* Do not overwrite any of the map or rp information
6695 * until we are sure we can commit to a new buffer.
6697 * Callers depend upon this behavior and assume that
6698 * we leave everything unchanged if we fail.
6700 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6701 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6702 if (skb_size <= PAGE_SIZE) {
6703 data = napi_alloc_frag(skb_size);
6704 *frag_size = skb_size;
6706 data = kmalloc(skb_size, GFP_ATOMIC);
6712 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6713 data_size, DMA_FROM_DEVICE);
6714 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6715 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6720 dma_unmap_addr_set(map, mapping, mapping);
6722 desc->addr_hi = ((u64)mapping >> 32);
6723 desc->addr_lo = ((u64)mapping & 0xffffffff);
6728 /* We only need to copy the address over because the other
6729 * members of the RX descriptor are invariant. See notes above
6730 * tg3_alloc_rx_data for full details.
6732 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6733 struct tg3_rx_prodring_set *dpr,
6734 u32 opaque_key, int src_idx,
6735 u32 dest_idx_unmasked)
6737 struct tg3 *tp = tnapi->tp;
6738 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6739 struct ring_info *src_map, *dest_map;
6740 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6743 switch (opaque_key) {
6744 case RXD_OPAQUE_RING_STD:
6745 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6746 dest_desc = &dpr->rx_std[dest_idx];
6747 dest_map = &dpr->rx_std_buffers[dest_idx];
6748 src_desc = &spr->rx_std[src_idx];
6749 src_map = &spr->rx_std_buffers[src_idx];
6752 case RXD_OPAQUE_RING_JUMBO:
6753 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6754 dest_desc = &dpr->rx_jmb[dest_idx].std;
6755 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6756 src_desc = &spr->rx_jmb[src_idx].std;
6757 src_map = &spr->rx_jmb_buffers[src_idx];
6764 dest_map->data = src_map->data;
6765 dma_unmap_addr_set(dest_map, mapping,
6766 dma_unmap_addr(src_map, mapping));
6767 dest_desc->addr_hi = src_desc->addr_hi;
6768 dest_desc->addr_lo = src_desc->addr_lo;
6770 /* Ensure that the update to the skb happens after the physical
6771 * addresses have been transferred to the new BD location.
6772 */
6773 smp_wmb();
6775 src_map->data = NULL;
6778 /* The RX ring scheme is composed of multiple rings which post fresh
6779 * buffers to the chip, and one special ring the chip uses to report
6780 * status back to the host.
6782 * The special ring reports the status of received packets to the
6783 * host. The chip does not write into the original descriptor the
6784 * RX buffer was obtained from. The chip simply takes the original
6785 * descriptor as provided by the host, updates the status and length
6786 * field, then writes this into the next status ring entry.
6788 * Each ring the host uses to post buffers to the chip is described
6789 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6790 * it is first placed into the on-chip ram. When the packet's length
6791 * is known, it walks down the TG3_BDINFO entries to select the ring.
6792 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6793 * which is within the range of the new packet's length is chosen.
6795 * The "separate ring for rx status" scheme may sound queer, but it makes
6796 * sense from a cache coherency perspective. If only the host writes
6797 * to the buffer post rings, and only the chip writes to the rx status
6798 * rings, then cache lines never move beyond shared-modified state.
6799 * If both the host and chip were to write into the same ring, cache line
6800 * eviction could occur since both entities want it in an exclusive state.
6801 */
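
/* Illustrative sketch, not driver code: the TG3_BDINFO MAXLEN walk
 * described above, with 'ring_maxlen' and 'nrings' as hypothetical
 * stand-ins for the per-ring SRAM fields. The chip selects the first
 * posted ring whose MAXLEN covers the packet.
 */
static inline int tg3_example_pick_ring(const u32 *ring_maxlen, int nrings,
					u32 pkt_len)
{
	int i;

	for (i = 0; i < nrings; i++)
		if (pkt_len <= ring_maxlen[i])
			return i;	/* first BDINFO that fits wins */

	return -1;			/* no posted ring can hold it */
}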
6802 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6804 struct tg3 *tp = tnapi->tp;
6805 u32 work_mask, rx_std_posted = 0;
6806 u32 std_prod_idx, jmb_prod_idx;
6807 u32 sw_idx = tnapi->rx_rcb_ptr;
6810 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6812 hw_idx = *(tnapi->rx_rcb_prod_idx);
6814 * We need to order the read of hw_idx and the read of
6815 * the opaque cookie.
6816 */
6817 rmb();
6820 std_prod_idx = tpr->rx_std_prod_idx;
6821 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6822 while (sw_idx != hw_idx && budget > 0) {
6823 struct ring_info *ri;
6824 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6826 struct sk_buff *skb;
6827 dma_addr_t dma_addr;
6828 u32 opaque_key, desc_idx, *post_ptr;
6832 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6833 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6834 if (opaque_key == RXD_OPAQUE_RING_STD) {
6835 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6836 dma_addr = dma_unmap_addr(ri, mapping);
6838 post_ptr = &std_prod_idx;
6840 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6841 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6842 dma_addr = dma_unmap_addr(ri, mapping);
6844 post_ptr = &jmb_prod_idx;
6846 goto next_pkt_nopost;
6848 work_mask |= opaque_key;
6850 if (desc->err_vlan & RXD_ERR_MASK) {
6852 tg3_recycle_rx(tnapi, tpr, opaque_key,
6853 desc_idx, *post_ptr);
6855 /* Other statistics are tracked by the card. */
6860 prefetch(data + TG3_RX_OFFSET(tp));
6861 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6862 ETH_FCS_LEN;
6864 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6865 RXD_FLAG_PTPSTAT_PTPV1 ||
6866 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6867 RXD_FLAG_PTPSTAT_PTPV2) {
6868 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6869 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6872 if (len > TG3_RX_COPY_THRESH(tp)) {
6874 unsigned int frag_size;
6876 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6877 *post_ptr, &frag_size);
6881 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6882 DMA_FROM_DEVICE);
6884 /* Ensure that the update to the data happens
6885 * after the usage of the old DMA mapping.
6886 */
6887 smp_wmb();
6889 ri->data = NULL;
6891 skb = build_skb(data, frag_size);
6893 tg3_frag_free(frag_size != 0, data);
6894 goto drop_it_no_recycle;
6896 skb_reserve(skb, TG3_RX_OFFSET(tp));
6898 tg3_recycle_rx(tnapi, tpr, opaque_key,
6899 desc_idx, *post_ptr);
6901 skb = netdev_alloc_skb(tp->dev,
6902 len + TG3_RAW_IP_ALIGN);
6904 goto drop_it_no_recycle;
6906 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6907 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6908 DMA_FROM_DEVICE);
6909 memcpy(skb->data,
6910 data + TG3_RX_OFFSET(tp),
6911 len);
6912 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6913 len, DMA_FROM_DEVICE);
6918 tg3_hwclock_to_timestamp(tp, tstamp,
6919 skb_hwtstamps(skb));
6921 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6922 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6923 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6924 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6925 skb->ip_summed = CHECKSUM_UNNECESSARY;
6927 skb_checksum_none_assert(skb);
6929 skb->protocol = eth_type_trans(skb, tp->dev);
6931 if (len > (tp->dev->mtu + ETH_HLEN) &&
6932 skb->protocol != htons(ETH_P_8021Q) &&
6933 skb->protocol != htons(ETH_P_8021AD)) {
6934 dev_kfree_skb_any(skb);
6935 goto drop_it_no_recycle;
6938 if (desc->type_flags & RXD_FLAG_VLAN &&
6939 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6940 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6941 desc->err_vlan & RXD_VLAN_MASK);
6943 napi_gro_receive(&tnapi->napi, skb);
6951 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6952 tpr->rx_std_prod_idx = std_prod_idx &
6953 tp->rx_std_ring_mask;
6954 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6955 tpr->rx_std_prod_idx);
6956 work_mask &= ~RXD_OPAQUE_RING_STD;
6961 sw_idx &= tp->rx_ret_ring_mask;
6963 /* Refresh hw_idx to see if there is new work */
6964 if (sw_idx == hw_idx) {
6965 hw_idx = *(tnapi->rx_rcb_prod_idx);
6970 /* ACK the status ring. */
6971 tnapi->rx_rcb_ptr = sw_idx;
6972 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6974 /* Refill RX ring(s). */
6975 if (!tg3_flag(tp, ENABLE_RSS)) {
6976 /* Sync BD data before updating mailbox */
6977 wmb();
6979 if (work_mask & RXD_OPAQUE_RING_STD) {
6980 tpr->rx_std_prod_idx = std_prod_idx &
6981 tp->rx_std_ring_mask;
6982 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6983 tpr->rx_std_prod_idx);
6985 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6986 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6987 tp->rx_jmb_ring_mask;
6988 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6989 tpr->rx_jmb_prod_idx);
6991 } else if (work_mask) {
6992 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6993 * updated before the producer indices can be updated.
6994 */
6995 smp_wmb();
6997 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6998 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7000 if (tnapi != &tp->napi[1]) {
7001 tp->rx_refill = true;
7002 napi_schedule(&tp->napi[1].napi);
7009 static void tg3_poll_link(struct tg3 *tp)
7011 /* handle link change and other phy events */
7012 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7013 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7015 if (sblk->status & SD_STATUS_LINK_CHG) {
7016 sblk->status = SD_STATUS_UPDATED |
7017 (sblk->status & ~SD_STATUS_LINK_CHG);
7018 spin_lock(&tp->lock);
7019 if (tg3_flag(tp, USE_PHYLIB)) {
7020 tw32_f(MAC_STATUS,
7021 (MAC_STATUS_SYNC_CHANGED |
7022 MAC_STATUS_CFG_CHANGED |
7023 MAC_STATUS_MI_COMPLETION |
7024 MAC_STATUS_LNKSTATE_CHANGED));
7027 tg3_setup_phy(tp, false);
7028 spin_unlock(&tp->lock);
7033 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7034 struct tg3_rx_prodring_set *dpr,
7035 struct tg3_rx_prodring_set *spr)
7037 u32 si, di, cpycnt, src_prod_idx;
7041 src_prod_idx = spr->rx_std_prod_idx;
7043 /* Make sure updates to the rx_std_buffers[] entries and the
7044 * standard producer index are seen in the correct order.
7045 */
7046 smp_rmb();
7048 if (spr->rx_std_cons_idx == src_prod_idx)
7051 if (spr->rx_std_cons_idx < src_prod_idx)
7052 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7054 cpycnt = tp->rx_std_ring_mask + 1 -
7055 spr->rx_std_cons_idx;
7057 cpycnt = min(cpycnt,
7058 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
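		/* Worked example with hypothetical indices: on a
		 * 512-entry standard ring, cons = 500 and prod wrapped
		 * to 20 takes the else branch above, limiting cpycnt to
		 * 512 - 500 = 12 entries up to the end of the ring; the
		 * next pass around the loop copies the remaining 20
		 * from index 0.
		 */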
7060 si = spr->rx_std_cons_idx;
7061 di = dpr->rx_std_prod_idx;
7063 for (i = di; i < di + cpycnt; i++) {
7064 if (dpr->rx_std_buffers[i].data) {
7074 /* Ensure that updates to the rx_std_buffers ring and the
7075 * shadowed hardware producer ring from tg3_recycle_skb() are
7076 * ordered correctly WRT the skb check above.
7077 */
7078 smp_rmb();
7080 memcpy(&dpr->rx_std_buffers[di],
7081 &spr->rx_std_buffers[si],
7082 cpycnt * sizeof(struct ring_info));
7084 for (i = 0; i < cpycnt; i++, di++, si++) {
7085 struct tg3_rx_buffer_desc *sbd, *dbd;
7086 sbd = &spr->rx_std[si];
7087 dbd = &dpr->rx_std[di];
7088 dbd->addr_hi = sbd->addr_hi;
7089 dbd->addr_lo = sbd->addr_lo;
7092 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7093 tp->rx_std_ring_mask;
7094 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7095 tp->rx_std_ring_mask;
7099 src_prod_idx = spr->rx_jmb_prod_idx;
7101 /* Make sure updates to the rx_jmb_buffers[] entries and
7102 * the jumbo producer index are seen in the correct order.
7103 */
7104 smp_rmb();
7106 if (spr->rx_jmb_cons_idx == src_prod_idx)
7109 if (spr->rx_jmb_cons_idx < src_prod_idx)
7110 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7112 cpycnt = tp->rx_jmb_ring_mask + 1 -
7113 spr->rx_jmb_cons_idx;
7115 cpycnt = min(cpycnt,
7116 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7118 si = spr->rx_jmb_cons_idx;
7119 di = dpr->rx_jmb_prod_idx;
7121 for (i = di; i < di + cpycnt; i++) {
7122 if (dpr->rx_jmb_buffers[i].data) {
7132 /* Ensure that updates to the rx_jmb_buffers ring and the
7133 * shadowed hardware producer ring from tg3_recycle_skb() are
7134 * ordered correctly WRT the skb check above.
7135 */
7136 smp_rmb();
7138 memcpy(&dpr->rx_jmb_buffers[di],
7139 &spr->rx_jmb_buffers[si],
7140 cpycnt * sizeof(struct ring_info));
7142 for (i = 0; i < cpycnt; i++, di++, si++) {
7143 struct tg3_rx_buffer_desc *sbd, *dbd;
7144 sbd = &spr->rx_jmb[si].std;
7145 dbd = &dpr->rx_jmb[di].std;
7146 dbd->addr_hi = sbd->addr_hi;
7147 dbd->addr_lo = sbd->addr_lo;
7150 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7151 tp->rx_jmb_ring_mask;
7152 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7153 tp->rx_jmb_ring_mask;
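/* Worked example of the copy-count math above (illustrative numbers):
 * with a 512-entry ring (mask 0x1ff), cons_idx = 0x1f0 and
 * prod_idx = 0x010 means the producer has wrapped, so the first pass
 * copies mask + 1 - cons_idx = 0x10 entries up to the end of the
 * source ring and a later pass picks up the remaining 0x10 entries
 * from index 0.  cpycnt is also clipped against the room left in the
 * destination ring, so the memcpy() never has to wrap either.
 */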
7159 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7161 struct tg3 *tp = tnapi->tp;
7163 /* run TX completion thread */
7164 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7166 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7170 if (!tnapi->rx_rcb_prod_idx)
7173 /* run RX thread, within the bounds set by NAPI.
7174 * All RX "locking" is done by ensuring outside
7175 * code synchronizes with tg3->napi.poll()
7177 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7178 work_done += tg3_rx(tnapi, budget - work_done);
7180 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7181 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7183 u32 std_prod_idx = dpr->rx_std_prod_idx;
7184 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7186 tp->rx_refill = false;
7187 for (i = 1; i <= tp->rxq_cnt; i++)
7188 err |= tg3_rx_prodring_xfer(tp, dpr,
7189 &tp->napi[i].prodring);
7193 if (std_prod_idx != dpr->rx_std_prod_idx)
7194 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7195 dpr->rx_std_prod_idx);
7197 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7198 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7199 dpr->rx_jmb_prod_idx);
7202 tw32_f(HOSTCC_MODE, tp->coal_now);
7208 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7210 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7211 schedule_work(&tp->reset_task);
7214 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7216 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7217 cancel_work_sync(&tp->reset_task);
7218 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
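/* The test_and_set_bit()/test_and_clear_bit() pair above makes reset
 * scheduling idempotent: if two contexts race to schedule the reset
 * task, only the one that flips RESET_TASK_PENDING from 0 to 1 calls
 * schedule_work(), so reset_task is queued at most once, and cancel
 * only waits on work that was actually queued.
 */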
7221 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7223 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7224 struct tg3 *tp = tnapi->tp;
7226 struct tg3_hw_status *sblk = tnapi->hw_status;
7229 work_done = tg3_poll_work(tnapi, work_done, budget);
7231 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7234 if (unlikely(work_done >= budget))
7237 /* tp->last_tag is used in tg3_int_reenable() below
7238 * to tell the hw how much work has been processed,
7239 * so we must read it before checking for more work.
7241 tnapi->last_tag = sblk->status_tag;
7242 tnapi->last_irq_tag = tnapi->last_tag;
7245 /* check for RX/TX work to do */
7246 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7247 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7249 /* This test here is not race-free, but it will reduce
7250 * the number of interrupts by looping again.
7252 if (tnapi == &tp->napi[1] && tp->rx_refill)
7255 napi_complete_done(napi, work_done);
7256 /* Reenable interrupts. */
7257 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7259 /* This test here is synchronized by napi_schedule()
7260 * and napi_complete() to close the race condition.
7262 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7263 tw32(HOSTCC_MODE, tp->coalesce_mode |
7264 HOSTCC_MODE_ENABLE |
7271 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7275 /* work_done is guaranteed to be less than budget. */
7276 napi_complete(napi);
7277 tg3_reset_task_schedule(tp);
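/* Tagged-status acknowledgement, by example: if status_tag reads 0x2a
 * when sampled above, writing 0x2a << 24 to the interrupt mailbox
 * tells the chip that everything up to tag 0x2a has been serviced.
 * The chip then only raises a new interrupt if it has since produced
 * a status block with a newer tag, which closes the lost-event window.
 */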
7281 static void tg3_process_error(struct tg3 *tp)
7284 bool real_error = false;
7286 if (tg3_flag(tp, ERROR_PROCESSED))
7289 /* Check Flow Attention register */
7290 val = tr32(HOSTCC_FLOW_ATTN);
7291 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7292 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7296 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7297 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7301 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7302 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7311 tg3_flag_set(tp, ERROR_PROCESSED);
7312 tg3_reset_task_schedule(tp);
7315 static int tg3_poll(struct napi_struct *napi, int budget)
7317 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7318 struct tg3 *tp = tnapi->tp;
7320 struct tg3_hw_status *sblk = tnapi->hw_status;
7323 if (sblk->status & SD_STATUS_ERROR)
7324 tg3_process_error(tp);
7328 work_done = tg3_poll_work(tnapi, work_done, budget);
7330 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7333 if (unlikely(work_done >= budget))
7336 if (tg3_flag(tp, TAGGED_STATUS)) {
7337 /* tp->last_tag is used in tg3_int_reenable() below
7338 * to tell the hw how much work has been processed,
7339 * so we must read it before checking for more work.
7341 tnapi->last_tag = sblk->status_tag;
7342 tnapi->last_irq_tag = tnapi->last_tag;
7345 sblk->status &= ~SD_STATUS_UPDATED;
7347 if (likely(!tg3_has_work(tnapi))) {
7348 napi_complete_done(napi, work_done);
7349 tg3_int_reenable(tnapi);
7354 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7358 /* work_done is guaranteed to be less than budget. */
7359 napi_complete(napi);
7360 tg3_reset_task_schedule(tp);
7364 static void tg3_napi_disable(struct tg3 *tp)
7368 for (i = tp->irq_cnt - 1; i >= 0; i--)
7369 napi_disable(&tp->napi[i].napi);
7372 static void tg3_napi_enable(struct tg3 *tp)
7376 for (i = 0; i < tp->irq_cnt; i++)
7377 napi_enable(&tp->napi[i].napi);
7380 static void tg3_napi_init(struct tg3 *tp)
7384 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7385 for (i = 1; i < tp->irq_cnt; i++)
7386 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7389 static void tg3_napi_fini(struct tg3 *tp)
7393 for (i = 0; i < tp->irq_cnt; i++)
7394 netif_napi_del(&tp->napi[i].napi);
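/* The helpers above pair up across the device lifecycle:
 * tg3_napi_init()/tg3_napi_fini() register and unregister the NAPI
 * contexts, while tg3_napi_enable()/tg3_napi_disable() gate polling
 * around resets.  Note that disable walks the vectors in the reverse
 * order of enable.
 */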
7397 static inline void tg3_netif_stop(struct tg3 *tp)
7399 netif_trans_update(tp->dev); /* prevent tx timeout */
7400 tg3_napi_disable(tp);
7401 netif_carrier_off(tp->dev);
7402 netif_tx_disable(tp->dev);
7405 /* tp->lock must be held */
7406 static inline void tg3_netif_start(struct tg3 *tp)
7410 /* NOTE: unconditional netif_tx_wake_all_queues is only
7411 * appropriate so long as all callers are assured to
7412 * have free tx slots (such as after tg3_init_hw)
7414 netif_tx_wake_all_queues(tp->dev);
7417 netif_carrier_on(tp->dev);
7419 tg3_napi_enable(tp);
7420 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7421 tg3_enable_ints(tp);
7424 static void tg3_irq_quiesce(struct tg3 *tp)
7425 __releases(tp->lock)
7426 __acquires(tp->lock)
7430 BUG_ON(tp->irq_sync);
7435 spin_unlock_bh(&tp->lock);
7437 for (i = 0; i < tp->irq_cnt; i++)
7438 synchronize_irq(tp->napi[i].irq_vec);
7440 spin_lock_bh(&tp->lock);
7443 /* Fully shut down all tg3 driver activity elsewhere in the system.
7444 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7445 * with as well. Most of the time, this is not necessary except when
7446 * shutting down the device.
7448 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7450 spin_lock_bh(&tp->lock);
7452 tg3_irq_quiesce(tp);
7455 static inline void tg3_full_unlock(struct tg3 *tp)
7457 spin_unlock_bh(&tp->lock);
7460 /* One-shot MSI handler - Chip automatically disables interrupt
7461 * after sending MSI so driver doesn't have to do it.
7463 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7465 struct tg3_napi *tnapi = dev_id;
7466 struct tg3 *tp = tnapi->tp;
7468 prefetch(tnapi->hw_status);
7470 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7472 if (likely(!tg3_irq_sync(tp)))
7473 napi_schedule(&tnapi->napi);
7478 /* MSI ISR - No need to check for interrupt sharing and no need to
7479 * flush status block and interrupt mailbox. PCI ordering rules
7480 * guarantee that MSI will arrive after the status block.
7482 static irqreturn_t tg3_msi(int irq, void *dev_id)
7484 struct tg3_napi *tnapi = dev_id;
7485 struct tg3 *tp = tnapi->tp;
7487 prefetch(tnapi->hw_status);
7489 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7491 * Writing any value to intr-mbox-0 clears PCI INTA# and
7492 * chip-internal interrupt pending events.
7493 * Writing non-zero to intr-mbox-0 additionally tells the
7494 * NIC to stop sending us irqs, engaging "in-intr-handler"
7497 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7498 if (likely(!tg3_irq_sync(tp)))
7499 napi_schedule(&tnapi->napi);
7501 return IRQ_RETVAL(1);
7504 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7506 struct tg3_napi *tnapi = dev_id;
7507 struct tg3 *tp = tnapi->tp;
7508 struct tg3_hw_status *sblk = tnapi->hw_status;
7509 unsigned int handled = 1;
7511 /* In INTx mode, it is possible for the interrupt to arrive at
7512 * the CPU before the status block posted prior to the interrupt.
7513 * Reading the PCI State register will confirm whether the
7514 * interrupt is ours and will flush the status block.
7516 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7517 if (tg3_flag(tp, CHIP_RESETTING) ||
7518 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7525 * Writing any value to intr-mbox-0 clears PCI INTA# and
7526 * chip-internal interrupt pending events.
7527 * Writing non-zero to intr-mbox-0 additionally tells the
7528 * NIC to stop sending us irqs, engaging "in-intr-handler"
7531 * Flush the mailbox to de-assert the IRQ immediately to prevent
7532 * spurious interrupts. The flush impacts performance but
7533 * excessive spurious interrupts can be worse in some cases.
7535 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7536 if (tg3_irq_sync(tp))
7538 sblk->status &= ~SD_STATUS_UPDATED;
7539 if (likely(tg3_has_work(tnapi))) {
7540 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7541 napi_schedule(&tnapi->napi);
7543 /* No work, shared interrupt perhaps? Re-enable
7544 * interrupts, and flush that PCI write
7546 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7550 return IRQ_RETVAL(handled);
7553 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7555 struct tg3_napi *tnapi = dev_id;
7556 struct tg3 *tp = tnapi->tp;
7557 struct tg3_hw_status *sblk = tnapi->hw_status;
7558 unsigned int handled = 1;
7560 /* In INTx mode, it is possible for the interrupt to arrive at
7561 * the CPU before the status block posted prior to the interrupt.
7562 * Reading the PCI State register will confirm whether the
7563 * interrupt is ours and will flush the status block.
7565 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7566 if (tg3_flag(tp, CHIP_RESETTING) ||
7567 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7574 * Writing any value to intr-mbox-0 clears PCI INTA# and
7575 * chip-internal interrupt pending events.
7576 * Writing non-zero to intr-mbox-0 additionally tells the
7577 * NIC to stop sending us irqs, engaging "in-intr-handler"
7580 * Flush the mailbox to de-assert the IRQ immediately to prevent
7581 * spurious interrupts. The flush impacts performance but
7582 * excessive spurious interrupts can be worse in some cases.
7584 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7587 * In a shared interrupt configuration, sometimes other devices'
7588 * interrupts will scream. We record the current status tag here
7589 * so that the above check can report that the screaming interrupts
7590 * are unhandled. Eventually they will be silenced.
7592 tnapi->last_irq_tag = sblk->status_tag;
7594 if (tg3_irq_sync(tp))
7597 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7599 napi_schedule(&tnapi->napi);
7602 return IRQ_RETVAL(handled);
7605 /* ISR for interrupt test */
7606 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7608 struct tg3_napi *tnapi = dev_id;
7609 struct tg3 *tp = tnapi->tp;
7610 struct tg3_hw_status *sblk = tnapi->hw_status;
7612 if ((sblk->status & SD_STATUS_UPDATED) ||
7613 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7614 tg3_disable_ints(tp);
7615 return IRQ_RETVAL(1);
7617 return IRQ_RETVAL(0);
7620 #ifdef CONFIG_NET_POLL_CONTROLLER
7621 static void tg3_poll_controller(struct net_device *dev)
7624 struct tg3 *tp = netdev_priv(dev);
7626 if (tg3_irq_sync(tp))
7629 for (i = 0; i < tp->irq_cnt; i++)
7630 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7634 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7636 struct tg3 *tp = netdev_priv(dev);
7638 if (netif_msg_tx_err(tp)) {
7639 netdev_err(dev, "transmit timed out, resetting\n");
7643 tg3_reset_task_schedule(tp);
7646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7649 u32 base = (u32) mapping & 0xffffffff;
7651 return base + len + 8 < base;
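/* Worked example: mapping = 0xfffffff8 and len = 16 give
 * base + len + 8 = 0x100000010, which truncates to 0x10 in 32-bit
 * arithmetic; 0x10 < 0xfffffff8, so the sum wrapped and the buffer
 * straddles a 4GB boundary.  The extra 8 bytes make the test
 * conservative, presumably to cover the chip touching slightly past
 * the end of the buffer.
 */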
7654 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7655 * of any 4GB boundaries: 4G, 8G, etc
7657 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7660 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7661 u32 base = (u32) mapping & 0xffffffff;
7663 return ((base + len + (mss & 0x3fff)) < base);
7668 /* Test for DMA addresses > 40-bit */
7669 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7672 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7673 if (tg3_flag(tp, 40BIT_DMA_BUG))
7674 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7681 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7682 dma_addr_t mapping, u32 len, u32 flags,
7685 txbd->addr_hi = ((u64) mapping >> 32);
7686 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7687 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7688 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
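/* Example of the packing above (illustrative values): mapping =
 * 0x123456780 and len = 1514 give addr_hi = 0x1 and
 * addr_lo = 0x23456780; len_flags carries the length above the low
 * 16 flag bits, and vlan_tag packs the mss and VLAN tag fields
 * together -- one complete tx buffer descriptor for the chip.
 */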
7691 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7692 dma_addr_t map, u32 len, u32 flags,
7695 struct tg3 *tp = tnapi->tp;
7698 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7701 if (tg3_4g_overflow_test(map, len))
7704 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7707 if (tg3_40bit_overflow_test(tp, map, len))
7710 if (tp->dma_limit) {
7711 u32 prvidx = *entry;
7712 u32 tmp_flag = flags & ~TXD_FLAG_END;
7713 while (len > tp->dma_limit && *budget) {
7714 u32 frag_len = tp->dma_limit;
7715 len -= tp->dma_limit;
7717 /* Avoid the 8byte DMA problem */
7719 len += tp->dma_limit / 2;
7720 frag_len = tp->dma_limit / 2;
7723 tnapi->tx_buffers[*entry].fragmented = true;
7725 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7726 frag_len, tmp_flag, mss, vlan);
7729 *entry = NEXT_TX(*entry);
7736 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7737 len, flags, mss, vlan);
7739 *entry = NEXT_TX(*entry);
7742 tnapi->tx_buffers[prvidx].fragmented = false;
7746 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7747 len, flags, mss, vlan);
7748 *entry = NEXT_TX(*entry);
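/* Example of the dma_limit splitting above (illustrative numbers):
 * with dma_limit = 4096 and a 10240-byte fragment, the loop emits two
 * 4096-byte BDs and leaves 2048 bytes for the tail descriptor.  When
 * a split would leave a tail of 8 bytes or less, the last full chunk
 * is halved instead -- a 4104-byte fragment becomes 2048 + 2056 -- so
 * no descriptor lands in the problematic 8-byte-or-less range.
 */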
7754 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7757 struct sk_buff *skb;
7758 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7763 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7764 skb_headlen(skb), DMA_TO_DEVICE);
7766 while (txb->fragmented) {
7767 txb->fragmented = false;
7768 entry = NEXT_TX(entry);
7769 txb = &tnapi->tx_buffers[entry];
7772 for (i = 0; i <= last; i++) {
7773 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7775 entry = NEXT_TX(entry);
7776 txb = &tnapi->tx_buffers[entry];
7778 dma_unmap_page(&tnapi->tp->pdev->dev,
7779 dma_unmap_addr(txb, mapping),
7780 skb_frag_size(frag), DMA_TO_DEVICE);
7782 while (txb->fragmented) {
7783 txb->fragmented = false;
7784 entry = NEXT_TX(entry);
7785 txb = &tnapi->tx_buffers[entry];
7790 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7792 struct sk_buff **pskb,
7793 u32 *entry, u32 *budget,
7794 u32 base_flags, u32 mss, u32 vlan)
7796 struct tg3 *tp = tnapi->tp;
7797 struct sk_buff *new_skb, *skb = *pskb;
7798 dma_addr_t new_addr = 0;
7801 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7802 new_skb = skb_copy(skb, GFP_ATOMIC);
7804 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7806 new_skb = skb_copy_expand(skb,
7807 skb_headroom(skb) + more_headroom,
7808 skb_tailroom(skb), GFP_ATOMIC);
7814 /* New SKB is guaranteed to be linear. */
7815 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7816 new_skb->len, DMA_TO_DEVICE);
7817 /* Make sure the mapping succeeded */
7818 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7819 dev_kfree_skb_any(new_skb);
7822 u32 save_entry = *entry;
7824 base_flags |= TXD_FLAG_END;
7826 tnapi->tx_buffers[*entry].skb = new_skb;
7827 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7830 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7831 new_skb->len, base_flags,
7833 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7834 dev_kfree_skb_any(new_skb);
7840 dev_consume_skb_any(skb);
7845 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7847 /* Check if we will never have enough descriptors,
7848 * as gso_segs can exceed the current ring size.
7850 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
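/* By example: with tx_pending = 511 this permits the GSO fallback
 * only for skbs with fewer than 170 segments.  Each segment can cost
 * up to three descriptors in the worst case (see frag_cnt_est in
 * tg3_tso_bug() below), so anything larger could never fit in the
 * ring even when it is completely empty.
 */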
7853 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7855 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7856 * indicated in tg3_tx_frag_set()
7858 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7859 struct netdev_queue *txq, struct sk_buff *skb)
7861 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7862 struct sk_buff *segs, *seg, *next;
7864 /* Estimate the number of fragments in the worst case */
7865 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7866 netif_tx_stop_queue(txq);
7868 /* netif_tx_stop_queue() must be done before checking
7869 * the tx index in tg3_tx_avail() below, because in
7870 * tg3_tx(), we update tx index before checking for
7871 * netif_tx_queue_stopped().
7874 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7875 return NETDEV_TX_BUSY;
7877 netif_tx_wake_queue(txq);
7880 segs = skb_gso_segment(skb, tp->dev->features &
7881 ~(NETIF_F_TSO | NETIF_F_TSO6));
7882 if (IS_ERR(segs) || !segs)
7883 goto tg3_tso_bug_end;
7885 skb_list_walk_safe(segs, seg, next) {
7886 skb_mark_not_on_list(seg);
7887 tg3_start_xmit(seg, tp->dev);
7891 dev_consume_skb_any(skb);
7893 return NETDEV_TX_OK;
7896 /* hard_start_xmit for all devices */
7897 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7899 struct tg3 *tp = netdev_priv(dev);
7900 u32 len, entry, base_flags, mss, vlan = 0;
7902 int i = -1, would_hit_hwbug;
7904 struct tg3_napi *tnapi;
7905 struct netdev_queue *txq;
7907 struct iphdr *iph = NULL;
7908 struct tcphdr *tcph = NULL;
7909 __sum16 tcp_csum = 0, ip_csum = 0;
7910 __be16 ip_tot_len = 0;
7912 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7913 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7914 if (tg3_flag(tp, ENABLE_TSS))
7917 budget = tg3_tx_avail(tnapi);
7919 /* We are running in BH disabled context with netif_tx_lock
7920 * and TX reclaim runs via tp->napi.poll inside of a software
7921 * interrupt. Furthermore, IRQ processing runs lockless so we have
7922 * no IRQ context deadlocks to worry about either. Rejoice!
7924 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7925 if (!netif_tx_queue_stopped(txq)) {
7926 netif_tx_stop_queue(txq);
7928 /* This is a hard error, log it. */
7930 "BUG! Tx Ring full when queue awake!\n");
7932 return NETDEV_TX_BUSY;
7935 entry = tnapi->tx_prod;
7938 mss = skb_shinfo(skb)->gso_size;
7940 u32 tcp_opt_len, hdr_len;
7942 if (skb_cow_head(skb, 0))
7946 tcp_opt_len = tcp_optlen(skb);
7948 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7950 /* HW/FW cannot correctly segment packets that have been
7951 * vlan encapsulated.
7953 if (skb->protocol == htons(ETH_P_8021Q) ||
7954 skb->protocol == htons(ETH_P_8021AD)) {
7955 if (tg3_tso_bug_gso_check(tnapi, skb))
7956 return tg3_tso_bug(tp, tnapi, txq, skb);
7960 if (!skb_is_gso_v6(skb)) {
7961 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7962 tg3_flag(tp, TSO_BUG)) {
7963 if (tg3_tso_bug_gso_check(tnapi, skb))
7964 return tg3_tso_bug(tp, tnapi, txq, skb);
7967 ip_csum = iph->check;
7968 ip_tot_len = iph->tot_len;
7970 iph->tot_len = htons(mss + hdr_len);
7973 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7974 TXD_FLAG_CPU_POST_DMA);
7976 tcph = tcp_hdr(skb);
7977 tcp_csum = tcph->check;
7979 if (tg3_flag(tp, HW_TSO_1) ||
7980 tg3_flag(tp, HW_TSO_2) ||
7981 tg3_flag(tp, HW_TSO_3)) {
7983 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7985 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7989 if (tg3_flag(tp, HW_TSO_3)) {
7990 mss |= (hdr_len & 0xc) << 12;
7992 base_flags |= 0x00000010;
7993 base_flags |= (hdr_len & 0x3e0) << 5;
7994 } else if (tg3_flag(tp, HW_TSO_2))
7995 mss |= hdr_len << 9;
7996 else if (tg3_flag(tp, HW_TSO_1) ||
7997 tg3_asic_rev(tp) == ASIC_REV_5705) {
7998 if (tcp_opt_len || iph->ihl > 5) {
8001 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8002 mss |= (tsflags << 11);
8005 if (tcp_opt_len || iph->ihl > 5) {
8008 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8009 base_flags |= tsflags << 12;
8012 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8013 /* HW/FW cannot correctly checksum packets that have been
8014 * vlan encapsulated.
8016 if (skb->protocol == htons(ETH_P_8021Q) ||
8017 skb->protocol == htons(ETH_P_8021AD)) {
8018 if (skb_checksum_help(skb))
8021 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8025 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8026 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8027 base_flags |= TXD_FLAG_JMB_PKT;
8029 if (skb_vlan_tag_present(skb)) {
8030 base_flags |= TXD_FLAG_VLAN;
8031 vlan = skb_vlan_tag_get(skb);
8034 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8035 tg3_flag(tp, TX_TSTAMP_EN)) {
8036 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8037 base_flags |= TXD_FLAG_HWTSTAMP;
8040 len = skb_headlen(skb);
8042 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8044 if (dma_mapping_error(&tp->pdev->dev, mapping))
8048 tnapi->tx_buffers[entry].skb = skb;
8049 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8051 would_hit_hwbug = 0;
8053 if (tg3_flag(tp, 5701_DMA_BUG))
8054 would_hit_hwbug = 1;
8056 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8057 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8059 would_hit_hwbug = 1;
8060 } else if (skb_shinfo(skb)->nr_frags > 0) {
8063 if (!tg3_flag(tp, HW_TSO_1) &&
8064 !tg3_flag(tp, HW_TSO_2) &&
8065 !tg3_flag(tp, HW_TSO_3))
8068 /* Now loop through additional data
8069 * fragments, and queue them.
8071 last = skb_shinfo(skb)->nr_frags - 1;
8072 for (i = 0; i <= last; i++) {
8073 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8075 len = skb_frag_size(frag);
8076 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8077 len, DMA_TO_DEVICE);
8079 tnapi->tx_buffers[entry].skb = NULL;
8080 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8082 if (dma_mapping_error(&tp->pdev->dev, mapping))
8086 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8088 ((i == last) ? TXD_FLAG_END : 0),
8090 would_hit_hwbug = 1;
8096 if (would_hit_hwbug) {
8097 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8099 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8100 /* If it's a TSO packet, do GSO instead of
8101 * allocating and copying to a large linear SKB
8104 iph->check = ip_csum;
8105 iph->tot_len = ip_tot_len;
8107 tcph->check = tcp_csum;
8108 return tg3_tso_bug(tp, tnapi, txq, skb);
8111 /* If the workaround fails due to memory/mapping
8112 * failure, silently drop this packet.
8114 entry = tnapi->tx_prod;
8115 budget = tg3_tx_avail(tnapi);
8116 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8117 base_flags, mss, vlan))
8121 skb_tx_timestamp(skb);
8122 netdev_tx_sent_queue(txq, skb->len);
8124 /* Sync BD data before updating mailbox */
8127 tnapi->tx_prod = entry;
8128 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8129 netif_tx_stop_queue(txq);
8131 /* netif_tx_stop_queue() must be done before checking
8132 * the tx index in tg3_tx_avail() below, because in
8133 * tg3_tx(), we update tx index before checking for
8134 * netif_tx_queue_stopped().
8137 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8138 netif_tx_wake_queue(txq);
8141 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8142 /* Packets are ready, update Tx producer idx on card. */
8143 tw32_tx_mbox(tnapi->prodmbox, entry);
8146 return NETDEV_TX_OK;
8149 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8150 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8152 dev_kfree_skb_any(skb);
8155 return NETDEV_TX_OK;
8158 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8161 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8162 MAC_MODE_PORT_MODE_MASK);
8164 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8166 if (!tg3_flag(tp, 5705_PLUS))
8167 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8169 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8170 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8172 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8174 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8176 if (tg3_flag(tp, 5705_PLUS) ||
8177 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8178 tg3_asic_rev(tp) == ASIC_REV_5700)
8179 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8182 tw32(MAC_MODE, tp->mac_mode);
8186 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8188 u32 val, bmcr, mac_mode, ptest = 0;
8190 tg3_phy_toggle_apd(tp, false);
8191 tg3_phy_toggle_automdix(tp, false);
8193 if (extlpbk && tg3_phy_set_extloopbk(tp))
8196 bmcr = BMCR_FULLDPLX;
8201 bmcr |= BMCR_SPEED100;
8205 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8207 bmcr |= BMCR_SPEED100;
8210 bmcr |= BMCR_SPEED1000;
8215 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8216 tg3_readphy(tp, MII_CTRL1000, &val);
8217 val |= CTL1000_AS_MASTER |
8218 CTL1000_ENABLE_MASTER;
8219 tg3_writephy(tp, MII_CTRL1000, val);
8221 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8222 MII_TG3_FET_PTEST_TRIM_2;
8223 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8226 bmcr |= BMCR_LOOPBACK;
8228 tg3_writephy(tp, MII_BMCR, bmcr);
8230 /* The write needs to be flushed for the FETs */
8231 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8232 tg3_readphy(tp, MII_BMCR, &bmcr);
8236 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8237 tg3_asic_rev(tp) == ASIC_REV_5785) {
8238 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8239 MII_TG3_FET_PTEST_FRC_TX_LINK |
8240 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8242 /* The write needs to be flushed for the AC131 */
8243 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8246 /* Reset to prevent losing 1st rx packet intermittently */
8247 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8248 tg3_flag(tp, 5780_CLASS)) {
8249 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8251 tw32_f(MAC_RX_MODE, tp->rx_mode);
8254 mac_mode = tp->mac_mode &
8255 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8256 if (speed == SPEED_1000)
8257 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8259 mac_mode |= MAC_MODE_PORT_MODE_MII;
8261 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8262 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8264 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8265 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8266 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8267 mac_mode |= MAC_MODE_LINK_POLARITY;
8269 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8270 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8273 tw32(MAC_MODE, mac_mode);
8279 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8281 struct tg3 *tp = netdev_priv(dev);
8283 if (features & NETIF_F_LOOPBACK) {
8284 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8287 spin_lock_bh(&tp->lock);
8288 tg3_mac_loopback(tp, true);
8289 netif_carrier_on(tp->dev);
8290 spin_unlock_bh(&tp->lock);
8291 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8293 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8296 spin_lock_bh(&tp->lock);
8297 tg3_mac_loopback(tp, false);
8298 /* Force link status check */
8299 tg3_setup_phy(tp, true);
8300 spin_unlock_bh(&tp->lock);
8301 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8305 static netdev_features_t tg3_fix_features(struct net_device *dev,
8306 netdev_features_t features)
8308 struct tg3 *tp = netdev_priv(dev);
8310 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8311 features &= ~NETIF_F_ALL_TSO;
8316 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8318 netdev_features_t changed = dev->features ^ features;
8320 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8321 tg3_set_loopback(dev, features);
8326 static void tg3_rx_prodring_free(struct tg3 *tp,
8327 struct tg3_rx_prodring_set *tpr)
8331 if (tpr != &tp->napi[0].prodring) {
8332 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8333 i = (i + 1) & tp->rx_std_ring_mask)
8334 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8337 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8338 for (i = tpr->rx_jmb_cons_idx;
8339 i != tpr->rx_jmb_prod_idx;
8340 i = (i + 1) & tp->rx_jmb_ring_mask) {
8341 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8349 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8350 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8353 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8354 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8355 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8360 /* Initialize rx rings for packet processing.
8362 * The chip has been shut down and the driver detached from
8363 * the network stack, so no interrupts or new tx packets will
8364 * end up in the driver. tp->{tx,}lock are held and thus
8367 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8368 struct tg3_rx_prodring_set *tpr)
8370 u32 i, rx_pkt_dma_sz;
8372 tpr->rx_std_cons_idx = 0;
8373 tpr->rx_std_prod_idx = 0;
8374 tpr->rx_jmb_cons_idx = 0;
8375 tpr->rx_jmb_prod_idx = 0;
8377 if (tpr != &tp->napi[0].prodring) {
8378 memset(&tpr->rx_std_buffers[0], 0,
8379 TG3_RX_STD_BUFF_RING_SIZE(tp));
8380 if (tpr->rx_jmb_buffers)
8381 memset(&tpr->rx_jmb_buffers[0], 0,
8382 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8386 /* Zero out all descriptors. */
8387 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8389 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8390 if (tg3_flag(tp, 5780_CLASS) &&
8391 tp->dev->mtu > ETH_DATA_LEN)
8392 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8393 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8395 /* Initialize invariants of the rings; we only set this
8396 * stuff once. This works because the card does not
8397 * write into the rx buffer posting rings.
8399 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8400 struct tg3_rx_buffer_desc *rxd;
8402 rxd = &tpr->rx_std[i];
8403 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8404 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8405 rxd->opaque = (RXD_OPAQUE_RING_STD |
8406 (i << RXD_OPAQUE_INDEX_SHIFT));
8409 /* Now allocate fresh SKBs for each rx ring. */
8410 for (i = 0; i < tp->rx_pending; i++) {
8411 unsigned int frag_size;
8413 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8415 netdev_warn(tp->dev,
8416 "Using a smaller RX standard ring. Only "
8417 "%d out of %d buffers were allocated "
8418 "successfully\n", i, tp->rx_pending);
8426 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8429 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8431 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8434 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8435 struct tg3_rx_buffer_desc *rxd;
8437 rxd = &tpr->rx_jmb[i].std;
8438 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8439 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8441 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8442 (i << RXD_OPAQUE_INDEX_SHIFT));
8445 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8446 unsigned int frag_size;
8448 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8450 netdev_warn(tp->dev,
8451 "Using a smaller RX jumbo ring. Only %d "
8452 "out of %d buffers were allocated "
8453 "successfully\n", i, tp->rx_jumbo_pending);
8456 tp->rx_jumbo_pending = i;
8465 tg3_rx_prodring_free(tp, tpr);
8469 static void tg3_rx_prodring_fini(struct tg3 *tp,
8470 struct tg3_rx_prodring_set *tpr)
8472 kfree(tpr->rx_std_buffers);
8473 tpr->rx_std_buffers = NULL;
8474 kfree(tpr->rx_jmb_buffers);
8475 tpr->rx_jmb_buffers = NULL;
8477 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8478 tpr->rx_std, tpr->rx_std_mapping);
8482 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8483 tpr->rx_jmb, tpr->rx_jmb_mapping);
8488 static int tg3_rx_prodring_init(struct tg3 *tp,
8489 struct tg3_rx_prodring_set *tpr)
8491 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8493 if (!tpr->rx_std_buffers)
8496 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8497 TG3_RX_STD_RING_BYTES(tp),
8498 &tpr->rx_std_mapping,
8503 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8504 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8506 if (!tpr->rx_jmb_buffers)
8509 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8510 TG3_RX_JMB_RING_BYTES(tp),
8511 &tpr->rx_jmb_mapping,
8520 tg3_rx_prodring_fini(tp, tpr);
8524 /* Free up pending packets in all rx/tx rings.
8526 * The chip has been shut down and the driver detached from
8527 * the network stack, so no interrupts or new tx packets will
8528 * end up in the driver. tp->{tx,}lock is not held and we are not
8529 * in an interrupt context and thus may sleep.
8531 static void tg3_free_rings(struct tg3 *tp)
8535 for (j = 0; j < tp->irq_cnt; j++) {
8536 struct tg3_napi *tnapi = &tp->napi[j];
8538 tg3_rx_prodring_free(tp, &tnapi->prodring);
8540 if (!tnapi->tx_buffers)
8543 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8544 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8549 tg3_tx_skb_unmap(tnapi, i,
8550 skb_shinfo(skb)->nr_frags - 1);
8552 dev_consume_skb_any(skb);
8554 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8558 /* Initialize tx/rx rings for packet processing.
8560 * The chip has been shut down and the driver detached from
8561 * the network stack, so no interrupts or new tx packets will
8562 * end up in the driver. tp->{tx,}lock are held and thus
8565 static int tg3_init_rings(struct tg3 *tp)
8569 /* Free up all the SKBs. */
8572 for (i = 0; i < tp->irq_cnt; i++) {
8573 struct tg3_napi *tnapi = &tp->napi[i];
8575 tnapi->last_tag = 0;
8576 tnapi->last_irq_tag = 0;
8577 tnapi->hw_status->status = 0;
8578 tnapi->hw_status->status_tag = 0;
8579 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8584 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8586 tnapi->rx_rcb_ptr = 0;
8588 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8590 if (tnapi->prodring.rx_std &&
8591 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8600 static void tg3_mem_tx_release(struct tg3 *tp)
8604 for (i = 0; i < tp->irq_max; i++) {
8605 struct tg3_napi *tnapi = &tp->napi[i];
8607 if (tnapi->tx_ring) {
8608 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8609 tnapi->tx_ring, tnapi->tx_desc_mapping);
8610 tnapi->tx_ring = NULL;
8613 kfree(tnapi->tx_buffers);
8614 tnapi->tx_buffers = NULL;
8618 static int tg3_mem_tx_acquire(struct tg3 *tp)
8621 struct tg3_napi *tnapi = &tp->napi[0];
8623 /* If multivector TSS is enabled, vector 0 does not handle
8624 * tx interrupts. Don't allocate any resources for it.
8626 if (tg3_flag(tp, ENABLE_TSS))
8629 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8630 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8631 sizeof(struct tg3_tx_ring_info),
8633 if (!tnapi->tx_buffers)
8636 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8638 &tnapi->tx_desc_mapping,
8640 if (!tnapi->tx_ring)
8647 tg3_mem_tx_release(tp);
8651 static void tg3_mem_rx_release(struct tg3 *tp)
8655 for (i = 0; i < tp->irq_max; i++) {
8656 struct tg3_napi *tnapi = &tp->napi[i];
8658 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8663 dma_free_coherent(&tp->pdev->dev,
8664 TG3_RX_RCB_RING_BYTES(tp),
8666 tnapi->rx_rcb_mapping);
8667 tnapi->rx_rcb = NULL;
8671 static int tg3_mem_rx_acquire(struct tg3 *tp)
8673 unsigned int i, limit;
8675 limit = tp->rxq_cnt;
8677 /* If RSS is enabled, we need a (dummy) producer ring
8678 * set on vector zero. This is the true hw prodring.
8680 if (tg3_flag(tp, ENABLE_RSS))
8683 for (i = 0; i < limit; i++) {
8684 struct tg3_napi *tnapi = &tp->napi[i];
8686 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8689 /* If multivector RSS is enabled, vector 0
8690 * does not handle rx or tx interrupts.
8691 * Don't allocate any resources for it.
8693 if (!i && tg3_flag(tp, ENABLE_RSS))
8696 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8697 TG3_RX_RCB_RING_BYTES(tp),
8698 &tnapi->rx_rcb_mapping,
8707 tg3_mem_rx_release(tp);
8712 * Must not be invoked with interrupt sources disabled and
8713 * the hardware shut down.
8715 static void tg3_free_consistent(struct tg3 *tp)
8719 for (i = 0; i < tp->irq_cnt; i++) {
8720 struct tg3_napi *tnapi = &tp->napi[i];
8722 if (tnapi->hw_status) {
8723 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8725 tnapi->status_mapping);
8726 tnapi->hw_status = NULL;
8730 tg3_mem_rx_release(tp);
8731 tg3_mem_tx_release(tp);
8733 /* tp->hw_stats can be referenced safely:
8734 * 1. under rtnl_lock
8735 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8738 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8739 tp->hw_stats, tp->stats_mapping);
8740 tp->hw_stats = NULL;
8745 * Must not be invoked with interrupt sources disabled and
8746 * the hardware shut down. Can sleep.
8748 static int tg3_alloc_consistent(struct tg3 *tp)
8752 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8753 sizeof(struct tg3_hw_stats),
8754 &tp->stats_mapping, GFP_KERNEL);
8758 for (i = 0; i < tp->irq_cnt; i++) {
8759 struct tg3_napi *tnapi = &tp->napi[i];
8760 struct tg3_hw_status *sblk;
8762 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8764 &tnapi->status_mapping,
8766 if (!tnapi->hw_status)
8769 sblk = tnapi->hw_status;
8771 if (tg3_flag(tp, ENABLE_RSS)) {
8772 u16 *prodptr = NULL;
8775 * When RSS is enabled, the status block format changes
8776 * slightly. The "rx_jumbo_consumer", "reserved",
8777 * and "rx_mini_consumer" members get mapped to the
8778 * other three rx return ring producer indexes.
8782 prodptr = &sblk->idx[0].rx_producer;
8785 prodptr = &sblk->rx_jumbo_consumer;
8788 prodptr = &sblk->reserved;
8791 prodptr = &sblk->rx_mini_consumer;
8794 tnapi->rx_rcb_prod_idx = prodptr;
8796 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8800 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8806 tg3_free_consistent(tp);
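/* The RSS prodptr remapping in tg3_alloc_consistent() above, in
 * table form (vector -> status block member reused as that vector's
 * rx return ring producer index):
 *
 *	vector 1:	idx[0].rx_producer	(the standard slot)
 *	vector 2:	rx_jumbo_consumer
 *	vector 3:	reserved
 *	vector 4:	rx_mini_consumer
 *
 * The last three members are free for reuse presumably because the
 * jumbo and mini rings are not serviced through these vectors.
 */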
8810 #define MAX_WAIT_CNT 1000
8812 /* To stop a block, clear the enable bit and poll until it
8813 * clears. tp->lock is held.
8815 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8820 if (tg3_flag(tp, 5705_PLUS)) {
8827 /* We can't enable/disable these bits of the
8828 * 5705/5750, so just say success.
8841 for (i = 0; i < MAX_WAIT_CNT; i++) {
8842 if (pci_channel_offline(tp->pdev)) {
8843 dev_err(&tp->pdev->dev,
8844 "tg3_stop_block device offline, "
8845 "ofs=%lx enable_bit=%x\n",
8852 if ((val & enable_bit) == 0)
8856 if (i == MAX_WAIT_CNT && !silent) {
8857 dev_err(&tp->pdev->dev,
8858 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8866 /* tp->lock is held. */
8867 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8871 tg3_disable_ints(tp);
8873 if (pci_channel_offline(tp->pdev)) {
8874 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8875 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8880 tp->rx_mode &= ~RX_MODE_ENABLE;
8881 tw32_f(MAC_RX_MODE, tp->rx_mode);
8884 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8885 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8886 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8887 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8889 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8891 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8892 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8893 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8894 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8895 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8896 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8897 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8899 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8900 tw32_f(MAC_MODE, tp->mac_mode);
8903 tp->tx_mode &= ~TX_MODE_ENABLE;
8904 tw32_f(MAC_TX_MODE, tp->tx_mode);
8906 for (i = 0; i < MAX_WAIT_CNT; i++) {
8908 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8911 if (i >= MAX_WAIT_CNT) {
8912 dev_err(&tp->pdev->dev,
8913 "%s timed out, TX_MODE_ENABLE will not clear "
8914 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8918 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8919 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8920 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8922 tw32(FTQ_RESET, 0xffffffff);
8923 tw32(FTQ_RESET, 0x00000000);
8925 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8926 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8929 for (i = 0; i < tp->irq_cnt; i++) {
8930 struct tg3_napi *tnapi = &tp->napi[i];
8931 if (tnapi->hw_status)
8932 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8938 /* Save PCI command register before chip reset */
8939 static void tg3_save_pci_state(struct tg3 *tp)
8941 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8944 /* Restore PCI state after chip reset */
8945 static void tg3_restore_pci_state(struct tg3 *tp)
8949 /* Re-enable indirect register accesses. */
8950 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8951 tp->misc_host_ctrl);
8953 /* Set MAX PCI retry to zero. */
8954 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8955 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8956 tg3_flag(tp, PCIX_MODE))
8957 val |= PCISTATE_RETRY_SAME_DMA;
8958 /* Allow reads and writes to the APE register and memory space. */
8959 if (tg3_flag(tp, ENABLE_APE))
8960 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8961 PCISTATE_ALLOW_APE_SHMEM_WR |
8962 PCISTATE_ALLOW_APE_PSPACE_WR;
8963 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8965 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8967 if (!tg3_flag(tp, PCI_EXPRESS)) {
8968 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8969 tp->pci_cacheline_sz);
8970 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8974 /* Make sure PCI-X relaxed ordering bit is clear. */
8975 if (tg3_flag(tp, PCIX_MODE)) {
8978 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8980 pcix_cmd &= ~PCI_X_CMD_ERO;
8981 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8985 if (tg3_flag(tp, 5780_CLASS)) {
8987 /* Chip reset on 5780 will reset MSI enable bit,
8988 * so need to restore it.
8990 if (tg3_flag(tp, USING_MSI)) {
8993 pci_read_config_word(tp->pdev,
8994 tp->msi_cap + PCI_MSI_FLAGS,
8996 pci_write_config_word(tp->pdev,
8997 tp->msi_cap + PCI_MSI_FLAGS,
8998 ctrl | PCI_MSI_FLAGS_ENABLE);
8999 val = tr32(MSGINT_MODE);
9000 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9005 static void tg3_override_clk(struct tg3 *tp)
9009 switch (tg3_asic_rev(tp)) {
9011 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9012 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9013 TG3_CPMU_MAC_ORIDE_ENABLE);
9018 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9026 static void tg3_restore_clk(struct tg3 *tp)
9030 switch (tg3_asic_rev(tp)) {
9032 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9033 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9034 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9039 val = tr32(TG3_CPMU_CLCK_ORIDE);
9040 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9048 /* tp->lock is held. */
9049 static int tg3_chip_reset(struct tg3 *tp)
9050 __releases(tp->lock)
9051 __acquires(tp->lock)
9054 void (*write_op)(struct tg3 *, u32, u32);
9057 if (!pci_device_is_present(tp->pdev))
9062 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9064 /* No matching tg3_nvram_unlock() after this because
9065 * chip reset below will undo the nvram lock.
9067 tp->nvram_lock_cnt = 0;
9069 /* GRC_MISC_CFG core clock reset will clear the memory
9070 * enable bit in PCI register 4 and the MSI enable bit
9071 * on some chips, so we save relevant registers here.
9073 tg3_save_pci_state(tp);
9075 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9076 tg3_flag(tp, 5755_PLUS))
9077 tw32(GRC_FASTBOOT_PC, 0);
9080 * We must avoid the readl() that normally takes place.
9081 * It locks machines, causes machine checks, and other
9082 * fun things. So, temporarily disable the 5701
9083 * hardware workaround while we do the reset.
9085 write_op = tp->write32;
9086 if (write_op == tg3_write_flush_reg32)
9087 tp->write32 = tg3_write32;
9089 /* Prevent the irq handler from reading or writing PCI registers
9090 * during chip reset when the memory enable bit in the PCI command
9092 * register may be cleared. The chip does not generate interrupts
9092 * at this time, but the irq handler may still be called due to irq
9093 * sharing or irqpoll.
9095 tg3_flag_set(tp, CHIP_RESETTING);
9096 for (i = 0; i < tp->irq_cnt; i++) {
9097 struct tg3_napi *tnapi = &tp->napi[i];
9098 if (tnapi->hw_status) {
9099 tnapi->hw_status->status = 0;
9100 tnapi->hw_status->status_tag = 0;
9102 tnapi->last_tag = 0;
9103 tnapi->last_irq_tag = 0;
9107 tg3_full_unlock(tp);
9109 for (i = 0; i < tp->irq_cnt; i++)
9110 synchronize_irq(tp->napi[i].irq_vec);
9112 tg3_full_lock(tp, 0);
9114 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9115 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9116 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9120 val = GRC_MISC_CFG_CORECLK_RESET;
9122 if (tg3_flag(tp, PCI_EXPRESS)) {
9123 /* Force PCIe 1.0a mode */
9124 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9125 !tg3_flag(tp, 57765_PLUS) &&
9126 tr32(TG3_PCIE_PHY_TSTCTL) ==
9127 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9128 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9130 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9131 tw32(GRC_MISC_CFG, (1 << 29));
9136 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9137 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9138 tw32(GRC_VCPU_EXT_CTRL,
9139 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9142 /* Set the clock to the highest frequency to avoid timeouts. In
9143 * link-aware mode the clock speed could be low and the bootcode may
9144 * not complete within the expected time. Override the clock to allow the
9145 * bootcode to finish sooner and then restore it.
9147 tg3_override_clk(tp);
9149 /* Manage gphy power for all CPMU absent PCIe devices. */
9150 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9151 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9153 tw32(GRC_MISC_CFG, val);
9155 /* restore 5701 hardware bug workaround write method */
9156 tp->write32 = write_op;
9158 /* Unfortunately, we have to delay before the PCI read back.
9159 * Some 575X chips will not even respond to a PCI cfg access
9160 * when the reset command is given to the chip.
9162 * How do these hardware designers expect things to work
9163 * properly if the PCI write is posted for a long period
9164 * of time? It is always necessary to have some method by
9165 * which a register read back can occur to push the write
9166 * out which does the reset.
9168 * For most tg3 variants the trick below was working.
9173 /* Flush PCI posted writes. The normal MMIO registers
9174 * are inaccessible at this time so this is the only
9175 * way to do this reliably (actually, this is no longer
9176 * the case, see above). I tried to use indirect
9177 * register read/write but this upset some 5701 variants.
9179 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9183 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9186 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9190 /* Wait for link training to complete. */
9191 for (j = 0; j < 5000; j++)
9194 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9195 pci_write_config_dword(tp->pdev, 0xc4,
9196 cfg_val | (1 << 15));
9199 /* Clear the "no snoop" and "relaxed ordering" bits. */
9200 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9202 * Older PCIe devices only support the 128 byte
9203 * MPS setting. Enforce the restriction.
9205 if (!tg3_flag(tp, CPMU_PRESENT))
9206 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9207 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9209 /* Clear error status */
9210 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9211 PCI_EXP_DEVSTA_CED |
9212 PCI_EXP_DEVSTA_NFED |
9213 PCI_EXP_DEVSTA_FED |
9214 PCI_EXP_DEVSTA_URD);
9217 tg3_restore_pci_state(tp);
9219 tg3_flag_clear(tp, CHIP_RESETTING);
9220 tg3_flag_clear(tp, ERROR_PROCESSED);
9223 if (tg3_flag(tp, 5780_CLASS))
9224 val = tr32(MEMARB_MODE);
9225 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9227 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9229 tw32(0x5000, 0x400);
9232 if (tg3_flag(tp, IS_SSB_CORE)) {
9234 * BCM4785: In order to avoid repercussions from using a
9235 * potentially defective internal ROM, stop the Rx RISC CPU,
9236 * which is not required for normal operation anyway.
9239 tg3_halt_cpu(tp, RX_CPU_BASE);
9242 err = tg3_poll_fw(tp);
9246 tw32(GRC_MODE, tp->grc_mode);
9248 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9251 tw32(0xc4, val | (1 << 15));
9254 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9255 tg3_asic_rev(tp) == ASIC_REV_5705) {
9256 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9257 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9258 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9259 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9262 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9263 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9265 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9266 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9271 tw32_f(MAC_MODE, val);
9274 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9278 if (tg3_flag(tp, PCI_EXPRESS) &&
9279 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9280 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9281 !tg3_flag(tp, 57765_PLUS)) {
9284 tw32(0x7c00, val | (1 << 25));
9287 tg3_restore_clk(tp);
9289 /* Increase the core clock speed to fix tx timeout issue for 5762
9290 * with 100Mbps link speed.
9292 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9293 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9294 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9295 TG3_CPMU_MAC_ORIDE_ENABLE);
9298 /* Reprobe ASF enable state. */
9299 tg3_flag_clear(tp, ENABLE_ASF);
9300 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9301 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9303 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9304 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9305 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9308 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9309 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9310 tg3_flag_set(tp, ENABLE_ASF);
9311 tp->last_event_jiffies = jiffies;
9312 if (tg3_flag(tp, 5750_PLUS))
9313 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9315 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9316 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9317 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9318 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9319 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9326 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9327 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9328 static void __tg3_set_rx_mode(struct net_device *);
9330 /* tp->lock is held. */
9331 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9337 tg3_write_sig_pre_reset(tp, kind);
9339 tg3_abort_hw(tp, silent);
9340 err = tg3_chip_reset(tp);
9342 __tg3_set_mac_addr(tp, false);
9344 tg3_write_sig_legacy(tp, kind);
9345 tg3_write_sig_post_reset(tp, kind);
9348 /* Save the stats across chip resets... */
9349 tg3_get_nstats(tp, &tp->net_stats_prev);
9350 tg3_get_estats(tp, &tp->estats_prev);
9352 /* And make sure the next sample is new data */
9353 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9359 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9361 struct tg3 *tp = netdev_priv(dev);
9362 struct sockaddr *addr = p;
9364 bool skip_mac_1 = false;
9366 if (!is_valid_ether_addr(addr->sa_data))
9367 return -EADDRNOTAVAIL;
9369 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9371 if (!netif_running(dev))
9374 if (tg3_flag(tp, ENABLE_ASF)) {
9375 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9377 addr0_high = tr32(MAC_ADDR_0_HIGH);
9378 addr0_low = tr32(MAC_ADDR_0_LOW);
9379 addr1_high = tr32(MAC_ADDR_1_HIGH);
9380 addr1_low = tr32(MAC_ADDR_1_LOW);
9382 /* Skip MAC addr 1 if ASF is using it. */
9383 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9384 !(addr1_high == 0 && addr1_low == 0))
9387 spin_lock_bh(&tp->lock);
9388 __tg3_set_mac_addr(tp, skip_mac_1);
9389 __tg3_set_rx_mode(dev);
9390 spin_unlock_bh(&tp->lock);
9395 /* tp->lock is held. */
9396 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9397 dma_addr_t mapping, u32 maxlen_flags,
9401 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9402 ((u64) mapping >> 32));
9404 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9405 ((u64) mapping & 0xffffffff));
9407 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9410 if (!tg3_flag(tp, 5705_PLUS))
9412 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
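/* A BDINFO block in NIC SRAM is four 32-bit words, written in order
 * above:
 *
 *	+0x0	host ring DMA address, high 32 bits
 *	+0x4	host ring DMA address, low 32 bits
 *	+0x8	maxlen/flags word
 *	+0xc	NIC-local ring address (pre-5705 chips only)
 */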
9417 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9421 if (!tg3_flag(tp, ENABLE_TSS)) {
9422 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9423 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9424 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9426 tw32(HOSTCC_TXCOL_TICKS, 0);
9427 tw32(HOSTCC_TXMAX_FRAMES, 0);
9428 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9430 for (; i < tp->txq_cnt; i++) {
9433 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9434 tw32(reg, ec->tx_coalesce_usecs);
9435 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9436 tw32(reg, ec->tx_max_coalesced_frames);
9437 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9438 tw32(reg, ec->tx_max_coalesced_frames_irq);
9442 for (; i < tp->irq_max - 1; i++) {
9443 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9444 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9445 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9449 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9452 u32 limit = tp->rxq_cnt;
9454 if (!tg3_flag(tp, ENABLE_RSS)) {
9455 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9456 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9457 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9460 tw32(HOSTCC_RXCOL_TICKS, 0);
9461 tw32(HOSTCC_RXMAX_FRAMES, 0);
9462 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9465 for (; i < limit; i++) {
9468 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9469 tw32(reg, ec->rx_coalesce_usecs);
9470 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9471 tw32(reg, ec->rx_max_coalesced_frames);
9472 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9473 tw32(reg, ec->rx_max_coalesced_frames_irq);
9476 for (; i < tp->irq_max - 1; i++) {
9477 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9478 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9479 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
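/* The per-vector host coalescing registers sit at a fixed 0x18-byte
 * stride, e.g. vector 1's rx tick count lives at
 * HOSTCC_RXCOL_TICKS_VEC1 and vector 2's at that address + 0x18.  The
 * loops above program the active vectors and zero out the remainder.
 */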
9483 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9485 tg3_coal_tx_init(tp, ec);
9486 tg3_coal_rx_init(tp, ec);
9488 if (!tg3_flag(tp, 5705_PLUS)) {
9489 u32 val = ec->stats_block_coalesce_usecs;
9491 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9492 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9497 tw32(HOSTCC_STAT_COAL_TICKS, val);
9501 /* tp->lock is held. */
9502 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9506 /* Disable all transmit rings but the first. */
9507 if (!tg3_flag(tp, 5705_PLUS))
9508 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9509 else if (tg3_flag(tp, 5717_PLUS))
9510 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9511 else if (tg3_flag(tp, 57765_CLASS) ||
9512 tg3_asic_rev(tp) == ASIC_REV_5762)
9513 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9515 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9517 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9518 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9519 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9520 BDINFO_FLAGS_DISABLED);
9523 /* tp->lock is held. */
9524 static void tg3_tx_rcbs_init(struct tg3 *tp)
9527 u32 txrcb = NIC_SRAM_SEND_RCB;
9529 if (tg3_flag(tp, ENABLE_TSS))
9532 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9533 struct tg3_napi *tnapi = &tp->napi[i];
9535 if (!tnapi->tx_ring)
9538 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9539 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9540 NIC_SRAM_TX_BUFFER_DESC);
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}

/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}

static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

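/* Annotation: a worked example of the threshold math above, with
 * illustrative numbers (not taken from any datasheet). If
 * tp->rx_pending = 200, host_rep_thresh = max(200 / 8, 1) = 25, and
 * the programmed RCVBDI_STD_THRESH is the smaller of that and
 * min(bdcache_maxcnt / 2, tp->rx_std_max_post). The jumbo ring uses
 * the same min(cache / 2, pending / 8) rule.
 */
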
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}

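/* Annotation: calc_crc() above is a bit-serial little-endian CRC-32
 * over the buffer (the Ethernet FCS polynomial), i.e. essentially the
 * kernel's ether_crc_le() with a final inversion. Only a few bits of
 * the result are consumed by the multicast hash filter below.
 */
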
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

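/* Annotation: a worked example of the multicast hash mapping above.
 * The low seven bits of the inverted CRC select one of 128 filter
 * bits: bits 6:5 (the "& 0x60" term) pick one of the four 32-bit
 * MAC_HASH_REG_{0..3} registers and bits 4:0 pick the bit within it.
 * E.g. ~crc & 0x7f == 0x43 lands in register 2 (0x43 >> 5), bit 3.
 */
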
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

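/* Annotation: the indirection table is packed eight 4-bit queue
 * indices per 32-bit register, so TG3_RSS_INDIR_TBL_SIZE entries
 * occupy TG3_RSS_INDIR_TBL_SIZE / 8 consecutive registers starting at
 * MAC_RSS_INDIR_TBL_0; the inner "i % 8" loop above does the 4-bit
 * shift-and-pack.
 */
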
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}

/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon. This bit has no effect on any
	 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive. For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platforms, MRRS is restricted to 4000 because of
	 * a south bridge limitation. As a workaround, the driver sets MRRS
	 * to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Setup the timer prescalar register. Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

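/* Annotation: with the core clock fixed at 66 MHz, a prescalar value
 * of 65 presumably divides the clock by (65 + 1) to give the timer a
 * ~1 usec tick, matching the microsecond units used by the coalescing
 * parameters programmed elsewhere in this function.
 */
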
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address. See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10 ; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}

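/* Annotation: the RSS hash key is 40 bytes (10 * sizeof(u32)), drawn
 * from the kernel's global RSS key via netdev_rss_key_fill() and
 * spread across the MAC_RSS_HASH_KEY_0..9 registers at a 4-byte
 * stride.
 */
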
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
		fallthrough;
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
		fallthrough;
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
		fallthrough;
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
		fallthrough;
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
		fallthrough;
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
		fallthrough;
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
		fallthrough;
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
		fallthrough;
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
		fallthrough;
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
		fallthrough;
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
		fallthrough;
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
		fallthrough;
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing. Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}

#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	u32 off, len = TG3_OCIR_LEN;
	int i;

	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, len);
	}
}

/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */

#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)

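/* Annotation: TG3_STAT_ADD32() folds a 32-bit hardware counter into a
 * 64-bit software accumulator. The carry test relies on unsigned
 * wraparound: if low + __val wrapped, the new low is smaller than
 * __val, so high is bumped. Example: low = 0xfffffff0 and
 * __val = 0x20 yields low = 0x10 < 0x20, hence high += 1.
 */
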
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}

static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive. In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time. Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset. This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds.*/
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	timer_setup(&tp->timer, tg3_timer, 0);
}

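/* Annotation: a sanity check of the cadence math above. With tagged
 * status on a chip that is neither 5717 nor 57765-class,
 * timer_offset = HZ, so the timer fires once a second and
 * timer_multiplier = 1; otherwise it fires every HZ / 10 jiffies and
 * timer_multiplier = 10, so the "once per second" block in tg3_timer()
 * still runs at 1 Hz. asf_multiplier scales the same tick up to
 * TG3_FW_UPDATE_FREQ_SEC.
 */
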
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		/* Clear this flag so that tg3_reset_task_cancel() will not
		 * call cancel_work_sync() and wait forever.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		dev_close(tp->dev);
		goto out;
	}

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
	rtnl_unlock();
}

static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}

static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode. Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}

/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}

static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}

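/* Annotation: an example of the vector budget above, with assumed
 * queue counts. With rxq_cnt = 4 and txq_cnt = 1, irq_cnt becomes
 * max(4, 1) + 1 = 5 (the extra vector serves link and other non-ring
 * events), clamped to tp->irq_max; with a single queue the function
 * returns 1 and no extra vector is added.
 */
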
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default. Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}

static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status. Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}

static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}

static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}

11710 static int tg3_open(struct net_device *dev)
11712 struct tg3 *tp = netdev_priv(dev);
11715 if (tp->pcierr_recovery) {
11716 netdev_err(dev, "Failed to open device. PCI error recovery "
11721 if (tp->fw_needed) {
11722 err = tg3_request_firmware(tp);
11723 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11725 netdev_warn(tp->dev, "EEE capability disabled\n");
11726 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11727 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11728 netdev_warn(tp->dev, "EEE capability restored\n");
11729 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11731 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11735 netdev_warn(tp->dev, "TSO capability disabled\n");
11736 tg3_flag_clear(tp, TSO_CAPABLE);
11737 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11738 netdev_notice(tp->dev, "TSO capability restored\n");
11739 tg3_flag_set(tp, TSO_CAPABLE);
11743 tg3_carrier_off(tp);
11745 err = tg3_power_up(tp);
11749 tg3_full_lock(tp, 0);
11751 tg3_disable_ints(tp);
11752 tg3_flag_clear(tp, INIT_COMPLETE);
11754 tg3_full_unlock(tp);
11756 err = tg3_start(tp,
11757 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11760 tg3_frob_aux_power(tp, false);
11761 pci_set_power_state(tp->pdev, PCI_D3hot);
11767 static int tg3_close(struct net_device *dev)
11769 struct tg3 *tp = netdev_priv(dev);
11771 if (tp->pcierr_recovery) {
11772 netdev_err(dev, "Failed to close device. PCI error recovery "
11779 if (pci_device_is_present(tp->pdev)) {
11780 tg3_power_down_prepare(tp);
11782 tg3_carrier_off(tp);
11787 static inline u64 get_stat64(tg3_stat64_t *val)
11789 return ((u64)val->high << 32) | ((u64)val->low);
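/*
 * Hardware statistics are kept as two 32-bit halves; for example
 * high = 0x1, low = 0x2 reads back as 0x100000002.
 */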
11792 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11794 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11796 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11797 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11798 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11801 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11802 tg3_writephy(tp, MII_TG3_TEST1,
11803 val | MII_TG3_TEST1_CRC_EN);
11804 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11808 tp->phy_crc_errors += val;
11810 return tp->phy_crc_errors;
11813 return get_stat64(&hw_stats->rx_fcs_errors);
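/*
 * On 5700/5701 copper devices the CRC error count is accumulated
 * from the PHY's test-register counter above; all other chips
 * report the MAC statistics block's rx_fcs_errors directly.
 */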
11816 #define ESTAT_ADD(member) \
11817 estats->member = old_estats->member + \
11818 get_stat64(&hw_stats->member)
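/*
 * ESTAT_ADD(rx_octets), for instance, expands to:
 *
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each reported counter is the previously accumulated total
 * (tp->estats_prev) plus the live hardware value, so the totals
 * survive chip resets.
 */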
11820 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11822 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11823 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11825 ESTAT_ADD(rx_octets);
11826 ESTAT_ADD(rx_fragments);
11827 ESTAT_ADD(rx_ucast_packets);
11828 ESTAT_ADD(rx_mcast_packets);
11829 ESTAT_ADD(rx_bcast_packets);
11830 ESTAT_ADD(rx_fcs_errors);
11831 ESTAT_ADD(rx_align_errors);
11832 ESTAT_ADD(rx_xon_pause_rcvd);
11833 ESTAT_ADD(rx_xoff_pause_rcvd);
11834 ESTAT_ADD(rx_mac_ctrl_rcvd);
11835 ESTAT_ADD(rx_xoff_entered);
11836 ESTAT_ADD(rx_frame_too_long_errors);
11837 ESTAT_ADD(rx_jabbers);
11838 ESTAT_ADD(rx_undersize_packets);
11839 ESTAT_ADD(rx_in_length_errors);
11840 ESTAT_ADD(rx_out_length_errors);
11841 ESTAT_ADD(rx_64_or_less_octet_packets);
11842 ESTAT_ADD(rx_65_to_127_octet_packets);
11843 ESTAT_ADD(rx_128_to_255_octet_packets);
11844 ESTAT_ADD(rx_256_to_511_octet_packets);
11845 ESTAT_ADD(rx_512_to_1023_octet_packets);
11846 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11847 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11848 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11849 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11850 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11852 ESTAT_ADD(tx_octets);
11853 ESTAT_ADD(tx_collisions);
11854 ESTAT_ADD(tx_xon_sent);
11855 ESTAT_ADD(tx_xoff_sent);
11856 ESTAT_ADD(tx_flow_control);
11857 ESTAT_ADD(tx_mac_errors);
11858 ESTAT_ADD(tx_single_collisions);
11859 ESTAT_ADD(tx_mult_collisions);
11860 ESTAT_ADD(tx_deferred);
11861 ESTAT_ADD(tx_excessive_collisions);
11862 ESTAT_ADD(tx_late_collisions);
11863 ESTAT_ADD(tx_collide_2times);
11864 ESTAT_ADD(tx_collide_3times);
11865 ESTAT_ADD(tx_collide_4times);
11866 ESTAT_ADD(tx_collide_5times);
11867 ESTAT_ADD(tx_collide_6times);
11868 ESTAT_ADD(tx_collide_7times);
11869 ESTAT_ADD(tx_collide_8times);
11870 ESTAT_ADD(tx_collide_9times);
11871 ESTAT_ADD(tx_collide_10times);
11872 ESTAT_ADD(tx_collide_11times);
11873 ESTAT_ADD(tx_collide_12times);
11874 ESTAT_ADD(tx_collide_13times);
11875 ESTAT_ADD(tx_collide_14times);
11876 ESTAT_ADD(tx_collide_15times);
11877 ESTAT_ADD(tx_ucast_packets);
11878 ESTAT_ADD(tx_mcast_packets);
11879 ESTAT_ADD(tx_bcast_packets);
11880 ESTAT_ADD(tx_carrier_sense_errors);
11881 ESTAT_ADD(tx_discards);
11882 ESTAT_ADD(tx_errors);
11884 ESTAT_ADD(dma_writeq_full);
11885 ESTAT_ADD(dma_write_prioq_full);
11886 ESTAT_ADD(rxbds_empty);
11887 ESTAT_ADD(rx_discards);
11888 ESTAT_ADD(rx_errors);
11889 ESTAT_ADD(rx_threshold_hit);
11891 ESTAT_ADD(dma_readq_full);
11892 ESTAT_ADD(dma_read_prioq_full);
11893 ESTAT_ADD(tx_comp_queue_full);
11895 ESTAT_ADD(ring_set_send_prod_index);
11896 ESTAT_ADD(ring_status_update);
11897 ESTAT_ADD(nic_irqs);
11898 ESTAT_ADD(nic_avoided_irqs);
11899 ESTAT_ADD(nic_tx_threshold_hit);
11901 ESTAT_ADD(mbuf_lwm_thresh_hit);
11904 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11906 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11907 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11909 stats->rx_packets = old_stats->rx_packets +
11910 get_stat64(&hw_stats->rx_ucast_packets) +
11911 get_stat64(&hw_stats->rx_mcast_packets) +
11912 get_stat64(&hw_stats->rx_bcast_packets);
11914 stats->tx_packets = old_stats->tx_packets +
11915 get_stat64(&hw_stats->tx_ucast_packets) +
11916 get_stat64(&hw_stats->tx_mcast_packets) +
11917 get_stat64(&hw_stats->tx_bcast_packets);
11919 stats->rx_bytes = old_stats->rx_bytes +
11920 get_stat64(&hw_stats->rx_octets);
11921 stats->tx_bytes = old_stats->tx_bytes +
11922 get_stat64(&hw_stats->tx_octets);
11924 stats->rx_errors = old_stats->rx_errors +
11925 get_stat64(&hw_stats->rx_errors);
11926 stats->tx_errors = old_stats->tx_errors +
11927 get_stat64(&hw_stats->tx_errors) +
11928 get_stat64(&hw_stats->tx_mac_errors) +
11929 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11930 get_stat64(&hw_stats->tx_discards);
11932 stats->multicast = old_stats->multicast +
11933 get_stat64(&hw_stats->rx_mcast_packets);
11934 stats->collisions = old_stats->collisions +
11935 get_stat64(&hw_stats->tx_collisions);
11937 stats->rx_length_errors = old_stats->rx_length_errors +
11938 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11939 get_stat64(&hw_stats->rx_undersize_packets);
11941 stats->rx_frame_errors = old_stats->rx_frame_errors +
11942 get_stat64(&hw_stats->rx_align_errors);
11943 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11944 get_stat64(&hw_stats->tx_discards);
11945 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11946 get_stat64(&hw_stats->tx_carrier_sense_errors);
11948 stats->rx_crc_errors = old_stats->rx_crc_errors +
11949 tg3_calc_crc_errors(tp);
11951 stats->rx_missed_errors = old_stats->rx_missed_errors +
11952 get_stat64(&hw_stats->rx_discards);
11954 stats->rx_dropped = tp->rx_dropped;
11955 stats->tx_dropped = tp->tx_dropped;
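/*
 * Unlike the fields above, rx_dropped and tx_dropped are software
 * counters maintained by the driver itself rather than values read
 * from the hardware statistics block.
 */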
11958 static int tg3_get_regs_len(struct net_device *dev)
11960 return TG3_REG_BLK_SIZE;
11963 static void tg3_get_regs(struct net_device *dev,
11964 struct ethtool_regs *regs, void *_p)
11966 struct tg3 *tp = netdev_priv(dev);
11970 memset(_p, 0, TG3_REG_BLK_SIZE);
11972 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11975 tg3_full_lock(tp, 0);
11977 tg3_dump_legacy_regs(tp, (u32 *)_p);
11979 tg3_full_unlock(tp);
11982 static int tg3_get_eeprom_len(struct net_device *dev)
11984 struct tg3 *tp = netdev_priv(dev);
11986 return tp->nvram_size;
11989 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11991 struct tg3 *tp = netdev_priv(dev);
11992 int ret, cpmu_restore = 0;
11994 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11997 if (tg3_flag(tp, NO_NVRAM))
12000 offset = eeprom->offset;
12004 eeprom->magic = TG3_EEPROM_MAGIC;
12006 /* Override clock, link-aware and link-idle modes */
12007 if (tg3_flag(tp, CPMU_PRESENT)) {
12008 cpmu_val = tr32(TG3_CPMU_CTRL);
12009 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12010 CPMU_CTRL_LINK_IDLE_MODE)) {
12011 tw32(TG3_CPMU_CTRL, cpmu_val &
12012 ~(CPMU_CTRL_LINK_AWARE_MODE |
12013 CPMU_CTRL_LINK_IDLE_MODE));
12017 tg3_override_clk(tp);
12020 /* adjust to start on the required 4-byte boundary */
12021 b_offset = offset & 3;
12022 b_count = 4 - b_offset;
12023 if (b_count > len) {
12024 /* i.e. offset=1 len=2 */
12027 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12030 memcpy(data, ((char *)&val) + b_offset, b_count);
12033 eeprom->len += b_count;
12036 /* read whole 32-bit words up to the last 4-byte boundary */
12037 pd = &data[eeprom->len];
12038 for (i = 0; i < (len - (len & 3)); i += 4) {
12039 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12046 memcpy(pd + i, &val, 4);
12047 if (need_resched()) {
12048 if (signal_pending(current)) {
12059 /* read the trailing bytes that do not end on a 4-byte boundary */
12060 pd = &data[eeprom->len];
12062 b_offset = offset + len - b_count;
12063 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12066 memcpy(pd, &val, b_count);
12067 eeprom->len += b_count;
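/*
 * NVRAM is only readable as aligned 32-bit words, so the request is
 * served in three phases: a leading fragment up to the first 4-byte
 * boundary, whole words in the middle, and a trailing fragment
 * copied out of one final word read.
 */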
12072 /* Restore clock, link-aware and link-idle modes */
12073 tg3_restore_clk(tp);
12075 tw32(TG3_CPMU_CTRL, cpmu_val);
12080 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12082 struct tg3 *tp = netdev_priv(dev);
12084 u32 offset, len, b_offset, odd_len;
12086 __be32 start = 0, end;
12088 if (tg3_flag(tp, NO_NVRAM) ||
12089 eeprom->magic != TG3_EEPROM_MAGIC)
12092 offset = eeprom->offset;
12095 if ((b_offset = (offset & 3))) {
12096 /* adjust to start on the required 4-byte boundary */
12097 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12108 /* adjust to end on the required 4-byte boundary */
12110 len = (len + 3) & ~3;
12111 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12117 if (b_offset || odd_len) {
12118 buf = kmalloc(len, GFP_KERNEL);
12122 memcpy(buf, &start, 4);
12124 memcpy(buf+len-4, &end, 4);
12125 memcpy(buf + b_offset, data, eeprom->len);
12128 ret = tg3_nvram_write_block(tp, offset, len, buf);
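/*
 * Unaligned writes are handled read-modify-write: the partial words
 * at either end ("start" and "end" above) are fetched first and
 * merged with the caller's data in a temporary buffer, so
 * tg3_nvram_write_block() only ever sees an aligned whole-word span.
 */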
12136 static int tg3_get_link_ksettings(struct net_device *dev,
12137 struct ethtool_link_ksettings *cmd)
12139 struct tg3 *tp = netdev_priv(dev);
12140 u32 supported, advertising;
12142 if (tg3_flag(tp, USE_PHYLIB)) {
12143 struct phy_device *phydev;
12144 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12146 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12147 phy_ethtool_ksettings_get(phydev, cmd);
12152 supported = (SUPPORTED_Autoneg);
12154 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12155 supported |= (SUPPORTED_1000baseT_Half |
12156 SUPPORTED_1000baseT_Full);
12158 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12159 supported |= (SUPPORTED_100baseT_Half |
12160 SUPPORTED_100baseT_Full |
12161 SUPPORTED_10baseT_Half |
12162 SUPPORTED_10baseT_Full |
12164 cmd->base.port = PORT_TP;
12166 supported |= SUPPORTED_FIBRE;
12167 cmd->base.port = PORT_FIBRE;
12169 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12172 advertising = tp->link_config.advertising;
12173 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12174 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12175 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12176 advertising |= ADVERTISED_Pause;
12178 advertising |= ADVERTISED_Pause |
12179 ADVERTISED_Asym_Pause;
12181 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12182 advertising |= ADVERTISED_Asym_Pause;
12185 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12188 if (netif_running(dev) && tp->link_up) {
12189 cmd->base.speed = tp->link_config.active_speed;
12190 cmd->base.duplex = tp->link_config.active_duplex;
12191 ethtool_convert_legacy_u32_to_link_mode(
12192 cmd->link_modes.lp_advertising,
12193 tp->link_config.rmt_adv);
12195 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12196 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12197 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12199 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12202 cmd->base.speed = SPEED_UNKNOWN;
12203 cmd->base.duplex = DUPLEX_UNKNOWN;
12204 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12206 cmd->base.phy_address = tp->phy_addr;
12207 cmd->base.autoneg = tp->link_config.autoneg;
12211 static int tg3_set_link_ksettings(struct net_device *dev,
12212 const struct ethtool_link_ksettings *cmd)
12214 struct tg3 *tp = netdev_priv(dev);
12215 u32 speed = cmd->base.speed;
12218 if (tg3_flag(tp, USE_PHYLIB)) {
12219 struct phy_device *phydev;
12220 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12222 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12223 return phy_ethtool_ksettings_set(phydev, cmd);
12226 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12227 cmd->base.autoneg != AUTONEG_DISABLE)
12230 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12231 cmd->base.duplex != DUPLEX_FULL &&
12232 cmd->base.duplex != DUPLEX_HALF)
12235 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12236 cmd->link_modes.advertising);
12238 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12239 u32 mask = ADVERTISED_Autoneg |
12241 ADVERTISED_Asym_Pause;
12243 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12244 mask |= ADVERTISED_1000baseT_Half |
12245 ADVERTISED_1000baseT_Full;
12247 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12248 mask |= ADVERTISED_100baseT_Half |
12249 ADVERTISED_100baseT_Full |
12250 ADVERTISED_10baseT_Half |
12251 ADVERTISED_10baseT_Full |
12254 mask |= ADVERTISED_FIBRE;
12256 if (advertising & ~mask)
12259 mask &= (ADVERTISED_1000baseT_Half |
12260 ADVERTISED_1000baseT_Full |
12261 ADVERTISED_100baseT_Half |
12262 ADVERTISED_100baseT_Full |
12263 ADVERTISED_10baseT_Half |
12264 ADVERTISED_10baseT_Full);
12266 advertising &= mask;
12268 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12269 if (speed != SPEED_1000)
12272 if (cmd->base.duplex != DUPLEX_FULL)
12275 if (speed != SPEED_100 &&
12281 tg3_full_lock(tp, 0);
12283 tp->link_config.autoneg = cmd->base.autoneg;
12284 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12285 tp->link_config.advertising = (advertising |
12286 ADVERTISED_Autoneg);
12287 tp->link_config.speed = SPEED_UNKNOWN;
12288 tp->link_config.duplex = DUPLEX_UNKNOWN;
12290 tp->link_config.advertising = 0;
12291 tp->link_config.speed = speed;
12292 tp->link_config.duplex = cmd->base.duplex;
12295 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12297 tg3_warn_mgmt_link_flap(tp);
12299 if (netif_running(dev))
12300 tg3_setup_phy(tp, true);
12302 tg3_full_unlock(tp);
12307 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12309 struct tg3 *tp = netdev_priv(dev);
12311 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12312 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12313 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12316 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12318 struct tg3 *tp = netdev_priv(dev);
12320 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12321 wol->supported = WAKE_MAGIC;
12323 wol->supported = 0;
12325 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12326 wol->wolopts = WAKE_MAGIC;
12327 memset(&wol->sopass, 0, sizeof(wol->sopass));
12330 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12332 struct tg3 *tp = netdev_priv(dev);
12333 struct device *dp = &tp->pdev->dev;
12335 if (wol->wolopts & ~WAKE_MAGIC)
12337 if ((wol->wolopts & WAKE_MAGIC) &&
12338 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12341 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12343 if (device_may_wakeup(dp))
12344 tg3_flag_set(tp, WOL_ENABLE);
12346 tg3_flag_clear(tp, WOL_ENABLE);
12351 static u32 tg3_get_msglevel(struct net_device *dev)
12353 struct tg3 *tp = netdev_priv(dev);
12354 return tp->msg_enable;
12357 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12359 struct tg3 *tp = netdev_priv(dev);
12360 tp->msg_enable = value;
12363 static int tg3_nway_reset(struct net_device *dev)
12365 struct tg3 *tp = netdev_priv(dev);
12368 if (!netif_running(dev))
12371 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12374 tg3_warn_mgmt_link_flap(tp);
12376 if (tg3_flag(tp, USE_PHYLIB)) {
12377 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12379 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12383 spin_lock_bh(&tp->lock);
12385 tg3_readphy(tp, MII_BMCR, &bmcr);
12386 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12387 ((bmcr & BMCR_ANENABLE) ||
12388 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12389 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12393 spin_unlock_bh(&tp->lock);
12399 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12401 struct tg3 *tp = netdev_priv(dev);
12403 ering->rx_max_pending = tp->rx_std_ring_mask;
12404 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12405 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12407 ering->rx_jumbo_max_pending = 0;
12409 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12411 ering->rx_pending = tp->rx_pending;
12412 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12413 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12415 ering->rx_jumbo_pending = 0;
12417 ering->tx_pending = tp->napi[0].tx_pending;
12420 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12422 struct tg3 *tp = netdev_priv(dev);
12423 int i, irq_sync = 0, err = 0;
12424 bool reset_phy = false;
12426 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12427 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12428 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12429 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12430 (tg3_flag(tp, TSO_BUG) &&
12431 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
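/*
 * The checks above bound each ring against its hardware mask and
 * require tx_pending to exceed MAX_SKB_FRAGS (three times that when
 * the TSO_BUG workaround is active) so that a maximally fragmented
 * skb can always fit in the tx ring.
 */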
12434 if (netif_running(dev)) {
12436 tg3_netif_stop(tp);
12440 tg3_full_lock(tp, irq_sync);
12442 tp->rx_pending = ering->rx_pending;
12444 if (tg3_flag(tp, MAX_RXPEND_64) &&
12445 tp->rx_pending > 63)
12446 tp->rx_pending = 63;
12448 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12449 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12451 for (i = 0; i < tp->irq_max; i++)
12452 tp->napi[i].tx_pending = ering->tx_pending;
12454 if (netif_running(dev)) {
12455 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12456 /* Reset PHY to avoid PHY lock up */
12457 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12458 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12459 tg3_asic_rev(tp) == ASIC_REV_5720)
12462 err = tg3_restart_hw(tp, reset_phy);
12464 tg3_netif_start(tp);
12467 tg3_full_unlock(tp);
12469 if (irq_sync && !err)
12475 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12477 struct tg3 *tp = netdev_priv(dev);
12479 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12481 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12482 epause->rx_pause = 1;
12484 epause->rx_pause = 0;
12486 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12487 epause->tx_pause = 1;
12489 epause->tx_pause = 0;
12492 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12494 struct tg3 *tp = netdev_priv(dev);
12496 bool reset_phy = false;
12498 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12499 tg3_warn_mgmt_link_flap(tp);
12501 if (tg3_flag(tp, USE_PHYLIB)) {
12502 struct phy_device *phydev;
12504 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12506 if (!phy_validate_pause(phydev, epause))
12509 tp->link_config.flowctrl = 0;
12510 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12511 if (epause->rx_pause) {
12512 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12514 if (epause->tx_pause) {
12515 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12517 } else if (epause->tx_pause) {
12518 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12521 if (epause->autoneg)
12522 tg3_flag_set(tp, PAUSE_AUTONEG);
12524 tg3_flag_clear(tp, PAUSE_AUTONEG);
12526 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12527 if (phydev->autoneg) {
12528 /* phy_set_asym_pause() will
12529 * renegotiate the link to inform our
12530 * link partner of our flow control
12531 * settings, even if the flow control
12532 * is forced. Let tg3_adjust_link()
12533 * do the final flow control setup.
12538 if (!epause->autoneg)
12539 tg3_setup_flow_control(tp, 0, 0);
12544 if (netif_running(dev)) {
12545 tg3_netif_stop(tp);
12549 tg3_full_lock(tp, irq_sync);
12551 if (epause->autoneg)
12552 tg3_flag_set(tp, PAUSE_AUTONEG);
12554 tg3_flag_clear(tp, PAUSE_AUTONEG);
12555 if (epause->rx_pause)
12556 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12558 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12559 if (epause->tx_pause)
12560 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12562 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12564 if (netif_running(dev)) {
12565 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12566 /* Reset PHY to avoid PHY lock up */
12567 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12568 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12569 tg3_asic_rev(tp) == ASIC_REV_5720)
12572 err = tg3_restart_hw(tp, reset_phy);
12574 tg3_netif_start(tp);
12577 tg3_full_unlock(tp);
12580 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12585 static int tg3_get_sset_count(struct net_device *dev, int sset)
12589 return TG3_NUM_TEST;
12591 return TG3_NUM_STATS;
12593 return -EOPNOTSUPP;
12597 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12598 u32 *rules __always_unused)
12600 struct tg3 *tp = netdev_priv(dev);
12602 if (!tg3_flag(tp, SUPPORT_MSIX))
12603 return -EOPNOTSUPP;
12605 switch (info->cmd) {
12606 case ETHTOOL_GRXRINGS:
12607 if (netif_running(tp->dev))
12608 info->data = tp->rxq_cnt;
12610 info->data = num_online_cpus();
12611 if (info->data > TG3_RSS_MAX_NUM_QS)
12612 info->data = TG3_RSS_MAX_NUM_QS;
12618 return -EOPNOTSUPP;
12622 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12625 struct tg3 *tp = netdev_priv(dev);
12627 if (tg3_flag(tp, SUPPORT_MSIX))
12628 size = TG3_RSS_INDIR_TBL_SIZE;
12633 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12635 struct tg3 *tp = netdev_priv(dev);
12639 *hfunc = ETH_RSS_HASH_TOP;
12643 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12644 indir[i] = tp->rss_ind_tbl[i];
12649 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12652 struct tg3 *tp = netdev_priv(dev);
12655 /* We require at least one supported parameter to be changed and no
12656 * change in any of the unsupported parameters
12659 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12660 return -EOPNOTSUPP;
12665 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12666 tp->rss_ind_tbl[i] = indir[i];
12668 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12671 /* It is legal to write the indirection
12672 * table while the device is running.
12674 tg3_full_lock(tp, 0);
12675 tg3_rss_write_indir_tbl(tp);
12676 tg3_full_unlock(tp);
12681 static void tg3_get_channels(struct net_device *dev,
12682 struct ethtool_channels *channel)
12684 struct tg3 *tp = netdev_priv(dev);
12685 u32 deflt_qs = netif_get_num_default_rss_queues();
12687 channel->max_rx = tp->rxq_max;
12688 channel->max_tx = tp->txq_max;
12690 if (netif_running(dev)) {
12691 channel->rx_count = tp->rxq_cnt;
12692 channel->tx_count = tp->txq_cnt;
12695 channel->rx_count = tp->rxq_req;
12697 channel->rx_count = min(deflt_qs, tp->rxq_max);
12700 channel->tx_count = tp->txq_req;
12702 channel->tx_count = min(deflt_qs, tp->txq_max);
12706 static int tg3_set_channels(struct net_device *dev,
12707 struct ethtool_channels *channel)
12709 struct tg3 *tp = netdev_priv(dev);
12711 if (!tg3_flag(tp, SUPPORT_MSIX))
12712 return -EOPNOTSUPP;
12714 if (channel->rx_count > tp->rxq_max ||
12715 channel->tx_count > tp->txq_max)
12718 tp->rxq_req = channel->rx_count;
12719 tp->txq_req = channel->tx_count;
12721 if (!netif_running(dev))
12726 tg3_carrier_off(tp);
12728 tg3_start(tp, true, false, false);
12733 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12735 switch (stringset) {
12737 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12740 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12743 WARN_ON(1); /* we need a WARN() */
12748 static int tg3_set_phys_id(struct net_device *dev,
12749 enum ethtool_phys_id_state state)
12751 struct tg3 *tp = netdev_priv(dev);
12754 case ETHTOOL_ID_ACTIVE:
12755 return 1; /* cycle on/off once per second */
12757 case ETHTOOL_ID_ON:
12758 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12759 LED_CTRL_1000MBPS_ON |
12760 LED_CTRL_100MBPS_ON |
12761 LED_CTRL_10MBPS_ON |
12762 LED_CTRL_TRAFFIC_OVERRIDE |
12763 LED_CTRL_TRAFFIC_BLINK |
12764 LED_CTRL_TRAFFIC_LED);
12767 case ETHTOOL_ID_OFF:
12768 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12769 LED_CTRL_TRAFFIC_OVERRIDE);
12772 case ETHTOOL_ID_INACTIVE:
12773 tw32(MAC_LED_CTRL, tp->led_ctrl);
12780 static void tg3_get_ethtool_stats(struct net_device *dev,
12781 struct ethtool_stats *estats, u64 *tmp_stats)
12783 struct tg3 *tp = netdev_priv(dev);
12786 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12788 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12791 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12795 u32 offset = 0, len = 0;
12798 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12801 if (magic == TG3_EEPROM_MAGIC) {
12802 for (offset = TG3_NVM_DIR_START;
12803 offset < TG3_NVM_DIR_END;
12804 offset += TG3_NVM_DIRENT_SIZE) {
12805 if (tg3_nvram_read(tp, offset, &val))
12808 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12809 TG3_NVM_DIRTYPE_EXTVPD)
12813 if (offset != TG3_NVM_DIR_END) {
12814 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12815 if (tg3_nvram_read(tp, offset + 4, &offset))
12818 offset = tg3_nvram_logical_addr(tp, offset);
12821 if (!offset || !len) {
12822 offset = TG3_NVM_VPD_OFF;
12823 len = TG3_NVM_VPD_LEN;
12826 buf = kmalloc(len, GFP_KERNEL);
12830 for (i = 0; i < len; i += 4) {
12831 /* The data is in little-endian format in NVRAM.
12832 * Use the big-endian read routines to preserve
12833 * the byte order as it exists in NVRAM.
12835 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12840 buf = pci_vpd_alloc(tp->pdev, vpdlen);
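/*
 * Fallback path: when NVRAM is absent or unreadable, the VPD block
 * is read through PCI configuration space instead.
 */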
12852 #define NVRAM_TEST_SIZE 0x100
12853 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12854 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12855 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12856 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12857 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12858 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12859 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12860 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
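/*
 * tg3_test_nvram() below sizes its checksum pass from the magic
 * number: legacy images use NVRAM_TEST_SIZE, while selfboot images
 * pick a per-revision FORMAT1 size or the fixed hardware-selfboot
 * size defined above.
 */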
12862 static int tg3_test_nvram(struct tg3 *tp)
12866 int i, j, k, err = 0, size;
12869 if (tg3_flag(tp, NO_NVRAM))
12872 if (tg3_nvram_read(tp, 0, &magic) != 0)
12875 if (magic == TG3_EEPROM_MAGIC)
12876 size = NVRAM_TEST_SIZE;
12877 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12878 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12879 TG3_EEPROM_SB_FORMAT_1) {
12880 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12881 case TG3_EEPROM_SB_REVISION_0:
12882 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12884 case TG3_EEPROM_SB_REVISION_2:
12885 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12887 case TG3_EEPROM_SB_REVISION_3:
12888 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12890 case TG3_EEPROM_SB_REVISION_4:
12891 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12893 case TG3_EEPROM_SB_REVISION_5:
12894 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12896 case TG3_EEPROM_SB_REVISION_6:
12897 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12904 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12905 size = NVRAM_SELFBOOT_HW_SIZE;
12909 buf = kmalloc(size, GFP_KERNEL);
12914 for (i = 0, j = 0; i < size; i += 4, j++) {
12915 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12922 /* Selfboot format */
12923 magic = be32_to_cpu(buf[0]);
12924 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12925 TG3_EEPROM_MAGIC_FW) {
12926 u8 *buf8 = (u8 *) buf, csum8 = 0;
12928 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12929 TG3_EEPROM_SB_REVISION_2) {
12930 /* For rev 2, the csum doesn't include the MBA. */
12931 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12933 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12936 for (i = 0; i < size; i++)
12949 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12950 TG3_EEPROM_MAGIC_HW) {
12951 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12952 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12953 u8 *buf8 = (u8 *) buf;
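/*
 * Hardware-selfboot images interleave parity bits with the data
 * bytes.  Once separated below, each data byte is valid only if it
 * has odd overall parity when its stored parity bit is included.
 */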
12955 /* Separate the parity bits and the data bytes. */
12956 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12957 if ((i == 0) || (i == 8)) {
12961 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12962 parity[k++] = buf8[i] & msk;
12964 } else if (i == 16) {
12968 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12969 parity[k++] = buf8[i] & msk;
12972 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12973 parity[k++] = buf8[i] & msk;
12976 data[j++] = buf8[i];
12980 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12981 u8 hw8 = hweight8(data[i]);
12983 if ((hw8 & 0x1) && parity[i])
12985 else if (!(hw8 & 0x1) && !parity[i])
12994 /* Bootstrap checksum at offset 0x10 */
12995 csum = calc_crc((unsigned char *) buf, 0x10);
12996 if (csum != le32_to_cpu(buf[0x10/4]))
12999 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13000 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13001 if (csum != le32_to_cpu(buf[0xfc/4]))
13006 buf = tg3_vpd_readblock(tp, &len);
13010 err = pci_vpd_check_csum(buf, len);
13011 /* go on if no checksum found */
13019 #define TG3_SERDES_TIMEOUT_SEC 2
13020 #define TG3_COPPER_TIMEOUT_SEC 6
13022 static int tg3_test_link(struct tg3 *tp)
13026 if (!netif_running(tp->dev))
13029 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13030 max = TG3_SERDES_TIMEOUT_SEC;
13032 max = TG3_COPPER_TIMEOUT_SEC;
13034 for (i = 0; i < max; i++) {
13038 if (msleep_interruptible(1000))
13045 /* Only test the commonly used registers */
13046 static int tg3_test_registers(struct tg3 *tp)
13048 int i, is_5705, is_5750;
13049 u32 offset, read_mask, write_mask, val, save_val, read_val;
13053 #define TG3_FL_5705 0x1
13054 #define TG3_FL_NOT_5705 0x2
13055 #define TG3_FL_NOT_5788 0x4
13056 #define TG3_FL_NOT_5750 0x8
13060 /* MAC Control Registers */
13061 { MAC_MODE, TG3_FL_NOT_5705,
13062 0x00000000, 0x00ef6f8c },
13063 { MAC_MODE, TG3_FL_5705,
13064 0x00000000, 0x01ef6b8c },
13065 { MAC_STATUS, TG3_FL_NOT_5705,
13066 0x03800107, 0x00000000 },
13067 { MAC_STATUS, TG3_FL_5705,
13068 0x03800100, 0x00000000 },
13069 { MAC_ADDR_0_HIGH, 0x0000,
13070 0x00000000, 0x0000ffff },
13071 { MAC_ADDR_0_LOW, 0x0000,
13072 0x00000000, 0xffffffff },
13073 { MAC_RX_MTU_SIZE, 0x0000,
13074 0x00000000, 0x0000ffff },
13075 { MAC_TX_MODE, 0x0000,
13076 0x00000000, 0x00000070 },
13077 { MAC_TX_LENGTHS, 0x0000,
13078 0x00000000, 0x00003fff },
13079 { MAC_RX_MODE, TG3_FL_NOT_5705,
13080 0x00000000, 0x000007fc },
13081 { MAC_RX_MODE, TG3_FL_5705,
13082 0x00000000, 0x000007dc },
13083 { MAC_HASH_REG_0, 0x0000,
13084 0x00000000, 0xffffffff },
13085 { MAC_HASH_REG_1, 0x0000,
13086 0x00000000, 0xffffffff },
13087 { MAC_HASH_REG_2, 0x0000,
13088 0x00000000, 0xffffffff },
13089 { MAC_HASH_REG_3, 0x0000,
13090 0x00000000, 0xffffffff },
13092 /* Receive Data and Receive BD Initiator Control Registers. */
13093 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13094 0x00000000, 0xffffffff },
13095 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13096 0x00000000, 0xffffffff },
13097 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13098 0x00000000, 0x00000003 },
13099 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13100 0x00000000, 0xffffffff },
13101 { RCVDBDI_STD_BD+0, 0x0000,
13102 0x00000000, 0xffffffff },
13103 { RCVDBDI_STD_BD+4, 0x0000,
13104 0x00000000, 0xffffffff },
13105 { RCVDBDI_STD_BD+8, 0x0000,
13106 0x00000000, 0xffff0002 },
13107 { RCVDBDI_STD_BD+0xc, 0x0000,
13108 0x00000000, 0xffffffff },
13110 /* Receive BD Initiator Control Registers. */
13111 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13112 0x00000000, 0xffffffff },
13113 { RCVBDI_STD_THRESH, TG3_FL_5705,
13114 0x00000000, 0x000003ff },
13115 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13116 0x00000000, 0xffffffff },
13118 /* Host Coalescing Control Registers. */
13119 { HOSTCC_MODE, TG3_FL_NOT_5705,
13120 0x00000000, 0x00000004 },
13121 { HOSTCC_MODE, TG3_FL_5705,
13122 0x00000000, 0x000000f6 },
13123 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13124 0x00000000, 0xffffffff },
13125 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13126 0x00000000, 0x000003ff },
13127 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13128 0x00000000, 0xffffffff },
13129 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13130 0x00000000, 0x000003ff },
13131 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13132 0x00000000, 0xffffffff },
13133 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13134 0x00000000, 0x000000ff },
13135 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13136 0x00000000, 0xffffffff },
13137 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13138 0x00000000, 0x000000ff },
13139 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13140 0x00000000, 0xffffffff },
13141 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13142 0x00000000, 0xffffffff },
13143 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13144 0x00000000, 0xffffffff },
13145 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13146 0x00000000, 0x000000ff },
13147 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13148 0x00000000, 0xffffffff },
13149 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13150 0x00000000, 0x000000ff },
13151 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13152 0x00000000, 0xffffffff },
13153 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13154 0x00000000, 0xffffffff },
13155 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13156 0x00000000, 0xffffffff },
13157 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13158 0x00000000, 0xffffffff },
13159 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13160 0x00000000, 0xffffffff },
13161 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13162 0xffffffff, 0x00000000 },
13163 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13164 0xffffffff, 0x00000000 },
13166 /* Buffer Manager Control Registers. */
13167 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13168 0x00000000, 0x007fff80 },
13169 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13170 0x00000000, 0x007fffff },
13171 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13172 0x00000000, 0x0000003f },
13173 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13174 0x00000000, 0x000001ff },
13175 { BUFMGR_MB_HIGH_WATER, 0x0000,
13176 0x00000000, 0x000001ff },
13177 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13178 0xffffffff, 0x00000000 },
13179 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13180 0xffffffff, 0x00000000 },
13182 /* Mailbox Registers */
13183 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13184 0x00000000, 0x000001ff },
13185 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13186 0x00000000, 0x000001ff },
13187 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13188 0x00000000, 0x000007ff },
13189 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13190 0x00000000, 0x000001ff },
13192 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13195 is_5705 = is_5750 = 0;
13196 if (tg3_flag(tp, 5705_PLUS)) {
13198 if (tg3_flag(tp, 5750_PLUS))
13202 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13203 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13206 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13209 if (tg3_flag(tp, IS_5788) &&
13210 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13213 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13216 offset = (u32) reg_tbl[i].offset;
13217 read_mask = reg_tbl[i].read_mask;
13218 write_mask = reg_tbl[i].write_mask;
13220 /* Save the original register content */
13221 save_val = tr32(offset);
13223 /* Determine the read-only value. */
13224 read_val = save_val & read_mask;
13226 /* Write zero to the register, then make sure the read-only bits
13227 * are not changed and the read/write bits are all zeros.
13231 val = tr32(offset);
13233 /* Test the read-only and read/write bits. */
13234 if (((val & read_mask) != read_val) || (val & write_mask))
13237 /* Write ones to all the bits defined by RdMask and WrMask, then
13238 * make sure the read-only bits are not changed and the
13239 * read/write bits are all ones.
13241 tw32(offset, read_mask | write_mask);
13243 val = tr32(offset);
13245 /* Test the read-only bits. */
13246 if ((val & read_mask) != read_val)
13249 /* Test the read/write bits. */
13250 if ((val & write_mask) != write_mask)
13253 tw32(offset, save_val);
13259 if (netif_msg_hw(tp))
13260 netdev_err(tp->dev,
13261 "Register test failed at offset %x\n", offset);
13262 tw32(offset, save_val);
13266 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13268 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
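/*
 * All-zeros, all-ones and an alternating pattern are chosen to
 * catch stuck-at bits as well as shorted or coupled data lines.
 */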
13272 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13273 for (j = 0; j < len; j += 4) {
13276 tg3_write_mem(tp, offset + j, test_pattern[i]);
13277 tg3_read_mem(tp, offset + j, &val);
13278 if (val != test_pattern[i])
13285 static int tg3_test_memory(struct tg3 *tp)
13287 static struct mem_entry {
13290 } mem_tbl_570x[] = {
13291 { 0x00000000, 0x00b50},
13292 { 0x00002000, 0x1c000},
13293 { 0xffffffff, 0x00000}
13294 }, mem_tbl_5705[] = {
13295 { 0x00000100, 0x0000c},
13296 { 0x00000200, 0x00008},
13297 { 0x00004000, 0x00800},
13298 { 0x00006000, 0x01000},
13299 { 0x00008000, 0x02000},
13300 { 0x00010000, 0x0e000},
13301 { 0xffffffff, 0x00000}
13302 }, mem_tbl_5755[] = {
13303 { 0x00000200, 0x00008},
13304 { 0x00004000, 0x00800},
13305 { 0x00006000, 0x00800},
13306 { 0x00008000, 0x02000},
13307 { 0x00010000, 0x0c000},
13308 { 0xffffffff, 0x00000}
13309 }, mem_tbl_5906[] = {
13310 { 0x00000200, 0x00008},
13311 { 0x00004000, 0x00400},
13312 { 0x00006000, 0x00400},
13313 { 0x00008000, 0x01000},
13314 { 0x00010000, 0x01000},
13315 { 0xffffffff, 0x00000}
13316 }, mem_tbl_5717[] = {
13317 { 0x00000200, 0x00008},
13318 { 0x00010000, 0x0a000},
13319 { 0x00020000, 0x13c00},
13320 { 0xffffffff, 0x00000}
13321 }, mem_tbl_57765[] = {
13322 { 0x00000200, 0x00008},
13323 { 0x00004000, 0x00800},
13324 { 0x00006000, 0x09800},
13325 { 0x00010000, 0x0a000},
13326 { 0xffffffff, 0x00000}
13328 struct mem_entry *mem_tbl;
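/*
 * Each table above is a { nic_offset, length } list terminated by
 * an offset of 0xffffffff; the per-ASIC selection below hands the
 * matching list, entry by entry, to tg3_do_mem_test().
 */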
13332 if (tg3_flag(tp, 5717_PLUS))
13333 mem_tbl = mem_tbl_5717;
13334 else if (tg3_flag(tp, 57765_CLASS) ||
13335 tg3_asic_rev(tp) == ASIC_REV_5762)
13336 mem_tbl = mem_tbl_57765;
13337 else if (tg3_flag(tp, 5755_PLUS))
13338 mem_tbl = mem_tbl_5755;
13339 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13340 mem_tbl = mem_tbl_5906;
13341 else if (tg3_flag(tp, 5705_PLUS))
13342 mem_tbl = mem_tbl_5705;
13344 mem_tbl = mem_tbl_570x;
13346 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13347 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13355 #define TG3_TSO_MSS 500
13357 #define TG3_TSO_IP_HDR_LEN 20
13358 #define TG3_TSO_TCP_HDR_LEN 20
13359 #define TG3_TSO_TCP_OPT_LEN 12
13361 static const u8 tg3_tso_header[] = {
13363 0x45, 0x00, 0x00, 0x00,
13364 0x00, 0x00, 0x40, 0x00,
13365 0x40, 0x06, 0x00, 0x00,
13366 0x0a, 0x00, 0x00, 0x01,
13367 0x0a, 0x00, 0x00, 0x02,
13368 0x0d, 0x00, 0xe0, 0x00,
13369 0x00, 0x00, 0x01, 0x00,
13370 0x00, 0x00, 0x02, 0x00,
13371 0x80, 0x10, 0x10, 0x00,
13372 0x14, 0x09, 0x00, 0x00,
13373 0x01, 0x01, 0x08, 0x0a,
13374 0x11, 0x11, 0x11, 0x11,
13375 0x11, 0x11, 0x11, 0x11,
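/*
 * The canned header above decodes as a minimal IPv4/TCP frame:
 * version 4, IHL 5, protocol 6 (TCP), 10.0.0.1 -> 10.0.0.2, then a
 * TCP header whose data offset of 8 words leaves room for the
 * 12-byte option block (NOP, NOP, timestamp) that
 * TG3_TSO_TCP_OPT_LEN accounts for.
 */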
13378 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13380 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13381 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13383 struct sk_buff *skb;
13384 u8 *tx_data, *rx_data;
13386 int num_pkts, tx_len, rx_len, i, err;
13387 struct tg3_rx_buffer_desc *desc;
13388 struct tg3_napi *tnapi, *rnapi;
13389 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13391 tnapi = &tp->napi[0];
13392 rnapi = &tp->napi[0];
13393 if (tp->irq_cnt > 1) {
13394 if (tg3_flag(tp, ENABLE_RSS))
13395 rnapi = &tp->napi[1];
13396 if (tg3_flag(tp, ENABLE_TSS))
13397 tnapi = &tp->napi[1];
13399 coal_now = tnapi->coal_now | rnapi->coal_now;
13404 skb = netdev_alloc_skb(tp->dev, tx_len);
13408 tx_data = skb_put(skb, tx_len);
13409 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13410 memset(tx_data + ETH_ALEN, 0x0, 8);
13412 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13414 if (tso_loopback) {
13415 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13417 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13418 TG3_TSO_TCP_OPT_LEN;
13420 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13421 sizeof(tg3_tso_header));
13424 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13425 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13427 /* Set the total length field in the IP header */
13428 iph->tot_len = htons((u16)(mss + hdr_len));
13430 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13431 TXD_FLAG_CPU_POST_DMA);
13433 if (tg3_flag(tp, HW_TSO_1) ||
13434 tg3_flag(tp, HW_TSO_2) ||
13435 tg3_flag(tp, HW_TSO_3)) {
13437 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13438 th = (struct tcphdr *)&tx_data[val];
13441 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13443 if (tg3_flag(tp, HW_TSO_3)) {
13444 mss |= (hdr_len & 0xc) << 12;
13445 if (hdr_len & 0x10)
13446 base_flags |= 0x00000010;
13447 base_flags |= (hdr_len & 0x3e0) << 5;
13448 } else if (tg3_flag(tp, HW_TSO_2))
13449 mss |= hdr_len << 9;
13450 else if (tg3_flag(tp, HW_TSO_1) ||
13451 tg3_asic_rev(tp) == ASIC_REV_5705) {
13452 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13454 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13457 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13460 data_off = ETH_HLEN;
13462 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13463 tx_len > VLAN_ETH_FRAME_LEN)
13464 base_flags |= TXD_FLAG_JMB_PKT;
13467 for (i = data_off; i < tx_len; i++)
13468 tx_data[i] = (u8) (i & 0xff);
13470 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13471 if (dma_mapping_error(&tp->pdev->dev, map)) {
13472 dev_kfree_skb(skb);
13476 val = tnapi->tx_prod;
13477 tnapi->tx_buffers[val].skb = skb;
13478 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13480 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13485 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13487 budget = tg3_tx_avail(tnapi);
13488 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13489 base_flags | TXD_FLAG_END, mss, 0)) {
13490 tnapi->tx_buffers[val].skb = NULL;
13491 dev_kfree_skb(skb);
13497 /* Sync BD data before updating mailbox */
13500 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13501 tr32_mailbox(tnapi->prodmbox);
13505 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13506 for (i = 0; i < 35; i++) {
13507 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13512 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13513 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13514 if ((tx_idx == tnapi->tx_prod) &&
13515 (rx_idx == (rx_start_idx + num_pkts)))
13519 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13520 dev_kfree_skb(skb);
13522 if (tx_idx != tnapi->tx_prod)
13525 if (rx_idx != rx_start_idx + num_pkts)
13529 while (rx_idx != rx_start_idx) {
13530 desc = &rnapi->rx_rcb[rx_start_idx++];
13531 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13532 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13534 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13535 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13538 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13541 if (!tso_loopback) {
13542 if (rx_len != tx_len)
13545 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13546 if (opaque_key != RXD_OPAQUE_RING_STD)
13549 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13552 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13553 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13554 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13558 if (opaque_key == RXD_OPAQUE_RING_STD) {
13559 rx_data = tpr->rx_std_buffers[desc_idx].data;
13560 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13562 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13563 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13564 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13569 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13572 rx_data += TG3_RX_OFFSET(tp);
13573 for (i = data_off; i < rx_len; i++, val++) {
13574 if (*(rx_data + i) != (u8) (val & 0xff))
13581 /* tg3_free_rings will unmap and free the rx_data */
13586 #define TG3_STD_LOOPBACK_FAILED 1
13587 #define TG3_JMB_LOOPBACK_FAILED 2
13588 #define TG3_TSO_LOOPBACK_FAILED 4
13589 #define TG3_LOOPBACK_FAILED \
13590 (TG3_STD_LOOPBACK_FAILED | \
13591 TG3_JMB_LOOPBACK_FAILED | \
13592 TG3_TSO_LOOPBACK_FAILED)
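/*
 * Each data[] slot in the loopback test carries an OR of the bits
 * above, so a single u64 encodes the standard, jumbo and TSO
 * outcomes for a given loopback mode (MAC, PHY or external).
 */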
13594 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13598 u32 jmb_pkt_sz = 9000;
13601 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13603 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13604 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13606 if (!netif_running(tp->dev)) {
13607 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13608 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13610 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13614 err = tg3_reset_hw(tp, true);
13616 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13617 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13619 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13623 if (tg3_flag(tp, ENABLE_RSS)) {
13626 /* Reroute all rx packets to the 1st queue */
13627 for (i = MAC_RSS_INDIR_TBL_0;
13628 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13632 /* HW errata - MAC loopback fails in some cases on 5780.
13633 * Normal traffic and PHY loopback are not affected by the
13634 * errata. Also, the MAC loopback test is deprecated for
13635 * all newer ASIC revisions.
13637 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13638 !tg3_flag(tp, CPMU_PRESENT)) {
13639 tg3_mac_loopback(tp, true);
13641 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13642 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13644 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13645 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13646 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13648 tg3_mac_loopback(tp, false);
13651 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13652 !tg3_flag(tp, USE_PHYLIB)) {
13655 tg3_phy_lpbk_set(tp, 0, false);
13657 /* Wait for link */
13658 for (i = 0; i < 100; i++) {
13659 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13664 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13665 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13666 if (tg3_flag(tp, TSO_CAPABLE) &&
13667 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13668 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13669 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13670 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13671 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13674 tg3_phy_lpbk_set(tp, 0, true);
13676 /* All link indications report up, but the hardware
13677 * isn't really ready for about 20 msec. Double it
13682 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13683 data[TG3_EXT_LOOPB_TEST] |=
13684 TG3_STD_LOOPBACK_FAILED;
13685 if (tg3_flag(tp, TSO_CAPABLE) &&
13686 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13687 data[TG3_EXT_LOOPB_TEST] |=
13688 TG3_TSO_LOOPBACK_FAILED;
13689 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13690 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13691 data[TG3_EXT_LOOPB_TEST] |=
13692 TG3_JMB_LOOPBACK_FAILED;
13695 /* Re-enable gphy autopowerdown. */
13696 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13697 tg3_phy_toggle_apd(tp, true);
13700 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13701 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13704 tp->phy_flags |= eee_cap;
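/*
 * EEE capability is masked off before the loopback runs, since
 * low-power idle could otherwise disturb the tests; the saved bit
 * is restored here.
 */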
13709 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13712 struct tg3 *tp = netdev_priv(dev);
13713 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13715 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13716 if (tg3_power_up(tp)) {
13717 etest->flags |= ETH_TEST_FL_FAILED;
13718 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13721 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13724 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13726 if (tg3_test_nvram(tp) != 0) {
13727 etest->flags |= ETH_TEST_FL_FAILED;
13728 data[TG3_NVRAM_TEST] = 1;
13730 if (!doextlpbk && tg3_test_link(tp)) {
13731 etest->flags |= ETH_TEST_FL_FAILED;
13732 data[TG3_LINK_TEST] = 1;
13734 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13735 int err, err2 = 0, irq_sync = 0;
13737 if (netif_running(dev)) {
13739 tg3_netif_stop(tp);
13743 tg3_full_lock(tp, irq_sync);
13744 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13745 err = tg3_nvram_lock(tp);
13746 tg3_halt_cpu(tp, RX_CPU_BASE);
13747 if (!tg3_flag(tp, 5705_PLUS))
13748 tg3_halt_cpu(tp, TX_CPU_BASE);
13750 tg3_nvram_unlock(tp);
13752 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13755 if (tg3_test_registers(tp) != 0) {
13756 etest->flags |= ETH_TEST_FL_FAILED;
13757 data[TG3_REGISTER_TEST] = 1;
13760 if (tg3_test_memory(tp) != 0) {
13761 etest->flags |= ETH_TEST_FL_FAILED;
13762 data[TG3_MEMORY_TEST] = 1;
13766 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13768 if (tg3_test_loopback(tp, data, doextlpbk))
13769 etest->flags |= ETH_TEST_FL_FAILED;
13771 tg3_full_unlock(tp);
13773 if (tg3_test_interrupt(tp) != 0) {
13774 etest->flags |= ETH_TEST_FL_FAILED;
13775 data[TG3_INTERRUPT_TEST] = 1;
13778 tg3_full_lock(tp, 0);
13780 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13781 if (netif_running(dev)) {
13782 tg3_flag_set(tp, INIT_COMPLETE);
13783 err2 = tg3_restart_hw(tp, true);
13785 tg3_netif_start(tp);
13788 tg3_full_unlock(tp);
13790 if (irq_sync && !err2)
13793 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13794 tg3_power_down_prepare(tp);
13798 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13800 struct tg3 *tp = netdev_priv(dev);
13801 struct hwtstamp_config stmpconf;
13803 if (!tg3_flag(tp, PTP_CAPABLE))
13804 return -EOPNOTSUPP;
13806 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13809 if (stmpconf.flags)
13812 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13813 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13816 switch (stmpconf.rx_filter) {
13817 case HWTSTAMP_FILTER_NONE:
13820 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13821 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13822 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13824 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13825 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13826 TG3_RX_PTP_CTL_SYNC_EVNT;
13828 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13829 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13830 TG3_RX_PTP_CTL_DELAY_REQ;
13832 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13833 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13834 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13836 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13837 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13838 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13840 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13841 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13842 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13844 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13845 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13846 TG3_RX_PTP_CTL_SYNC_EVNT;
13848 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13849 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13850 TG3_RX_PTP_CTL_SYNC_EVNT;
13852 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13853 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13854 TG3_RX_PTP_CTL_SYNC_EVNT;
13856 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13857 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13858 TG3_RX_PTP_CTL_DELAY_REQ;
13860 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13861 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13862 TG3_RX_PTP_CTL_DELAY_REQ;
13864 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13865 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13866 TG3_RX_PTP_CTL_DELAY_REQ;
13872 if (netif_running(dev) && tp->rxptpctl)
13873 tw32(TG3_RX_PTP_CTL,
13874 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13876 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13877 tg3_flag_set(tp, TX_TSTAMP_EN);
13879 tg3_flag_clear(tp, TX_TSTAMP_EN);
13881 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13885 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13887 struct tg3 *tp = netdev_priv(dev);
13888 struct hwtstamp_config stmpconf;
13890 if (!tg3_flag(tp, PTP_CAPABLE))
13891 return -EOPNOTSUPP;
13893 stmpconf.flags = 0;
13894 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13895 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13897 switch (tp->rxptpctl) {
13899 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13901 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13902 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13904 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13905 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13907 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13908 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13910 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13911 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13913 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13914 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13916 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13917 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13919 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13920 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13922 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13923 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13925 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13926 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13928 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13929 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13931 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13932 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13934 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13935 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13942 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13946 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13948 struct mii_ioctl_data *data = if_mii(ifr);
13949 struct tg3 *tp = netdev_priv(dev);
13952 if (tg3_flag(tp, USE_PHYLIB)) {
13953 struct phy_device *phydev;
13954 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13956 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13957 return phy_mii_ioctl(phydev, ifr, cmd);
13962 data->phy_id = tp->phy_addr;
13965 case SIOCGMIIREG: {
13968 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13969 break; /* We have no PHY */
13971 if (!netif_running(dev))
13974 spin_lock_bh(&tp->lock);
13975 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13976 data->reg_num & 0x1f, &mii_regval);
13977 spin_unlock_bh(&tp->lock);
13979 data->val_out = mii_regval;
13985 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13986 break; /* We have no PHY */
13988 if (!netif_running(dev))
13991 spin_lock_bh(&tp->lock);
13992 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13993 data->reg_num & 0x1f, data->val_in);
13994 spin_unlock_bh(&tp->lock);
13998 case SIOCSHWTSTAMP:
13999 return tg3_hwtstamp_set(dev, ifr);
14001 case SIOCGHWTSTAMP:
14002 return tg3_hwtstamp_get(dev, ifr);
14008 return -EOPNOTSUPP;
14011 static int tg3_get_coalesce(struct net_device *dev,
14012 struct ethtool_coalesce *ec,
14013 struct kernel_ethtool_coalesce *kernel_coal,
14014 struct netlink_ext_ack *extack)
14016 struct tg3 *tp = netdev_priv(dev);
14018 memcpy(ec, &tp->coal, sizeof(*ec));
static int tg3_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
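
/* EEE (Energy Efficient Ethernet) hooks. Advertisement is fixed by the
 * driver; only the remaining ethtool_eee fields may be changed, and new
 * settings take effect immediately when the interface is up.
 */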
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
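
/* Propagate an MTU change into the flag state: MTUs above ETH_DATA_LEN
 * enable the jumbo RX ring, and on 5780-class chips TSO capability is
 * toggled off while jumbo frames are in use.
 */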
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
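
/* ndo_change_mtu handler: if the device is down the new MTU is simply
 * recorded; otherwise the NIC is halted, reconfigured and restarted
 * (with a PHY reset on the chips noted below).
 */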
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_eth_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
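
/* Determine NVRAM size. Self-boot images fall back to the EEPROM probe
 * above; otherwise the size is read from the 16-bit field at offset
 * 0xf2, with a 512KB default when that field is zero.
 */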
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
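
/* Decode the flash page size advertised in NVRAM_CFG1 on 5752 and
 * newer parts; it is used when programming the flash in page-sized
 * chunks.
 */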
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
				       AUTOSENSE_DEVID_MASK)
				 << AUTOSENSE_SIZE_IN_MB);
			return;
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		break;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
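
/* Some early boards carry no usable PHY ID in NVRAM; map their PCI
 * subsystem vendor/device IDs to known PHY IDs instead.
 */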
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
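
/* Pull the boot-time hardware configuration that bootcode left in NIC
 * SRAM: PHY ID, LED mode, WOL capability, ASF/APE enables and related
 * workaround flags. Sane defaults are assumed when the SRAM signature
 * is absent.
 */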
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
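
/* Read one 32-bit word from the chip's OTP region through the APE OTP
 * registers, polling for command completion; used on 5762 (see
 * tg3_read_otp_ver() below).
 */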
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
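
/* Identify the PHY. The ID is read from the MII registers unless ASF
 * or APE firmware owns the PHY, in which case the value from NVRAM (or
 * the subsystem-ID table) is used. Also decides EEE capability and,
 * when safe, performs an initial PHY reset and autoneg setup.
 */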
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
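
/* Extract the board part number and boot code version strings from the
 * PCI VPD area, falling back to per-device defaults when VPD is
 * missing or incomplete.
 */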
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int len, vpdlen;
	int i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (i < 0)
		goto partno;

	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
		goto partno;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
	if (i < 0)
		goto partno;

	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);

partno:
	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
	if (i < 0)
		goto out_not_found;

	if (len > TG3_BPN_SIZE)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
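
/* Append the management firmware (ASF) version string, located through
 * the NVRAM directory entries.
 */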
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
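
/* Host bridges/chipsets known to reorder posted PCI writes; behind
 * these the driver applies its write-reorder workaround.
 */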
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 * half.
	 */
	pci_dev_put(peer);

	return peer;
}
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
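
/* Boards that support only 10/100 operation: certain 5703 board IDs,
 * FET PHYs, and entries flagged 10_100_ONLY in the PCI ID table.
 */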
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);
	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers.  The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space.  Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range.  This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles.  However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit.  This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing.  HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;
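/* For context: the tr32()/tw32() accessors used throughout this file are
 * thin macros that dispatch through the function pointers assigned above,
 * roughly tw32(reg, val) -> tp->write32(tp, reg, val).  The workaround
 * assignments that follow therefore reroute every register access without
 * touching any call site.
 */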
	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}
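/* A minimal sketch of what the flush variant amounts to (the real
 * tg3_write_flush_reg32() is defined earlier in this file; the name below
 * is illustrative only): write the register, then read the same offset
 * back so the posted write reaches the chip before the next access.
 *
 *	static void example_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
 *	{
 *		writel(val, tp->regs + off);
 *		readl(tp->regs + off);	// read back to flush the posted write
 *	}
 */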
	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
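/* With ICH_WORKAROUND in effect, MMIO is turned off entirely (note
 * PCI_COMMAND_MEMORY being cleared above) and every access is funneled
 * through the PCI config-space window.  Roughly, the write side of the
 * indirect accessors installed above amounts to:
 *
 *	spin_lock_irqsave(&tp->indirect_lock, flags);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
 *	spin_unlock_irqrestore(&tp->indirect_lock, flags);
 *
 * (a paraphrase of tg3_write_indirect_reg32(), defined earlier in this
 * file, shown here for context).
 */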
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
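/* SRAM_USE_CONFIG steers tg3_read_mem()/tg3_write_mem() through the
 * TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA config-space window rather
 * than the MMIO memory window, matching the indirect register access
 * selected above.
 */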
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);
	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}
	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}
	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
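/* The ring sizes are powers of two, so "size - 1" yields a wrap mask:
 * a 512-entry return ring, for instance, gives mask 0x1ff, letting
 * producer/consumer indices advance as "(idx + 1) & mask" with no
 * explicit compare-and-reset at the end of the ring.
 */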
	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

	if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0]))
		return -EINVAL;
	return 0;
}
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;
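/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the "* 4":
 * a register value of 0x10 (16 words) corresponds to a 64-byte cache
 * line.  A raw value of zero means the size was never programmed, and
 * the code above falls back to the 1024-byte worst case.
 */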
	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			fallthrough;
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			fallthrough;
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			fallthrough;
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			fallthrough;
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			fallthrough;
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
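/* The loop above streams the test descriptor into NIC SRAM one word at
 * a time through the TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA
 * config-space window; the window base is then parked back at zero,
 * which the rest of the driver assumes.
 */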
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = dma_set_coherent_mask(&pdev->dev,
						    persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
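	/* Aside: on current kernels the mask/coherent-mask pair above is
	 * often collapsed into a single call; a sketch, assuming the same
	 * pdev, would be:
	 *
	 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	 *
	 * The split form is kept here because the streaming and coherent
	 * masks can legitimately differ (dma_mask vs. persist_dma_mask).
	 */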
	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly: the DMA self test below will enable the WDMAC, and
	 * we would otherwise see (spurious) pending DMA on the PCI bus at
	 * that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}
17883 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17884 tp->board_part_number,
17885 tg3_chip_rev_id(tp),
17886 tg3_bus_string(tp, str),
17889 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17892 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17893 ethtype = "10/100Base-TX";
17894 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17895 ethtype = "1000Base-SX";
17897 ethtype = "10/100/1000Base-T";
17899 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17900 "(WireSpeed[%d], EEE[%d])\n",
17901 tg3_phy_string(tp), ethtype,
17902 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17903 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17906 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17907 (dev->features & NETIF_F_RXCSUM) != 0,
17908 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17909 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17910 tg3_flag(tp, ENABLE_ASF) != 0,
17911 tg3_flag(tp, TSO_CAPABLE) != 0);
17912 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17914 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17915 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17917 pci_save_state(pdev);
17923 iounmap(tp->aperegs);
17924 tp->aperegs = NULL;
17937 pci_release_regions(pdev);
17939 err_out_disable_pdev:
17940 if (pci_is_enabled(pdev))
17941 pci_disable_device(pdev);
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* Could be second call or maybe we don't have netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
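/* module_pci_driver() expands to the usual module_init()/module_exit()
 * boilerplate, calling pci_register_driver(&tg3_driver) on load and
 * pci_unregister_driver(&tg3_driver) on unload.
 */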