/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
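
/*
 * Illustrative expansion (not part of the original source): the
 * token-pasting macros above turn a symbolic flag name into an enum
 * lookup, e.g.
 *
 *	tg3_flag(tp, ENABLE_APE)
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), so a
 * misspelled flag name fails at compile time instead of silently
 * testing the wrong bit.
 */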
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
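
/*
 * Worked example of the shift-and-mask arithmetic the comment above asks
 * for: with TG3_TX_RING_SIZE == 512 (a power of two),
 *
 *	NEXT_TX(511) == (512 & 511) == 0
 *
 * so the producer index wraps around with a single AND instruction and
 * never needs a hardware modulo.
 */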
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
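
/*
 * Sketch of the resulting rx-path decision (illustrative only; the real
 * logic lives in the receive handler further down in this file):
 *
 *	if (len < TG3_RX_COPY_THRESH(tp))
 *		copy the frame into a small, IP-aligned skb;
 *	else
 *		hand the mapped DMA buffer up unmodified;
 *
 * On CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS builds the threshold is a
 * compile-time constant, so the 5701 workaround costs no extra load.
 */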
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
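
/*
 * Example: with the default of TG3_DEF_TX_RING_PENDING (511) descriptors,
 * the queue is not woken until 511 / 4 = 127 slots are free again, which
 * amortizes the wakeup cost over many completed packets.
 */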
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
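
/*
 * Illustrative usage of the accessors above: tw32()/tr32() dispatch through
 * the per-chip function pointers (direct MMIO or indirect config-space
 * access), tw32_f() reads the register back to flush a posted write, and
 * tw32_wait_f() adds the mandatory settling delay, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */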
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
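
/*
 * Worked example: a caller passing timeout_us = 1000 gives the loop above
 * at most 100 passes (one udelay(10) each) for the pending bit to clear
 * before the event is treated as stuck and -EBUSY is returned.
 */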
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Return early if the heartbeat interval has not yet elapsed. */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
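
/*
 * Budget note: the MII helpers below poll MI_COM_BUSY up to PHY_BUSY_LOOPS
 * times with udelay(10) per pass, i.e. roughly a 50 ms worst case before a
 * PHY access is abandoned with -EBUSY.
 */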
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
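
/*
 * Illustrative encoding: MII_TG3_MISC_SHDW is a multiplexed register, so a
 * shadow write ORs the shadow selector, the payload, and the write-enable
 * bit into a single MII write, e.g.
 *
 *	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
 */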
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
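
/*
 * Worked arithmetic: with TG3_FW_EVENT_TIMEOUT_USEC == 2500, delay_cnt is
 * capped at (2500 >> 3) + 1 = 313 iterations of udelay(8), about 2.5 ms,
 * so the loop never waits longer than the firmware event timeout itself.
 */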
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
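
/*
 * Resolution table for tg3_resolve_flowctrl_1000X() above
 * (local advert x link-partner advert -> resolved flow control):
 *
 *	both advertise PAUSE			-> FLOW_CTRL_TX | FLOW_CTRL_RX
 *	both advertise ASYM, local PAUSE too	-> FLOW_CTRL_RX only
 *	both advertise ASYM, remote PAUSE too	-> FLOW_CTRL_TX only
 *	anything else				-> no pause frames
 */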
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
2410 if (!tp->setlpicnt) {
2411 if (current_link_up &&
2412 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2413 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2414 tg3_phy_toggle_auxctl_smdsp(tp, false);
2417 val = tr32(TG3_CPMU_EEE_MODE);
2418 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2422 static void tg3_phy_eee_enable(struct tg3 *tp)
2426 if (tp->link_config.active_speed == SPEED_1000 &&
2427 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2428 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2429 tg3_flag(tp, 57765_CLASS)) &&
2430 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2431 val = MII_TG3_DSP_TAP26_ALNOKO |
2432 MII_TG3_DSP_TAP26_RMRXSTO;
2433 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2434 tg3_phy_toggle_auxctl_smdsp(tp, false);
2437 val = tr32(TG3_CPMU_EEE_MODE);
2438 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2441 static int tg3_wait_macro_done(struct tg3 *tp)
2448 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2449 if ((tmp32 & 0x1000) == 0)
2459 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2461 static const u32 test_pat[4][6] = {
2462 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2463 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2464 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2465 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2469 for (chan = 0; chan < 4; chan++) {
2472 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473 (chan * 0x2000) | 0x0200);
2474 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2476 for (i = 0; i < 6; i++)
2477 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2481 if (tg3_wait_macro_done(tp)) {
2486 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2487 (chan * 0x2000) | 0x0200);
2488 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2489 if (tg3_wait_macro_done(tp)) {
2494 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2495 if (tg3_wait_macro_done(tp)) {
2500 for (i = 0; i < 6; i += 2) {
2503 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2504 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2505 tg3_wait_macro_done(tp)) {
2511 if (low != test_pat[chan][i] ||
2512 high != test_pat[chan][i+1]) {
2513 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2515 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2525 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2529 for (chan = 0; chan < 4; chan++) {
2532 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2533 (chan * 0x2000) | 0x0200);
2534 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2535 for (i = 0; i < 6; i++)
2536 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2537 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2538 if (tg3_wait_macro_done(tp))
2545 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2547 u32 reg32, phy9_orig;
2548 int retries, do_phy_reset, err;
2554 err = tg3_bmcr_reset(tp);
2560 /* Disable transmitter and interrupt. */
2561 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2565 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2567 /* Set full-duplex, 1000 Mbps. */
2568 tg3_writephy(tp, MII_BMCR,
2569 BMCR_FULLDPLX | BMCR_SPEED1000);
2571 /* Set to master mode. */
2572 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2575 tg3_writephy(tp, MII_CTRL1000,
2576 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2578 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2582 /* Block the PHY control access. */
2583 tg3_phydsp_write(tp, 0x8005, 0x0800);
2585 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2588 } while (--retries);
2590 err = tg3_phy_reset_chanpat(tp);
2594 tg3_phydsp_write(tp, 0x8005, 0x0000);
2596 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2597 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2599 tg3_phy_toggle_auxctl_smdsp(tp, false);
2601 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2603 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2608 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2613 static void tg3_carrier_off(struct tg3 *tp)
2615 netif_carrier_off(tp->dev);
2616 tp->link_up = false;
2619 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2621 if (tg3_flag(tp, ENABLE_ASF))
2622 netdev_warn(tp->dev,
2623 "Management side-band traffic will be interrupted during phy settings change\n");
2626 /* Unconditionally reset the tigon3 PHY; callers decide when a
2627  * reset is needed.
2629 static int tg3_phy_reset(struct tg3 *tp)
2634 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2635 val = tr32(GRC_MISC_CFG);
2636 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2639 err = tg3_readphy(tp, MII_BMSR, &val);
2640 err |= tg3_readphy(tp, MII_BMSR, &val);
2644 if (netif_running(tp->dev) && tp->link_up) {
2645 netif_carrier_off(tp->dev);
2646 tg3_link_report(tp);
2649 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2650 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2651 tg3_asic_rev(tp) == ASIC_REV_5705) {
2652 err = tg3_phy_reset_5703_4_5(tp);
2659 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2660 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2661 cpmuctrl = tr32(TG3_CPMU_CTRL);
2662 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2664 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2667 err = tg3_bmcr_reset(tp);
2671 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2672 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2673 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2675 tw32(TG3_CPMU_CTRL, cpmuctrl);
2678 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2679 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2680 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2681 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2682 CPMU_LSPD_1000MB_MACCLK_12_5) {
2683 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2685 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2689 if (tg3_flag(tp, 5717_PLUS) &&
2690 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2693 tg3_phy_apply_otp(tp);
2695 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2696 tg3_phy_toggle_apd(tp, true);
2698 tg3_phy_toggle_apd(tp, false);
2701 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2702 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2703 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2704 tg3_phydsp_write(tp, 0x000a, 0x0323);
2705 tg3_phy_toggle_auxctl_smdsp(tp, false);
2708 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2713 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2714 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715 tg3_phydsp_write(tp, 0x000a, 0x310b);
2716 tg3_phydsp_write(tp, 0x201f, 0x9506);
2717 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2718 tg3_phy_toggle_auxctl_smdsp(tp, false);
2720 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2721 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2723 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2724 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2725 tg3_writephy(tp, MII_TG3_TEST1,
2726 MII_TG3_TEST1_TRIM_EN | 0x4);
2728 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2730 tg3_phy_toggle_auxctl_smdsp(tp, false);
2734 /* Set Extended packet length bit (bit 14) on all chips that support jumbo frames. */
2736 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2737 /* Cannot do read-modify-write on 5401 */
2738 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2739 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2740 /* Set bit 14 with read-modify-write to preserve other bits */
2741 err = tg3_phy_auxctl_read(tp,
2742 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2744 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2745 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2748 /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2749  * jumbo frame transmission.
2751 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2753 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2754 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2757 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2758 /* adjust output voltage */
2759 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2762 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2763 tg3_phydsp_write(tp, 0xffb, 0x4000);
2765 tg3_phy_toggle_automdix(tp, true);
2766 tg3_phy_set_wirespeed(tp);
2770 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2771 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2772 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2773 TG3_GPIO_MSG_NEED_VAUX)
2774 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2775 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2778 (TG3_GPIO_MSG_DRVR_PRES << 12))
2780 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2781 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2784 (TG3_GPIO_MSG_NEED_VAUX << 12))
2786 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2790 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2791 tg3_asic_rev(tp) == ASIC_REV_5719)
2792 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2794 status = tr32(TG3_CPMU_DRV_STATUS);
2796 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2797 status &= ~(TG3_GPIO_MSG_MASK << shift);
2798 status |= (newstat << shift);
2800 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 tg3_asic_rev(tp) == ASIC_REV_5719)
2802 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2804 tw32(TG3_CPMU_DRV_STATUS, status);
2806 return status >> TG3_APE_GPIO_MSG_SHIFT;
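
/* Illustrative example (assuming TG3_APE_GPIO_MSG_SHIFT is 4, as in
 * tg3.h): for pci_fn == 2 and
 * newstat == (TG3_GPIO_MSG_DRVR_PRES | TG3_GPIO_MSG_NEED_VAUX) == 0x3,
 * the shift is 4 + 4 * 2 == 12, so bits 12-13 of the status word are
 * cleared and then set to 0x3.  Each PCI function thus owns a 2-bit
 * field inside its own nibble, and the return value exposes all
 * functions' fields so callers can test the ALL_*_MASK aggregates.
 */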
2809 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2811 if (!tg3_flag(tp, IS_NIC))
2814 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2815 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2816 tg3_asic_rev(tp) == ASIC_REV_5720) {
2817 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2820 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2822 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2823 TG3_GRC_LCLCTL_PWRSW_DELAY);
2825 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2827 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2838 if (!tg3_flag(tp, IS_NIC) ||
2839 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2840 tg3_asic_rev(tp) == ASIC_REV_5701)
2843 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2845 tw32_wait_f(GRC_LOCAL_CTRL,
2846 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2847 TG3_GRC_LCLCTL_PWRSW_DELAY);
2849 tw32_wait_f(GRC_LOCAL_CTRL,
2851 TG3_GRC_LCLCTL_PWRSW_DELAY);
2853 tw32_wait_f(GRC_LOCAL_CTRL,
2854 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2855 TG3_GRC_LCLCTL_PWRSW_DELAY);
2858 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2860 if (!tg3_flag(tp, IS_NIC))
2863 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2864 tg3_asic_rev(tp) == ASIC_REV_5701) {
2865 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2866 (GRC_LCLCTRL_GPIO_OE0 |
2867 GRC_LCLCTRL_GPIO_OE1 |
2868 GRC_LCLCTRL_GPIO_OE2 |
2869 GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 GRC_LCLCTRL_GPIO_OUTPUT1),
2871 TG3_GRC_LCLCTL_PWRSW_DELAY);
2872 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2873 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2874 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2875 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2876 GRC_LCLCTRL_GPIO_OE1 |
2877 GRC_LCLCTRL_GPIO_OE2 |
2878 GRC_LCLCTRL_GPIO_OUTPUT0 |
2879 GRC_LCLCTRL_GPIO_OUTPUT1 |
2881 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2882 TG3_GRC_LCLCTL_PWRSW_DELAY);
2884 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2885 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886 TG3_GRC_LCLCTL_PWRSW_DELAY);
2888 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2889 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890 TG3_GRC_LCLCTL_PWRSW_DELAY);
2893 u32 grc_local_ctrl = 0;
2895 /* Workaround to avoid drawing excessive current. */
2896 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2897 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2898 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2900 TG3_GRC_LCLCTL_PWRSW_DELAY);
2903 /* On 5753 and variants, GPIO2 cannot be used. */
2904 no_gpio2 = tp->nic_sram_data_cfg &
2905 NIC_SRAM_DATA_CFG_NO_GPIO2;
2907 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2908 GRC_LCLCTRL_GPIO_OE1 |
2909 GRC_LCLCTRL_GPIO_OE2 |
2910 GRC_LCLCTRL_GPIO_OUTPUT1 |
2911 GRC_LCLCTRL_GPIO_OUTPUT2;
2913 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2914 GRC_LCLCTRL_GPIO_OUTPUT2);
2916 tw32_wait_f(GRC_LOCAL_CTRL,
2917 tp->grc_local_ctrl | grc_local_ctrl,
2918 TG3_GRC_LCLCTL_PWRSW_DELAY);
2920 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2922 tw32_wait_f(GRC_LOCAL_CTRL,
2923 tp->grc_local_ctrl | grc_local_ctrl,
2924 TG3_GRC_LCLCTL_PWRSW_DELAY);
2927 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2928 tw32_wait_f(GRC_LOCAL_CTRL,
2929 tp->grc_local_ctrl | grc_local_ctrl,
2930 TG3_GRC_LCLCTL_PWRSW_DELAY);
2935 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2939 /* Serialize power state transitions */
2940 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2943 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2944 msg = TG3_GPIO_MSG_NEED_VAUX;
2946 msg = tg3_set_function_status(tp, msg);
2948 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2951 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2952 tg3_pwrsrc_switch_to_vaux(tp);
2954 tg3_pwrsrc_die_with_vmain(tp);
2957 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2960 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2962 bool need_vaux = false;
2964 /* The GPIOs do something completely different on 57765. */
2965 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2968 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2969 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2970 tg3_asic_rev(tp) == ASIC_REV_5720) {
2971 tg3_frob_aux_power_5717(tp, include_wol ?
2972 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2976 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2977 struct net_device *dev_peer;
2979 dev_peer = pci_get_drvdata(tp->pdev_peer);
2981 /* remove_one() may have been run on the peer. */
2983 struct tg3 *tp_peer = netdev_priv(dev_peer);
2985 if (tg3_flag(tp_peer, INIT_COMPLETE))
2988 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2989 tg3_flag(tp_peer, ENABLE_ASF))
2994 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2995 tg3_flag(tp, ENABLE_ASF))
2999 tg3_pwrsrc_switch_to_vaux(tp);
3001 tg3_pwrsrc_die_with_vmain(tp);
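
/* Summary of the rule implemented by tg3_5700_link_polarity() below
 * (descriptive only): link polarity is forced when the LED mode is
 * LED_CTRL_MODE_PHY_2; with a BCM5411 PHY it is forced at any speed
 * other than 10 Mb/s; with any other PHY it is forced only at
 * 10 Mb/s.
 */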
3004 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3006 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3008 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3009 if (speed != SPEED_10)
3011 } else if (speed == SPEED_10)
3017 static bool tg3_phy_power_bug(struct tg3 *tp)
3019 switch (tg3_asic_rev(tp)) {
3024 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3033 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3042 static bool tg3_phy_led_bug(struct tg3 *tp)
3044 switch (tg3_asic_rev(tp)) {
3047 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3056 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3060 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3063 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3064 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3065 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3066 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3069 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3070 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3071 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3076 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3078 val = tr32(GRC_MISC_CFG);
3079 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3082 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3084 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3087 tg3_writephy(tp, MII_ADVERTISE, 0);
3088 tg3_writephy(tp, MII_BMCR,
3089 BMCR_ANENABLE | BMCR_ANRESTART);
3091 tg3_writephy(tp, MII_TG3_FET_TEST,
3092 phytest | MII_TG3_FET_SHADOW_EN);
3093 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3094 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3096 MII_TG3_FET_SHDW_AUXMODE4,
3099 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3102 } else if (do_low_power) {
3103 if (!tg3_phy_led_bug(tp))
3104 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3105 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3107 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3108 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3109 MII_TG3_AUXCTL_PCTL_VREG_11V;
3110 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3113 /* The PHY should not be powered down on some chips because
3116 if (tg3_phy_power_bug(tp))
3119 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3120 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3121 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3122 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3123 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3124 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3127 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3130 /* tp->lock is held. */
3131 static int tg3_nvram_lock(struct tg3 *tp)
3133 if (tg3_flag(tp, NVRAM)) {
3136 if (tp->nvram_lock_cnt == 0) {
3137 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3138 for (i = 0; i < 8000; i++) {
3139 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3144 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3148 tp->nvram_lock_cnt++;
3153 /* tp->lock is held. */
3154 static void tg3_nvram_unlock(struct tg3 *tp)
3156 if (tg3_flag(tp, NVRAM)) {
3157 if (tp->nvram_lock_cnt > 0)
3158 tp->nvram_lock_cnt--;
3159 if (tp->nvram_lock_cnt == 0)
3160 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3164 /* tp->lock is held. */
3165 static void tg3_enable_nvram_access(struct tg3 *tp)
3167 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168 u32 nvaccess = tr32(NVRAM_ACCESS);
3170 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3174 /* tp->lock is held. */
3175 static void tg3_disable_nvram_access(struct tg3 *tp)
3177 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178 u32 nvaccess = tr32(NVRAM_ACCESS);
3180 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3184 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3185 u32 offset, u32 *val)
3190 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3193 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3194 EEPROM_ADDR_DEVID_MASK |
3196 tw32(GRC_EEPROM_ADDR,
3198 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3199 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3200 EEPROM_ADDR_ADDR_MASK) |
3201 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3203 for (i = 0; i < 1000; i++) {
3204 tmp = tr32(GRC_EEPROM_ADDR);
3206 if (tmp & EEPROM_ADDR_COMPLETE)
3210 if (!(tmp & EEPROM_ADDR_COMPLETE))
3213 tmp = tr32(GRC_EEPROM_DATA);
3216 * The data will always be opposite the native endian
3217 * format. Perform a blind byteswap to compensate.
3224 #define NVRAM_CMD_TIMEOUT 10000
3226 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230 tw32(NVRAM_CMD, nvram_cmd);
3231 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3232 usleep_range(10, 40);
3233 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3239 if (i == NVRAM_CMD_TIMEOUT)
3245 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3247 if (tg3_flag(tp, NVRAM) &&
3248 tg3_flag(tp, NVRAM_BUFFERED) &&
3249 tg3_flag(tp, FLASH) &&
3250 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3251 (tp->nvram_jedecnum == JEDEC_ATMEL))
3253 addr = ((addr / tp->nvram_pagesize) <<
3254 ATMEL_AT45DB0X1B_PAGE_POS) +
3255 (addr % tp->nvram_pagesize);
3260 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3262 if (tg3_flag(tp, NVRAM) &&
3263 tg3_flag(tp, NVRAM_BUFFERED) &&
3264 tg3_flag(tp, FLASH) &&
3265 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266 (tp->nvram_jedecnum == JEDEC_ATMEL))
3268 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3269 tp->nvram_pagesize) +
3270 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
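
/* Worked example of the address translation above, assuming the usual
 * AT45DB0X1B geometry (264-byte pages, ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *	linear addr 1000:  page   = 1000 / 264 = 3
 *	                   offset = 1000 % 264 = 208
 *	                   phys   = (3 << 9) + 208 = 0x6d0
 *
 * tg3_nvram_logical_addr() inverts this:
 *	(0x6d0 >> 9) * 264 + (0x6d0 & 0x1ff) = 3 * 264 + 208 = 1000
 */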
3275 /* NOTE: Data read in from NVRAM is byteswapped according to
3276 * the byteswapping settings for all other register accesses.
3277 * tg3 devices are BE devices, so on a BE machine, the data
3278 * returned will be exactly as it is seen in NVRAM. On a LE
3279 * machine, the 32-bit value will be byteswapped.
3281 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3285 if (!tg3_flag(tp, NVRAM))
3286 return tg3_nvram_read_using_eeprom(tp, offset, val);
3288 offset = tg3_nvram_phys_addr(tp, offset);
3290 if (offset > NVRAM_ADDR_MSK)
3293 ret = tg3_nvram_lock(tp);
3297 tg3_enable_nvram_access(tp);
3299 tw32(NVRAM_ADDR, offset);
3300 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3301 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3304 *val = tr32(NVRAM_RDDATA);
3306 tg3_disable_nvram_access(tp);
3308 tg3_nvram_unlock(tp);
3313 /* Ensures NVRAM data is in bytestream format. */
3314 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3317 int res = tg3_nvram_read(tp, offset, &v);
3319 *val = cpu_to_be32(v);
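
/* Illustrative usage sketch (hypothetical helper, not part of the
 * driver): because tg3_nvram_read_be32() returns bytestream data, a
 * caller can fill a byte buffer directly, e.g. to pull a block of
 * configuration bytes out of NVRAM:
 *
 *	static int tg3_nvram_read_buf(struct tg3 *tp, u32 offset,
 *				      u8 *buf, u32 len)
 *	{
 *		u32 i;
 *
 *		for (i = 0; i < len; i += 4) {
 *			__be32 v;
 *			int err = tg3_nvram_read_be32(tp, offset + i, &v);
 *
 *			if (err)
 *				return err;
 *			memcpy(buf + i, &v, 4);
 *		}
 *		return 0;
 *	}
 *
 * As with the other NVRAM helpers in this file, offset and len are
 * assumed to be dword aligned.
 */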
3323 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3324 u32 offset, u32 len, u8 *buf)
3329 for (i = 0; i < len; i += 4) {
3335 memcpy(&data, buf + i, 4);
3338 * The SEEPROM interface expects the data to always be opposite
3339 * the native endian format. We accomplish this by reversing
3340 * all the operations that would have been performed on the
3341 * data from a call to tg3_nvram_read_be32().
3343 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3345 val = tr32(GRC_EEPROM_ADDR);
3346 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3348 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3350 tw32(GRC_EEPROM_ADDR, val |
3351 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3352 (addr & EEPROM_ADDR_ADDR_MASK) |
3356 for (j = 0; j < 1000; j++) {
3357 val = tr32(GRC_EEPROM_ADDR);
3359 if (val & EEPROM_ADDR_COMPLETE)
3363 if (!(val & EEPROM_ADDR_COMPLETE)) {
3372 /* offset and length are dword aligned */
3373 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3377 u32 pagesize = tp->nvram_pagesize;
3378 u32 pagemask = pagesize - 1;
3382 tmp = kmalloc(pagesize, GFP_KERNEL);
3388 u32 phy_addr, page_off, size;
3390 phy_addr = offset & ~pagemask;
3392 for (j = 0; j < pagesize; j += 4) {
3393 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3394 (__be32 *) (tmp + j));
3401 page_off = offset & pagemask;
3408 memcpy(tmp + page_off, buf, size);
3410 offset = offset + (pagesize - page_off);
3412 tg3_enable_nvram_access(tp);
3415 * Before we can erase the flash page, we need
3416 * to issue a special "write enable" command.
3418 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3420 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3423 /* Erase the target page */
3424 tw32(NVRAM_ADDR, phy_addr);
3426 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3427 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3429 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3432 /* Issue another write enable to start the write. */
3433 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3435 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3438 for (j = 0; j < pagesize; j += 4) {
3441 data = *((__be32 *) (tmp + j));
3443 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3445 tw32(NVRAM_ADDR, phy_addr + j);
3447 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3451 nvram_cmd |= NVRAM_CMD_FIRST;
3452 else if (j == (pagesize - 4))
3453 nvram_cmd |= NVRAM_CMD_LAST;
3455 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3463 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3464 tg3_nvram_exec_cmd(tp, nvram_cmd);
3471 /* offset and length are dword aligned */
3472 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3477 for (i = 0; i < len; i += 4, offset += 4) {
3478 u32 page_off, phy_addr, nvram_cmd;
3481 memcpy(&data, buf + i, 4);
3482 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3484 page_off = offset % tp->nvram_pagesize;
3486 phy_addr = tg3_nvram_phys_addr(tp, offset);
3488 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3490 if (page_off == 0 || i == 0)
3491 nvram_cmd |= NVRAM_CMD_FIRST;
3492 if (page_off == (tp->nvram_pagesize - 4))
3493 nvram_cmd |= NVRAM_CMD_LAST;
3496 nvram_cmd |= NVRAM_CMD_LAST;
3498 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3499 !tg3_flag(tp, FLASH) ||
3500 !tg3_flag(tp, 57765_PLUS))
3501 tw32(NVRAM_ADDR, phy_addr);
3503 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3504 !tg3_flag(tp, 5755_PLUS) &&
3505 (tp->nvram_jedecnum == JEDEC_ST) &&
3506 (nvram_cmd & NVRAM_CMD_FIRST)) {
3509 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3510 ret = tg3_nvram_exec_cmd(tp, cmd);
3514 if (!tg3_flag(tp, FLASH)) {
3515 /* We always do complete word writes to the EEPROM. */
3516 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3519 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3526 /* offset and length are dword aligned */
3527 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3531 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3533 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3537 if (!tg3_flag(tp, NVRAM)) {
3538 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3542 ret = tg3_nvram_lock(tp);
3546 tg3_enable_nvram_access(tp);
3547 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3548 tw32(NVRAM_WRITE1, 0x406);
3550 grc_mode = tr32(GRC_MODE);
3551 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3553 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3554 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3557 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3561 grc_mode = tr32(GRC_MODE);
3562 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3564 tg3_disable_nvram_access(tp);
3565 tg3_nvram_unlock(tp);
3568 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3569 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
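
/* Illustrative caller sketch (hypothetical values, for exposition
 * only).  tg3_nvram_write_block() expects a dword-aligned offset and
 * length, and bytestream (big-endian) data symmetric with what
 * tg3_nvram_read_be32() returns:
 *
 *	u8 buf[8] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc, 0x00, 0x00 };
 *	int err = tg3_nvram_write_block(tp, 0x80, sizeof(buf), buf);
 *
 * The buffered vs. unbuffered path is chosen internally from the
 * NVRAM_BUFFERED and FLASH flags.
 */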
3576 #define RX_CPU_SCRATCH_BASE 0x30000
3577 #define RX_CPU_SCRATCH_SIZE 0x04000
3578 #define TX_CPU_SCRATCH_BASE 0x34000
3579 #define TX_CPU_SCRATCH_SIZE 0x04000
3581 /* tp->lock is held. */
3582 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3585 const int iters = 10000;
3587 for (i = 0; i < iters; i++) {
3588 tw32(cpu_base + CPU_STATE, 0xffffffff);
3589 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3590 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3592 if (pci_channel_offline(tp->pdev))
3596 return (i == iters) ? -EBUSY : 0;
3599 /* tp->lock is held. */
3600 static int tg3_rxcpu_pause(struct tg3 *tp)
3602 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3604 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3605 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3611 /* tp->lock is held. */
3612 static int tg3_txcpu_pause(struct tg3 *tp)
3614 return tg3_pause_cpu(tp, TX_CPU_BASE);
3617 /* tp->lock is held. */
3618 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3620 tw32(cpu_base + CPU_STATE, 0xffffffff);
3621 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3624 /* tp->lock is held. */
3625 static void tg3_rxcpu_resume(struct tg3 *tp)
3627 tg3_resume_cpu(tp, RX_CPU_BASE);
3630 /* tp->lock is held. */
3631 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3635 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3637 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3638 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3640 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3643 if (cpu_base == RX_CPU_BASE) {
3644 rc = tg3_rxcpu_pause(tp);
3647 * There is only an Rx CPU for the 5750 derivative in the
3650 if (tg3_flag(tp, IS_SSB_CORE))
3653 rc = tg3_txcpu_pause(tp);
3657 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3658 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3662 /* Clear firmware's nvram arbitration. */
3663 if (tg3_flag(tp, NVRAM))
3664 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3668 static int tg3_fw_data_len(struct tg3 *tp,
3669 const struct tg3_firmware_hdr *fw_hdr)
3673 /* Non-fragmented firmware has one firmware header followed by a
3674  * contiguous chunk of data to be written. The length field in that
3675  * header is not the length of the data to be written but the complete
3676  * length of the BSS. The data length is instead derived from
3677  * tp->fw->size minus the headers.
3679  * Fragmented firmware has a main header followed by multiple
3680  * fragments. Each fragment is identical to non-fragmented firmware:
3681  * a firmware header followed by a contiguous chunk of data. In
3682  * the main header the length field is unused and set to 0xffffffff.
3683  * In each fragment header the length is the entire size of that
3684  * fragment, i.e. fragment data plus header length. The data length is
3685  * therefore the header's length field minus TG3_FW_HDR_LEN.
3687 if (tp->fw_len == 0xffffffff)
3688 fw_len = be32_to_cpu(fw_hdr->len);
3690 fw_len = tp->fw->size;
3692 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
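
/* Illustrative layout of the two formats described above (each hdr is
 * a struct tg3_firmware_hdr of TG3_FW_HDR_LEN bytes):
 *
 * Non-fragmented:
 *	+------------------------------+
 *	| hdr { version, base, len }   |  len = total BSS length, not
 *	+------------------------------+  the data length
 *	| data: tp->fw->size -         |
 *	|       TG3_FW_HDR_LEN bytes   |
 *	+------------------------------+
 *
 * Fragmented:
 *	+------------------------------+
 *	| main hdr, len = 0xffffffff   |
 *	+------------------------------+
 *	| frag hdr, len = hdr + data   |
 *	| frag data                    |
 *	+------------------------------+
 *	| frag hdr ...                 |
 *	+------------------------------+
 */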
3695 /* tp->lock is held. */
3696 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3697 u32 cpu_scratch_base, int cpu_scratch_size,
3698 const struct tg3_firmware_hdr *fw_hdr)
3701 void (*write_op)(struct tg3 *, u32, u32);
3702 int total_len = tp->fw->size;
3704 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3706 "%s: Trying to load TX cpu firmware which is 5705\n",
3711 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3712 write_op = tg3_write_mem;
3714 write_op = tg3_write_indirect_reg32;
3716 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3717 /* It is possible that the bootcode is still loading at this point.
3718  * Grab the NVRAM lock before halting the CPU.
3720 int lock_err = tg3_nvram_lock(tp);
3721 err = tg3_halt_cpu(tp, cpu_base);
3723 tg3_nvram_unlock(tp);
3727 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3728 write_op(tp, cpu_scratch_base + i, 0);
3729 tw32(cpu_base + CPU_STATE, 0xffffffff);
3730 tw32(cpu_base + CPU_MODE,
3731 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3733 /* Subtract additional main header for fragmented firmware and
3734 * advance to the first fragment
3736 total_len -= TG3_FW_HDR_LEN;
3741 u32 *fw_data = (u32 *)(fw_hdr + 1);
3742 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3743 write_op(tp, cpu_scratch_base +
3744 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3746 be32_to_cpu(fw_data[i]));
3748 total_len -= be32_to_cpu(fw_hdr->len);
3750 /* Advance to next fragment */
3751 fw_hdr = (struct tg3_firmware_hdr *)
3752 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3753 } while (total_len > 0);
3761 /* tp->lock is held. */
3762 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3765 const int iters = 5;
3767 tw32(cpu_base + CPU_STATE, 0xffffffff);
3768 tw32_f(cpu_base + CPU_PC, pc);
3770 for (i = 0; i < iters; i++) {
3771 if (tr32(cpu_base + CPU_PC) == pc)
3773 tw32(cpu_base + CPU_STATE, 0xffffffff);
3774 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3775 tw32_f(cpu_base + CPU_PC, pc);
3779 return (i == iters) ? -EBUSY : 0;
3782 /* tp->lock is held. */
3783 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3785 const struct tg3_firmware_hdr *fw_hdr;
3788 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3790 /* The firmware blob starts with version numbers, followed by
3791  * the start address and length. We set the complete length here:
3792  * length = end_address_of_bss - start_address_of_text. The
3793  * remainder is the blob to be loaded contiguously from the start
3794  * address. */
3796 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3797 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3802 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3803 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3808 /* Now start up only the RX CPU. */
3809 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3810 be32_to_cpu(fw_hdr->base_addr));
3812 netdev_err(tp->dev, "%s: failed to set RX CPU PC (is %08x, "
3813 "should be %08x)\n", __func__,
3814 tr32(RX_CPU_BASE + CPU_PC),
3815 be32_to_cpu(fw_hdr->base_addr));
3819 tg3_rxcpu_resume(tp);
3824 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3826 const int iters = 1000;
3830 /* Wait for boot code to complete initialization and enter service
3831 * loop. It is then safe to download service patches
3833 for (i = 0; i < iters; i++) {
3834 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3841 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3845 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3847 netdev_warn(tp->dev,
3848 "Other patches exist. Not downloading EEE patch\n");
3855 /* tp->lock is held. */
3856 static void tg3_load_57766_firmware(struct tg3 *tp)
3858 struct tg3_firmware_hdr *fw_hdr;
3860 if (!tg3_flag(tp, NO_NVRAM))
3863 if (tg3_validate_rxcpu_state(tp))
3869 /* This firmware blob has a different format from older firmware
3870  * releases, as described below. The main difference is that the
3871  * data is fragmented and written to non-contiguous locations.
3873  * At the beginning there is a firmware header identical to other
3874  * firmware, consisting of version, base address and length. The
3875  * length here is unused and set to 0xffffffff.
3877  * This is followed by a series of firmware fragments, each
3878  * individually identical to older firmware, i.e. a firmware
3879  * header followed by the data for that fragment. The version
3880  * field of each fragment header is unused.
3883 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3884 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3887 if (tg3_rxcpu_pause(tp))
3890 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3891 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3893 tg3_rxcpu_resume(tp);
3896 /* tp->lock is held. */
3897 static int tg3_load_tso_firmware(struct tg3 *tp)
3899 const struct tg3_firmware_hdr *fw_hdr;
3900 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3903 if (!tg3_flag(tp, FW_TSO))
3906 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3908 /* The firmware blob starts with version numbers, followed by
3909  * the start address and length. We set the complete length here:
3910  * length = end_address_of_bss - start_address_of_text. The
3911  * remainder is the blob to be loaded contiguously from the start
3912  * address. */
3914 cpu_scratch_size = tp->fw_len;
3916 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3917 cpu_base = RX_CPU_BASE;
3918 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3920 cpu_base = TX_CPU_BASE;
3921 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3922 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3925 err = tg3_load_firmware_cpu(tp, cpu_base,
3926 cpu_scratch_base, cpu_scratch_size,
3931 /* Now start up the CPU. */
3932 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3933 be32_to_cpu(fw_hdr->base_addr));
3936 "%s fails to set CPU PC, is %08x should be %08x\n",
3937 __func__, tr32(cpu_base + CPU_PC),
3938 be32_to_cpu(fw_hdr->base_addr));
3942 tg3_resume_cpu(tp, cpu_base);
3946 /* tp->lock is held. */
3947 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3950 u32 addr_high, addr_low;
3952 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954 (mac_addr[4] << 8) | mac_addr[5]);
3957 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3961 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
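
/* Worked example: for MAC address 00:10:18:aa:bb:cc,
 *	addr_high = (0x00 << 8) | 0x10              = 0x00000010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) |
 *		    (0xbb << 8)  | 0xcc            = 0x18aabbcc
 * i.e. the high register holds the first two octets and the low
 * register the remaining four, most significant octet first.
 */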
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3972 for (i = 0; i < 4; i++) {
3973 if (i == 1 && skip_mac_1)
3975 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3978 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979 tg3_asic_rev(tp) == ASIC_REV_5704) {
3980 for (i = 4; i < 16; i++)
3981 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3984 addr_high = (tp->dev->dev_addr[0] +
3985 tp->dev->dev_addr[1] +
3986 tp->dev->dev_addr[2] +
3987 tp->dev->dev_addr[3] +
3988 tp->dev->dev_addr[4] +
3989 tp->dev->dev_addr[5]) &
3990 TX_BACKOFF_SEED_MASK;
3991 tw32(MAC_TX_BACKOFF_SEED, addr_high);
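
/* The TX backoff seed is simply the byte-sum of the MAC address,
 * masked to TX_BACKOFF_SEED_MASK (assumed to be 0x3ff, as in tg3.h).
 * For 00:10:18:aa:bb:cc:
 *	0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259
 *	0x259 & 0x3ff = 0x259
 */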
3994 static void tg3_enable_register_access(struct tg3 *tp)
3997 * Make sure register accesses (indirect or otherwise) will function
4000 pci_write_config_dword(tp->pdev,
4001 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4004 static int tg3_power_up(struct tg3 *tp)
4008 tg3_enable_register_access(tp);
4010 err = pci_set_power_state(tp->pdev, PCI_D0);
4012 /* Switch out of Vaux if it is a NIC */
4013 tg3_pwrsrc_switch_to_vmain(tp);
4015 netdev_err(tp->dev, "Transition to D0 failed\n");
4021 static int tg3_setup_phy(struct tg3 *, bool);
4023 static int tg3_power_down_prepare(struct tg3 *tp)
4026 bool device_should_wake, do_low_power;
4028 tg3_enable_register_access(tp);
4030 /* Restore the CLKREQ setting. */
4031 if (tg3_flag(tp, CLKREQ_BUG))
4032 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033 PCI_EXP_LNKCTL_CLKREQ_EN);
4035 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036 tw32(TG3PCI_MISC_HOST_CTRL,
4037 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4039 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040 tg3_flag(tp, WOL_ENABLE);
4042 if (tg3_flag(tp, USE_PHYLIB)) {
4043 do_low_power = false;
4044 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4047 struct phy_device *phydev;
4050 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4052 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4054 tp->link_config.speed = phydev->speed;
4055 tp->link_config.duplex = phydev->duplex;
4056 tp->link_config.autoneg = phydev->autoneg;
4057 ethtool_convert_link_mode_to_legacy_u32(
4058 &tp->link_config.advertising,
4059 phydev->advertising);
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4062 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4064 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4066 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4069 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4070 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4071 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4073 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4075 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4078 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4083 linkmode_copy(phydev->advertising, advertising);
4084 phy_start_aneg(phydev);
4086 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4087 if (phyid != PHY_ID_BCMAC131) {
4088 phyid &= PHY_BCM_OUI_MASK;
4089 if (phyid == PHY_BCM_OUI_1 ||
4090 phyid == PHY_BCM_OUI_2 ||
4091 phyid == PHY_BCM_OUI_3)
4092 do_low_power = true;
4096 do_low_power = true;
4098 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4099 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4101 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4102 tg3_setup_phy(tp, false);
4105 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4108 val = tr32(GRC_VCPU_EXT_CTRL);
4109 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4110 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4114 for (i = 0; i < 200; i++) {
4115 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4116 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4121 if (tg3_flag(tp, WOL_CAP))
4122 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4123 WOL_DRV_STATE_SHUTDOWN |
4127 if (device_should_wake) {
4130 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4132 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4133 tg3_phy_auxctl_write(tp,
4134 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4135 MII_TG3_AUXCTL_PCTL_WOL_EN |
4136 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4137 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4141 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4142 mac_mode = MAC_MODE_PORT_MODE_GMII;
4143 else if (tp->phy_flags &
4144 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4145 if (tp->link_config.active_speed == SPEED_1000)
4146 mac_mode = MAC_MODE_PORT_MODE_GMII;
4148 mac_mode = MAC_MODE_PORT_MODE_MII;
4150 mac_mode = MAC_MODE_PORT_MODE_MII;
4152 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4153 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4154 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4155 SPEED_100 : SPEED_10;
4156 if (tg3_5700_link_polarity(tp, speed))
4157 mac_mode |= MAC_MODE_LINK_POLARITY;
4159 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4162 mac_mode = MAC_MODE_PORT_MODE_TBI;
4165 if (!tg3_flag(tp, 5750_PLUS))
4166 tw32(MAC_LED_CTRL, tp->led_ctrl);
4168 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4169 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4170 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4171 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4173 if (tg3_flag(tp, ENABLE_APE))
4174 mac_mode |= MAC_MODE_APE_TX_EN |
4175 MAC_MODE_APE_RX_EN |
4176 MAC_MODE_TDE_ENABLE;
4178 tw32_f(MAC_MODE, mac_mode);
4181 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4185 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4186 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4187 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4190 base_val = tp->pci_clock_ctrl;
4191 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4192 CLOCK_CTRL_TXCLK_DISABLE);
4194 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4195 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4196 } else if (tg3_flag(tp, 5780_CLASS) ||
4197 tg3_flag(tp, CPMU_PRESENT) ||
4198 tg3_asic_rev(tp) == ASIC_REV_5906) {
4200 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4201 u32 newbits1, newbits2;
4203 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4204 tg3_asic_rev(tp) == ASIC_REV_5701) {
4205 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4206 CLOCK_CTRL_TXCLK_DISABLE |
4208 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209 } else if (tg3_flag(tp, 5705_PLUS)) {
4210 newbits1 = CLOCK_CTRL_625_CORE;
4211 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4213 newbits1 = CLOCK_CTRL_ALTCLK;
4214 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4217 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4220 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4223 if (!tg3_flag(tp, 5705_PLUS)) {
4226 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4227 tg3_asic_rev(tp) == ASIC_REV_5701) {
4228 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4229 CLOCK_CTRL_TXCLK_DISABLE |
4230 CLOCK_CTRL_44MHZ_CORE);
4232 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4235 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4236 tp->pci_clock_ctrl | newbits3, 40);
4240 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4241 tg3_power_down_phy(tp, do_low_power);
4243 tg3_frob_aux_power(tp, true);
4245 /* Workaround for unstable PLL clock */
4246 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4247 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4248 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4249 u32 val = tr32(0x7d00);
4251 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4253 if (!tg3_flag(tp, ENABLE_ASF)) {
4256 err = tg3_nvram_lock(tp);
4257 tg3_halt_cpu(tp, RX_CPU_BASE);
4259 tg3_nvram_unlock(tp);
4263 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4265 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4270 static void tg3_power_down(struct tg3 *tp)
4272 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4273 pci_set_power_state(tp->pdev, PCI_D3hot);
4276 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4278 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4279 case MII_TG3_AUX_STAT_10HALF:
4281 *duplex = DUPLEX_HALF;
4284 case MII_TG3_AUX_STAT_10FULL:
4286 *duplex = DUPLEX_FULL;
4289 case MII_TG3_AUX_STAT_100HALF:
4291 *duplex = DUPLEX_HALF;
4294 case MII_TG3_AUX_STAT_100FULL:
4296 *duplex = DUPLEX_FULL;
4299 case MII_TG3_AUX_STAT_1000HALF:
4300 *speed = SPEED_1000;
4301 *duplex = DUPLEX_HALF;
4304 case MII_TG3_AUX_STAT_1000FULL:
4305 *speed = SPEED_1000;
4306 *duplex = DUPLEX_FULL;
4310 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4311 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4313 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4317 *speed = SPEED_UNKNOWN;
4318 *duplex = DUPLEX_UNKNOWN;
4323 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4328 new_adv = ADVERTISE_CSMA;
4329 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4330 new_adv |= mii_advertise_flowctrl(flowctrl);
4332 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4336 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4337 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4339 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4340 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4341 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4343 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4348 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4351 tw32(TG3_CPMU_EEE_MODE,
4352 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4354 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4359 /* Advertise 100-BaseTX EEE ability */
4360 if (advertise & ADVERTISED_100baseT_Full)
4361 val |= MDIO_AN_EEE_ADV_100TX;
4362 /* Advertise 1000-BaseT EEE ability */
4363 if (advertise & ADVERTISED_1000baseT_Full)
4364 val |= MDIO_AN_EEE_ADV_1000T;
4366 if (!tp->eee.eee_enabled) {
4368 tp->eee.advertised = 0;
4370 tp->eee.advertised = advertise &
4371 (ADVERTISED_100baseT_Full |
4372 ADVERTISED_1000baseT_Full);
4375 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4379 switch (tg3_asic_rev(tp)) {
4381 case ASIC_REV_57765:
4382 case ASIC_REV_57766:
4384 /* If we advertised any EEE modes above... */
4386 val = MII_TG3_DSP_TAP26_ALNOKO |
4387 MII_TG3_DSP_TAP26_RMRXSTO |
4388 MII_TG3_DSP_TAP26_OPCSINPT;
4389 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4393 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4394 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4395 MII_TG3_DSP_CH34TP2_HIBW01);
4398 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4407 static void tg3_phy_copper_begin(struct tg3 *tp)
4409 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4410 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4413 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4414 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4415 adv = ADVERTISED_10baseT_Half |
4416 ADVERTISED_10baseT_Full;
4417 if (tg3_flag(tp, WOL_SPEED_100MB))
4418 adv |= ADVERTISED_100baseT_Half |
4419 ADVERTISED_100baseT_Full;
4420 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4421 if (!(tp->phy_flags &
4422 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4423 adv |= ADVERTISED_1000baseT_Half;
4424 adv |= ADVERTISED_1000baseT_Full;
4427 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4429 adv = tp->link_config.advertising;
4430 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4431 adv &= ~(ADVERTISED_1000baseT_Half |
4432 ADVERTISED_1000baseT_Full);
4434 fc = tp->link_config.flowctrl;
4437 tg3_phy_autoneg_cfg(tp, adv, fc);
4439 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4440 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4441 /* Normally during power down we want to autonegotiate
4442 * the lowest possible speed for WOL. However, to avoid
4443 * link flap, we leave it untouched.
4448 tg3_writephy(tp, MII_BMCR,
4449 BMCR_ANENABLE | BMCR_ANRESTART);
4452 u32 bmcr, orig_bmcr;
4454 tp->link_config.active_speed = tp->link_config.speed;
4455 tp->link_config.active_duplex = tp->link_config.duplex;
4457 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4458 /* With autoneg disabled, 5715 only links up when the
4459 * advertisement register has the configured speed
4462 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4466 switch (tp->link_config.speed) {
4472 bmcr |= BMCR_SPEED100;
4476 bmcr |= BMCR_SPEED1000;
4480 if (tp->link_config.duplex == DUPLEX_FULL)
4481 bmcr |= BMCR_FULLDPLX;
4483 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4484 (bmcr != orig_bmcr)) {
4485 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4486 for (i = 0; i < 1500; i++) {
4490 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4491 tg3_readphy(tp, MII_BMSR, &tmp))
4493 if (!(tmp & BMSR_LSTATUS)) {
4498 tg3_writephy(tp, MII_BMCR, bmcr);
4504 static int tg3_phy_pull_config(struct tg3 *tp)
4509 err = tg3_readphy(tp, MII_BMCR, &val);
4513 if (!(val & BMCR_ANENABLE)) {
4514 tp->link_config.autoneg = AUTONEG_DISABLE;
4515 tp->link_config.advertising = 0;
4516 tg3_flag_clear(tp, PAUSE_AUTONEG);
4520 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4522 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4525 tp->link_config.speed = SPEED_10;
4528 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4531 tp->link_config.speed = SPEED_100;
4533 case BMCR_SPEED1000:
4534 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4535 tp->link_config.speed = SPEED_1000;
4543 if (val & BMCR_FULLDPLX)
4544 tp->link_config.duplex = DUPLEX_FULL;
4546 tp->link_config.duplex = DUPLEX_HALF;
4548 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4554 tp->link_config.autoneg = AUTONEG_ENABLE;
4555 tp->link_config.advertising = ADVERTISED_Autoneg;
4556 tg3_flag_set(tp, PAUSE_AUTONEG);
4558 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4561 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4565 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4566 tp->link_config.advertising |= adv | ADVERTISED_TP;
4568 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4570 tp->link_config.advertising |= ADVERTISED_FIBRE;
4573 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4576 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4577 err = tg3_readphy(tp, MII_CTRL1000, &val);
4581 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4583 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4587 adv = tg3_decode_flowctrl_1000X(val);
4588 tp->link_config.flowctrl = adv;
4590 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4591 adv = mii_adv_to_ethtool_adv_x(val);
4594 tp->link_config.advertising |= adv;
4601 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4605 /* Turn off tap power management. */
4606 /* Set Extended packet length bit */
4607 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4609 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4610 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4612 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4613 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4620 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4622 struct ethtool_eee eee;
4624 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4627 tg3_eee_pull_config(tp, &eee);
4629 if (tp->eee.eee_enabled) {
4630 if (tp->eee.advertised != eee.advertised ||
4631 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4632 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4635 /* EEE is disabled but we're advertising */
4643 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4645 u32 advmsk, tgtadv, advertising;
4647 advertising = tp->link_config.advertising;
4648 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4650 advmsk = ADVERTISE_ALL;
4651 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4652 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4653 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4656 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4659 if ((*lcladv & advmsk) != tgtadv)
4662 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4665 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4667 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4671 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4672 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4673 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4674 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4675 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4677 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4680 if (tg3_ctrl != tgtadv)
4687 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4691 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4694 if (tg3_readphy(tp, MII_STAT1000, &val))
4697 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4700 if (tg3_readphy(tp, MII_LPA, rmtadv))
4703 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4704 tp->link_config.rmt_adv = lpeth;
4709 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4711 if (curr_link_up != tp->link_up) {
4713 netif_carrier_on(tp->dev);
4715 netif_carrier_off(tp->dev);
4716 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4717 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4720 tg3_link_report(tp);
4727 static void tg3_clear_mac_status(struct tg3 *tp)
4732 MAC_STATUS_SYNC_CHANGED |
4733 MAC_STATUS_CFG_CHANGED |
4734 MAC_STATUS_MI_COMPLETION |
4735 MAC_STATUS_LNKSTATE_CHANGED);
4739 static void tg3_setup_eee(struct tg3 *tp)
4743 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4744 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4745 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4746 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4748 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4750 tw32_f(TG3_CPMU_EEE_CTRL,
4751 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4753 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4754 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4755 TG3_CPMU_EEEMD_LPI_IN_RX |
4756 TG3_CPMU_EEEMD_EEE_ENABLE;
4758 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4759 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4761 if (tg3_flag(tp, ENABLE_APE))
4762 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4764 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4766 tw32_f(TG3_CPMU_EEE_DBTMR1,
4767 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4768 (tp->eee.tx_lpi_timer & 0xffff));
4770 tw32_f(TG3_CPMU_EEE_DBTMR2,
4771 TG3_CPMU_DBTMR2_APE_TX_2047US |
4772 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4775 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4777 bool current_link_up;
4779 u32 lcl_adv, rmt_adv;
4784 tg3_clear_mac_status(tp);
4786 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4788 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4792 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4794 /* Some third-party PHYs need to be reset on link going
4797 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4798 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4799 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4801 tg3_readphy(tp, MII_BMSR, &bmsr);
4802 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4803 !(bmsr & BMSR_LSTATUS))
4809 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4810 tg3_readphy(tp, MII_BMSR, &bmsr);
4811 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4812 !tg3_flag(tp, INIT_COMPLETE))
4815 if (!(bmsr & BMSR_LSTATUS)) {
4816 err = tg3_init_5401phy_dsp(tp);
4820 tg3_readphy(tp, MII_BMSR, &bmsr);
4821 for (i = 0; i < 1000; i++) {
4823 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4824 (bmsr & BMSR_LSTATUS)) {
4830 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4831 TG3_PHY_REV_BCM5401_B0 &&
4832 !(bmsr & BMSR_LSTATUS) &&
4833 tp->link_config.active_speed == SPEED_1000) {
4834 err = tg3_phy_reset(tp);
4836 err = tg3_init_5401phy_dsp(tp);
4841 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4842 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4843 /* 5701 {A0,B0} CRC bug workaround */
4844 tg3_writephy(tp, 0x15, 0x0a75);
4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4847 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4850 /* Clear pending interrupts... */
4851 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4854 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4855 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4856 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4857 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4859 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4860 tg3_asic_rev(tp) == ASIC_REV_5701) {
4861 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4862 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4863 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4865 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4868 current_link_up = false;
4869 current_speed = SPEED_UNKNOWN;
4870 current_duplex = DUPLEX_UNKNOWN;
4871 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4872 tp->link_config.rmt_adv = 0;
4874 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4875 err = tg3_phy_auxctl_read(tp,
4876 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4878 if (!err && !(val & (1 << 10))) {
4879 tg3_phy_auxctl_write(tp,
4880 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4887 for (i = 0; i < 100; i++) {
4888 tg3_readphy(tp, MII_BMSR, &bmsr);
4889 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4890 (bmsr & BMSR_LSTATUS))
4895 if (bmsr & BMSR_LSTATUS) {
4898 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4899 for (i = 0; i < 2000; i++) {
4901 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4906 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4911 for (i = 0; i < 200; i++) {
4912 tg3_readphy(tp, MII_BMCR, &bmcr);
4913 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4915 if (bmcr && bmcr != 0x7fff)
4923 tp->link_config.active_speed = current_speed;
4924 tp->link_config.active_duplex = current_duplex;
4926 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4927 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4929 if ((bmcr & BMCR_ANENABLE) &&
4931 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4932 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4933 current_link_up = true;
4935 /* EEE settings changes take effect only after a phy
4936 * reset. If we have skipped a reset due to Link Flap
4937 * Avoidance being enabled, do it now.
4939 if (!eee_config_ok &&
4940 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4946 if (!(bmcr & BMCR_ANENABLE) &&
4947 tp->link_config.speed == current_speed &&
4948 tp->link_config.duplex == current_duplex) {
4949 current_link_up = true;
4953 if (current_link_up &&
4954 tp->link_config.active_duplex == DUPLEX_FULL) {
4957 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4958 reg = MII_TG3_FET_GEN_STAT;
4959 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4961 reg = MII_TG3_EXT_STAT;
4962 bit = MII_TG3_EXT_STAT_MDIX;
4965 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4966 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4968 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4973 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4974 tg3_phy_copper_begin(tp);
4976 if (tg3_flag(tp, ROBOSWITCH)) {
4977 current_link_up = true;
4978 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4979 current_speed = SPEED_1000;
4980 current_duplex = DUPLEX_FULL;
4981 tp->link_config.active_speed = current_speed;
4982 tp->link_config.active_duplex = current_duplex;
4985 tg3_readphy(tp, MII_BMSR, &bmsr);
4986 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4987 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4988 current_link_up = true;
4991 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4992 if (current_link_up) {
4993 if (tp->link_config.active_speed == SPEED_100 ||
4994 tp->link_config.active_speed == SPEED_10)
4995 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4997 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4998 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4999 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5001 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5003 /* In order for the 5750 core in BCM4785 chip to work properly
5004 * in RGMII mode, the LED Control Register must be set up.
5006 if (tg3_flag(tp, RGMII_MODE)) {
5007 u32 led_ctrl = tr32(MAC_LED_CTRL);
5008 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5010 if (tp->link_config.active_speed == SPEED_10)
5011 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5012 else if (tp->link_config.active_speed == SPEED_100)
5013 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5014 LED_CTRL_100MBPS_ON);
5015 else if (tp->link_config.active_speed == SPEED_1000)
5016 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5017 LED_CTRL_1000MBPS_ON);
5019 tw32(MAC_LED_CTRL, led_ctrl);
5023 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5024 if (tp->link_config.active_duplex == DUPLEX_HALF)
5025 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5027 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5028 if (current_link_up &&
5029 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5030 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5032 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5035 /* ??? Without this setting Netgear GA302T PHY does not
5036 * ??? send/receive packets...
5038 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5039 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5040 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5041 tw32_f(MAC_MI_MODE, tp->mi_mode);
5045 tw32_f(MAC_MODE, tp->mac_mode);
5048 tg3_phy_eee_adjust(tp, current_link_up);
5050 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5051 /* Polled via timer. */
5052 tw32_f(MAC_EVENT, 0);
5054 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5058 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5060 tp->link_config.active_speed == SPEED_1000 &&
5061 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5064 (MAC_STATUS_SYNC_CHANGED |
5065 MAC_STATUS_CFG_CHANGED));
5068 NIC_SRAM_FIRMWARE_MBOX,
5069 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5072 /* Prevent send BD corruption. */
5073 if (tg3_flag(tp, CLKREQ_BUG)) {
5074 if (tp->link_config.active_speed == SPEED_100 ||
5075 tp->link_config.active_speed == SPEED_10)
5076 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5077 PCI_EXP_LNKCTL_CLKREQ_EN);
5079 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5080 PCI_EXP_LNKCTL_CLKREQ_EN);
5083 tg3_test_and_report_link_chg(tp, current_link_up);
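/* What follows is a software implementation of 1000BASE-X (IEEE 802.3
 * clause 37 style) autonegotiation for fiber devices without usable
 * hardware autoneg.  tg3_fiber_aneg_smachine() exchanges configuration
 * code words through the MAC_TX_AUTO_NEG/MAC_RX_AUTO_NEG registers and
 * accumulates the link partner's abilities in the MR_* flag bits,
 * which mirror the management variables of the IEEE state diagram.
 */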
5088 struct tg3_fiber_aneginfo {
5090 #define ANEG_STATE_UNKNOWN 0
5091 #define ANEG_STATE_AN_ENABLE 1
5092 #define ANEG_STATE_RESTART_INIT 2
5093 #define ANEG_STATE_RESTART 3
5094 #define ANEG_STATE_DISABLE_LINK_OK 4
5095 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5096 #define ANEG_STATE_ABILITY_DETECT 6
5097 #define ANEG_STATE_ACK_DETECT_INIT 7
5098 #define ANEG_STATE_ACK_DETECT 8
5099 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5100 #define ANEG_STATE_COMPLETE_ACK 10
5101 #define ANEG_STATE_IDLE_DETECT_INIT 11
5102 #define ANEG_STATE_IDLE_DETECT 12
5103 #define ANEG_STATE_LINK_OK 13
5104 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5105 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5108 #define MR_AN_ENABLE 0x00000001
5109 #define MR_RESTART_AN 0x00000002
5110 #define MR_AN_COMPLETE 0x00000004
5111 #define MR_PAGE_RX 0x00000008
5112 #define MR_NP_LOADED 0x00000010
5113 #define MR_TOGGLE_TX 0x00000020
5114 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5115 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5116 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5117 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5118 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5119 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5120 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5121 #define MR_TOGGLE_RX 0x00002000
5122 #define MR_NP_RX 0x00004000
5124 #define MR_LINK_OK 0x80000000
5126 unsigned long link_time, cur_time;
5128 u32 ability_match_cfg;
5129 int ability_match_count;
5131 char ability_match, idle_match, ack_match;
5133 u32 txconfig, rxconfig;
5134 #define ANEG_CFG_NP 0x00000080
5135 #define ANEG_CFG_ACK 0x00000040
5136 #define ANEG_CFG_RF2 0x00000020
5137 #define ANEG_CFG_RF1 0x00000010
5138 #define ANEG_CFG_PS2 0x00000001
5139 #define ANEG_CFG_PS1 0x00008000
5140 #define ANEG_CFG_HD 0x00004000
5141 #define ANEG_CFG_FD 0x00002000
5142 #define ANEG_CFG_INVAL 0x00001f06
5147 #define ANEG_TIMER_ENAB 2
5148 #define ANEG_FAILED -1
5150 #define ANEG_STATE_SETTLE_TIME 10000
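/* The state machine returns ANEG_DONE once negotiation has finished,
 * ANEG_TIMER_ENAB when the caller should keep polling, or ANEG_FAILED
 * on error.  fiber_autoneg() below cranks it roughly once per
 * microsecond, so ANEG_STATE_SETTLE_TIME amounts to about 10 ms of
 * quiet time before the machine advances past a settling state.
 */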
5152 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5153 struct tg3_fiber_aneginfo *ap)
5156 unsigned long delta;
5160 if (ap->state == ANEG_STATE_UNKNOWN) {
5164 ap->ability_match_cfg = 0;
5165 ap->ability_match_count = 0;
5166 ap->ability_match = 0;
5172 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5173 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5175 if (rx_cfg_reg != ap->ability_match_cfg) {
5176 ap->ability_match_cfg = rx_cfg_reg;
5177 ap->ability_match = 0;
5178 ap->ability_match_count = 0;
5180 if (++ap->ability_match_count > 1) {
5181 ap->ability_match = 1;
5182 ap->ability_match_cfg = rx_cfg_reg;
5185 if (rx_cfg_reg & ANEG_CFG_ACK)
5193 ap->ability_match_cfg = 0;
5194 ap->ability_match_count = 0;
5195 ap->ability_match = 0;
5201 ap->rxconfig = rx_cfg_reg;
5204 switch (ap->state) {
5205 case ANEG_STATE_UNKNOWN:
5206 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5207 ap->state = ANEG_STATE_AN_ENABLE;
5210 case ANEG_STATE_AN_ENABLE:
5211 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5212 if (ap->flags & MR_AN_ENABLE) {
5215 ap->ability_match_cfg = 0;
5216 ap->ability_match_count = 0;
5217 ap->ability_match = 0;
5221 ap->state = ANEG_STATE_RESTART_INIT;
5223 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5227 case ANEG_STATE_RESTART_INIT:
5228 ap->link_time = ap->cur_time;
5229 ap->flags &= ~(MR_NP_LOADED);
5231 tw32(MAC_TX_AUTO_NEG, 0);
5232 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5233 tw32_f(MAC_MODE, tp->mac_mode);
5236 ret = ANEG_TIMER_ENAB;
5237 ap->state = ANEG_STATE_RESTART;
5240 case ANEG_STATE_RESTART:
5241 delta = ap->cur_time - ap->link_time;
5242 if (delta > ANEG_STATE_SETTLE_TIME)
5243 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5245 ret = ANEG_TIMER_ENAB;
5248 case ANEG_STATE_DISABLE_LINK_OK:
5252 case ANEG_STATE_ABILITY_DETECT_INIT:
5253 ap->flags &= ~(MR_TOGGLE_TX);
5254 ap->txconfig = ANEG_CFG_FD;
5255 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5256 if (flowctrl & ADVERTISE_1000XPAUSE)
5257 ap->txconfig |= ANEG_CFG_PS1;
5258 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5259 ap->txconfig |= ANEG_CFG_PS2;
5260 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5261 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5262 tw32_f(MAC_MODE, tp->mac_mode);
5265 ap->state = ANEG_STATE_ABILITY_DETECT;
5268 case ANEG_STATE_ABILITY_DETECT:
5269 if (ap->ability_match != 0 && ap->rxconfig != 0)
5270 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5273 case ANEG_STATE_ACK_DETECT_INIT:
5274 ap->txconfig |= ANEG_CFG_ACK;
5275 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5276 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5277 tw32_f(MAC_MODE, tp->mac_mode);
5280 ap->state = ANEG_STATE_ACK_DETECT;
5283 case ANEG_STATE_ACK_DETECT:
5284 if (ap->ack_match != 0) {
5285 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5286 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5287 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5289 ap->state = ANEG_STATE_AN_ENABLE;
5291 } else if (ap->ability_match != 0 &&
5292 ap->rxconfig == 0) {
5293 ap->state = ANEG_STATE_AN_ENABLE;
5297 case ANEG_STATE_COMPLETE_ACK_INIT:
5298 if (ap->rxconfig & ANEG_CFG_INVAL) {
5302 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5303 MR_LP_ADV_HALF_DUPLEX |
5304 MR_LP_ADV_SYM_PAUSE |
5305 MR_LP_ADV_ASYM_PAUSE |
5306 MR_LP_ADV_REMOTE_FAULT1 |
5307 MR_LP_ADV_REMOTE_FAULT2 |
5308 MR_LP_ADV_NEXT_PAGE |
5311 if (ap->rxconfig & ANEG_CFG_FD)
5312 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5313 if (ap->rxconfig & ANEG_CFG_HD)
5314 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5315 if (ap->rxconfig & ANEG_CFG_PS1)
5316 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5317 if (ap->rxconfig & ANEG_CFG_PS2)
5318 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5319 if (ap->rxconfig & ANEG_CFG_RF1)
5320 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5321 if (ap->rxconfig & ANEG_CFG_RF2)
5322 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5323 if (ap->rxconfig & ANEG_CFG_NP)
5324 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5326 ap->link_time = ap->cur_time;
5328 ap->flags ^= (MR_TOGGLE_TX);
5329 if (ap->rxconfig & 0x0008)
5330 ap->flags |= MR_TOGGLE_RX;
5331 if (ap->rxconfig & ANEG_CFG_NP)
5332 ap->flags |= MR_NP_RX;
5333 ap->flags |= MR_PAGE_RX;
5335 ap->state = ANEG_STATE_COMPLETE_ACK;
5336 ret = ANEG_TIMER_ENAB;
5339 case ANEG_STATE_COMPLETE_ACK:
5340 if (ap->ability_match != 0 &&
5341 ap->rxconfig == 0) {
5342 ap->state = ANEG_STATE_AN_ENABLE;
5345 delta = ap->cur_time - ap->link_time;
5346 if (delta > ANEG_STATE_SETTLE_TIME) {
5347 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5348 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5350 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5351 !(ap->flags & MR_NP_RX)) {
5352 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5360 case ANEG_STATE_IDLE_DETECT_INIT:
5361 ap->link_time = ap->cur_time;
5362 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5363 tw32_f(MAC_MODE, tp->mac_mode);
5366 ap->state = ANEG_STATE_IDLE_DETECT;
5367 ret = ANEG_TIMER_ENAB;
5370 case ANEG_STATE_IDLE_DETECT:
5371 if (ap->ability_match != 0 &&
5372 ap->rxconfig == 0) {
5373 ap->state = ANEG_STATE_AN_ENABLE;
5376 delta = ap->cur_time - ap->link_time;
5377 if (delta > ANEG_STATE_SETTLE_TIME) {
5378 /* XXX another gem from the Broadcom driver :( */
5379 ap->state = ANEG_STATE_LINK_OK;
5383 case ANEG_STATE_LINK_OK:
5384 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5388 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5389 /* ??? unimplemented */
5392 case ANEG_STATE_NEXT_PAGE_WAIT:
5393 /* ??? unimplemented */
5404 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5407 struct tg3_fiber_aneginfo aninfo;
5408 int status = ANEG_FAILED;
5412 tw32_f(MAC_TX_AUTO_NEG, 0);
5414 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5415 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5418 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5421 memset(&aninfo, 0, sizeof(aninfo));
5422 aninfo.flags |= MR_AN_ENABLE;
5423 aninfo.state = ANEG_STATE_UNKNOWN;
5424 aninfo.cur_time = 0;
5426 while (++tick < 195000) {
5427 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5428 if (status == ANEG_DONE || status == ANEG_FAILED)
5434 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5435 tw32_f(MAC_MODE, tp->mac_mode);
5438 *txflags = aninfo.txconfig;
5439 *rxflags = aninfo.flags;
5441 if (status == ANEG_DONE &&
5442 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5443 MR_LP_ADV_FULL_DUPLEX)))
5449 static void tg3_init_bcm8002(struct tg3 *tp)
5451 u32 mac_status = tr32(MAC_STATUS);
5454 /* Reset when initializing for the first time or when we have a link. */
5455 if (tg3_flag(tp, INIT_COMPLETE) &&
5456 !(mac_status & MAC_STATUS_PCS_SYNCED))
5459 /* Set PLL lock range. */
5460 tg3_writephy(tp, 0x16, 0x8007);
5463 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5465 /* Wait for reset to complete. */
5466 /* XXX schedule_timeout() ... */
5467 for (i = 0; i < 500; i++)
5470 /* Config mode; select PMA/Ch 1 regs. */
5471 tg3_writephy(tp, 0x10, 0x8411);
5473 /* Enable auto-lock and comdet, select txclk for tx. */
5474 tg3_writephy(tp, 0x11, 0x0a10);
5476 tg3_writephy(tp, 0x18, 0x00a0);
5477 tg3_writephy(tp, 0x16, 0x41ff);
5479 /* Assert and deassert POR. */
5480 tg3_writephy(tp, 0x13, 0x0400);
5482 tg3_writephy(tp, 0x13, 0x0000);
5484 tg3_writephy(tp, 0x11, 0x0a50);
5486 tg3_writephy(tp, 0x11, 0x0a10);
5488 /* Wait for signal to stabilize */
5489 /* XXX schedule_timeout() ... */
5490 for (i = 0; i < 15000; i++)
5493 /* Deselect the channel register so we can read the PHYID
5496 tg3_writephy(tp, 0x10, 0x8011);
5499 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5502 bool current_link_up;
5503 u32 sg_dig_ctrl, sg_dig_status;
5504 u32 serdes_cfg, expected_sg_dig_ctrl;
5505 int workaround, port_a;
5510 current_link_up = false;
5512 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5513 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5515 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5518 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5519 /* preserve bits 20-23 for voltage regulator */
5520 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5523 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5525 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5526 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5528 u32 val = serdes_cfg;
5534 tw32_f(MAC_SERDES_CFG, val);
5537 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5539 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5540 tg3_setup_flow_control(tp, 0, 0);
5541 current_link_up = true;
5546 /* Want auto-negotiation. */
5547 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5549 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5550 if (flowctrl & ADVERTISE_1000XPAUSE)
5551 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5552 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5553 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5555 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5556 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5557 tp->serdes_counter &&
5558 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5559 MAC_STATUS_RCVD_CFG)) ==
5560 MAC_STATUS_PCS_SYNCED)) {
5561 tp->serdes_counter--;
5562 current_link_up = true;
5567 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5568 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5570 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5572 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5573 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5574 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5575 MAC_STATUS_SIGNAL_DET)) {
5576 sg_dig_status = tr32(SG_DIG_STATUS);
5577 mac_status = tr32(MAC_STATUS);
5579 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5580 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5581 u32 local_adv = 0, remote_adv = 0;
5583 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5584 local_adv |= ADVERTISE_1000XPAUSE;
5585 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5586 local_adv |= ADVERTISE_1000XPSE_ASYM;
5588 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5589 remote_adv |= LPA_1000XPAUSE;
5590 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5591 remote_adv |= LPA_1000XPAUSE_ASYM;
5593 tp->link_config.rmt_adv =
5594 mii_adv_to_ethtool_adv_x(remote_adv);
5596 tg3_setup_flow_control(tp, local_adv, remote_adv);
5597 current_link_up = true;
5598 tp->serdes_counter = 0;
5599 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5600 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5601 if (tp->serdes_counter)
5602 tp->serdes_counter--;
5605 u32 val = serdes_cfg;
5612 tw32_f(MAC_SERDES_CFG, val);
5615 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5618 /* Link parallel detection: the link is up only if
5619  * we have PCS_SYNC and are not receiving
5620  * config code words. */
5621 mac_status = tr32(MAC_STATUS);
5622 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5623 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5624 tg3_setup_flow_control(tp, 0, 0);
5625 current_link_up = true;
5627 TG3_PHYFLG_PARALLEL_DETECT;
5628 tp->serdes_counter =
5629 SERDES_PARALLEL_DET_TIMEOUT;
5631 goto restart_autoneg;
5635 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5636 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5640 return current_link_up;
5643 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5645 bool current_link_up = false;
5647 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5650 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5651 u32 txflags, rxflags;
5654 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5655 u32 local_adv = 0, remote_adv = 0;
5657 if (txflags & ANEG_CFG_PS1)
5658 local_adv |= ADVERTISE_1000XPAUSE;
5659 if (txflags & ANEG_CFG_PS2)
5660 local_adv |= ADVERTISE_1000XPSE_ASYM;
5662 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5663 remote_adv |= LPA_1000XPAUSE;
5664 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5665 remote_adv |= LPA_1000XPAUSE_ASYM;
5667 tp->link_config.rmt_adv =
5668 mii_adv_to_ethtool_adv_x(remote_adv);
5670 tg3_setup_flow_control(tp, local_adv, remote_adv);
5672 current_link_up = true;
5674 for (i = 0; i < 30; i++) {
5677 (MAC_STATUS_SYNC_CHANGED |
5678 MAC_STATUS_CFG_CHANGED));
5680 if ((tr32(MAC_STATUS) &
5681 (MAC_STATUS_SYNC_CHANGED |
5682 MAC_STATUS_CFG_CHANGED)) == 0)
5686 mac_status = tr32(MAC_STATUS);
5687 if (!current_link_up &&
5688 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5689 !(mac_status & MAC_STATUS_RCVD_CFG))
5690 current_link_up = true;
5692 tg3_setup_flow_control(tp, 0, 0);
5694 /* Forcing 1000FD link up. */
5695 current_link_up = true;
5697 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5700 tw32_f(MAC_MODE, tp->mac_mode);
5705 return current_link_up;
5708 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5711 u32 orig_active_speed;
5712 u8 orig_active_duplex;
5714 bool current_link_up;
5717 orig_pause_cfg = tp->link_config.active_flowctrl;
5718 orig_active_speed = tp->link_config.active_speed;
5719 orig_active_duplex = tp->link_config.active_duplex;
5721 if (!tg3_flag(tp, HW_AUTONEG) &&
5723 tg3_flag(tp, INIT_COMPLETE)) {
5724 mac_status = tr32(MAC_STATUS);
5725 mac_status &= (MAC_STATUS_PCS_SYNCED |
5726 MAC_STATUS_SIGNAL_DET |
5727 MAC_STATUS_CFG_CHANGED |
5728 MAC_STATUS_RCVD_CFG);
5729 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5730 MAC_STATUS_SIGNAL_DET)) {
5731 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5732 MAC_STATUS_CFG_CHANGED));
5737 tw32_f(MAC_TX_AUTO_NEG, 0);
5739 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5740 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5741 tw32_f(MAC_MODE, tp->mac_mode);
5744 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5745 tg3_init_bcm8002(tp);
5747 /* Enable link change event even when serdes polling. */
5748 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5751 tp->link_config.rmt_adv = 0;
5752 mac_status = tr32(MAC_STATUS);
5754 if (tg3_flag(tp, HW_AUTONEG))
5755 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5757 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5759 tp->napi[0].hw_status->status =
5760 (SD_STATUS_UPDATED |
5761 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5763 for (i = 0; i < 100; i++) {
5764 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5765 MAC_STATUS_CFG_CHANGED));
5767 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5768 MAC_STATUS_CFG_CHANGED |
5769 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5773 mac_status = tr32(MAC_STATUS);
5774 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5775 current_link_up = false;
5776 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5777 tp->serdes_counter == 0) {
5778 tw32_f(MAC_MODE, (tp->mac_mode |
5779 MAC_MODE_SEND_CONFIGS));
5781 tw32_f(MAC_MODE, tp->mac_mode);
5785 if (current_link_up) {
5786 tp->link_config.active_speed = SPEED_1000;
5787 tp->link_config.active_duplex = DUPLEX_FULL;
5788 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5789 LED_CTRL_LNKLED_OVERRIDE |
5790 LED_CTRL_1000MBPS_ON));
5792 tp->link_config.active_speed = SPEED_UNKNOWN;
5793 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5794 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5795 LED_CTRL_LNKLED_OVERRIDE |
5796 LED_CTRL_TRAFFIC_OVERRIDE));
5799 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5800 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5801 if (orig_pause_cfg != now_pause_cfg ||
5802 orig_active_speed != tp->link_config.active_speed ||
5803 orig_active_duplex != tp->link_config.active_duplex)
5804 tg3_link_report(tp);
5810 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5814 u32 current_speed = SPEED_UNKNOWN;
5815 u8 current_duplex = DUPLEX_UNKNOWN;
5816 bool current_link_up = false;
5817 u32 local_adv, remote_adv, sgsr;
5819 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5820 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5821 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5822 (sgsr & SERDES_TG3_SGMII_MODE)) {
5827 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5829 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5830 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5832 current_link_up = true;
5833 if (sgsr & SERDES_TG3_SPEED_1000) {
5834 current_speed = SPEED_1000;
5835 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5836 } else if (sgsr & SERDES_TG3_SPEED_100) {
5837 current_speed = SPEED_100;
5838 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5840 current_speed = SPEED_10;
5841 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5844 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5845 current_duplex = DUPLEX_FULL;
5847 current_duplex = DUPLEX_HALF;
5850 tw32_f(MAC_MODE, tp->mac_mode);
5853 tg3_clear_mac_status(tp);
5855 goto fiber_setup_done;
5858 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5859 tw32_f(MAC_MODE, tp->mac_mode);
5862 tg3_clear_mac_status(tp);
5867 tp->link_config.rmt_adv = 0;
5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
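	/* BMSR is deliberately read twice: link status is a latching
	 * bit, so the first read returns (and clears) the stale latched
	 * value and the second read reflects the current link state.
	 */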
5871 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5872 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5873 bmsr |= BMSR_LSTATUS;
5875 bmsr &= ~BMSR_LSTATUS;
5878 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5880 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5881 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5882 /* do nothing, just check for link up at the end */
5883 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5886 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5887 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5888 ADVERTISE_1000XPAUSE |
5889 ADVERTISE_1000XPSE_ASYM |
5892 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5893 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5895 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5896 tg3_writephy(tp, MII_ADVERTISE, newadv);
5897 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5898 tg3_writephy(tp, MII_BMCR, bmcr);
5900 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5901 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5902 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5909 bmcr &= ~BMCR_SPEED1000;
5910 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5912 if (tp->link_config.duplex == DUPLEX_FULL)
5913 new_bmcr |= BMCR_FULLDPLX;
5915 if (new_bmcr != bmcr) {
5916 /* BMCR_SPEED1000 is a reserved bit that needs
5917 * to be set on write.
5919 new_bmcr |= BMCR_SPEED1000;
5921 /* Force a linkdown */
5925 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5926 adv &= ~(ADVERTISE_1000XFULL |
5927 ADVERTISE_1000XHALF |
5929 tg3_writephy(tp, MII_ADVERTISE, adv);
5930 tg3_writephy(tp, MII_BMCR, bmcr |
5934 tg3_carrier_off(tp);
5936 tg3_writephy(tp, MII_BMCR, new_bmcr);
5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5940 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5941 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5942 bmsr |= BMSR_LSTATUS;
5944 bmsr &= ~BMSR_LSTATUS;
5946 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5950 if (bmsr & BMSR_LSTATUS) {
5951 current_speed = SPEED_1000;
5952 current_link_up = true;
5953 if (bmcr & BMCR_FULLDPLX)
5954 current_duplex = DUPLEX_FULL;
5956 current_duplex = DUPLEX_HALF;
5961 if (bmcr & BMCR_ANENABLE) {
5964 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5965 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5966 common = local_adv & remote_adv;
5967 if (common & (ADVERTISE_1000XHALF |
5968 ADVERTISE_1000XFULL)) {
5969 if (common & ADVERTISE_1000XFULL)
5970 current_duplex = DUPLEX_FULL;
5972 current_duplex = DUPLEX_HALF;
5974 tp->link_config.rmt_adv =
5975 mii_adv_to_ethtool_adv_x(remote_adv);
5976 } else if (!tg3_flag(tp, 5780_CLASS)) {
5977 /* Link is up via parallel detect */
5979 current_link_up = false;
5985 if (current_link_up && current_duplex == DUPLEX_FULL)
5986 tg3_setup_flow_control(tp, local_adv, remote_adv);
5988 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5989 if (tp->link_config.active_duplex == DUPLEX_HALF)
5990 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5992 tw32_f(MAC_MODE, tp->mac_mode);
5995 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5997 tp->link_config.active_speed = current_speed;
5998 tp->link_config.active_duplex = current_duplex;
6000 tg3_test_and_report_link_chg(tp, current_link_up);
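/* Parallel detection fallback for SerDes links: if the partner does
 * not autonegotiate, force the link up once signal detect is seen
 * without incoming config code words, and re-enable autoneg as soon
 * as code words appear again.
 */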
6004 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6006 if (tp->serdes_counter) {
6007 /* Give autoneg time to complete. */
6008 tp->serdes_counter--;
6013 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6016 tg3_readphy(tp, MII_BMCR, &bmcr);
6017 if (bmcr & BMCR_ANENABLE) {
6020 /* Select shadow register 0x1f */
6021 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6022 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6024 /* Select expansion interrupt status register */
6025 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6026 MII_TG3_DSP_EXP1_INT_STAT);
6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6030 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6031 /* We have signal detect and not receiving
6032 * config code words, link is up by parallel
6036 bmcr &= ~BMCR_ANENABLE;
6037 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6038 tg3_writephy(tp, MII_BMCR, bmcr);
6039 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6042 } else if (tp->link_up &&
6043 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6044 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6047 /* Select expansion interrupt status register */
6048 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6049 MII_TG3_DSP_EXP1_INT_STAT);
6050 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6054 /* Config code words received, turn on autoneg. */
6055 tg3_readphy(tp, MII_BMCR, &bmcr);
6056 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6058 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6064 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6069 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6070 err = tg3_setup_fiber_phy(tp, force_reset);
6071 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6072 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6074 err = tg3_setup_copper_phy(tp, force_reset);
6076 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6079 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6080 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6082 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6087 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6088 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6089 tw32(GRC_MISC_CFG, val);
6092 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6093 (6 << TX_LENGTHS_IPG_SHIFT);
6094 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6095 tg3_asic_rev(tp) == ASIC_REV_5762)
6096 val |= tr32(MAC_TX_LENGTHS) &
6097 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6098 TX_LENGTHS_CNT_DWN_VAL_MSK);
6100 if (tp->link_config.active_speed == SPEED_1000 &&
6101 tp->link_config.active_duplex == DUPLEX_HALF)
6102 tw32(MAC_TX_LENGTHS, val |
6103 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6105 tw32(MAC_TX_LENGTHS, val |
6106 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6108 if (!tg3_flag(tp, 5705_PLUS)) {
6110 tw32(HOSTCC_STAT_COAL_TICKS,
6111 tp->coal.stats_block_coalesce_usecs);
6113 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6117 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6118 val = tr32(PCIE_PWR_MGMT_THRESH);
6120 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6123 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6124 tw32(PCIE_PWR_MGMT_THRESH, val);
6130 /* tp->lock must be held */
6131 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6135 ptp_read_system_prets(sts);
6136 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6137 ptp_read_system_postts(sts);
6138 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6143 /* tp->lock must be held */
6144 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6146 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6148 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6149 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6150 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6151 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
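/* The reference clock spans two 32-bit registers, so the counter is
 * stopped around the LSB/MSB writes and then resumed; otherwise a
 * carry between the two halves mid-update could briefly expose a
 * torn 64-bit value.
 */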
6154 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6155 static inline void tg3_full_unlock(struct tg3 *tp);
6156 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6158 struct tg3 *tp = netdev_priv(dev);
6160 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6161 SOF_TIMESTAMPING_RX_SOFTWARE |
6162 SOF_TIMESTAMPING_SOFTWARE;
6164 if (tg3_flag(tp, PTP_CAPABLE)) {
6165 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6166 SOF_TIMESTAMPING_RX_HARDWARE |
6167 SOF_TIMESTAMPING_RAW_HARDWARE;
6171 info->phc_index = ptp_clock_index(tp->ptp_clock);
6173 info->phc_index = -1;
6175 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6177 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6178 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6179 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6180 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6184 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6186 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6190 /* Frequency adjustment is performed using hardware with a 24 bit
6191 * accumulator and a programmable correction value. On each clk, the
6192 * correction value gets added to the accumulator and when it
6193 * overflows, the time counter is incremented/decremented.
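 *
 * A worked example: a request of +1 ppm arrives as
 * scaled_ppm == 1 << 16, and diff_by_scaled_ppm(1 << 24, ...) reduces
 * it to a correction of 2^24 / 10^6, about 16.  The accumulator then
 * overflows 16 times every 2^24 clocks, i.e. one extra count per
 * million within rounding, for a tuning resolution of roughly
 * 1 / 2^24, or 0.06 ppm.
 */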
6195 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6197 tg3_full_lock(tp, 0);
6200 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6201 TG3_EAV_REF_CLK_CORRECT_EN |
6202 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6203 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6205 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6207 tg3_full_unlock(tp);
6212 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6214 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6216 tg3_full_lock(tp, 0);
6217 tp->ptp_adjust += delta;
6218 tg3_full_unlock(tp);
6223 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6224 struct ptp_system_timestamp *sts)
6227 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6229 tg3_full_lock(tp, 0);
6230 ns = tg3_refclk_read(tp, sts);
6231 ns += tp->ptp_adjust;
6232 tg3_full_unlock(tp);
6234 *ts = ns_to_timespec64(ns);
6239 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6240 const struct timespec64 *ts)
6243 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6245 ns = timespec64_to_ns(ts);
6247 tg3_full_lock(tp, 0);
6248 tg3_refclk_write(tp, ns);
6250 tg3_full_unlock(tp);
6255 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6256 struct ptp_clock_request *rq, int on)
6258 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6263 case PTP_CLK_REQ_PEROUT:
6264 /* Reject requests with unsupported flags */
6265 if (rq->perout.flags)
6268 if (rq->perout.index != 0)
6271 tg3_full_lock(tp, 0);
6272 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6273 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6278 nsec = rq->perout.start.sec * 1000000000ULL +
6279 rq->perout.start.nsec;
6281 if (rq->perout.period.sec || rq->perout.period.nsec) {
6282 netdev_warn(tp->dev,
6283 "Device supports only a one-shot timesync output, period must be 0\n");
6288 if (nsec & (1ULL << 63)) {
6289 netdev_warn(tp->dev,
6290 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6295 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6296 tw32(TG3_EAV_WATCHDOG0_MSB,
6297 TG3_EAV_WATCHDOG0_EN |
6298 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6300 tw32(TG3_EAV_REF_CLCK_CTL,
6301 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6303 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6304 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6308 tg3_full_unlock(tp);
6318 static const struct ptp_clock_info tg3_ptp_caps = {
6319 .owner = THIS_MODULE,
6320 .name = "tg3 clock",
6321 .max_adj = 250000000,
6327 .adjfine = tg3_ptp_adjfine,
6328 .adjtime = tg3_ptp_adjtime,
6329 .gettimex64 = tg3_ptp_gettimex,
6330 .settime64 = tg3_ptp_settime,
6331 .enable = tg3_ptp_enable,
6334 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6335 struct skb_shared_hwtstamps *timestamp)
6337 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6338 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6342 /* tp->lock must be held */
6343 static void tg3_ptp_init(struct tg3 *tp)
6345 if (!tg3_flag(tp, PTP_CAPABLE))
6348 /* Initialize the hardware clock to the system time. */
6349 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6351 tp->ptp_info = tg3_ptp_caps;
6354 /* tp->lock must be held */
6355 static void tg3_ptp_resume(struct tg3 *tp)
6357 if (!tg3_flag(tp, PTP_CAPABLE))
6360 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6364 static void tg3_ptp_fini(struct tg3 *tp)
6366 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6369 ptp_clock_unregister(tp->ptp_clock);
6370 tp->ptp_clock = NULL;
6374 static inline int tg3_irq_sync(struct tg3 *tp)
6376 return tp->irq_sync;
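/* Copy 'len' bytes of register space starting at 'off' into the same
 * offsets within the caller's buffer, so the dump preserves the
 * chip's register layout.
 */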
6379 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6383 dst = (u32 *)((u8 *)dst + off);
6384 for (i = 0; i < len; i += sizeof(u32))
6385 *dst++ = tr32(off + i);
6388 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6390 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6391 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6392 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6393 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6394 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6395 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6396 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6397 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6398 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6399 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6400 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6401 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6402 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6403 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6404 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6405 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6406 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6407 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6408 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6410 if (tg3_flag(tp, SUPPORT_MSIX))
6411 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6413 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6414 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6415 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6416 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6417 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6418 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6419 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6420 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6422 if (!tg3_flag(tp, 5705_PLUS)) {
6423 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6424 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6425 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6428 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6429 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6430 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6431 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6432 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6434 if (tg3_flag(tp, NVRAM))
6435 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6438 static void tg3_dump_state(struct tg3 *tp)
6443 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6447 if (tg3_flag(tp, PCI_EXPRESS)) {
6448 /* Read up to but not including private PCI registers */
6449 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6450 regs[i / sizeof(u32)] = tr32(i);
6452 tg3_dump_legacy_regs(tp, regs);
6454 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6455 if (!regs[i + 0] && !regs[i + 1] &&
6456 !regs[i + 2] && !regs[i + 3])
6459 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6461 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6466 for (i = 0; i < tp->irq_cnt; i++) {
6467 struct tg3_napi *tnapi = &tp->napi[i];
6469 /* SW status block */
6471 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6473 tnapi->hw_status->status,
6474 tnapi->hw_status->status_tag,
6475 tnapi->hw_status->rx_jumbo_consumer,
6476 tnapi->hw_status->rx_consumer,
6477 tnapi->hw_status->rx_mini_consumer,
6478 tnapi->hw_status->idx[0].rx_producer,
6479 tnapi->hw_status->idx[0].tx_consumer);
6482 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6484 tnapi->last_tag, tnapi->last_irq_tag,
6485 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6487 tnapi->prodring.rx_std_prod_idx,
6488 tnapi->prodring.rx_std_cons_idx,
6489 tnapi->prodring.rx_jmb_prod_idx,
6490 tnapi->prodring.rx_jmb_cons_idx);
6494 /* This is called whenever we suspect that the system chipset is re-
6495 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6496 * is bogus tx completions. We try to recover by setting the
6497 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6498 * in the workqueue.
6499 */
6500 static void tg3_tx_recover(struct tg3 *tp)
6502 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6503 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6505 netdev_warn(tp->dev,
6506 "The system may be re-ordering memory-mapped I/O "
6507 "cycles to the network device, attempting to recover. "
6508 "Please report the problem to the driver maintainer "
6509 "and include system chipset information.\n");
6511 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6514 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6516 /* Tell compiler to fetch tx indices from memory. */
6518 return tnapi->tx_pending -
6519 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
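/* A quick example of the arithmetic above, assuming the usual
 * 512-entry tx ring: with tx_prod == 5 and tx_cons == 510 the
 * producer has wrapped, (5 - 510) & 511 == 7 descriptors are still
 * in flight, and tg3_tx_avail() reports tx_pending - 7 free slots.
 */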
6522 /* Tigon3 never reports partial packet sends. So we do not
6523 * need special logic to handle SKBs that have not had all
6524 * of their frags sent yet, like SunGEM does.
6526 static void tg3_tx(struct tg3_napi *tnapi)
6528 struct tg3 *tp = tnapi->tp;
6529 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6530 u32 sw_idx = tnapi->tx_cons;
6531 struct netdev_queue *txq;
6532 int index = tnapi - tp->napi;
6533 unsigned int pkts_compl = 0, bytes_compl = 0;
6535 if (tg3_flag(tp, ENABLE_TSS))
6538 txq = netdev_get_tx_queue(tp->dev, index);
6540 while (sw_idx != hw_idx) {
6541 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6542 struct sk_buff *skb = ri->skb;
6545 if (unlikely(skb == NULL)) {
6550 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6551 struct skb_shared_hwtstamps timestamp;
6552 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6553 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6555 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6557 skb_tstamp_tx(skb, &timestamp);
6560 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6561 skb_headlen(skb), DMA_TO_DEVICE);
6565 while (ri->fragmented) {
6566 ri->fragmented = false;
6567 sw_idx = NEXT_TX(sw_idx);
6568 ri = &tnapi->tx_buffers[sw_idx];
6571 sw_idx = NEXT_TX(sw_idx);
6573 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6574 ri = &tnapi->tx_buffers[sw_idx];
6575 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6578 dma_unmap_page(&tp->pdev->dev,
6579 dma_unmap_addr(ri, mapping),
6580 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6583 while (ri->fragmented) {
6584 ri->fragmented = false;
6585 sw_idx = NEXT_TX(sw_idx);
6586 ri = &tnapi->tx_buffers[sw_idx];
6589 sw_idx = NEXT_TX(sw_idx);
6593 bytes_compl += skb->len;
6595 dev_consume_skb_any(skb);
6597 if (unlikely(tx_bug)) {
6603 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6605 tnapi->tx_cons = sw_idx;
6607 /* Need to make the tx_cons update visible to tg3_start_xmit()
6608 * before checking for netif_queue_stopped(). Without the
6609 * memory barrier, there is a small possibility that tg3_start_xmit()
6610 * will miss it and cause the queue to be stopped forever.
6614 if (unlikely(netif_tx_queue_stopped(txq) &&
6615 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6616 __netif_tx_lock(txq, smp_processor_id());
6617 if (netif_tx_queue_stopped(txq) &&
6618 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6619 netif_tx_wake_queue(txq);
6620 __netif_tx_unlock(txq);
6624 static void tg3_frag_free(bool is_frag, void *data)
6627 skb_free_frag(data);
6632 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6634 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6635 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6640 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6642 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6647 /* Returns size of skb allocated or < 0 on error.
6649 * We only need to fill in the address because the other members
6650 * of the RX descriptor are invariant, see tg3_init_rings.
6652 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6653 * posting buffers we only dirty the first cache line of the RX
6654 * descriptor (containing the address). Whereas for the RX status
6655 * buffers the cpu only reads the last cacheline of the RX descriptor
6656 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6658 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6659 u32 opaque_key, u32 dest_idx_unmasked,
6660 unsigned int *frag_size)
6662 struct tg3_rx_buffer_desc *desc;
6663 struct ring_info *map;
6666 int skb_size, data_size, dest_idx;
6668 switch (opaque_key) {
6669 case RXD_OPAQUE_RING_STD:
6670 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6671 desc = &tpr->rx_std[dest_idx];
6672 map = &tpr->rx_std_buffers[dest_idx];
6673 data_size = tp->rx_pkt_map_sz;
6676 case RXD_OPAQUE_RING_JUMBO:
6677 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6678 desc = &tpr->rx_jmb[dest_idx].std;
6679 map = &tpr->rx_jmb_buffers[dest_idx];
6680 data_size = TG3_RX_JMB_MAP_SZ;
6687 /* Do not overwrite any of the map or rp information
6688 * until we are sure we can commit to a new buffer.
6690 * Callers depend upon this behavior and assume that
6691 * we leave everything unchanged if we fail.
6693 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6694 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6695 if (skb_size <= PAGE_SIZE) {
6696 data = napi_alloc_frag(skb_size);
6697 *frag_size = skb_size;
6699 data = kmalloc(skb_size, GFP_ATOMIC);
6705 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6706 data_size, DMA_FROM_DEVICE);
6707 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6708 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6713 dma_unmap_addr_set(map, mapping, mapping);
6715 desc->addr_hi = ((u64)mapping >> 32);
6716 desc->addr_lo = ((u64)mapping & 0xffffffff);
6721 /* We only need to move over in the address because the other
6722 * members of the RX descriptor are invariant. See notes above
6723 * tg3_alloc_rx_data for full details.
6725 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6726 struct tg3_rx_prodring_set *dpr,
6727 u32 opaque_key, int src_idx,
6728 u32 dest_idx_unmasked)
6730 struct tg3 *tp = tnapi->tp;
6731 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6732 struct ring_info *src_map, *dest_map;
6733 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6736 switch (opaque_key) {
6737 case RXD_OPAQUE_RING_STD:
6738 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6739 dest_desc = &dpr->rx_std[dest_idx];
6740 dest_map = &dpr->rx_std_buffers[dest_idx];
6741 src_desc = &spr->rx_std[src_idx];
6742 src_map = &spr->rx_std_buffers[src_idx];
6745 case RXD_OPAQUE_RING_JUMBO:
6746 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6747 dest_desc = &dpr->rx_jmb[dest_idx].std;
6748 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6749 src_desc = &spr->rx_jmb[src_idx].std;
6750 src_map = &spr->rx_jmb_buffers[src_idx];
6757 dest_map->data = src_map->data;
6758 dma_unmap_addr_set(dest_map, mapping,
6759 dma_unmap_addr(src_map, mapping));
6760 dest_desc->addr_hi = src_desc->addr_hi;
6761 dest_desc->addr_lo = src_desc->addr_lo;
6763 /* Ensure that the update to the skb happens after the physical
6764 * addresses have been transferred to the new BD location.
6768 src_map->data = NULL;
6771 /* The RX ring scheme is composed of multiple rings which post fresh
6772 * buffers to the chip, and one special ring the chip uses to report
6773 * status back to the host.
6775 * The special ring reports the status of received packets to the
6776 * host. The chip does not write into the original descriptor the
6777 * RX buffer was obtained from. The chip simply takes the original
6778 * descriptor as provided by the host, updates the status and length
6779 * field, then writes this into the next status ring entry.
6781 * Each ring the host uses to post buffers to the chip is described
6782 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6783 * it is first placed into the on-chip ram. When the packet's length
6784 * is known, it walks down the TG3_BDINFO entries to select the ring.
6785 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6786 * which is within the range of the new packet's length is chosen.
6788 * The "separate ring for rx status" scheme may sound queer, but it makes
6789 * sense from a cache coherency perspective. If only the host writes
6790 * to the buffer post rings, and only the chip writes to the rx status
6791 * rings, then cache lines never move beyond shared-modified state.
6792 * If both the host and chip were to write into the same ring, cache line
6793 * eviction could occur since both entities want it in an exclusive state.
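 *
 * Concretely, tg3_rx() below walks the status ring: each entry's
 * opaque cookie identifies the producer ring (std or jumbo) and the
 * buffer index the packet landed in.  Packets under the copy
 * threshold are copied into a fresh small skb and the original buffer
 * is recycled in place; larger packets are wrapped with build_skb()
 * and a replacement buffer is allocated.  The producer mailboxes are
 * then bumped to hand buffers back to the chip.
 */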
6795 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6797 struct tg3 *tp = tnapi->tp;
6798 u32 work_mask, rx_std_posted = 0;
6799 u32 std_prod_idx, jmb_prod_idx;
6800 u32 sw_idx = tnapi->rx_rcb_ptr;
6803 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6805 hw_idx = *(tnapi->rx_rcb_prod_idx);
6807 * We need to order the read of hw_idx and the read of
6808 * the opaque cookie.
6813 std_prod_idx = tpr->rx_std_prod_idx;
6814 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6815 while (sw_idx != hw_idx && budget > 0) {
6816 struct ring_info *ri;
6817 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6819 struct sk_buff *skb;
6820 dma_addr_t dma_addr;
6821 u32 opaque_key, desc_idx, *post_ptr;
6825 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6826 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6827 if (opaque_key == RXD_OPAQUE_RING_STD) {
6828 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6829 dma_addr = dma_unmap_addr(ri, mapping);
6831 post_ptr = &std_prod_idx;
6833 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6834 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6835 dma_addr = dma_unmap_addr(ri, mapping);
6837 post_ptr = &jmb_prod_idx;
6839 goto next_pkt_nopost;
6841 work_mask |= opaque_key;
6843 if (desc->err_vlan & RXD_ERR_MASK) {
6845 tg3_recycle_rx(tnapi, tpr, opaque_key,
6846 desc_idx, *post_ptr);
6848 /* Other statistics kept track of by card. */
6853 prefetch(data + TG3_RX_OFFSET(tp));
6854 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6857 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858 RXD_FLAG_PTPSTAT_PTPV1 ||
6859 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6860 RXD_FLAG_PTPSTAT_PTPV2) {
6861 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6862 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6865 if (len > TG3_RX_COPY_THRESH(tp)) {
6867 unsigned int frag_size;
6869 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6870 *post_ptr, &frag_size);
6874 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6877 /* Ensure that the update to the data happens
6878 * after the usage of the old DMA mapping.
6884 skb = build_skb(data, frag_size);
6886 tg3_frag_free(frag_size != 0, data);
6887 goto drop_it_no_recycle;
6889 skb_reserve(skb, TG3_RX_OFFSET(tp));
6891 tg3_recycle_rx(tnapi, tpr, opaque_key,
6892 desc_idx, *post_ptr);
6894 skb = netdev_alloc_skb(tp->dev,
6895 len + TG3_RAW_IP_ALIGN);
6897 goto drop_it_no_recycle;
6899 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6900 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6903 data + TG3_RX_OFFSET(tp),
6905 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6906 len, DMA_FROM_DEVICE);
6911 tg3_hwclock_to_timestamp(tp, tstamp,
6912 skb_hwtstamps(skb));
6914 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6915 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6916 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6917 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6918 skb->ip_summed = CHECKSUM_UNNECESSARY;
6920 skb_checksum_none_assert(skb);
6922 skb->protocol = eth_type_trans(skb, tp->dev);
6924 if (len > (tp->dev->mtu + ETH_HLEN) &&
6925 skb->protocol != htons(ETH_P_8021Q) &&
6926 skb->protocol != htons(ETH_P_8021AD)) {
6927 dev_kfree_skb_any(skb);
6928 goto drop_it_no_recycle;
6931 if (desc->type_flags & RXD_FLAG_VLAN &&
6932 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6933 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6934 desc->err_vlan & RXD_VLAN_MASK);
6936 napi_gro_receive(&tnapi->napi, skb);
6944 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6945 tpr->rx_std_prod_idx = std_prod_idx &
6946 tp->rx_std_ring_mask;
6947 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6948 tpr->rx_std_prod_idx);
6949 work_mask &= ~RXD_OPAQUE_RING_STD;
6954 sw_idx &= tp->rx_ret_ring_mask;
6956 /* Refresh hw_idx to see if there is new work */
6957 if (sw_idx == hw_idx) {
6958 hw_idx = *(tnapi->rx_rcb_prod_idx);
6963 /* ACK the status ring. */
6964 tnapi->rx_rcb_ptr = sw_idx;
6965 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6967 /* Refill RX ring(s). */
6968 if (!tg3_flag(tp, ENABLE_RSS)) {
6969 /* Sync BD data before updating mailbox */
6972 if (work_mask & RXD_OPAQUE_RING_STD) {
6973 tpr->rx_std_prod_idx = std_prod_idx &
6974 tp->rx_std_ring_mask;
6975 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6976 tpr->rx_std_prod_idx);
6978 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6979 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6980 tp->rx_jmb_ring_mask;
6981 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6982 tpr->rx_jmb_prod_idx);
6984 } else if (work_mask) {
6985 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6986 * updated before the producer indices can be updated.
6990 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6991 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6993 if (tnapi != &tp->napi[1]) {
6994 tp->rx_refill = true;
6995 napi_schedule(&tp->napi[1].napi);
7002 static void tg3_poll_link(struct tg3 *tp)
7004 /* handle link change and other phy events */
7005 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7006 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7008 if (sblk->status & SD_STATUS_LINK_CHG) {
7009 sblk->status = SD_STATUS_UPDATED |
7010 (sblk->status & ~SD_STATUS_LINK_CHG);
7011 spin_lock(&tp->lock);
7012 if (tg3_flag(tp, USE_PHYLIB)) {
7014 (MAC_STATUS_SYNC_CHANGED |
7015 MAC_STATUS_CFG_CHANGED |
7016 MAC_STATUS_MI_COMPLETION |
7017 MAC_STATUS_LNKSTATE_CHANGED));
7020 tg3_setup_phy(tp, false);
7021 spin_unlock(&tp->lock);
7026 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7027 struct tg3_rx_prodring_set *dpr,
7028 struct tg3_rx_prodring_set *spr)
7030 u32 si, di, cpycnt, src_prod_idx;
7034 src_prod_idx = spr->rx_std_prod_idx;
7036 /* Make sure updates to the rx_std_buffers[] entries and the
7037 * standard producer index are seen in the correct order.
7041 if (spr->rx_std_cons_idx == src_prod_idx)
7044 if (spr->rx_std_cons_idx < src_prod_idx)
7045 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7047 cpycnt = tp->rx_std_ring_mask + 1 -
7048 spr->rx_std_cons_idx;
7050 cpycnt = min(cpycnt,
7051 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7053 si = spr->rx_std_cons_idx;
7054 di = dpr->rx_std_prod_idx;
7056 for (i = di; i < di + cpycnt; i++) {
7057 if (dpr->rx_std_buffers[i].data) {
7067 /* Ensure that updates to the rx_std_buffers ring and the
7068 * shadowed hardware producer ring from tg3_recycle_skb() are
7069 * ordered correctly WRT the skb check above.
7073 memcpy(&dpr->rx_std_buffers[di],
7074 &spr->rx_std_buffers[si],
7075 cpycnt * sizeof(struct ring_info));
7077 for (i = 0; i < cpycnt; i++, di++, si++) {
7078 struct tg3_rx_buffer_desc *sbd, *dbd;
7079 sbd = &spr->rx_std[si];
7080 dbd = &dpr->rx_std[di];
7081 dbd->addr_hi = sbd->addr_hi;
7082 dbd->addr_lo = sbd->addr_lo;
7085 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7086 tp->rx_std_ring_mask;
7087 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7088 tp->rx_std_ring_mask;
7092 src_prod_idx = spr->rx_jmb_prod_idx;
7094 /* Make sure updates to the rx_jmb_buffers[] entries and
7095 * the jumbo producer index are seen in the correct order.
7099 if (spr->rx_jmb_cons_idx == src_prod_idx)
7102 if (spr->rx_jmb_cons_idx < src_prod_idx)
7103 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7105 cpycnt = tp->rx_jmb_ring_mask + 1 -
7106 spr->rx_jmb_cons_idx;
7108 cpycnt = min(cpycnt,
7109 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7111 si = spr->rx_jmb_cons_idx;
7112 di = dpr->rx_jmb_prod_idx;
7114 for (i = di; i < di + cpycnt; i++) {
7115 if (dpr->rx_jmb_buffers[i].data) {
7125 /* Ensure that updates to the rx_jmb_buffers ring and the
7126 * shadowed hardware producer ring from tg3_recycle_skb() are
7127 * ordered correctly WRT the skb check above.
7131 memcpy(&dpr->rx_jmb_buffers[di],
7132 &spr->rx_jmb_buffers[si],
7133 cpycnt * sizeof(struct ring_info));
7135 for (i = 0; i < cpycnt; i++, di++, si++) {
7136 struct tg3_rx_buffer_desc *sbd, *dbd;
7137 sbd = &spr->rx_jmb[si].std;
7138 dbd = &dpr->rx_jmb[di].std;
7139 dbd->addr_hi = sbd->addr_hi;
7140 dbd->addr_lo = sbd->addr_lo;
7143 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7144 tp->rx_jmb_ring_mask;
7145 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7146 tp->rx_jmb_ring_mask;
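/* A worked example of the copy-count math above, assuming (for illustration
 * only) a 512-entry standard ring, i.e. rx_std_ring_mask == 511: with
 * rx_std_cons_idx == 500 and src_prod_idx == 10 the consumer is numerically
 * ahead, so the first pass copies 512 - 500 = 12 entries up to the wrap
 * point, and the next iteration copies the remaining 10 from index 0.
 */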
7152 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7154 struct tg3 *tp = tnapi->tp;
	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;
	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
7170 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7171 work_done += tg3_rx(tnapi, budget - work_done);
7173 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7174 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7176 u32 std_prod_idx = dpr->rx_std_prod_idx;
7177 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7179 tp->rx_refill = false;
7180 for (i = 1; i <= tp->rxq_cnt; i++)
7181 err |= tg3_rx_prodring_xfer(tp, dpr,
7182 &tp->napi[i].prodring);
7186 if (std_prod_idx != dpr->rx_std_prod_idx)
7187 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7188 dpr->rx_std_prod_idx);
7190 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7191 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7192 dpr->rx_jmb_prod_idx);
7195 tw32_f(HOSTCC_MODE, tp->coal_now);
7201 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7203 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7204 schedule_work(&tp->reset_task);
7207 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7209 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7210 cancel_work_sync(&tp->reset_task);
7211 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
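/* The schedule/cancel pair above leans on atomic test-and-set so that
 * concurrent error paths queue the reset work at most once. A minimal sketch
 * of the same idiom (hypothetical flag word and work item, not driver state):
 *
 *	static unsigned long flags;
 *	if (!test_and_set_bit(0, &flags))
 *		schedule_work(&my_work);	// only the first caller queues
 */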
7214 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7216 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7217 struct tg3 *tp = tnapi->tp;
7219 struct tg3_hw_status *sblk = tnapi->hw_status;
7222 work_done = tg3_poll_work(tnapi, work_done, budget);
7224 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7227 if (unlikely(work_done >= budget))
	/* tp->last_tag is used in tg3_int_reenable() below
	 * to tell the hw how much work has been processed,
	 * so we must read it before checking for more work.
	 */
7234 tnapi->last_tag = sblk->status_tag;
7235 tnapi->last_irq_tag = tnapi->last_tag;
7238 /* check for RX/TX work to do */
7239 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7240 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
		/* This test here is not race-free, but it will reduce
		 * the number of interrupts by looping again.
		 */
7245 if (tnapi == &tp->napi[1] && tp->rx_refill)
7248 napi_complete_done(napi, work_done);
7249 /* Reenable interrupts. */
7250 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
		/* This test here is synchronized by napi_schedule()
		 * and napi_complete() to close the race condition.
		 */
7255 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7256 tw32(HOSTCC_MODE, tp->coalesce_mode |
7257 HOSTCC_MODE_ENABLE |
7264 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7268 /* work_done is guaranteed to be less than budget. */
7269 napi_complete(napi);
7270 tg3_reset_task_schedule(tp);
7274 static void tg3_process_error(struct tg3 *tp)
7277 bool real_error = false;
7279 if (tg3_flag(tp, ERROR_PROCESSED))
7282 /* Check Flow Attention register */
7283 val = tr32(HOSTCC_FLOW_ATTN);
7284 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7285 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7289 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7290 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7294 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7295 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7304 tg3_flag_set(tp, ERROR_PROCESSED);
7305 tg3_reset_task_schedule(tp);
7308 static int tg3_poll(struct napi_struct *napi, int budget)
7310 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7311 struct tg3 *tp = tnapi->tp;
7313 struct tg3_hw_status *sblk = tnapi->hw_status;
7316 if (sblk->status & SD_STATUS_ERROR)
7317 tg3_process_error(tp);
7321 work_done = tg3_poll_work(tnapi, work_done, budget);
7323 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7326 if (unlikely(work_done >= budget))
7329 if (tg3_flag(tp, TAGGED_STATUS)) {
		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
7334 tnapi->last_tag = sblk->status_tag;
7335 tnapi->last_irq_tag = tnapi->last_tag;
7338 sblk->status &= ~SD_STATUS_UPDATED;
7340 if (likely(!tg3_has_work(tnapi))) {
7341 napi_complete_done(napi, work_done);
7342 tg3_int_reenable(tnapi);
7347 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7351 /* work_done is guaranteed to be less than budget. */
7352 napi_complete(napi);
7353 tg3_reset_task_schedule(tp);
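/* tg3_poll() is the single-vector path: with TAGGED_STATUS the hardware tags
 * every status block update and tg3_int_reenable() echoes last_tag back to
 * acknowledge progress, while the untagged path just clears SD_STATUS_UPDATED
 * and re-checks tg3_has_work() before completing NAPI.
 */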
7357 static void tg3_napi_disable(struct tg3 *tp)
7361 for (i = tp->irq_cnt - 1; i >= 0; i--)
7362 napi_disable(&tp->napi[i].napi);
7365 static void tg3_napi_enable(struct tg3 *tp)
7369 for (i = 0; i < tp->irq_cnt; i++)
7370 napi_enable(&tp->napi[i].napi);
7373 static void tg3_napi_init(struct tg3 *tp)
7377 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7378 for (i = 1; i < tp->irq_cnt; i++)
7379 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7382 static void tg3_napi_fini(struct tg3 *tp)
7386 for (i = 0; i < tp->irq_cnt; i++)
7387 netif_napi_del(&tp->napi[i].napi);
7390 static inline void tg3_netif_stop(struct tg3 *tp)
7392 netif_trans_update(tp->dev); /* prevent tx timeout */
7393 tg3_napi_disable(tp);
7394 netif_carrier_off(tp->dev);
7395 netif_tx_disable(tp->dev);
7398 /* tp->lock must be held */
7399 static inline void tg3_netif_start(struct tg3 *tp)
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
7407 netif_tx_wake_all_queues(tp->dev);
7410 netif_carrier_on(tp->dev);
7412 tg3_napi_enable(tp);
7413 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7414 tg3_enable_ints(tp);
7417 static void tg3_irq_quiesce(struct tg3 *tp)
7418 __releases(tp->lock)
7419 __acquires(tp->lock)
7423 BUG_ON(tp->irq_sync);
7428 spin_unlock_bh(&tp->lock);
7430 for (i = 0; i < tp->irq_cnt; i++)
7431 synchronize_irq(tp->napi[i].irq_vec);
7433 spin_lock_bh(&tp->lock);
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
7441 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7443 spin_lock_bh(&tp->lock);
7445 tg3_irq_quiesce(tp);
7448 static inline void tg3_full_unlock(struct tg3 *tp)
7450 spin_unlock_bh(&tp->lock);
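/* Typical usage of the pair above, sketched (callers and the exact irq_sync
 * value vary across the driver):
 *
 *	tg3_full_lock(tp, 1);	// nonzero irq_sync also quiesces the IRQs
 *	...reprogram the hardware...
 *	tg3_full_unlock(tp);
 */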
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
7456 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7458 struct tg3_napi *tnapi = dev_id;
7459 struct tg3 *tp = tnapi->tp;
7461 prefetch(tnapi->hw_status);
7463 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7465 if (likely(!tg3_irq_sync(tp)))
7466 napi_schedule(&tnapi->napi);
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
7475 static irqreturn_t tg3_msi(int irq, void *dev_id)
7477 struct tg3_napi *tnapi = dev_id;
7478 struct tg3 *tp = tnapi->tp;
7480 prefetch(tnapi->hw_status);
7482 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/* Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
7490 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7491 if (likely(!tg3_irq_sync(tp)))
7492 napi_schedule(&tnapi->napi);
7494 return IRQ_RETVAL(1);
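/* Note the asymmetry with the poll loop: tg3_msi() above writes 1 to the
 * interrupt mailbox to engage "in-intr-handler" coalescing, and
 * tg3_poll_msix() later writes last_tag << 24 to the same mailbox to
 * re-enable interrupts and report how far processing got.
 */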
7497 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7499 struct tg3_napi *tnapi = dev_id;
7500 struct tg3 *tp = tnapi->tp;
7501 struct tg3_hw_status *sblk = tnapi->hw_status;
7502 unsigned int handled = 1;
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
7509 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7510 if (tg3_flag(tp, CHIP_RESETTING) ||
7511 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
	/* Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
7528 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7529 if (tg3_irq_sync(tp))
7531 sblk->status &= ~SD_STATUS_UPDATED;
7532 if (likely(tg3_has_work(tnapi))) {
7533 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7534 napi_schedule(&tnapi->napi);
		/* No work, shared interrupt perhaps? Re-enable
		 * interrupts, and flush that PCI write.
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
7543 return IRQ_RETVAL(handled);
7546 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7548 struct tg3_napi *tnapi = dev_id;
7549 struct tg3 *tp = tnapi->tp;
7550 struct tg3_hw_status *sblk = tnapi->hw_status;
7551 unsigned int handled = 1;
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
7558 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7559 if (tg3_flag(tp, CHIP_RESETTING) ||
7560 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
	/* Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
7577 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7580 * In a shared interrupt configuration, sometimes other devices'
7581 * interrupts will scream. We record the current status tag here
7582 * so that the above check can report that the screaming interrupts
7583 * are unhandled. Eventually they will be silenced.
7585 tnapi->last_irq_tag = sblk->status_tag;
7587 if (tg3_irq_sync(tp))
7590 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7592 napi_schedule(&tnapi->napi);
7595 return IRQ_RETVAL(handled);
7598 /* ISR for interrupt test */
7599 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7601 struct tg3_napi *tnapi = dev_id;
7602 struct tg3 *tp = tnapi->tp;
7603 struct tg3_hw_status *sblk = tnapi->hw_status;
7605 if ((sblk->status & SD_STATUS_UPDATED) ||
7606 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7607 tg3_disable_ints(tp);
7608 return IRQ_RETVAL(1);
7610 return IRQ_RETVAL(0);
7613 #ifdef CONFIG_NET_POLL_CONTROLLER
7614 static void tg3_poll_controller(struct net_device *dev)
7617 struct tg3 *tp = netdev_priv(dev);
7619 if (tg3_irq_sync(tp))
7622 for (i = 0; i < tp->irq_cnt; i++)
7623 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7627 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7629 struct tg3 *tp = netdev_priv(dev);
7631 if (netif_msg_tx_err(tp)) {
7632 netdev_err(dev, "transmit timed out, resetting\n");
7636 tg3_reset_task_schedule(tp);
7639 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7640 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7642 u32 base = (u32) mapping & 0xffffffff;
7644 return base + len + 8 < base;
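/* A quick illustration of the wrap test (values hypothetical): with
 * mapping == 0xffffe000 and len == 0x3000, the 32-bit sum
 * 0xffffe000 + 0x3000 + 8 wraps to 0x1008 < base, so the buffer straddles a
 * 4GB boundary and needs the workaround; mapping == 0x10000000 with the same
 * len does not wrap and passes. The "+ 8" is extra slack beyond the buffer
 * end (assumption: headroom for the DMA engine's read-ahead).
 */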
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc.
 */
7650 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7653 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7654 u32 base = (u32) mapping & 0xffffffff;
7656 return ((base + len + (mss & 0x3fff)) < base);
7661 /* Test for DMA addresses > 40-bit */
7662 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7665 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7666 if (tg3_flag(tp, 40BIT_DMA_BUG))
7667 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7674 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7675 dma_addr_t mapping, u32 len, u32 flags,
7678 txbd->addr_hi = ((u64) mapping >> 32);
7679 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7680 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7681 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
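/* Packing example for the descriptor above (values illustrative; the TXD_*
 * shift constants come from tg3.h): a 1514-byte frame mapped at DMA address
 * 0x123456789 with no mss/vlan yields
 *
 *	txbd->addr_hi   = 0x00000001;
 *	txbd->addr_lo   = 0x23456789;
 *	txbd->len_flags = (1514 << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
 */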
7684 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7685 dma_addr_t map, u32 len, u32 flags,
7688 struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;
	if (tg3_4g_overflow_test(map, len))
		hwbug = true;
	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;
	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;
7703 if (tp->dma_limit) {
7704 u32 prvidx = *entry;
7705 u32 tmp_flag = flags & ~TXD_FLAG_END;
7706 while (len > tp->dma_limit && *budget) {
7707 u32 frag_len = tp->dma_limit;
7708 len -= tp->dma_limit;
			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}
7716 tnapi->tx_buffers[*entry].fragmented = true;
7718 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7719 frag_len, tmp_flag, mss, vlan);
7722 *entry = NEXT_TX(*entry);
7729 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7730 len, flags, mss, vlan);
7732 *entry = NEXT_TX(*entry);
7735 tnapi->tx_buffers[prvidx].fragmented = false;
7739 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7740 len, flags, mss, vlan);
7741 *entry = NEXT_TX(*entry);
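/* Splitting example for the dma_limit path above: with dma_limit == 4096, a
 * 4100-byte fragment would naively leave a 4-byte tail and trip the 8-byte
 * DMA bug, so the loop instead emits 2048 + 2052 byte descriptors; no
 * descriptor ever ends up 8 bytes or shorter.
 */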
7747 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7750 struct sk_buff *skb;
7751 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7756 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7757 skb_headlen(skb), DMA_TO_DEVICE);
7759 while (txb->fragmented) {
7760 txb->fragmented = false;
7761 entry = NEXT_TX(entry);
7762 txb = &tnapi->tx_buffers[entry];
7765 for (i = 0; i <= last; i++) {
7766 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7768 entry = NEXT_TX(entry);
7769 txb = &tnapi->tx_buffers[entry];
7771 dma_unmap_page(&tnapi->tp->pdev->dev,
7772 dma_unmap_addr(txb, mapping),
7773 skb_frag_size(frag), DMA_TO_DEVICE);
7775 while (txb->fragmented) {
7776 txb->fragmented = false;
7777 entry = NEXT_TX(entry);
7778 txb = &tnapi->tx_buffers[entry];
7783 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7784 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7785 struct sk_buff **pskb,
7786 u32 *entry, u32 *budget,
7787 u32 base_flags, u32 mss, u32 vlan)
7789 struct tg3 *tp = tnapi->tp;
7790 struct sk_buff *new_skb, *skb = *pskb;
7791 dma_addr_t new_addr = 0;
7794 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7795 new_skb = skb_copy(skb, GFP_ATOMIC);
7797 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7799 new_skb = skb_copy_expand(skb,
7800 skb_headroom(skb) + more_headroom,
7801 skb_tailroom(skb), GFP_ATOMIC);
7807 /* New SKB is guaranteed to be linear. */
7808 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7809 new_skb->len, DMA_TO_DEVICE);
7810 /* Make sure the mapping succeeded */
7811 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7812 dev_kfree_skb_any(new_skb);
7815 u32 save_entry = *entry;
7817 base_flags |= TXD_FLAG_END;
7819 tnapi->tx_buffers[*entry].skb = new_skb;
7820 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7823 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7824 new_skb->len, base_flags,
7826 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7827 dev_kfree_skb_any(new_skb);
7833 dev_consume_skb_any(skb);
7838 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size.
	 */
7843 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
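/* For example, with tx_pending == 512 this rejects any packet whose
 * gso_segs is 170 or more (512 / 3 == 170), matching the worst-case
 * estimate of roughly three descriptors per segment used in tg3_tso_bug().
 */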
7846 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to work around all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
7851 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7852 struct netdev_queue *txq, struct sk_buff *skb)
7854 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7855 struct sk_buff *segs, *seg, *next;
7857 /* Estimate the number of fragments in the worst case */
7858 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7859 netif_tx_stop_queue(txq);
		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
7867 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7868 return NETDEV_TX_BUSY;
7870 netif_tx_wake_queue(txq);
7873 segs = skb_gso_segment(skb, tp->dev->features &
7874 ~(NETIF_F_TSO | NETIF_F_TSO6));
7875 if (IS_ERR(segs) || !segs)
7876 goto tg3_tso_bug_end;
7878 skb_list_walk_safe(segs, seg, next) {
7879 skb_mark_not_on_list(seg);
7880 tg3_start_xmit(seg, tp->dev);
7884 dev_consume_skb_any(skb);
7886 return NETDEV_TX_OK;
7889 /* hard_start_xmit for all devices */
7890 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7892 struct tg3 *tp = netdev_priv(dev);
7893 u32 len, entry, base_flags, mss, vlan = 0;
7895 int i = -1, would_hit_hwbug;
7897 struct tg3_napi *tnapi;
7898 struct netdev_queue *txq;
7900 struct iphdr *iph = NULL;
7901 struct tcphdr *tcph = NULL;
7902 __sum16 tcp_csum = 0, ip_csum = 0;
7903 __be16 ip_tot_len = 0;
7905 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7906 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7907 if (tg3_flag(tp, ENABLE_TSS))
7910 budget = tg3_tx_avail(tnapi);
	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either. Rejoice!
	 */
7917 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7918 if (!netif_tx_queue_stopped(txq)) {
7919 netif_tx_stop_queue(txq);
7921 /* This is a hard error, log it. */
7923 "BUG! Tx Ring full when queue awake!\n");
7925 return NETDEV_TX_BUSY;
7928 entry = tnapi->tx_prod;
7931 mss = skb_shinfo(skb)->gso_size;
7933 u32 tcp_opt_len, hdr_len;
7935 if (skb_cow_head(skb, 0))
7939 tcp_opt_len = tcp_optlen(skb);
7941 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
		/* HW/FW cannot correctly segment packets that have been
		 * vlan encapsulated.
		 */
7946 if (skb->protocol == htons(ETH_P_8021Q) ||
7947 skb->protocol == htons(ETH_P_8021AD)) {
7948 if (tg3_tso_bug_gso_check(tnapi, skb))
7949 return tg3_tso_bug(tp, tnapi, txq, skb);
7953 if (!skb_is_gso_v6(skb)) {
7954 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7955 tg3_flag(tp, TSO_BUG)) {
7956 if (tg3_tso_bug_gso_check(tnapi, skb))
7957 return tg3_tso_bug(tp, tnapi, txq, skb);
7960 ip_csum = iph->check;
7961 ip_tot_len = iph->tot_len;
7963 iph->tot_len = htons(mss + hdr_len);
7966 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7967 TXD_FLAG_CPU_POST_DMA);
7969 tcph = tcp_hdr(skb);
7970 tcp_csum = tcph->check;
7972 if (tg3_flag(tp, HW_TSO_1) ||
7973 tg3_flag(tp, HW_TSO_2) ||
7974 tg3_flag(tp, HW_TSO_3)) {
7976 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7978 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7982 if (tg3_flag(tp, HW_TSO_3)) {
7983 mss |= (hdr_len & 0xc) << 12;
7985 base_flags |= 0x00000010;
7986 base_flags |= (hdr_len & 0x3e0) << 5;
7987 } else if (tg3_flag(tp, HW_TSO_2))
7988 mss |= hdr_len << 9;
7989 else if (tg3_flag(tp, HW_TSO_1) ||
7990 tg3_asic_rev(tp) == ASIC_REV_5705) {
7991 if (tcp_opt_len || iph->ihl > 5) {
7994 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7995 mss |= (tsflags << 11);
7998 if (tcp_opt_len || iph->ihl > 5) {
8001 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8002 base_flags |= tsflags << 12;
8005 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW cannot correctly checksum packets that have been
		 * vlan encapsulated.
		 */
8009 if (skb->protocol == htons(ETH_P_8021Q) ||
8010 skb->protocol == htons(ETH_P_8021AD)) {
8011 if (skb_checksum_help(skb))
8014 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8018 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8019 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8020 base_flags |= TXD_FLAG_JMB_PKT;
8022 if (skb_vlan_tag_present(skb)) {
8023 base_flags |= TXD_FLAG_VLAN;
8024 vlan = skb_vlan_tag_get(skb);
8027 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8028 tg3_flag(tp, TX_TSTAMP_EN)) {
8029 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8030 base_flags |= TXD_FLAG_HWTSTAMP;
8033 len = skb_headlen(skb);
8035 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8037 if (dma_mapping_error(&tp->pdev->dev, mapping))
8041 tnapi->tx_buffers[entry].skb = skb;
8042 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8044 would_hit_hwbug = 0;
8046 if (tg3_flag(tp, 5701_DMA_BUG))
8047 would_hit_hwbug = 1;
8049 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8050 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8052 would_hit_hwbug = 1;
8053 } else if (skb_shinfo(skb)->nr_frags > 0) {
8056 if (!tg3_flag(tp, HW_TSO_1) &&
8057 !tg3_flag(tp, HW_TSO_2) &&
8058 !tg3_flag(tp, HW_TSO_3))
		/* Now loop through additional data
		 * fragments, and queue them.
		 */
8064 last = skb_shinfo(skb)->nr_frags - 1;
8065 for (i = 0; i <= last; i++) {
8066 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8068 len = skb_frag_size(frag);
8069 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8070 len, DMA_TO_DEVICE);
8072 tnapi->tx_buffers[entry].skb = NULL;
8073 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8075 if (dma_mapping_error(&tp->pdev->dev, mapping))
8079 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8081 ((i == last) ? TXD_FLAG_END : 0),
8083 would_hit_hwbug = 1;
8089 if (would_hit_hwbug) {
8090 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8092 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
8097 iph->check = ip_csum;
8098 iph->tot_len = ip_tot_len;
8100 tcph->check = tcp_csum;
8101 return tg3_tso_bug(tp, tnapi, txq, skb);
		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
8107 entry = tnapi->tx_prod;
8108 budget = tg3_tx_avail(tnapi);
8109 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8110 base_flags, mss, vlan))
8114 skb_tx_timestamp(skb);
8115 netdev_tx_sent_queue(txq, skb->len);
8117 /* Sync BD data before updating mailbox */
8120 tnapi->tx_prod = entry;
8121 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8122 netif_tx_stop_queue(txq);
		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
8130 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8131 netif_tx_wake_queue(txq);
8134 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8135 /* Packets are ready, update Tx producer idx on card. */
8136 tw32_tx_mbox(tnapi->prodmbox, entry);
8139 return NETDEV_TX_OK;
8142 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8143 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8145 dev_kfree_skb_any(skb);
8148 return NETDEV_TX_OK;
8151 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8154 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8155 MAC_MODE_PORT_MODE_MASK);
8157 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8159 if (!tg3_flag(tp, 5705_PLUS))
8160 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8162 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8163 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8165 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8167 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8169 if (tg3_flag(tp, 5705_PLUS) ||
8170 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8171 tg3_asic_rev(tp) == ASIC_REV_5700)
8172 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8175 tw32(MAC_MODE, tp->mac_mode);
8179 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8181 u32 val, bmcr, mac_mode, ptest = 0;
8183 tg3_phy_toggle_apd(tp, false);
8184 tg3_phy_toggle_automdix(tp, false);
8186 if (extlpbk && tg3_phy_set_extloopbk(tp))
8189 bmcr = BMCR_FULLDPLX;
8194 bmcr |= BMCR_SPEED100;
8198 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8200 bmcr |= BMCR_SPEED100;
8203 bmcr |= BMCR_SPEED1000;
8208 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8209 tg3_readphy(tp, MII_CTRL1000, &val);
8210 val |= CTL1000_AS_MASTER |
8211 CTL1000_ENABLE_MASTER;
8212 tg3_writephy(tp, MII_CTRL1000, val);
8214 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8215 MII_TG3_FET_PTEST_TRIM_2;
8216 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8219 bmcr |= BMCR_LOOPBACK;
8221 tg3_writephy(tp, MII_BMCR, bmcr);
8223 /* The write needs to be flushed for the FETs */
8224 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8225 tg3_readphy(tp, MII_BMCR, &bmcr);
8229 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8230 tg3_asic_rev(tp) == ASIC_REV_5785) {
8231 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8232 MII_TG3_FET_PTEST_FRC_TX_LINK |
8233 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8235 /* The write needs to be flushed for the AC131 */
8236 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8239 /* Reset to prevent losing 1st rx packet intermittently */
8240 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8241 tg3_flag(tp, 5780_CLASS)) {
8242 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8244 tw32_f(MAC_RX_MODE, tp->rx_mode);
8247 mac_mode = tp->mac_mode &
8248 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8249 if (speed == SPEED_1000)
8250 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8252 mac_mode |= MAC_MODE_PORT_MODE_MII;
8254 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8255 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8257 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8258 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8259 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8260 mac_mode |= MAC_MODE_LINK_POLARITY;
8262 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8263 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8266 tw32(MAC_MODE, mac_mode);
8272 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8274 struct tg3 *tp = netdev_priv(dev);
8276 if (features & NETIF_F_LOOPBACK) {
8277 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8280 spin_lock_bh(&tp->lock);
8281 tg3_mac_loopback(tp, true);
8282 netif_carrier_on(tp->dev);
8283 spin_unlock_bh(&tp->lock);
8284 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8286 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8289 spin_lock_bh(&tp->lock);
8290 tg3_mac_loopback(tp, false);
8291 /* Force link status check */
8292 tg3_setup_phy(tp, true);
8293 spin_unlock_bh(&tp->lock);
8294 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8298 static netdev_features_t tg3_fix_features(struct net_device *dev,
8299 netdev_features_t features)
8301 struct tg3 *tp = netdev_priv(dev);
8303 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8304 features &= ~NETIF_F_ALL_TSO;
8309 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8311 netdev_features_t changed = dev->features ^ features;
8313 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8314 tg3_set_loopback(dev, features);
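/* dev->features ^ features isolates exactly the bits that changed; e.g. if
 * only loopback was toggled, changed == NETIF_F_LOOPBACK and the branch
 * above fires for that transition alone.
 */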
8319 static void tg3_rx_prodring_free(struct tg3 *tp,
8320 struct tg3_rx_prodring_set *tpr)
8324 if (tpr != &tp->napi[0].prodring) {
8325 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8326 i = (i + 1) & tp->rx_std_ring_mask)
8327 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8330 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8331 for (i = tpr->rx_jmb_cons_idx;
8332 i != tpr->rx_jmb_prod_idx;
8333 i = (i + 1) & tp->rx_jmb_ring_mask) {
8334 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8342 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8343 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8346 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8347 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8348 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
8360 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8361 struct tg3_rx_prodring_set *tpr)
8363 u32 i, rx_pkt_dma_sz;
8365 tpr->rx_std_cons_idx = 0;
8366 tpr->rx_std_prod_idx = 0;
8367 tpr->rx_jmb_cons_idx = 0;
8368 tpr->rx_jmb_prod_idx = 0;
8370 if (tpr != &tp->napi[0].prodring) {
8371 memset(&tpr->rx_std_buffers[0], 0,
8372 TG3_RX_STD_BUFF_RING_SIZE(tp));
8373 if (tpr->rx_jmb_buffers)
8374 memset(&tpr->rx_jmb_buffers[0], 0,
8375 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8379 /* Zero out all descriptors. */
8380 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8382 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8383 if (tg3_flag(tp, 5780_CLASS) &&
8384 tp->dev->mtu > ETH_DATA_LEN)
8385 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8386 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
	/* Initialize invariants of the rings; we only set this
	 * stuff once. This works because the card does not
	 * write into the rx buffer posting rings.
	 */
8392 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8393 struct tg3_rx_buffer_desc *rxd;
8395 rxd = &tpr->rx_std[i];
8396 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8397 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8398 rxd->opaque = (RXD_OPAQUE_RING_STD |
8399 (i << RXD_OPAQUE_INDEX_SHIFT));
8402 /* Now allocate fresh SKBs for each rx ring. */
8403 for (i = 0; i < tp->rx_pending; i++) {
8404 unsigned int frag_size;
8406 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8408 netdev_warn(tp->dev,
8409 "Using a smaller RX standard ring. Only "
8410 "%d out of %d buffers were allocated "
8411 "successfully\n", i, tp->rx_pending);
8419 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8422 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8424 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8427 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8428 struct tg3_rx_buffer_desc *rxd;
8430 rxd = &tpr->rx_jmb[i].std;
8431 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8432 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8434 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8435 (i << RXD_OPAQUE_INDEX_SHIFT));
8438 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8439 unsigned int frag_size;
8441 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8443 netdev_warn(tp->dev,
8444 "Using a smaller RX jumbo ring. Only %d "
8445 "out of %d buffers were allocated "
8446 "successfully\n", i, tp->rx_jumbo_pending);
8449 tp->rx_jumbo_pending = i;
8458 tg3_rx_prodring_free(tp, tpr);
8462 static void tg3_rx_prodring_fini(struct tg3 *tp,
8463 struct tg3_rx_prodring_set *tpr)
8465 kfree(tpr->rx_std_buffers);
8466 tpr->rx_std_buffers = NULL;
8467 kfree(tpr->rx_jmb_buffers);
8468 tpr->rx_jmb_buffers = NULL;
8470 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8471 tpr->rx_std, tpr->rx_std_mapping);
8475 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8476 tpr->rx_jmb, tpr->rx_jmb_mapping);
8481 static int tg3_rx_prodring_init(struct tg3 *tp,
8482 struct tg3_rx_prodring_set *tpr)
8484 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8486 if (!tpr->rx_std_buffers)
8489 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8490 TG3_RX_STD_RING_BYTES(tp),
8491 &tpr->rx_std_mapping,
8496 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8497 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8499 if (!tpr->rx_jmb_buffers)
8502 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8503 TG3_RX_JMB_RING_BYTES(tp),
8504 &tpr->rx_jmb_mapping,
8513 tg3_rx_prodring_fini(tp, tpr);
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
8524 static void tg3_free_rings(struct tg3 *tp)
8528 for (j = 0; j < tp->irq_cnt; j++) {
8529 struct tg3_napi *tnapi = &tp->napi[j];
8531 tg3_rx_prodring_free(tp, &tnapi->prodring);
8533 if (!tnapi->tx_buffers)
8536 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8537 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8542 tg3_tx_skb_unmap(tnapi, i,
8543 skb_shinfo(skb)->nr_frags - 1);
8545 dev_consume_skb_any(skb);
8547 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
8558 static int tg3_init_rings(struct tg3 *tp)
8562 /* Free up all the SKBs. */
8565 for (i = 0; i < tp->irq_cnt; i++) {
8566 struct tg3_napi *tnapi = &tp->napi[i];
8568 tnapi->last_tag = 0;
8569 tnapi->last_irq_tag = 0;
8570 tnapi->hw_status->status = 0;
8571 tnapi->hw_status->status_tag = 0;
8572 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8577 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8579 tnapi->rx_rcb_ptr = 0;
8581 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8583 if (tnapi->prodring.rx_std &&
8584 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8593 static void tg3_mem_tx_release(struct tg3 *tp)
8597 for (i = 0; i < tp->irq_max; i++) {
8598 struct tg3_napi *tnapi = &tp->napi[i];
8600 if (tnapi->tx_ring) {
8601 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8602 tnapi->tx_ring, tnapi->tx_desc_mapping);
8603 tnapi->tx_ring = NULL;
8606 kfree(tnapi->tx_buffers);
8607 tnapi->tx_buffers = NULL;
8611 static int tg3_mem_tx_acquire(struct tg3 *tp)
8614 struct tg3_napi *tnapi = &tp->napi[0];
	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts. Don't allocate any resources for it.
	 */
8619 if (tg3_flag(tp, ENABLE_TSS))
8622 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8623 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8624 sizeof(struct tg3_tx_ring_info),
8626 if (!tnapi->tx_buffers)
8629 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8631 &tnapi->tx_desc_mapping,
8633 if (!tnapi->tx_ring)
8640 tg3_mem_tx_release(tp);
8644 static void tg3_mem_rx_release(struct tg3 *tp)
8648 for (i = 0; i < tp->irq_max; i++) {
8649 struct tg3_napi *tnapi = &tp->napi[i];
8651 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8656 dma_free_coherent(&tp->pdev->dev,
8657 TG3_RX_RCB_RING_BYTES(tp),
8659 tnapi->rx_rcb_mapping);
8660 tnapi->rx_rcb = NULL;
8664 static int tg3_mem_rx_acquire(struct tg3 *tp)
8666 unsigned int i, limit;
8668 limit = tp->rxq_cnt;
	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero. This is the true hw prodring.
	 */
8673 if (tg3_flag(tp, ENABLE_RSS))
8676 for (i = 0; i < limit; i++) {
8677 struct tg3_napi *tnapi = &tp->napi[i];
8679 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
8686 if (!i && tg3_flag(tp, ENABLE_RSS))
8689 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8690 TG3_RX_RCB_RING_BYTES(tp),
8691 &tnapi->rx_rcb_mapping,
8700 tg3_mem_rx_release(tp);
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
8708 static void tg3_free_consistent(struct tg3 *tp)
8712 for (i = 0; i < tp->irq_cnt; i++) {
8713 struct tg3_napi *tnapi = &tp->napi[i];
8715 if (tnapi->hw_status) {
8716 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8718 tnapi->status_mapping);
8719 tnapi->hw_status = NULL;
8723 tg3_mem_rx_release(tp);
8724 tg3_mem_tx_release(tp);
	/* tp->hw_stats can be referenced safely:
	 * 1. under rtnl_lock
	 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
8731 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8732 tp->hw_stats, tp->stats_mapping);
8733 tp->hw_stats = NULL;
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
8741 static int tg3_alloc_consistent(struct tg3 *tp)
8745 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8746 sizeof(struct tg3_hw_stats),
8747 &tp->stats_mapping, GFP_KERNEL);
8751 for (i = 0; i < tp->irq_cnt; i++) {
8752 struct tg3_napi *tnapi = &tp->napi[i];
8753 struct tg3_hw_status *sblk;
8755 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8757 &tnapi->status_mapping,
8759 if (!tnapi->hw_status)
8762 sblk = tnapi->hw_status;
8764 if (tg3_flag(tp, ENABLE_RSS)) {
8765 u16 *prodptr = NULL;
			/*
			 * When RSS is enabled, the status block format changes
			 * slightly. The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
8775 prodptr = &sblk->idx[0].rx_producer;
8778 prodptr = &sblk->rx_jumbo_consumer;
8781 prodptr = &sblk->reserved;
8784 prodptr = &sblk->rx_mini_consumer;
8787 tnapi->rx_rcb_prod_idx = prodptr;
8789 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8793 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8799 tg3_free_consistent(tp);
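/* The RSS remapping above, tabulated (per-vector rx return ring producer
 * pointer into the shared status block layout, reconstructed from the
 * prodptr assignments, which the switch selects by vector index):
 *
 *	vector 1 -> &sblk->idx[0].rx_producer
 *	vector 2 -> &sblk->rx_jumbo_consumer
 *	vector 3 -> &sblk->reserved
 *	vector 4 -> &sblk->rx_mini_consumer
 */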
8803 #define MAX_WAIT_CNT 1000
/* To stop a block, clear the enable bit and poll till it
 * clears. tp->lock is held.
 */
8808 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8813 if (tg3_flag(tp, 5705_PLUS)) {
		/* We can't enable/disable these bits of the
		 * 5705/5750, just say success.
		 */
8834 for (i = 0; i < MAX_WAIT_CNT; i++) {
8835 if (pci_channel_offline(tp->pdev)) {
8836 dev_err(&tp->pdev->dev,
8837 "tg3_stop_block device offline, "
8838 "ofs=%lx enable_bit=%x\n",
8845 if ((val & enable_bit) == 0)
8849 if (i == MAX_WAIT_CNT && !silent) {
8850 dev_err(&tp->pdev->dev,
8851 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8859 /* tp->lock is held. */
8860 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8864 tg3_disable_ints(tp);
8866 if (pci_channel_offline(tp->pdev)) {
8867 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8868 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8873 tp->rx_mode &= ~RX_MODE_ENABLE;
8874 tw32_f(MAC_RX_MODE, tp->rx_mode);
8877 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8878 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8879 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8880 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8881 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8882 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8884 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8885 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8886 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8887 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8889 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8890 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8892 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8893 tw32_f(MAC_MODE, tp->mac_mode);
8896 tp->tx_mode &= ~TX_MODE_ENABLE;
8897 tw32_f(MAC_TX_MODE, tp->tx_mode);
8899 for (i = 0; i < MAX_WAIT_CNT; i++) {
8901 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8904 if (i >= MAX_WAIT_CNT) {
8905 dev_err(&tp->pdev->dev,
8906 "%s timed out, TX_MODE_ENABLE will not clear "
8907 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8911 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8912 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8913 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8915 tw32(FTQ_RESET, 0xffffffff);
8916 tw32(FTQ_RESET, 0x00000000);
8918 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8919 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8922 for (i = 0; i < tp->irq_cnt; i++) {
8923 struct tg3_napi *tnapi = &tp->napi[i];
8924 if (tnapi->hw_status)
8925 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8931 /* Save PCI command register before chip reset */
8932 static void tg3_save_pci_state(struct tg3 *tp)
8934 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8937 /* Restore PCI state after chip reset */
8938 static void tg3_restore_pci_state(struct tg3 *tp)
8942 /* Re-enable indirect register accesses. */
8943 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8944 tp->misc_host_ctrl);
8946 /* Set MAX PCI retry to zero. */
8947 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8948 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8949 tg3_flag(tp, PCIX_MODE))
8950 val |= PCISTATE_RETRY_SAME_DMA;
8951 /* Allow reads and writes to the APE register and memory space. */
8952 if (tg3_flag(tp, ENABLE_APE))
8953 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8954 PCISTATE_ALLOW_APE_SHMEM_WR |
8955 PCISTATE_ALLOW_APE_PSPACE_WR;
8956 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8958 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8960 if (!tg3_flag(tp, PCI_EXPRESS)) {
8961 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8962 tp->pci_cacheline_sz);
8963 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8967 /* Make sure PCI-X relaxed ordering bit is clear. */
8968 if (tg3_flag(tp, PCIX_MODE)) {
8971 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8973 pcix_cmd &= ~PCI_X_CMD_ERO;
8974 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8978 if (tg3_flag(tp, 5780_CLASS)) {
		/* Chip reset on 5780 will reset MSI enable bit,
		 * so we need to restore it.
		 */
8983 if (tg3_flag(tp, USING_MSI)) {
8986 pci_read_config_word(tp->pdev,
8987 tp->msi_cap + PCI_MSI_FLAGS,
8989 pci_write_config_word(tp->pdev,
8990 tp->msi_cap + PCI_MSI_FLAGS,
8991 ctrl | PCI_MSI_FLAGS_ENABLE);
8992 val = tr32(MSGINT_MODE);
8993 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8998 static void tg3_override_clk(struct tg3 *tp)
9002 switch (tg3_asic_rev(tp)) {
9004 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9005 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9006 TG3_CPMU_MAC_ORIDE_ENABLE);
9011 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9019 static void tg3_restore_clk(struct tg3 *tp)
9023 switch (tg3_asic_rev(tp)) {
9025 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9026 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9027 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9032 val = tr32(TG3_CPMU_CLCK_ORIDE);
9033 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9041 /* tp->lock is held. */
9042 static int tg3_chip_reset(struct tg3 *tp)
9043 __releases(tp->lock)
9044 __acquires(tp->lock)
9047 void (*write_op)(struct tg3 *, u32, u32);
9050 if (!pci_device_is_present(tp->pdev))
9055 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
9060 tp->nvram_lock_cnt = 0;
	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
9066 tg3_save_pci_state(tp);
9068 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9069 tg3_flag(tp, 5755_PLUS))
9070 tw32(GRC_FASTBOOT_PC, 0);
	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things. So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
9078 write_op = tp->write32;
9079 if (write_op == tg3_write_flush_reg32)
9080 tp->write32 = tg3_write32;
	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared. The chip does not generate interrupts
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
9088 tg3_flag_set(tp, CHIP_RESETTING);
9089 for (i = 0; i < tp->irq_cnt; i++) {
9090 struct tg3_napi *tnapi = &tp->napi[i];
9091 if (tnapi->hw_status) {
9092 tnapi->hw_status->status = 0;
9093 tnapi->hw_status->status_tag = 0;
9095 tnapi->last_tag = 0;
9096 tnapi->last_irq_tag = 0;
9100 tg3_full_unlock(tp);
9102 for (i = 0; i < tp->irq_cnt; i++)
9103 synchronize_irq(tp->napi[i].irq_vec);
9105 tg3_full_lock(tp, 0);
9107 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9108 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9109 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9113 val = GRC_MISC_CFG_CORECLK_RESET;
9115 if (tg3_flag(tp, PCI_EXPRESS)) {
9116 /* Force PCIe 1.0a mode */
9117 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9118 !tg3_flag(tp, 57765_PLUS) &&
9119 tr32(TG3_PCIE_PHY_TSTCTL) ==
9120 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9121 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9123 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9124 tw32(GRC_MISC_CFG, (1 << 29));
9129 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9130 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9131 tw32(GRC_VCPU_EXT_CTRL,
9132 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
9140 tg3_override_clk(tp);
9142 /* Manage gphy power for all CPMU absent PCIe devices. */
9143 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9144 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9146 tw32(GRC_MISC_CFG, val);
9148 /* restore 5701 hardware bug workaround write method */
9149 tp->write32 = write_op;
	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time? It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);
	/* Flush PCI posted writes. The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above). I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
9172 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9176 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9179 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9183 /* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);
9187 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9188 pci_write_config_dword(tp->pdev, 0xc4,
9189 cfg_val | (1 << 15));
9192 /* Clear the "no snoop" and "relaxed ordering" bits. */
9193 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting. Enforce the restriction.
		 */
9198 if (!tg3_flag(tp, CPMU_PRESENT))
9199 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9200 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9202 /* Clear error status */
9203 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9204 PCI_EXP_DEVSTA_CED |
9205 PCI_EXP_DEVSTA_NFED |
9206 PCI_EXP_DEVSTA_FED |
9207 PCI_EXP_DEVSTA_URD);
9210 tg3_restore_pci_state(tp);
9212 tg3_flag_clear(tp, CHIP_RESETTING);
9213 tg3_flag_clear(tp, ERROR_PROCESSED);
9216 if (tg3_flag(tp, 5780_CLASS))
9217 val = tr32(MEMARB_MODE);
9218 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9220 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9222 tw32(0x5000, 0x400);
9225 if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
9232 tg3_halt_cpu(tp, RX_CPU_BASE);
9235 err = tg3_poll_fw(tp);
9239 tw32(GRC_MODE, tp->grc_mode);
9241 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9244 tw32(0xc4, val | (1 << 15));
9247 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9248 tg3_asic_rev(tp) == ASIC_REV_5705) {
9249 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9250 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9251 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9252 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9255 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9256 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9258 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9259 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9264 tw32_f(MAC_MODE, val);
9267 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9271 if (tg3_flag(tp, PCI_EXPRESS) &&
9272 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9273 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9274 !tg3_flag(tp, 57765_PLUS)) {
9277 tw32(0x7c00, val | (1 << 25));
9280 tg3_restore_clk(tp);
	/* Increase the core clock speed to fix the tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
9285 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9286 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9287 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9288 TG3_CPMU_MAC_ORIDE_ENABLE);
9291 /* Reprobe ASF enable state. */
9292 tg3_flag_clear(tp, ENABLE_ASF);
9293 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9294 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9296 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9297 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9298 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9301 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9302 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9303 tg3_flag_set(tp, ENABLE_ASF);
9304 tp->last_event_jiffies = jiffies;
9305 if (tg3_flag(tp, 5750_PLUS))
9306 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9308 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9309 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9310 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9311 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9312 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9319 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9320 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9321 static void __tg3_set_rx_mode(struct net_device *);
9323 /* tp->lock is held. */
9324 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9330 tg3_write_sig_pre_reset(tp, kind);
9332 tg3_abort_hw(tp, silent);
9333 err = tg3_chip_reset(tp);
9335 __tg3_set_mac_addr(tp, false);
9337 tg3_write_sig_legacy(tp, kind);
9338 tg3_write_sig_post_reset(tp, kind);
9341 /* Save the stats across chip resets... */
9342 tg3_get_nstats(tp, &tp->net_stats_prev);
9343 tg3_get_estats(tp, &tp->estats_prev);
9345 /* And make sure the next sample is new data */
9346 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9352 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9354 struct tg3 *tp = netdev_priv(dev);
9355 struct sockaddr *addr = p;
9357 bool skip_mac_1 = false;
9359 if (!is_valid_ether_addr(addr->sa_data))
9360 return -EADDRNOTAVAIL;
9362 eth_hw_addr_set(dev, addr->sa_data);
9364 if (!netif_running(dev))
9367 if (tg3_flag(tp, ENABLE_ASF)) {
9368 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9370 addr0_high = tr32(MAC_ADDR_0_HIGH);
9371 addr0_low = tr32(MAC_ADDR_0_LOW);
9372 addr1_high = tr32(MAC_ADDR_1_HIGH);
9373 addr1_low = tr32(MAC_ADDR_1_LOW);
9375 /* Skip MAC addr 1 if ASF is using it. */
9376 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9377 !(addr1_high == 0 && addr1_low == 0))
9380 spin_lock_bh(&tp->lock);
9381 __tg3_set_mac_addr(tp, skip_mac_1);
9382 __tg3_set_rx_mode(dev);
9383 spin_unlock_bh(&tp->lock);
9388 /* tp->lock is held. */
9389 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9390 dma_addr_t mapping, u32 maxlen_flags,
9394 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9395 ((u64) mapping >> 32));
9397 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9398 ((u64) mapping & 0xffffffff));
9400 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9403 if (!tg3_flag(tp, 5705_PLUS))
9405 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9410 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9414 if (!tg3_flag(tp, ENABLE_TSS)) {
9415 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9416 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9417 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9419 tw32(HOSTCC_TXCOL_TICKS, 0);
9420 tw32(HOSTCC_TXMAX_FRAMES, 0);
9421 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9423 for (; i < tp->txq_cnt; i++) {
9426 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9427 tw32(reg, ec->tx_coalesce_usecs);
9428 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9429 tw32(reg, ec->tx_max_coalesced_frames);
9430 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9431 tw32(reg, ec->tx_max_coalesced_frames_irq);
9435 for (; i < tp->irq_max - 1; i++) {
9436 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9437 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9438 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
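/* The per-vector coalescing registers sit at a fixed 0x18-byte stride from
 * the *_VEC1 base, so iteration i above programs
 * HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18 and friends; the trailing loop zeroes
 * the same registers for any vectors beyond txq_cnt.
 */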
9442 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9445 u32 limit = tp->rxq_cnt;
9447 if (!tg3_flag(tp, ENABLE_RSS)) {
9448 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9449 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9450 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9453 tw32(HOSTCC_RXCOL_TICKS, 0);
9454 tw32(HOSTCC_RXMAX_FRAMES, 0);
9455 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9458 for (; i < limit; i++) {
9461 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9462 tw32(reg, ec->rx_coalesce_usecs);
9463 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9464 tw32(reg, ec->rx_max_coalesced_frames);
9465 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9466 tw32(reg, ec->rx_max_coalesced_frames_irq);
9469 for (; i < tp->irq_max - 1; i++) {
9470 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9471 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9472 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9476 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9478 tg3_coal_tx_init(tp, ec);
9479 tg3_coal_rx_init(tp, ec);
9481 if (!tg3_flag(tp, 5705_PLUS)) {
9482 u32 val = ec->stats_block_coalesce_usecs;
9484 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9485 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9490 tw32(HOSTCC_STAT_COAL_TICKS, val);
9494 /* tp->lock is held. */
9495 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9499 /* Disable all transmit rings but the first. */
9500 if (!tg3_flag(tp, 5705_PLUS))
9501 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9502 else if (tg3_flag(tp, 5717_PLUS))
9503 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9504 else if (tg3_flag(tp, 57765_CLASS) ||
9505 tg3_asic_rev(tp) == ASIC_REV_5762)
9506 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9508 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9510 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9511 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9512 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9513 BDINFO_FLAGS_DISABLED);
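/* Each ring control block (RCB) is a TG3_BDINFO_SIZE-byte record in NIC
 * SRAM, and writing BDINFO_FLAGS_DISABLED into its maxlen/flags word shuts
 * the ring off; the limit computed above only selects how many send RCBs
 * the given chip family exposes (16, 4, 2, or 1).
 */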
9516 /* tp->lock is held. */
9517 static void tg3_tx_rcbs_init(struct tg3 *tp)
9520 u32 txrcb = NIC_SRAM_SEND_RCB;
9522 if (tg3_flag(tp, ENABLE_TSS))
9525 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9526 struct tg3_napi *tnapi = &tp->napi[i];
9528 if (!tnapi->tx_ring)
9531 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9532 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9533 NIC_SRAM_TX_BUFFER_DESC);
9537 /* tp->lock is held. */
9538 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9539 {
9540 u32 rxrcb, limit;
9542 /* Disable all receive return rings but the first. */
9543 if (tg3_flag(tp, 5717_PLUS))
9544 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9545 else if (!tg3_flag(tp, 5705_PLUS))
9546 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9547 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9548 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9549 tg3_flag(tp, 57765_CLASS))
9550 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9551 else
9552 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9554 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9555 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9556 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9557 BDINFO_FLAGS_DISABLED);
9558 }
9560 /* tp->lock is held. */
9561 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9562 {
9563 int i = 0;
9564 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9566 if (tg3_flag(tp, ENABLE_RSS))
9567 i++;
9569 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9570 struct tg3_napi *tnapi = &tp->napi[i];
9572 if (!tnapi->rx_rcb)
9573 continue;
9575 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9576 (tp->rx_ret_ring_mask + 1) <<
9577 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9578 }
9579 }
9581 /* tp->lock is held. */
9582 static void tg3_rings_reset(struct tg3 *tp)
9583 {
9584 int i;
9585 u32 stblk;
9586 struct tg3_napi *tnapi = &tp->napi[0];
9588 tg3_tx_rcbs_disable(tp);
9590 tg3_rx_ret_rcbs_disable(tp);
9592 /* Disable interrupts */
9593 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9594 tp->napi[0].chk_msi_cnt = 0;
9595 tp->napi[0].last_rx_cons = 0;
9596 tp->napi[0].last_tx_cons = 0;
9598 /* Zero mailbox registers. */
9599 if (tg3_flag(tp, SUPPORT_MSIX)) {
9600 for (i = 1; i < tp->irq_max; i++) {
9601 tp->napi[i].tx_prod = 0;
9602 tp->napi[i].tx_cons = 0;
9603 if (tg3_flag(tp, ENABLE_TSS))
9604 tw32_mailbox(tp->napi[i].prodmbox, 0);
9605 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9606 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9607 tp->napi[i].chk_msi_cnt = 0;
9608 tp->napi[i].last_rx_cons = 0;
9609 tp->napi[i].last_tx_cons = 0;
9610 }
9611 if (!tg3_flag(tp, ENABLE_TSS))
9612 tw32_mailbox(tp->napi[0].prodmbox, 0);
9613 } else {
9614 tp->napi[0].tx_prod = 0;
9615 tp->napi[0].tx_cons = 0;
9616 tw32_mailbox(tp->napi[0].prodmbox, 0);
9617 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9618 }
9620 /* Make sure the NIC-based send BD rings are disabled. */
9621 if (!tg3_flag(tp, 5705_PLUS)) {
9622 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9623 for (i = 0; i < 16; i++)
9624 tw32_tx_mbox(mbox + i * 8, 0);
9627 /* Clear status block in ram. */
9628 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9630 /* Set status block DMA address */
9631 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9632 ((u64) tnapi->status_mapping >> 32));
9633 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9634 ((u64) tnapi->status_mapping & 0xffffffff));
9636 stblk = HOSTCC_STATBLCK_RING1;
9638 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9639 u64 mapping = (u64)tnapi->status_mapping;
9640 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9641 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9642 stblk += 8;
9644 /* Clear status block in ram. */
9645 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9646 }
9648 tg3_tx_rcbs_init(tp);
9649 tg3_rx_ret_rcbs_init(tp);
9650 }
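/* Illustrative sketch (not part of the driver): the status-block DMA
 * address above is programmed as two 32-bit halves via the
 * TG3_64BIT_REG_HIGH/TG3_64BIT_REG_LOW offsets. A minimal stand-alone
 * version of that split:
 */
#if 0
#include <stdint.h>

static void split_dma_addr(uint64_t mapping, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(mapping >> 32);
	*lo = (uint32_t)(mapping & 0xffffffff);
}
#endif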
9652 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9654 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9656 if (!tg3_flag(tp, 5750_PLUS) ||
9657 tg3_flag(tp, 5780_CLASS) ||
9658 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9659 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9660 tg3_flag(tp, 57765_PLUS))
9661 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9662 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9663 tg3_asic_rev(tp) == ASIC_REV_5787)
9664 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9666 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9668 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9669 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9671 val = min(nic_rep_thresh, host_rep_thresh);
9672 tw32(RCVBDI_STD_THRESH, val);
9674 if (tg3_flag(tp, 57765_PLUS))
9675 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9677 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9678 return;
9680 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9682 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9684 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9685 tw32(RCVBDI_JUMBO_THRESH, val);
9687 if (tg3_flag(tp, 57765_PLUS))
9688 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9689 }
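/* Worked example (illustrative only; input values are assumed): the
 * standard-ring threshold written above is the smaller of the NIC-side
 * limit min(bdcache_maxcnt / 2, rx_std_max_post) and the host-side limit
 * max(rx_pending / 8, 1). With rx_std_max_post == 8 and rx_pending == 200,
 * the host limit is 25 and the NIC limit caps the result at 8.
 */
#if 0
#include <stdint.h>

static uint32_t rxbd_std_thresh(uint32_t bdcache_maxcnt,
				uint32_t rx_std_max_post, uint32_t rx_pending)
{
	uint32_t nic_rep_thresh = bdcache_maxcnt / 2;
	uint32_t host_rep_thresh = rx_pending / 8 ? rx_pending / 8 : 1;

	if (nic_rep_thresh > rx_std_max_post)
		nic_rep_thresh = rx_std_max_post;
	return nic_rep_thresh < host_rep_thresh ? nic_rep_thresh
						: host_rep_thresh;
}
#endif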
9691 static inline u32 calc_crc(unsigned char *buf, int len)
9692 {
9693 u32 reg;
9694 u32 tmp;
9695 int j, k;
9697 reg = 0xffffffff;
9699 for (j = 0; j < len; j++) {
9700 reg ^= buf[j];
9702 for (k = 0; k < 8; k++) {
9703 tmp = reg & 0x01;
9705 reg >>= 1;
9707 if (tmp)
9708 reg ^= CRC32_POLY_LE;
9709 }
9710 }
9712 return ~reg;
9713 }
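/* calc_crc() is the standard bit-reflected CRC-32 (the Ethernet FCS
 * polynomial); the hash filter below only consumes a few bits of the
 * result. A stand-alone copy that can be compiled and tested in
 * userspace (CRC32_POLY_LE is 0xedb88320):
 */
#if 0
#include <stdint.h>

static uint32_t crc32_le_demo(const unsigned char *buf, int len)
{
	uint32_t reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			uint32_t lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320; /* CRC32_POLY_LE */
		}
	}
	return ~reg;
}
#endif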
9715 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9716 {
9717 /* accept or reject all multicast frames */
9718 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9719 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9720 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9721 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9722 }
9724 static void __tg3_set_rx_mode(struct net_device *dev)
9725 {
9726 struct tg3 *tp = netdev_priv(dev);
9727 u32 rx_mode;
9729 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9730 RX_MODE_KEEP_VLAN_TAG);
9732 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9733 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9734 * flag clear.
9735 */
9736 if (!tg3_flag(tp, ENABLE_ASF))
9737 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9738 #endif
9740 if (dev->flags & IFF_PROMISC) {
9741 /* Promiscuous mode. */
9742 rx_mode |= RX_MODE_PROMISC;
9743 } else if (dev->flags & IFF_ALLMULTI) {
9744 /* Accept all multicast. */
9745 tg3_set_multi(tp, 1);
9746 } else if (netdev_mc_empty(dev)) {
9747 /* Reject all multicast. */
9748 tg3_set_multi(tp, 0);
9749 } else {
9750 /* Accept one or more multicast(s). */
9751 struct netdev_hw_addr *ha;
9752 u32 mc_filter[4] = { 0, };
9753 u32 regidx;
9754 u32 bit;
9755 u32 crc;
9757 netdev_for_each_mc_addr(ha, dev) {
9758 crc = calc_crc(ha->addr, ETH_ALEN);
9759 bit = ~crc & 0x7f;
9760 regidx = (bit & 0x60) >> 5;
9761 bit &= 0x1f;
9762 mc_filter[regidx] |= (1 << bit);
9763 }
9765 tw32(MAC_HASH_REG_0, mc_filter[0]);
9766 tw32(MAC_HASH_REG_1, mc_filter[1]);
9767 tw32(MAC_HASH_REG_2, mc_filter[2]);
9768 tw32(MAC_HASH_REG_3, mc_filter[3]);
9769 }
9771 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9772 rx_mode |= RX_MODE_PROMISC;
9773 } else if (!(dev->flags & IFF_PROMISC)) {
9774 /* Add all entries into the mac addr filter list */
9775 int i = 0;
9776 struct netdev_hw_addr *ha;
9778 netdev_for_each_uc_addr(ha, dev) {
9779 __tg3_set_one_mac_addr(tp, ha->addr,
9780 i + TG3_UCAST_ADDR_IDX(tp));
9781 i++;
9782 }
9783 }
9785 if (rx_mode != tp->rx_mode) {
9786 tp->rx_mode = rx_mode;
9787 tw32_f(MAC_RX_MODE, rx_mode);
9788 udelay(10);
9789 }
9790 }
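/* Illustrative sketch: how a multicast address is folded into the four
 * 32-bit MAC_HASH_REG_x registers above. The low 7 bits of the inverted
 * CRC pick one of 128 hash buckets; bits 6:5 select the register and
 * bits 4:0 the bit within it. crc32_le_demo() is the userspace CRC
 * sketch shown after calc_crc() above; the function name here is
 * invented.
 */
#if 0
static void hash_bucket(const unsigned char *mc_addr, /* ETH_ALEN bytes */
			unsigned int *regidx, unsigned int *bitpos)
{
	uint32_t crc = crc32_le_demo(mc_addr, 6);
	uint32_t bit = ~crc & 0x7f;	/* 7-bit bucket index */

	*regidx = (bit & 0x60) >> 5;	/* which MAC_HASH_REG_x */
	*bitpos = bit & 0x1f;		/* which bit inside it */
}
#endif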
9792 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9793 {
9794 int i;
9796 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9797 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9798 }
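/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so the default
 * table spreads the TG3_RSS_INDIR_TBL_SIZE slots round-robin across the
 * enabled rx queues. Equivalent stand-alone sketch:
 */
#if 0
static void rss_default_table(unsigned char *tbl, int size, unsigned int qcnt)
{
	int i;

	for (i = 0; i < size; i++)
		tbl[i] = i % qcnt; /* same as ethtool_rxfh_indir_default() */
}
#endif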
9800 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9801 {
9802 int i;
9804 if (!tg3_flag(tp, SUPPORT_MSIX))
9805 return;
9807 if (tp->rxq_cnt == 1) {
9808 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9809 return;
9810 }
9812 /* Validate table against current IRQ count */
9813 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9814 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9815 break;
9816 }
9818 if (i != TG3_RSS_INDIR_TBL_SIZE)
9819 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9820 }
9822 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9823 {
9824 int i = 0;
9825 u32 reg = MAC_RSS_INDIR_TBL_0;
9827 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9828 u32 val = tp->rss_ind_tbl[i];
9829 i++;
9830 for (; i % 8; i++) {
9831 val <<= 4;
9832 val |= tp->rss_ind_tbl[i];
9833 }
9835 tw32(reg, val);
9836 reg += 4;
9837 }
9838 }
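/* The indirection table is written 8 entries per 32-bit register, 4 bits
 * per entry, with the first entry ending up in the most significant
 * nibble. A stand-alone version of the packing loop above:
 */
#if 0
static uint32_t rss_pack8(const unsigned char *tbl /* 8 entries, each < 16 */)
{
	uint32_t val = tbl[0];
	int i;

	for (i = 1; i < 8; i++) {
		val <<= 4;
		val |= tbl[i];
	}
	return val;
}
#endif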
9839 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9840 {
9841 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9842 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9843 else
9844 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9845 }
9847 /* tp->lock is held. */
9848 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9849 {
9850 u32 val, rdmac_mode;
9851 int i, err, limit;
9852 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9854 tg3_disable_ints(tp);
9856 tg3_stop_fw(tp);
9858 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9860 if (tg3_flag(tp, INIT_COMPLETE))
9861 tg3_abort_hw(tp, 1);
9863 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9864 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9865 tg3_phy_pull_config(tp);
9866 tg3_eee_pull_config(tp, NULL);
9867 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9870 /* Enable MAC control of LPI */
9871 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9872 tg3_setup_eee(tp);
9874 if (reset_phy)
9875 tg3_phy_reset(tp);
9877 err = tg3_chip_reset(tp);
9878 if (err)
9879 return err;
9881 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9883 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9884 val = tr32(TG3_CPMU_CTRL);
9885 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9886 tw32(TG3_CPMU_CTRL, val);
9888 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9889 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9890 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9891 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9893 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9894 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9895 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9896 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9898 val = tr32(TG3_CPMU_HST_ACC);
9899 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9900 val |= CPMU_HST_ACC_MACCLK_6_25;
9901 tw32(TG3_CPMU_HST_ACC, val);
9904 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9905 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9906 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9907 PCIE_PWR_MGMT_L1_THRESH_4MS;
9908 tw32(PCIE_PWR_MGMT_THRESH, val);
9910 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9911 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9913 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9915 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9916 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9919 if (tg3_flag(tp, L1PLLPD_EN)) {
9920 u32 grc_mode = tr32(GRC_MODE);
9922 /* Access the lower 1K of PL PCIE block registers. */
9923 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9924 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9926 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9927 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9928 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9930 tw32(GRC_MODE, grc_mode);
9933 if (tg3_flag(tp, 57765_CLASS)) {
9934 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9935 u32 grc_mode = tr32(GRC_MODE);
9937 /* Access the lower 1K of PL PCIE block registers. */
9938 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9939 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9941 val = tr32(TG3_PCIE_TLDLPL_PORT +
9942 TG3_PCIE_PL_LO_PHYCTL5);
9943 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9944 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9946 tw32(GRC_MODE, grc_mode);
9949 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9952 /* Fix transmit hangs */
9953 val = tr32(TG3_CPMU_PADRNG_CTL);
9954 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9955 tw32(TG3_CPMU_PADRNG_CTL, val);
9957 grc_mode = tr32(GRC_MODE);
9959 /* Access the lower 1K of DL PCIE block registers. */
9960 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9961 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9963 val = tr32(TG3_PCIE_TLDLPL_PORT +
9964 TG3_PCIE_DL_LO_FTSMAX);
9965 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9966 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9967 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9969 tw32(GRC_MODE, grc_mode);
9972 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9973 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9974 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9975 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9978 /* This works around an issue with Athlon chipsets on
9979 * B3 tigon3 silicon. This bit has no effect on any
9980 * other revision. But do not set this on PCI Express
9981 * chips and don't even touch the clocks if the CPMU is present.
9983 if (!tg3_flag(tp, CPMU_PRESENT)) {
9984 if (!tg3_flag(tp, PCI_EXPRESS))
9985 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9986 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9989 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9990 tg3_flag(tp, PCIX_MODE)) {
9991 val = tr32(TG3PCI_PCISTATE);
9992 val |= PCISTATE_RETRY_SAME_DMA;
9993 tw32(TG3PCI_PCISTATE, val);
9996 if (tg3_flag(tp, ENABLE_APE)) {
9997 /* Allow reads and writes to the
9998 * APE register and memory space.
9999 */
10000 val = tr32(TG3PCI_PCISTATE);
10001 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10002 PCISTATE_ALLOW_APE_SHMEM_WR |
10003 PCISTATE_ALLOW_APE_PSPACE_WR;
10004 tw32(TG3PCI_PCISTATE, val);
10007 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10008 /* Enable some hw fixes. */
10009 val = tr32(TG3PCI_MSI_DATA);
10010 val |= (1 << 26) | (1 << 28) | (1 << 29);
10011 tw32(TG3PCI_MSI_DATA, val);
10014 /* Descriptor ring init may make accesses to the
10015 * NIC SRAM area to setup the TX descriptors, so we
10016 * can only do this after the hardware has been
10017 * successfully reset.
10018 */
10019 err = tg3_init_rings(tp);
10020 if (err)
10021 return err;
10023 if (tg3_flag(tp, 57765_PLUS)) {
10024 val = tr32(TG3PCI_DMA_RW_CTRL) &
10025 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10026 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10027 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10028 if (!tg3_flag(tp, 57765_CLASS) &&
10029 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10030 tg3_asic_rev(tp) != ASIC_REV_5762)
10031 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10032 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10033 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10034 tg3_asic_rev(tp) != ASIC_REV_5761) {
10035 /* This value is determined during the probe time DMA
10036 * engine test, tg3_test_dma.
10037 */
10038 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10041 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10042 GRC_MODE_4X_NIC_SEND_RINGS |
10043 GRC_MODE_NO_TX_PHDR_CSUM |
10044 GRC_MODE_NO_RX_PHDR_CSUM);
10045 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10047 /* Pseudo-header checksum is done by hardware logic and not
10048 * the offload processors, so make the chip do the pseudo-
10049 * header checksums on receive. For transmit it is more
10050 * convenient to do the pseudo-header checksum in software
10051 * as Linux does that on transmit for us in all cases.
10052 */
10053 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10055 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10057 tw32(TG3_RX_PTP_CTL,
10058 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10060 if (tg3_flag(tp, PTP_CAPABLE))
10061 val |= GRC_MODE_TIME_SYNC_ENABLE;
10063 tw32(GRC_MODE, tp->grc_mode | val);
10065 /* On one of the AMD platforms, the MRRS is restricted to 4000 because
10066 * of a south bridge limitation. As a workaround, the driver sets the
10067 * MRRS to 2048 instead of the default 4096.
10068 */
10069 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10070 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10071 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10072 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10075 /* Setup the timer prescaler register. Clock is always 66 MHz. */
10076 val = tr32(GRC_MISC_CFG);
10077 val &= ~0xff;
10078 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10079 tw32(GRC_MISC_CFG, val);
10081 /* Initialize MBUF/DESC pool. */
10082 if (tg3_flag(tp, 5750_PLUS)) {
10083 /* Do nothing. */
10084 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10085 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10086 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10087 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10089 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10090 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10091 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10092 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10095 fw_len = tp->fw_len;
10096 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10097 tw32(BUFMGR_MB_POOL_ADDR,
10098 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10099 tw32(BUFMGR_MB_POOL_SIZE,
10100 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10103 if (tp->dev->mtu <= ETH_DATA_LEN) {
10104 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10105 tp->bufmgr_config.mbuf_read_dma_low_water);
10106 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10107 tp->bufmgr_config.mbuf_mac_rx_low_water);
10108 tw32(BUFMGR_MB_HIGH_WATER,
10109 tp->bufmgr_config.mbuf_high_water);
10110 } else {
10111 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10112 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10113 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10114 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10115 tw32(BUFMGR_MB_HIGH_WATER,
10116 tp->bufmgr_config.mbuf_high_water_jumbo);
10118 tw32(BUFMGR_DMA_LOW_WATER,
10119 tp->bufmgr_config.dma_low_water);
10120 tw32(BUFMGR_DMA_HIGH_WATER,
10121 tp->bufmgr_config.dma_high_water);
10123 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10124 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10125 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10126 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10127 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10128 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10129 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10130 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10131 tw32(BUFMGR_MODE, val);
10132 for (i = 0; i < 2000; i++) {
10133 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10134 break;
10135 udelay(10);
10136 }
10137 if (i >= 2000) {
10138 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10139 return -ENODEV;
10140 }
10142 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10143 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10145 tg3_setup_rxbd_thresholds(tp);
10147 /* Initialize TG3_BDINFO's at:
10148 * RCVDBDI_STD_BD: standard eth size rx ring
10149 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10150 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10153 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10154 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10155 * ring attribute flags
10156 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10158 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10159 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10161 * The size of each ring is fixed in the firmware, but the location is
10162 * configurable.
10163 */
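/* Illustrative layout sketch based on the TG3_BDINFO_* description above
 * (the struct and field names are invented; only the field order and
 * meanings come from the comment):
 */
#if 0
struct tg3_bdinfo_layout {
	uint32_t host_addr_high;	/* TG3_BDINFO_HOST_ADDR, high half */
	uint32_t host_addr_low;		/* TG3_BDINFO_HOST_ADDR, low half */
	uint32_t maxlen_flags;		/* (rx max buffer size << 16) | flags */
	uint32_t nic_addr;		/* descriptor location in NIC SRAM */
};
#endif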
10164 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10165 ((u64) tpr->rx_std_mapping >> 32));
10166 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10167 ((u64) tpr->rx_std_mapping & 0xffffffff));
10168 if (!tg3_flag(tp, 5717_PLUS))
10169 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10170 NIC_SRAM_RX_BUFFER_DESC);
10172 /* Disable the mini ring */
10173 if (!tg3_flag(tp, 5705_PLUS))
10174 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10175 BDINFO_FLAGS_DISABLED);
10177 /* Program the jumbo buffer descriptor ring control
10178 * blocks on those devices that have them.
10180 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10181 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10183 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10184 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10185 ((u64) tpr->rx_jmb_mapping >> 32));
10186 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10187 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10188 val = TG3_RX_JMB_RING_SIZE(tp) <<
10189 BDINFO_FLAGS_MAXLEN_SHIFT;
10190 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10191 val | BDINFO_FLAGS_USE_EXT_RECV);
10192 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10193 tg3_flag(tp, 57765_CLASS) ||
10194 tg3_asic_rev(tp) == ASIC_REV_5762)
10195 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10196 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10197 } else {
10198 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10199 BDINFO_FLAGS_DISABLED);
10200 }
10201 }
10202 if (tg3_flag(tp, 57765_PLUS)) {
10203 val = TG3_RX_STD_RING_SIZE(tp);
10204 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10205 val |= (TG3_RX_STD_DMA_SZ << 2);
10206 } else if (tg3_flag(tp, 5705_PLUS))
10207 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10208 else
10209 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10211 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10213 tpr->rx_std_prod_idx = tp->rx_pending;
10214 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10216 tpr->rx_jmb_prod_idx =
10217 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10218 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10220 tg3_rings_reset(tp);
10222 /* Initialize MAC address and backoff seed. */
10223 __tg3_set_mac_addr(tp, false);
10225 /* MTU + ethernet header + FCS + optional VLAN tag */
10226 tw32(MAC_RX_MTU_SIZE,
10227 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10229 /* The slot time is changed by tg3_setup_phy if we
10230 * run at gigabit with half duplex.
10231 */
10232 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10233 (6 << TX_LENGTHS_IPG_SHIFT) |
10234 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10236 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10237 tg3_asic_rev(tp) == ASIC_REV_5762)
10238 val |= tr32(MAC_TX_LENGTHS) &
10239 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10240 TX_LENGTHS_CNT_DWN_VAL_MSK);
10242 tw32(MAC_TX_LENGTHS, val);
10244 /* Receive rules. */
10245 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10246 tw32(RCVLPC_CONFIG, 0x0181);
10248 /* Calculate RDMAC_MODE setting early, we need it to determine
10249 * the RCVLPC_STATE_ENABLE mask.
10250 */
10251 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10252 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10253 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10254 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10255 RDMAC_MODE_LNGREAD_ENAB);
10257 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10258 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10260 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10261 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10262 tg3_asic_rev(tp) == ASIC_REV_57780)
10263 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10264 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10265 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10267 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10268 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10269 if (tg3_flag(tp, TSO_CAPABLE)) {
10270 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10271 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10272 !tg3_flag(tp, IS_5788)) {
10273 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10277 if (tg3_flag(tp, PCI_EXPRESS))
10278 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10280 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10282 if (tp->dev->mtu <= ETH_DATA_LEN) {
10283 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10284 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10288 if (tg3_flag(tp, HW_TSO_1) ||
10289 tg3_flag(tp, HW_TSO_2) ||
10290 tg3_flag(tp, HW_TSO_3))
10291 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10293 if (tg3_flag(tp, 57765_PLUS) ||
10294 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10295 tg3_asic_rev(tp) == ASIC_REV_57780)
10296 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10298 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10299 tg3_asic_rev(tp) == ASIC_REV_5762)
10300 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10302 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10303 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10304 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10305 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10306 tg3_flag(tp, 57765_PLUS)) {
10309 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10310 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10312 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10314 val = tr32(tgtreg);
10315 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10316 tg3_asic_rev(tp) == ASIC_REV_5762) {
10317 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10318 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10319 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10320 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10321 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10322 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10324 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10327 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10328 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10329 tg3_asic_rev(tp) == ASIC_REV_5762) {
10332 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10333 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10335 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10337 val = tr32(tgtreg);
10339 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10340 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10343 /* Receive/send statistics. */
10344 if (tg3_flag(tp, 5750_PLUS)) {
10345 val = tr32(RCVLPC_STATS_ENABLE);
10346 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10347 tw32(RCVLPC_STATS_ENABLE, val);
10348 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10349 tg3_flag(tp, TSO_CAPABLE)) {
10350 val = tr32(RCVLPC_STATS_ENABLE);
10351 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10352 tw32(RCVLPC_STATS_ENABLE, val);
10353 } else {
10354 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10355 }
10356 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10357 tw32(SNDDATAI_STATSENAB, 0xffffff);
10358 tw32(SNDDATAI_STATSCTRL,
10359 (SNDDATAI_SCTRL_ENABLE |
10360 SNDDATAI_SCTRL_FASTUPD));
10362 /* Setup host coalescing engine. */
10363 tw32(HOSTCC_MODE, 0);
10364 for (i = 0; i < 2000; i++) {
10365 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10366 break;
10367 udelay(10);
10368 }
10370 __tg3_set_coalesce(tp, &tp->coal);
10372 if (!tg3_flag(tp, 5705_PLUS)) {
10373 /* Status/statistics block address. See tg3_timer,
10374 * the tg3_periodic_fetch_stats call there, and
10375 * tg3_get_stats to see how this works for 5705/5750 chips.
10376 */
10377 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10378 ((u64) tp->stats_mapping >> 32));
10379 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10380 ((u64) tp->stats_mapping & 0xffffffff));
10381 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10383 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10385 /* Clear statistics and status block memory areas */
10386 for (i = NIC_SRAM_STATS_BLK;
10387 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10388 i += sizeof(u32)) {
10389 tg3_write_mem(tp, i, 0);
10394 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10396 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10397 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10398 if (!tg3_flag(tp, 5705_PLUS))
10399 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10401 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10402 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10403 /* reset to prevent losing 1st rx packet intermittently */
10404 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10408 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10409 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10410 MAC_MODE_FHDE_ENABLE;
10411 if (tg3_flag(tp, ENABLE_APE))
10412 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10413 if (!tg3_flag(tp, 5705_PLUS) &&
10414 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10415 tg3_asic_rev(tp) != ASIC_REV_5700)
10416 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10420 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10421 * If TG3_FLAG_IS_NIC is zero, we should read the
10422 * register to preserve the GPIO settings for LOMs. The GPIOs,
10423 * whether used as inputs or outputs, are set by boot code after
10426 if (!tg3_flag(tp, IS_NIC)) {
10429 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10430 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10431 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10433 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10434 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10435 GRC_LCLCTRL_GPIO_OUTPUT3;
10437 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10438 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10440 tp->grc_local_ctrl &= ~gpio_mask;
10441 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10443 /* GPIO1 must be driven high for eeprom write protect */
10444 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10445 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10446 GRC_LCLCTRL_GPIO_OUTPUT1);
10448 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10451 if (tg3_flag(tp, USING_MSIX)) {
10452 val = tr32(MSGINT_MODE);
10453 val |= MSGINT_MODE_ENABLE;
10454 if (tp->irq_cnt > 1)
10455 val |= MSGINT_MODE_MULTIVEC_EN;
10456 if (!tg3_flag(tp, 1SHOT_MSI))
10457 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10458 tw32(MSGINT_MODE, val);
10461 if (!tg3_flag(tp, 5705_PLUS)) {
10462 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10466 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10467 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10468 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10469 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10470 WDMAC_MODE_LNGREAD_ENAB);
10472 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10473 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10474 if (tg3_flag(tp, TSO_CAPABLE) &&
10475 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10476 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10478 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10479 !tg3_flag(tp, IS_5788)) {
10480 val |= WDMAC_MODE_RX_ACCEL;
10484 /* Enable host coalescing bug fix */
10485 if (tg3_flag(tp, 5755_PLUS))
10486 val |= WDMAC_MODE_STATUS_TAG_FIX;
10488 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10489 val |= WDMAC_MODE_BURST_ALL_DATA;
10491 tw32_f(WDMAC_MODE, val);
10494 if (tg3_flag(tp, PCIX_MODE)) {
10497 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10499 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10500 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10501 pcix_cmd |= PCI_X_CMD_READ_2K;
10502 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10503 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10504 pcix_cmd |= PCI_X_CMD_READ_2K;
10506 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10510 tw32_f(RDMAC_MODE, rdmac_mode);
10513 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10514 tg3_asic_rev(tp) == ASIC_REV_5720) {
10515 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10516 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10517 break;
10518 }
10519 if (i < TG3_NUM_RDMA_CHANNELS) {
10520 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10521 val |= tg3_lso_rd_dma_workaround_bit(tp);
10522 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10523 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10527 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10528 if (!tg3_flag(tp, 5705_PLUS))
10529 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10531 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10532 tw32(SNDDATAC_MODE,
10533 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10535 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10537 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10538 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10539 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10540 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10541 val |= RCVDBDI_MODE_LRG_RING_SZ;
10542 tw32(RCVDBDI_MODE, val);
10543 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10544 if (tg3_flag(tp, HW_TSO_1) ||
10545 tg3_flag(tp, HW_TSO_2) ||
10546 tg3_flag(tp, HW_TSO_3))
10547 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10548 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10549 if (tg3_flag(tp, ENABLE_TSS))
10550 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10551 tw32(SNDBDI_MODE, val);
10552 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10554 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10555 err = tg3_load_5701_a0_firmware_fix(tp);
10556 if (err)
10557 return err;
10558 }
10560 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10561 /* Ignore any errors for the firmware download. If download
10562 * fails, the device will operate with EEE disabled
10563 */
10564 tg3_load_57766_firmware(tp);
10565 }
10567 if (tg3_flag(tp, TSO_CAPABLE)) {
10568 err = tg3_load_tso_firmware(tp);
10569 if (err)
10570 return err;
10571 }
10573 tp->tx_mode = TX_MODE_ENABLE;
10575 if (tg3_flag(tp, 5755_PLUS) ||
10576 tg3_asic_rev(tp) == ASIC_REV_5906)
10577 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10579 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10580 tg3_asic_rev(tp) == ASIC_REV_5762) {
10581 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10582 tp->tx_mode &= ~val;
10583 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10586 tw32_f(MAC_TX_MODE, tp->tx_mode);
10587 udelay(100);
10589 if (tg3_flag(tp, ENABLE_RSS)) {
10590 u32 rss_key[10];
10592 tg3_rss_write_indir_tbl(tp);
10594 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10596 for (i = 0; i < 10; i++)
10597 tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10598 }
10600 tp->rx_mode = RX_MODE_ENABLE;
10601 if (tg3_flag(tp, 5755_PLUS))
10602 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10604 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10605 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10607 if (tg3_flag(tp, ENABLE_RSS))
10608 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10609 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10610 RX_MODE_RSS_IPV6_HASH_EN |
10611 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10612 RX_MODE_RSS_IPV4_HASH_EN |
10613 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10615 tw32_f(MAC_RX_MODE, tp->rx_mode);
10618 tw32(MAC_LED_CTRL, tp->led_ctrl);
10620 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10621 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10622 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10625 tw32_f(MAC_RX_MODE, tp->rx_mode);
10628 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10629 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10630 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10631 /* Set drive transmission level to 1.2V */
10632 /* only if the signal pre-emphasis bit is not set */
10633 val = tr32(MAC_SERDES_CFG);
10636 tw32(MAC_SERDES_CFG, val);
10638 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10639 tw32(MAC_SERDES_CFG, 0x616000);
10642 /* Prevent chip from dropping frames when flow control
10643 * is enabled.
10644 */
10645 if (tg3_flag(tp, 57765_CLASS))
10646 val = 1;
10647 else
10648 val = 2;
10649 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10651 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10652 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10653 /* Use hardware link auto-negotiation */
10654 tg3_flag_set(tp, HW_AUTONEG);
10657 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10658 tg3_asic_rev(tp) == ASIC_REV_5714) {
10661 tmp = tr32(SERDES_RX_CTRL);
10662 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10663 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10664 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10665 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10668 if (!tg3_flag(tp, USE_PHYLIB)) {
10669 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10670 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10672 err = tg3_setup_phy(tp, false);
10676 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10677 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10680 /* Clear CRC stats. */
10681 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10682 tg3_writephy(tp, MII_TG3_TEST1,
10683 tmp | MII_TG3_TEST1_CRC_EN);
10684 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10689 __tg3_set_rx_mode(tp->dev);
10691 /* Initialize receive rules. */
10692 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10693 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10694 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10695 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10697 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10698 limit = 8;
10699 else
10700 limit = 16;
10701 if (tg3_flag(tp, ENABLE_ASF))
10702 limit -= 4;
10703 switch (limit) {
10704 case 16:
10705 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10706 fallthrough;
10707 case 15:
10708 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10709 fallthrough;
10710 case 14:
10711 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10712 fallthrough;
10713 case 13:
10714 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10715 fallthrough;
10716 case 12:
10717 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10718 fallthrough;
10719 case 11:
10720 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10721 fallthrough;
10722 case 10:
10723 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10724 fallthrough;
10725 case 9:
10726 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10727 fallthrough;
10728 case 8:
10729 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10730 fallthrough;
10731 case 7:
10732 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10733 fallthrough;
10734 case 6:
10735 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10736 fallthrough;
10737 case 5:
10738 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10739 fallthrough;
10740 case 4:
10741 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10742 case 3:
10743 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10744 case 2:
10745 case 1:
10746 default:
10747 break;
10748 }
10751 if (tg3_flag(tp, ENABLE_APE))
10752 /* Write our heartbeat update interval to APE. */
10753 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10754 APE_HOST_HEARTBEAT_INT_5SEC);
10756 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10758 return 0;
10759 }
10761 /* Called at device open time to get the chip ready for
10762 * packet processing. Invoked with tp->lock held.
10764 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10766 /* Chip may have been just powered on. If so, the boot code may still
10767 * be running initialization. Wait for it to finish to avoid races in
10768 * accessing the hardware.
10770 tg3_enable_register_access(tp);
10771 tg3_poll_fw(tp);
10773 tg3_switch_clocks(tp);
10775 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10777 return tg3_reset_hw(tp, reset_phy);
10778 }
10780 #ifdef CONFIG_TIGON3_HWMON
10781 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10782 {
10783 u32 off, len = TG3_OCIR_LEN;
10784 int i;
10786 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10787 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10789 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10790 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10791 memset(ocir, 0, len);
10792 }
10793 }
10795 /* sysfs attributes for hwmon */
10796 static ssize_t tg3_show_temp(struct device *dev,
10797 struct device_attribute *devattr, char *buf)
10799 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10800 struct tg3 *tp = dev_get_drvdata(dev);
10801 u32 temperature;
10803 spin_lock_bh(&tp->lock);
10804 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10805 sizeof(temperature));
10806 spin_unlock_bh(&tp->lock);
10807 return sprintf(buf, "%u\n", temperature * 1000);
10808 }
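/* The APE scratchpad reports whole degrees Celsius; multiplying by 1000
 * converts to the millidegree unit hwmon expects. A userspace reader
 * (illustrative only; the sysfs path is an assumption and the hwmon
 * index varies per system) just divides it back:
 */
#if 0
#include <stdio.h>

static int read_temp_mdeg(const char *path /* e.g. ".../temp1_input" */)
{
	FILE *f = fopen(path, "r");
	int mdeg = -1;

	if (f) {
		if (fscanf(f, "%d", &mdeg) != 1)
			mdeg = -1;
		fclose(f);
	}
	return mdeg; /* divide by 1000 for degrees Celsius */
}
#endif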
10811 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10812 TG3_TEMP_SENSOR_OFFSET);
10813 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10814 TG3_TEMP_CAUTION_OFFSET);
10815 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10816 TG3_TEMP_MAX_OFFSET);
10818 static struct attribute *tg3_attrs[] = {
10819 &sensor_dev_attr_temp1_input.dev_attr.attr,
10820 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10821 &sensor_dev_attr_temp1_max.dev_attr.attr,
10822 NULL
10823 };
10824 ATTRIBUTE_GROUPS(tg3);
10826 static void tg3_hwmon_close(struct tg3 *tp)
10828 if (tp->hwmon_dev) {
10829 hwmon_device_unregister(tp->hwmon_dev);
10830 tp->hwmon_dev = NULL;
10831 }
10832 }
10834 static void tg3_hwmon_open(struct tg3 *tp)
10835 {
10836 int i;
10837 u32 size = 0;
10838 struct pci_dev *pdev = tp->pdev;
10839 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10841 tg3_sd_scan_scratchpad(tp, ocirs);
10843 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10844 if (!ocirs[i].src_data_length)
10845 continue;
10847 size += ocirs[i].src_hdr_length;
10848 size += ocirs[i].src_data_length;
10849 }
10851 if (!size)
10852 return;
10854 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10855 tp, tg3_groups);
10856 if (IS_ERR(tp->hwmon_dev)) {
10857 tp->hwmon_dev = NULL;
10858 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10859 }
10860 }
10861 #else
10862 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10863 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10864 #endif /* CONFIG_TIGON3_HWMON */
10867 #define TG3_STAT_ADD32(PSTAT, REG) \
10868 do { u32 __val = tr32(REG); \
10869 (PSTAT)->low += __val; \
10870 if ((PSTAT)->low < __val) \
10871 (PSTAT)->high += 1; \
10872 } while (0)
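/* The hardware counters are 32-bit and wrap; TG3_STAT_ADD32 accumulates
 * them into a 64-bit software counter and detects low-word overflow with
 * an unsigned wraparound check (low < just-added value implies a carry).
 * Stand-alone equivalent for testing:
 */
#if 0
#include <stdint.h>

struct stat64_demo { uint32_t low, high; };

static void stat_add32(struct stat64_demo *s, uint32_t val)
{
	s->low += val;
	if (s->low < val)	/* unsigned overflow => carry into high */
		s->high += 1;
}
#endif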
10874 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10875 {
10876 struct tg3_hw_stats *sp = tp->hw_stats;
10878 if (!tp->link_up || tg3_flag(tp, RESET_TASK_PENDING))
10879 return;
10881 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10882 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10883 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10884 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10885 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10886 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10887 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10888 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10889 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10890 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10891 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10892 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10893 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10894 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10895 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10896 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10899 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10900 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10901 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10902 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10905 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10906 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10907 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10908 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10909 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10910 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10911 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10912 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10913 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10914 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10915 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10916 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10917 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10918 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10920 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10921 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10922 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10923 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10924 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10925 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10926 } else {
10927 u32 val = tr32(HOSTCC_FLOW_ATTN);
10928 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10929 if (val) {
10930 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10931 sp->rx_discards.low += val;
10932 if (sp->rx_discards.low < val)
10933 sp->rx_discards.high += 1;
10934 }
10935 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10936 }
10937 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10938 }
10940 static void tg3_chk_missed_msi(struct tg3 *tp)
10944 for (i = 0; i < tp->irq_cnt; i++) {
10945 struct tg3_napi *tnapi = &tp->napi[i];
10947 if (tg3_has_work(tnapi)) {
10948 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10949 tnapi->last_tx_cons == tnapi->tx_cons) {
10950 if (tnapi->chk_msi_cnt < 1) {
10951 tnapi->chk_msi_cnt++;
10957 tnapi->chk_msi_cnt = 0;
10958 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10959 tnapi->last_tx_cons = tnapi->tx_cons;
10963 static void tg3_timer(struct timer_list *t)
10965 struct tg3 *tp = from_timer(tp, t, timer);
10967 spin_lock(&tp->lock);
10969 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10970 spin_unlock(&tp->lock);
10971 goto restart_timer;
10974 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10975 tg3_flag(tp, 57765_CLASS))
10976 tg3_chk_missed_msi(tp);
10978 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10979 /* BCM4785: Flush posted writes from GbE to host memory. */
10983 if (!tg3_flag(tp, TAGGED_STATUS)) {
10984 /* All of this garbage is because when using non-tagged
10985 * IRQ status the mailbox/status_block protocol the chip
10986 * uses with the cpu is race prone.
10987 */
10988 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10989 tw32(GRC_LOCAL_CTRL,
10990 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10991 } else {
10992 tw32(HOSTCC_MODE, tp->coalesce_mode |
10993 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10994 }
10996 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10997 spin_unlock(&tp->lock);
10998 tg3_reset_task_schedule(tp);
10999 goto restart_timer;
11003 /* This part only runs once per second. */
11004 if (!--tp->timer_counter) {
11005 if (tg3_flag(tp, 5705_PLUS))
11006 tg3_periodic_fetch_stats(tp);
11008 if (tp->setlpicnt && !--tp->setlpicnt)
11009 tg3_phy_eee_enable(tp);
11011 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11015 mac_stat = tr32(MAC_STATUS);
11018 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11019 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11021 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11025 tg3_setup_phy(tp, false);
11026 } else if (tg3_flag(tp, POLL_SERDES)) {
11027 u32 mac_stat = tr32(MAC_STATUS);
11028 int need_setup = 0;
11031 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11034 if (!tp->link_up &&
11035 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11036 MAC_STATUS_SIGNAL_DET))) {
11040 if (!tp->serdes_counter) {
11043 ~MAC_MODE_PORT_MODE_MASK));
11045 tw32_f(MAC_MODE, tp->mac_mode);
11048 tg3_setup_phy(tp, false);
11050 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11051 tg3_flag(tp, 5780_CLASS)) {
11052 tg3_serdes_parallel_detect(tp);
11053 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11054 u32 cpmu = tr32(TG3_CPMU_STATUS);
11055 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11056 TG3_CPMU_STATUS_LINK_MASK);
11058 if (link_up != tp->link_up)
11059 tg3_setup_phy(tp, false);
11062 tp->timer_counter = tp->timer_multiplier;
11065 /* Heartbeat is only sent once every 2 seconds.
11067 * The heartbeat is to tell the ASF firmware that the host
11068 * driver is still alive. In the event that the OS crashes,
11069 * ASF needs to reset the hardware to free up the FIFO space
11070 * that may be filled with rx packets destined for the host.
11071 * If the FIFO is full, ASF will no longer function properly.
11073 * Unintended resets have been reported on real time kernels
11074 * where the timer doesn't run on time. Netpoll will also have
11075 * the same problem.
11077 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11078 * to check the ring condition when the heartbeat is expiring
11079 * before doing the reset. This will prevent most unintended
11080 * resets.
11081 */
11082 if (!--tp->asf_counter) {
11083 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11084 tg3_wait_for_event_ack(tp);
11086 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11087 FWCMD_NICDRV_ALIVE3);
11088 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11089 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11090 TG3_FW_UPDATE_TIMEOUT_SEC);
11092 tg3_generate_fw_event(tp);
11094 tp->asf_counter = tp->asf_multiplier;
11097 /* Update the APE heartbeat every 5 seconds.*/
11098 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11100 spin_unlock(&tp->lock);
11102 restart_timer:
11103 tp->timer.expires = jiffies + tp->timer_offset;
11104 add_timer(&tp->timer);
11107 static void tg3_timer_init(struct tg3 *tp)
11108 {
11109 if (tg3_flag(tp, TAGGED_STATUS) &&
11110 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11111 !tg3_flag(tp, 57765_CLASS))
11112 tp->timer_offset = HZ;
11113 else
11114 tp->timer_offset = HZ / 10;
11116 BUG_ON(tp->timer_offset > HZ);
11118 tp->timer_multiplier = (HZ / tp->timer_offset);
11119 tp->asf_multiplier = (HZ / tp->timer_offset) *
11120 TG3_FW_UPDATE_FREQ_SEC;
11122 timer_setup(&tp->timer, tg3_timer, 0);
11123 }
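/* Worked example (illustrative, assuming HZ == 250): on tagged-status
 * chips the timer fires once per second (timer_offset = HZ = 250
 * jiffies) and timer_multiplier = 1; otherwise it fires every 100 ms
 * (25 jiffies) and timer_multiplier = 10, so the once-per-second block
 * in tg3_timer() still runs at 1 Hz. asf_multiplier then stretches that
 * to the TG3_FW_UPDATE_FREQ_SEC heartbeat period.
 */
#if 0
static unsigned long timer_multiplier(unsigned long hz, unsigned long offset)
{
	return hz / offset; /* timer ticks per second of wall time */
}
#endif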
11125 static void tg3_timer_start(struct tg3 *tp)
11127 tp->asf_counter = tp->asf_multiplier;
11128 tp->timer_counter = tp->timer_multiplier;
11130 tp->timer.expires = jiffies + tp->timer_offset;
11131 add_timer(&tp->timer);
11134 static void tg3_timer_stop(struct tg3 *tp)
11136 del_timer_sync(&tp->timer);
11139 /* Restart hardware after configuration changes, self-test, etc.
11140 * Invoked with tp->lock held.
11142 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11143 __releases(tp->lock)
11144 __acquires(tp->lock)
11148 err = tg3_init_hw(tp, reset_phy);
11150 netdev_err(tp->dev,
11151 "Failed to re-initialize device, aborting\n");
11152 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11153 tg3_full_unlock(tp);
11154 tg3_timer_stop(tp);
11156 tg3_napi_enable(tp);
11157 dev_close(tp->dev);
11158 tg3_full_lock(tp, 0);
11163 static void tg3_reset_task(struct work_struct *work)
11165 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11169 tg3_full_lock(tp, 0);
11171 if (tp->pcierr_recovery || !netif_running(tp->dev)) {
11172 tg3_flag_clear(tp, RESET_TASK_PENDING);
11173 tg3_full_unlock(tp);
11178 tg3_full_unlock(tp);
11182 tg3_netif_stop(tp);
11184 tg3_full_lock(tp, 1);
11186 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11187 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11188 tp->write32_rx_mbox = tg3_write_flush_reg32;
11189 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11190 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11193 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11194 err = tg3_init_hw(tp, true);
11195 if (err) {
11196 tg3_full_unlock(tp);
11197 tp->irq_sync = 0;
11198 tg3_napi_enable(tp);
11199 /* Clear this flag so that tg3_reset_task_cancel() will not
11200 * call cancel_work_sync() and wait forever.
11201 */
11202 tg3_flag_clear(tp, RESET_TASK_PENDING);
11203 dev_close(tp->dev);
11204 goto out;
11205 }
11207 tg3_netif_start(tp);
11208 tg3_full_unlock(tp);
11210 tg3_flag_clear(tp, RESET_TASK_PENDING);
11215 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11216 {
11217 irq_handler_t fn;
11218 unsigned long flags;
11219 char *name;
11220 struct tg3_napi *tnapi = &tp->napi[irq_num];
11222 if (tp->irq_cnt == 1)
11223 name = tp->dev->name;
11224 else {
11225 name = &tnapi->irq_lbl[0];
11226 if (tnapi->tx_buffers && tnapi->rx_rcb)
11227 snprintf(name, IFNAMSIZ,
11228 "%s-txrx-%d", tp->dev->name, irq_num);
11229 else if (tnapi->tx_buffers)
11230 snprintf(name, IFNAMSIZ,
11231 "%s-tx-%d", tp->dev->name, irq_num);
11232 else if (tnapi->rx_rcb)
11233 snprintf(name, IFNAMSIZ,
11234 "%s-rx-%d", tp->dev->name, irq_num);
11236 snprintf(name, IFNAMSIZ,
11237 "%s-%d", tp->dev->name, irq_num);
11238 name[IFNAMSIZ-1] = 0;
11241 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11242 fn = tg3_msi;
11243 if (tg3_flag(tp, 1SHOT_MSI))
11244 fn = tg3_msi_1shot;
11245 flags = 0;
11246 } else {
11247 fn = tg3_interrupt;
11248 if (tg3_flag(tp, TAGGED_STATUS))
11249 fn = tg3_interrupt_tagged;
11250 flags = IRQF_SHARED;
11251 }
11253 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11254 }
11256 static int tg3_test_interrupt(struct tg3 *tp)
11258 struct tg3_napi *tnapi = &tp->napi[0];
11259 struct net_device *dev = tp->dev;
11260 int err, i, intr_ok = 0;
11261 u32 val;
11263 if (!netif_running(dev))
11264 return -ENODEV;
11266 tg3_disable_ints(tp);
11268 free_irq(tnapi->irq_vec, tnapi);
11270 /*
11271 * Turn off MSI one shot mode. Otherwise this test has no
11272 * observable way to know whether the interrupt was delivered.
11273 */
11274 if (tg3_flag(tp, 57765_PLUS)) {
11275 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11276 tw32(MSGINT_MODE, val);
11279 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11280 IRQF_SHARED, dev->name, tnapi);
11284 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11285 tg3_enable_ints(tp);
11287 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11290 for (i = 0; i < 5; i++) {
11291 u32 int_mbox, misc_host_ctrl;
11293 int_mbox = tr32_mailbox(tnapi->int_mbox);
11294 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11296 if ((int_mbox != 0) ||
11297 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11302 if (tg3_flag(tp, 57765_PLUS) &&
11303 tnapi->hw_status->status_tag != tnapi->last_tag)
11304 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11309 tg3_disable_ints(tp);
11311 free_irq(tnapi->irq_vec, tnapi);
11313 err = tg3_request_irq(tp, 0);
11319 /* Reenable MSI one shot mode. */
11320 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11321 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11322 tw32(MSGINT_MODE, val);
11330 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11331 * successfully restored
11333 static int tg3_test_msi(struct tg3 *tp)
11334 {
11335 int err;
11336 u16 pci_cmd;
11338 if (!tg3_flag(tp, USING_MSI))
11339 return 0;
11341 /* Turn off SERR reporting in case MSI terminates with Master
11342 * Abort.
11343 */
11344 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11345 pci_write_config_word(tp->pdev, PCI_COMMAND,
11346 pci_cmd & ~PCI_COMMAND_SERR);
11348 err = tg3_test_interrupt(tp);
11350 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11352 if (!err)
11353 return 0;
11355 /* other failures */
11356 if (err != -EIO)
11357 return err;
11359 /* MSI test failed, go back to INTx mode */
11360 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11361 "to INTx mode. Please report this failure to the PCI "
11362 "maintainer and include system chipset information\n");
11364 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11366 pci_disable_msi(tp->pdev);
11368 tg3_flag_clear(tp, USING_MSI);
11369 tp->napi[0].irq_vec = tp->pdev->irq;
11371 err = tg3_request_irq(tp, 0);
11375 /* Need to reset the chip because the MSI cycle may have terminated
11376 * with Master Abort.
11378 tg3_full_lock(tp, 1);
11380 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11381 err = tg3_init_hw(tp, true);
11383 tg3_full_unlock(tp);
11386 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11391 static int tg3_request_firmware(struct tg3 *tp)
11393 const struct tg3_firmware_hdr *fw_hdr;
11395 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11396 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11401 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11403 /* Firmware blob starts with version numbers, followed by
11404 * start address and _full_ length including BSS sections
11405 * (which must be longer than the actual data, of course).
11406 */
11408 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11409 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11410 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11411 tp->fw_len, tp->fw_needed);
11412 release_firmware(tp->fw);
11413 tp->fw = NULL;
11414 return -EINVAL;
11415 }
11417 /* We no longer need firmware; we have it. */
11418 tp->fw_needed = NULL;
11419 return 0;
11420 }
11422 static u32 tg3_irq_count(struct tg3 *tp)
11423 {
11424 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11426 if (irq_cnt > 1) {
11427 /* We want as many rx rings enabled as there are cpus.
11428 * In multiqueue MSI-X mode, the first MSI-X vector
11429 * only deals with link interrupts, etc, so we add
11430 * one to the number of vectors we are requesting.
11431 */
11432 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11433 }
11435 return irq_cnt;
11436 }
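/* Example (illustrative): with 4 rx queues, 1 tx queue and irq_max == 5,
 * the driver asks for min(max(4, 1) + 1, 5) == 5 MSI-X vectors - the
 * extra one because vector 0 only handles link and similar interrupts
 * in multiqueue mode.
 */
#if 0
static unsigned int irq_count_demo(unsigned int rxq, unsigned int txq,
				   unsigned int irq_max)
{
	unsigned int n = rxq > txq ? rxq : txq;

	if (n > 1) {
		n += 1;			/* vector 0 is link-only */
		if (n > irq_max)
			n = irq_max;
	}
	return n;
}
#endif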
11438 static bool tg3_enable_msix(struct tg3 *tp)
11439 {
11440 int i, rc;
11441 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11443 tp->txq_cnt = tp->txq_req;
11444 tp->rxq_cnt = tp->rxq_req;
11445 if (!tp->rxq_cnt)
11446 tp->rxq_cnt = netif_get_num_default_rss_queues();
11447 if (tp->rxq_cnt > tp->rxq_max)
11448 tp->rxq_cnt = tp->rxq_max;
11450 /* Disable multiple TX rings by default. Simple round-robin hardware
11451 * scheduling of the TX rings can cause starvation of rings with
11452 * small packets when other rings have TSO or jumbo packets.
11453 */
11454 if (!tp->txq_req)
11455 tp->txq_cnt = 1;
11457 tp->irq_cnt = tg3_irq_count(tp);
11459 for (i = 0; i < tp->irq_max; i++) {
11460 msix_ent[i].entry = i;
11461 msix_ent[i].vector = 0;
11464 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11465 if (rc < 0) {
11466 return false;
11467 } else if (rc < tp->irq_cnt) {
11468 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11469 tp->irq_cnt, rc);
11470 tp->irq_cnt = rc;
11471 tp->rxq_cnt = max(rc - 1, 1);
11472 if (tp->txq_cnt)
11473 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11474 }
11476 for (i = 0; i < tp->irq_max; i++)
11477 tp->napi[i].irq_vec = msix_ent[i].vector;
11479 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11480 pci_disable_msix(tp->pdev);
11484 if (tp->irq_cnt == 1)
11487 tg3_flag_set(tp, ENABLE_RSS);
11489 if (tp->txq_cnt > 1)
11490 tg3_flag_set(tp, ENABLE_TSS);
11492 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11497 static void tg3_ints_init(struct tg3 *tp)
11499 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11500 !tg3_flag(tp, TAGGED_STATUS)) {
11501 /* All MSI supporting chips should support tagged
11502 * status. Assert that this is the case.
11503 */
11504 netdev_warn(tp->dev,
11505 "MSI without TAGGED_STATUS? Not using MSI\n");
11506 goto defcfg;
11507 }
11509 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11510 tg3_flag_set(tp, USING_MSIX);
11511 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11512 tg3_flag_set(tp, USING_MSI);
11514 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11515 u32 msi_mode = tr32(MSGINT_MODE);
11516 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11517 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11518 if (!tg3_flag(tp, 1SHOT_MSI))
11519 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11520 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11521 }
11522 defcfg:
11523 if (!tg3_flag(tp, USING_MSIX)) {
11524 tp->irq_cnt = 1;
11525 tp->napi[0].irq_vec = tp->pdev->irq;
11528 if (tp->irq_cnt == 1) {
11531 netif_set_real_num_tx_queues(tp->dev, 1);
11532 netif_set_real_num_rx_queues(tp->dev, 1);
11536 static void tg3_ints_fini(struct tg3 *tp)
11538 if (tg3_flag(tp, USING_MSIX))
11539 pci_disable_msix(tp->pdev);
11540 else if (tg3_flag(tp, USING_MSI))
11541 pci_disable_msi(tp->pdev);
11542 tg3_flag_clear(tp, USING_MSI);
11543 tg3_flag_clear(tp, USING_MSIX);
11544 tg3_flag_clear(tp, ENABLE_RSS);
11545 tg3_flag_clear(tp, ENABLE_TSS);
11548 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11549 bool init)
11550 {
11551 struct net_device *dev = tp->dev;
11552 int i, err;
11554 /*
11555 * Setup interrupts first so we know how
11556 * many NAPI resources to allocate
11557 */
11558 tg3_ints_init(tp);
11560 tg3_rss_check_indir_tbl(tp);
11562 /* The placement of this call is tied
11563 * to the setup and use of Host TX descriptors.
11565 err = tg3_alloc_consistent(tp);
11566 if (err)
11567 goto out_ints_fini;
11569 tg3_napi_init(tp);
11571 tg3_napi_enable(tp);
11573 for (i = 0; i < tp->irq_cnt; i++) {
11574 err = tg3_request_irq(tp, i);
11576 for (i--; i >= 0; i--) {
11577 struct tg3_napi *tnapi = &tp->napi[i];
11579 free_irq(tnapi->irq_vec, tnapi);
11581 goto out_napi_fini;
11585 tg3_full_lock(tp, 0);
11588 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11590 err = tg3_init_hw(tp, reset_phy);
11592 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11593 tg3_free_rings(tp);
11596 tg3_full_unlock(tp);
11601 if (test_irq && tg3_flag(tp, USING_MSI)) {
11602 err = tg3_test_msi(tp);
11605 tg3_full_lock(tp, 0);
11606 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11607 tg3_free_rings(tp);
11608 tg3_full_unlock(tp);
11610 goto out_napi_fini;
11613 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11614 u32 val = tr32(PCIE_TRANSACTION_CFG);
11616 tw32(PCIE_TRANSACTION_CFG,
11617 val | PCIE_TRANS_CFG_1SHOT_MSI);
11623 tg3_hwmon_open(tp);
11625 tg3_full_lock(tp, 0);
11627 tg3_timer_start(tp);
11628 tg3_flag_set(tp, INIT_COMPLETE);
11629 tg3_enable_ints(tp);
11631 tg3_ptp_resume(tp);
11633 tg3_full_unlock(tp);
11635 netif_tx_start_all_queues(dev);
11637 /*
11638 * Reset loopback feature if it was turned on while the device was
11639 * down; make sure that it's installed properly now.
11640 */
11641 if (dev->features & NETIF_F_LOOPBACK)
11642 tg3_set_loopback(dev, dev->features);
11647 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11648 struct tg3_napi *tnapi = &tp->napi[i];
11649 free_irq(tnapi->irq_vec, tnapi);
11653 tg3_napi_disable(tp);
11655 tg3_free_consistent(tp);
11663 static void tg3_stop(struct tg3 *tp)
11667 tg3_reset_task_cancel(tp);
11668 tg3_netif_stop(tp);
11670 tg3_timer_stop(tp);
11672 tg3_hwmon_close(tp);
11676 tg3_full_lock(tp, 1);
11678 tg3_disable_ints(tp);
11680 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11681 tg3_free_rings(tp);
11682 tg3_flag_clear(tp, INIT_COMPLETE);
11684 tg3_full_unlock(tp);
11686 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11687 struct tg3_napi *tnapi = &tp->napi[i];
11688 free_irq(tnapi->irq_vec, tnapi);
11695 tg3_free_consistent(tp);
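/* ndo_open handler.  Refuses to run during PCI error recovery, loads
 * firmware where one is required (adjusting the EEE/TSO capability
 * flags to match the outcome), powers the device up, and defers the
 * heavy lifting to tg3_start().
 */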
11698 static int tg3_open(struct net_device *dev)
11700 struct tg3 *tp = netdev_priv(dev);
11703 if (tp->pcierr_recovery) {
11704 netdev_err(dev, "Failed to open device. PCI error recovery "
11709 if (tp->fw_needed) {
11710 err = tg3_request_firmware(tp);
11711 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11713 netdev_warn(tp->dev, "EEE capability disabled\n");
11714 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11715 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11716 netdev_warn(tp->dev, "EEE capability restored\n");
11717 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11719 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11723 netdev_warn(tp->dev, "TSO capability disabled\n");
11724 tg3_flag_clear(tp, TSO_CAPABLE);
11725 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11726 netdev_notice(tp->dev, "TSO capability restored\n");
11727 tg3_flag_set(tp, TSO_CAPABLE);
11731 tg3_carrier_off(tp);
11733 err = tg3_power_up(tp);
11737 tg3_full_lock(tp, 0);
11739 tg3_disable_ints(tp);
11740 tg3_flag_clear(tp, INIT_COMPLETE);
11742 tg3_full_unlock(tp);
11744 err = tg3_start(tp,
11745 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11748 tg3_frob_aux_power(tp, false);
11749 pci_set_power_state(tp->pdev, PCI_D3hot);
11755 static int tg3_close(struct net_device *dev)
11757 struct tg3 *tp = netdev_priv(dev);
11759 if (tp->pcierr_recovery) {
11760 netdev_err(dev, "Failed to close device. PCI error recovery "
11767 if (pci_device_is_present(tp->pdev)) {
11768 tg3_power_down_prepare(tp);
11770 tg3_carrier_off(tp);
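/* Hardware statistics counters are kept as {high, low} 32-bit pairs;
 * fold one pair into a single 64-bit value.  For example, high=0x1 and
 * low=0x2 yields 0x100000002.
 */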
11775 static inline u64 get_stat64(tg3_stat64_t *val)
11777 return ((u64)val->high << 32) | ((u64)val->low);
11780 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11782 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11784 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11785 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11786 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11789 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11790 tg3_writephy(tp, MII_TG3_TEST1,
11791 val | MII_TG3_TEST1_CRC_EN);
11792 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11796 tp->phy_crc_errors += val;
11798 return tp->phy_crc_errors;
11801 return get_stat64(&hw_stats->rx_fcs_errors);
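/* Each ethtool stat is the snapshot saved across the last reset
 * (tp->estats_prev) plus the live hardware counter.
 */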
11804 #define ESTAT_ADD(member) \
11805 estats->member = old_estats->member + \
11806 get_stat64(&hw_stats->member)
11808 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11810 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11811 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11813 ESTAT_ADD(rx_octets);
11814 ESTAT_ADD(rx_fragments);
11815 ESTAT_ADD(rx_ucast_packets);
11816 ESTAT_ADD(rx_mcast_packets);
11817 ESTAT_ADD(rx_bcast_packets);
11818 ESTAT_ADD(rx_fcs_errors);
11819 ESTAT_ADD(rx_align_errors);
11820 ESTAT_ADD(rx_xon_pause_rcvd);
11821 ESTAT_ADD(rx_xoff_pause_rcvd);
11822 ESTAT_ADD(rx_mac_ctrl_rcvd);
11823 ESTAT_ADD(rx_xoff_entered);
11824 ESTAT_ADD(rx_frame_too_long_errors);
11825 ESTAT_ADD(rx_jabbers);
11826 ESTAT_ADD(rx_undersize_packets);
11827 ESTAT_ADD(rx_in_length_errors);
11828 ESTAT_ADD(rx_out_length_errors);
11829 ESTAT_ADD(rx_64_or_less_octet_packets);
11830 ESTAT_ADD(rx_65_to_127_octet_packets);
11831 ESTAT_ADD(rx_128_to_255_octet_packets);
11832 ESTAT_ADD(rx_256_to_511_octet_packets);
11833 ESTAT_ADD(rx_512_to_1023_octet_packets);
11834 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11835 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11836 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11837 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11838 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11840 ESTAT_ADD(tx_octets);
11841 ESTAT_ADD(tx_collisions);
11842 ESTAT_ADD(tx_xon_sent);
11843 ESTAT_ADD(tx_xoff_sent);
11844 ESTAT_ADD(tx_flow_control);
11845 ESTAT_ADD(tx_mac_errors);
11846 ESTAT_ADD(tx_single_collisions);
11847 ESTAT_ADD(tx_mult_collisions);
11848 ESTAT_ADD(tx_deferred);
11849 ESTAT_ADD(tx_excessive_collisions);
11850 ESTAT_ADD(tx_late_collisions);
11851 ESTAT_ADD(tx_collide_2times);
11852 ESTAT_ADD(tx_collide_3times);
11853 ESTAT_ADD(tx_collide_4times);
11854 ESTAT_ADD(tx_collide_5times);
11855 ESTAT_ADD(tx_collide_6times);
11856 ESTAT_ADD(tx_collide_7times);
11857 ESTAT_ADD(tx_collide_8times);
11858 ESTAT_ADD(tx_collide_9times);
11859 ESTAT_ADD(tx_collide_10times);
11860 ESTAT_ADD(tx_collide_11times);
11861 ESTAT_ADD(tx_collide_12times);
11862 ESTAT_ADD(tx_collide_13times);
11863 ESTAT_ADD(tx_collide_14times);
11864 ESTAT_ADD(tx_collide_15times);
11865 ESTAT_ADD(tx_ucast_packets);
11866 ESTAT_ADD(tx_mcast_packets);
11867 ESTAT_ADD(tx_bcast_packets);
11868 ESTAT_ADD(tx_carrier_sense_errors);
11869 ESTAT_ADD(tx_discards);
11870 ESTAT_ADD(tx_errors);
11872 ESTAT_ADD(dma_writeq_full);
11873 ESTAT_ADD(dma_write_prioq_full);
11874 ESTAT_ADD(rxbds_empty);
11875 ESTAT_ADD(rx_discards);
11876 ESTAT_ADD(rx_errors);
11877 ESTAT_ADD(rx_threshold_hit);
11879 ESTAT_ADD(dma_readq_full);
11880 ESTAT_ADD(dma_read_prioq_full);
11881 ESTAT_ADD(tx_comp_queue_full);
11883 ESTAT_ADD(ring_set_send_prod_index);
11884 ESTAT_ADD(ring_status_update);
11885 ESTAT_ADD(nic_irqs);
11886 ESTAT_ADD(nic_avoided_irqs);
11887 ESTAT_ADD(nic_tx_threshold_hit);
11889 ESTAT_ADD(mbuf_lwm_thresh_hit);
11892 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11894 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11895 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11897 stats->rx_packets = old_stats->rx_packets +
11898 get_stat64(&hw_stats->rx_ucast_packets) +
11899 get_stat64(&hw_stats->rx_mcast_packets) +
11900 get_stat64(&hw_stats->rx_bcast_packets);
11902 stats->tx_packets = old_stats->tx_packets +
11903 get_stat64(&hw_stats->tx_ucast_packets) +
11904 get_stat64(&hw_stats->tx_mcast_packets) +
11905 get_stat64(&hw_stats->tx_bcast_packets);
11907 stats->rx_bytes = old_stats->rx_bytes +
11908 get_stat64(&hw_stats->rx_octets);
11909 stats->tx_bytes = old_stats->tx_bytes +
11910 get_stat64(&hw_stats->tx_octets);
11912 stats->rx_errors = old_stats->rx_errors +
11913 get_stat64(&hw_stats->rx_errors);
11914 stats->tx_errors = old_stats->tx_errors +
11915 get_stat64(&hw_stats->tx_errors) +
11916 get_stat64(&hw_stats->tx_mac_errors) +
11917 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11918 get_stat64(&hw_stats->tx_discards);
11920 stats->multicast = old_stats->multicast +
11921 get_stat64(&hw_stats->rx_mcast_packets);
11922 stats->collisions = old_stats->collisions +
11923 get_stat64(&hw_stats->tx_collisions);
11925 stats->rx_length_errors = old_stats->rx_length_errors +
11926 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11927 get_stat64(&hw_stats->rx_undersize_packets);
11929 stats->rx_frame_errors = old_stats->rx_frame_errors +
11930 get_stat64(&hw_stats->rx_align_errors);
11931 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11932 get_stat64(&hw_stats->tx_discards);
11933 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11934 get_stat64(&hw_stats->tx_carrier_sense_errors);
11936 stats->rx_crc_errors = old_stats->rx_crc_errors +
11937 tg3_calc_crc_errors(tp);
11939 stats->rx_missed_errors = old_stats->rx_missed_errors +
11940 get_stat64(&hw_stats->rx_discards);
11942 stats->rx_dropped = tp->rx_dropped;
11943 stats->tx_dropped = tp->tx_dropped;
11946 static int tg3_get_regs_len(struct net_device *dev)
11948 return TG3_REG_BLK_SIZE;
11951 static void tg3_get_regs(struct net_device *dev,
11952 struct ethtool_regs *regs, void *_p)
11954 struct tg3 *tp = netdev_priv(dev);
11958 memset(_p, 0, TG3_REG_BLK_SIZE);
11960 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11963 tg3_full_lock(tp, 0);
11965 tg3_dump_legacy_regs(tp, (u32 *)_p);
11967 tg3_full_unlock(tp);
11970 static int tg3_get_eeprom_len(struct net_device *dev)
11972 struct tg3 *tp = netdev_priv(dev);
11974 return tp->nvram_size;
11977 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11979 struct tg3 *tp = netdev_priv(dev);
11980 int ret, cpmu_restore = 0;
11982 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11985 if (tg3_flag(tp, NO_NVRAM))
11988 offset = eeprom->offset;
11992 eeprom->magic = TG3_EEPROM_MAGIC;
11994 /* Override clock, link aware and link idle modes */
11995 if (tg3_flag(tp, CPMU_PRESENT)) {
11996 cpmu_val = tr32(TG3_CPMU_CTRL);
11997 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11998 CPMU_CTRL_LINK_IDLE_MODE)) {
11999 tw32(TG3_CPMU_CTRL, cpmu_val &
12000 ~(CPMU_CTRL_LINK_AWARE_MODE |
12001 CPMU_CTRL_LINK_IDLE_MODE));
12005 tg3_override_clk(tp);
12008 /* adjustments to start on required 4 byte boundary */
12009 b_offset = offset & 3;
12010 b_count = 4 - b_offset;
12011 if (b_count > len) {
12012 /* i.e. offset=1 len=2 */
12015 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12018 memcpy(data, ((char *)&val) + b_offset, b_count);
12021 eeprom->len += b_count;
12024 /* read bytes up to the last 4 byte boundary */
12025 pd = &data[eeprom->len];
12026 for (i = 0; i < (len - (len & 3)); i += 4) {
12027 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12034 memcpy(pd + i, &val, 4);
12035 if (need_resched()) {
12036 if (signal_pending(current)) {
12047 /* read last bytes not ending on 4 byte boundary */
12048 pd = &data[eeprom->len];
12050 b_offset = offset + len - b_count;
12051 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12054 memcpy(pd, &val, b_count);
12055 eeprom->len += b_count;
12060 /* Restore clock, link aware and link idle modes */
12061 tg3_restore_clk(tp);
12063 tw32(TG3_CPMU_CTRL, cpmu_val);
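/* NVRAM writes must be 4-byte aligned.  For a request with ragged
 * edges, read back the partial words at either end, merge them with
 * the user data in a scratch buffer, and issue one aligned block
 * write.
 */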
12068 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12070 struct tg3 *tp = netdev_priv(dev);
12072 u32 offset, len, b_offset, odd_len;
12074 __be32 start = 0, end;
12076 if (tg3_flag(tp, NO_NVRAM) ||
12077 eeprom->magic != TG3_EEPROM_MAGIC)
12080 offset = eeprom->offset;
12083 if ((b_offset = (offset & 3))) {
12084 /* adjustments to start on required 4 byte boundary */
12085 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12096 /* adjustments to end on required 4 byte boundary */
12098 len = (len + 3) & ~3;
12099 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12105 if (b_offset || odd_len) {
12106 buf = kmalloc(len, GFP_KERNEL);
12110 memcpy(buf, &start, 4);
12112 memcpy(buf+len-4, &end, 4);
12113 memcpy(buf + b_offset, data, eeprom->len);
12116 ret = tg3_nvram_write_block(tp, offset, len, buf);
12124 static int tg3_get_link_ksettings(struct net_device *dev,
12125 struct ethtool_link_ksettings *cmd)
12127 struct tg3 *tp = netdev_priv(dev);
12128 u32 supported, advertising;
12130 if (tg3_flag(tp, USE_PHYLIB)) {
12131 struct phy_device *phydev;
12132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12134 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12135 phy_ethtool_ksettings_get(phydev, cmd);
12140 supported = (SUPPORTED_Autoneg);
12142 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12143 supported |= (SUPPORTED_1000baseT_Half |
12144 SUPPORTED_1000baseT_Full);
12146 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12147 supported |= (SUPPORTED_100baseT_Half |
12148 SUPPORTED_100baseT_Full |
12149 SUPPORTED_10baseT_Half |
12150 SUPPORTED_10baseT_Full |
12152 cmd->base.port = PORT_TP;
12154 supported |= SUPPORTED_FIBRE;
12155 cmd->base.port = PORT_FIBRE;
12157 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12160 advertising = tp->link_config.advertising;
12161 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12162 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12163 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12164 advertising |= ADVERTISED_Pause;
12166 advertising |= ADVERTISED_Pause |
12167 ADVERTISED_Asym_Pause;
12169 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12170 advertising |= ADVERTISED_Asym_Pause;
12173 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12176 if (netif_running(dev) && tp->link_up) {
12177 cmd->base.speed = tp->link_config.active_speed;
12178 cmd->base.duplex = tp->link_config.active_duplex;
12179 ethtool_convert_legacy_u32_to_link_mode(
12180 cmd->link_modes.lp_advertising,
12181 tp->link_config.rmt_adv);
12183 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12184 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12185 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12187 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12190 cmd->base.speed = SPEED_UNKNOWN;
12191 cmd->base.duplex = DUPLEX_UNKNOWN;
12192 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12194 cmd->base.phy_address = tp->phy_addr;
12195 cmd->base.autoneg = tp->link_config.autoneg;
12199 static int tg3_set_link_ksettings(struct net_device *dev,
12200 const struct ethtool_link_ksettings *cmd)
12202 struct tg3 *tp = netdev_priv(dev);
12203 u32 speed = cmd->base.speed;
12206 if (tg3_flag(tp, USE_PHYLIB)) {
12207 struct phy_device *phydev;
12208 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12210 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12211 return phy_ethtool_ksettings_set(phydev, cmd);
12214 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12215 cmd->base.autoneg != AUTONEG_DISABLE)
12218 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12219 cmd->base.duplex != DUPLEX_FULL &&
12220 cmd->base.duplex != DUPLEX_HALF)
12223 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12224 cmd->link_modes.advertising);
12226 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12227 u32 mask = ADVERTISED_Autoneg |
12229 ADVERTISED_Asym_Pause;
12231 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12232 mask |= ADVERTISED_1000baseT_Half |
12233 ADVERTISED_1000baseT_Full;
12235 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12236 mask |= ADVERTISED_100baseT_Half |
12237 ADVERTISED_100baseT_Full |
12238 ADVERTISED_10baseT_Half |
12239 ADVERTISED_10baseT_Full |
12242 mask |= ADVERTISED_FIBRE;
12244 if (advertising & ~mask)
12247 mask &= (ADVERTISED_1000baseT_Half |
12248 ADVERTISED_1000baseT_Full |
12249 ADVERTISED_100baseT_Half |
12250 ADVERTISED_100baseT_Full |
12251 ADVERTISED_10baseT_Half |
12252 ADVERTISED_10baseT_Full);
12254 advertising &= mask;
12256 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12257 if (speed != SPEED_1000)
12260 if (cmd->base.duplex != DUPLEX_FULL)
12263 if (speed != SPEED_100 &&
12269 tg3_full_lock(tp, 0);
12271 tp->link_config.autoneg = cmd->base.autoneg;
12272 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12273 tp->link_config.advertising = (advertising |
12274 ADVERTISED_Autoneg);
12275 tp->link_config.speed = SPEED_UNKNOWN;
12276 tp->link_config.duplex = DUPLEX_UNKNOWN;
12278 tp->link_config.advertising = 0;
12279 tp->link_config.speed = speed;
12280 tp->link_config.duplex = cmd->base.duplex;
12283 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12285 tg3_warn_mgmt_link_flap(tp);
12287 if (netif_running(dev))
12288 tg3_setup_phy(tp, true);
12290 tg3_full_unlock(tp);
12295 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12297 struct tg3 *tp = netdev_priv(dev);
12299 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12300 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12301 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12304 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12306 struct tg3 *tp = netdev_priv(dev);
12308 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12309 wol->supported = WAKE_MAGIC;
12311 wol->supported = 0;
12313 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12314 wol->wolopts = WAKE_MAGIC;
12315 memset(&wol->sopass, 0, sizeof(wol->sopass));
12318 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12320 struct tg3 *tp = netdev_priv(dev);
12321 struct device *dp = &tp->pdev->dev;
12323 if (wol->wolopts & ~WAKE_MAGIC)
12325 if ((wol->wolopts & WAKE_MAGIC) &&
12326 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12329 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12331 if (device_may_wakeup(dp))
12332 tg3_flag_set(tp, WOL_ENABLE);
12334 tg3_flag_clear(tp, WOL_ENABLE);
12339 static u32 tg3_get_msglevel(struct net_device *dev)
12341 struct tg3 *tp = netdev_priv(dev);
12342 return tp->msg_enable;
12345 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12347 struct tg3 *tp = netdev_priv(dev);
12348 tp->msg_enable = value;
12351 static int tg3_nway_reset(struct net_device *dev)
12353 struct tg3 *tp = netdev_priv(dev);
12356 if (!netif_running(dev))
12359 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12362 tg3_warn_mgmt_link_flap(tp);
12364 if (tg3_flag(tp, USE_PHYLIB)) {
12365 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12367 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12371 spin_lock_bh(&tp->lock);
12373 tg3_readphy(tp, MII_BMCR, &bmcr);
12374 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12375 ((bmcr & BMCR_ANENABLE) ||
12376 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12377 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12381 spin_unlock_bh(&tp->lock);
12387 static void tg3_get_ringparam(struct net_device *dev,
12388 struct ethtool_ringparam *ering,
12389 struct kernel_ethtool_ringparam *kernel_ering,
12390 struct netlink_ext_ack *extack)
12392 struct tg3 *tp = netdev_priv(dev);
12394 ering->rx_max_pending = tp->rx_std_ring_mask;
12395 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12396 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12398 ering->rx_jumbo_max_pending = 0;
12400 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12402 ering->rx_pending = tp->rx_pending;
12403 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12404 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12406 ering->rx_jumbo_pending = 0;
12408 ering->tx_pending = tp->napi[0].tx_pending;
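/* Validate the requested ring sizes against the hardware limits, apply
 * them, and restart the chip if it is running; 5717/5719/5720 ASICs
 * also get a PHY reset here to avoid a PHY lockup.
 */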
12411 static int tg3_set_ringparam(struct net_device *dev,
12412 struct ethtool_ringparam *ering,
12413 struct kernel_ethtool_ringparam *kernel_ering,
12414 struct netlink_ext_ack *extack)
12416 struct tg3 *tp = netdev_priv(dev);
12417 int i, irq_sync = 0, err = 0;
12418 bool reset_phy = false;
12420 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12421 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12422 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12423 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12424 (tg3_flag(tp, TSO_BUG) &&
12425 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12428 if (netif_running(dev)) {
12430 tg3_netif_stop(tp);
12434 tg3_full_lock(tp, irq_sync);
12436 tp->rx_pending = ering->rx_pending;
12438 if (tg3_flag(tp, MAX_RXPEND_64) &&
12439 tp->rx_pending > 63)
12440 tp->rx_pending = 63;
12442 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12443 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12445 for (i = 0; i < tp->irq_max; i++)
12446 tp->napi[i].tx_pending = ering->tx_pending;
12448 if (netif_running(dev)) {
12449 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12450 /* Reset PHY to avoid PHY lock up */
12451 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12452 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12453 tg3_asic_rev(tp) == ASIC_REV_5720)
12456 err = tg3_restart_hw(tp, reset_phy);
12458 tg3_netif_start(tp);
12461 tg3_full_unlock(tp);
12463 if (irq_sync && !err)
12469 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12471 struct tg3 *tp = netdev_priv(dev);
12473 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12475 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12476 epause->rx_pause = 1;
12478 epause->rx_pause = 0;
12480 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12481 epause->tx_pause = 1;
12483 epause->tx_pause = 0;
12486 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12488 struct tg3 *tp = netdev_priv(dev);
12490 bool reset_phy = false;
12492 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12493 tg3_warn_mgmt_link_flap(tp);
12495 if (tg3_flag(tp, USE_PHYLIB)) {
12496 struct phy_device *phydev;
12498 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12500 if (!phy_validate_pause(phydev, epause))
12503 tp->link_config.flowctrl = 0;
12504 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12505 if (epause->rx_pause) {
12506 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12508 if (epause->tx_pause) {
12509 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12511 } else if (epause->tx_pause) {
12512 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12515 if (epause->autoneg)
12516 tg3_flag_set(tp, PAUSE_AUTONEG);
12518 tg3_flag_clear(tp, PAUSE_AUTONEG);
12520 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12521 if (phydev->autoneg) {
12522 /* phy_set_asym_pause() will
12523 * renegotiate the link to inform our
12524 * link partner of our flow control
12525 * settings, even if the flow control
12526 * is forced. Let tg3_adjust_link()
12527 * do the final flow control setup.
12532 if (!epause->autoneg)
12533 tg3_setup_flow_control(tp, 0, 0);
12538 if (netif_running(dev)) {
12539 tg3_netif_stop(tp);
12543 tg3_full_lock(tp, irq_sync);
12545 if (epause->autoneg)
12546 tg3_flag_set(tp, PAUSE_AUTONEG);
12548 tg3_flag_clear(tp, PAUSE_AUTONEG);
12549 if (epause->rx_pause)
12550 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12552 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12553 if (epause->tx_pause)
12554 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12556 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12558 if (netif_running(dev)) {
12559 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12560 /* Reset PHY to avoid PHY lock up */
12561 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12562 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12563 tg3_asic_rev(tp) == ASIC_REV_5720)
12566 err = tg3_restart_hw(tp, reset_phy);
12568 tg3_netif_start(tp);
12571 tg3_full_unlock(tp);
12574 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12579 static int tg3_get_sset_count(struct net_device *dev, int sset)
12583 return TG3_NUM_TEST;
12585 return TG3_NUM_STATS;
12587 return -EOPNOTSUPP;
12591 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12592 u32 *rules __always_unused)
12594 struct tg3 *tp = netdev_priv(dev);
12596 if (!tg3_flag(tp, SUPPORT_MSIX))
12597 return -EOPNOTSUPP;
12599 switch (info->cmd) {
12600 case ETHTOOL_GRXRINGS:
12601 if (netif_running(tp->dev))
12602 info->data = tp->rxq_cnt;
12604 info->data = num_online_cpus();
12605 if (info->data > TG3_RSS_MAX_NUM_QS)
12606 info->data = TG3_RSS_MAX_NUM_QS;
12612 return -EOPNOTSUPP;
12616 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12619 struct tg3 *tp = netdev_priv(dev);
12621 if (tg3_flag(tp, SUPPORT_MSIX))
12622 size = TG3_RSS_INDIR_TBL_SIZE;
12627 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12629 struct tg3 *tp = netdev_priv(dev);
12633 *hfunc = ETH_RSS_HASH_TOP;
12637 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12638 indir[i] = tp->rss_ind_tbl[i];
12643 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12646 struct tg3 *tp = netdev_priv(dev);
12649 /* We require at least one supported parameter to be changed and no
12650 * change in any of the unsupported parameters
12653 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12654 return -EOPNOTSUPP;
12659 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12660 tp->rss_ind_tbl[i] = indir[i];
12662 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12665 /* It is legal to write the indirection
12666 * table while the device is running.
12668 tg3_full_lock(tp, 0);
12669 tg3_rss_write_indir_tbl(tp);
12670 tg3_full_unlock(tp);
12675 static void tg3_get_channels(struct net_device *dev,
12676 struct ethtool_channels *channel)
12678 struct tg3 *tp = netdev_priv(dev);
12679 u32 deflt_qs = netif_get_num_default_rss_queues();
12681 channel->max_rx = tp->rxq_max;
12682 channel->max_tx = tp->txq_max;
12684 if (netif_running(dev)) {
12685 channel->rx_count = tp->rxq_cnt;
12686 channel->tx_count = tp->txq_cnt;
12689 channel->rx_count = tp->rxq_req;
12691 channel->rx_count = min(deflt_qs, tp->rxq_max);
12694 channel->tx_count = tp->txq_req;
12696 channel->tx_count = min(deflt_qs, tp->txq_max);
12700 static int tg3_set_channels(struct net_device *dev,
12701 struct ethtool_channels *channel)
12703 struct tg3 *tp = netdev_priv(dev);
12705 if (!tg3_flag(tp, SUPPORT_MSIX))
12706 return -EOPNOTSUPP;
12708 if (channel->rx_count > tp->rxq_max ||
12709 channel->tx_count > tp->txq_max)
12712 tp->rxq_req = channel->rx_count;
12713 tp->txq_req = channel->tx_count;
12715 if (!netif_running(dev))
12720 tg3_carrier_off(tp);
12722 tg3_start(tp, true, false, false);
12727 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12729 switch (stringset) {
12731 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12734 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12737 WARN_ON(1); /* we need a WARN() */
12742 static int tg3_set_phys_id(struct net_device *dev,
12743 enum ethtool_phys_id_state state)
12745 struct tg3 *tp = netdev_priv(dev);
12748 case ETHTOOL_ID_ACTIVE:
12749 return 1; /* cycle on/off once per second */
12751 case ETHTOOL_ID_ON:
12752 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12753 LED_CTRL_1000MBPS_ON |
12754 LED_CTRL_100MBPS_ON |
12755 LED_CTRL_10MBPS_ON |
12756 LED_CTRL_TRAFFIC_OVERRIDE |
12757 LED_CTRL_TRAFFIC_BLINK |
12758 LED_CTRL_TRAFFIC_LED);
12761 case ETHTOOL_ID_OFF:
12762 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12763 LED_CTRL_TRAFFIC_OVERRIDE);
12766 case ETHTOOL_ID_INACTIVE:
12767 tw32(MAC_LED_CTRL, tp->led_ctrl);
12774 static void tg3_get_ethtool_stats(struct net_device *dev,
12775 struct ethtool_stats *estats, u64 *tmp_stats)
12777 struct tg3 *tp = netdev_priv(dev);
12780 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12782 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
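/* Locate the VPD block in NVRAM.  Prefer an extended-VPD entry when
 * the NVRAM directory advertises one, fall back to the fixed offset
 * otherwise, and as a last resort read the VPD through PCI config
 * space.
 */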
12785 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12789 u32 offset = 0, len = 0;
12792 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12795 if (magic == TG3_EEPROM_MAGIC) {
12796 for (offset = TG3_NVM_DIR_START;
12797 offset < TG3_NVM_DIR_END;
12798 offset += TG3_NVM_DIRENT_SIZE) {
12799 if (tg3_nvram_read(tp, offset, &val))
12802 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12803 TG3_NVM_DIRTYPE_EXTVPD)
12807 if (offset != TG3_NVM_DIR_END) {
12808 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12809 if (tg3_nvram_read(tp, offset + 4, &offset))
12812 offset = tg3_nvram_logical_addr(tp, offset);
12815 if (!offset || !len) {
12816 offset = TG3_NVM_VPD_OFF;
12817 len = TG3_NVM_VPD_LEN;
12820 buf = kmalloc(len, GFP_KERNEL);
12824 for (i = 0; i < len; i += 4) {
12825 /* The data is in little-endian format in NVRAM.
12826 * Use the big-endian read routines to preserve
12827 * the byte order as it exists in NVRAM.
12829 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12834 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12846 #define NVRAM_TEST_SIZE 0x100
12847 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12848 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12849 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12850 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12851 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12852 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12853 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12854 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
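/* NVRAM self-test: size the image from its magic/format words, read it
 * in, and verify the embedded integrity data — an additive checksum
 * for selfboot firmware images, per-byte parity for the selfboot HW
 * format, CRC32 values for legacy images — plus the VPD checksum.
 */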
12856 static int tg3_test_nvram(struct tg3 *tp)
12860 int i, j, k, err = 0, size;
12863 if (tg3_flag(tp, NO_NVRAM))
12866 if (tg3_nvram_read(tp, 0, &magic) != 0)
12869 if (magic == TG3_EEPROM_MAGIC)
12870 size = NVRAM_TEST_SIZE;
12871 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12872 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12873 TG3_EEPROM_SB_FORMAT_1) {
12874 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12875 case TG3_EEPROM_SB_REVISION_0:
12876 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12878 case TG3_EEPROM_SB_REVISION_2:
12879 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12881 case TG3_EEPROM_SB_REVISION_3:
12882 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12884 case TG3_EEPROM_SB_REVISION_4:
12885 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12887 case TG3_EEPROM_SB_REVISION_5:
12888 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12890 case TG3_EEPROM_SB_REVISION_6:
12891 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12898 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12899 size = NVRAM_SELFBOOT_HW_SIZE;
12903 buf = kmalloc(size, GFP_KERNEL);
12908 for (i = 0, j = 0; i < size; i += 4, j++) {
12909 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12916 /* Selfboot format */
12917 magic = be32_to_cpu(buf[0]);
12918 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12919 TG3_EEPROM_MAGIC_FW) {
12920 u8 *buf8 = (u8 *) buf, csum8 = 0;
12922 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12923 TG3_EEPROM_SB_REVISION_2) {
12924 /* For rev 2, the csum doesn't include the MBA. */
12925 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12927 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12930 for (i = 0; i < size; i++)
12943 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12944 TG3_EEPROM_MAGIC_HW) {
12945 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12946 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12947 u8 *buf8 = (u8 *) buf;
12949 /* Separate the parity bits and the data bytes. */
12950 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12951 if ((i == 0) || (i == 8)) {
12955 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12956 parity[k++] = buf8[i] & msk;
12958 } else if (i == 16) {
12962 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12963 parity[k++] = buf8[i] & msk;
12966 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12967 parity[k++] = buf8[i] & msk;
12970 data[j++] = buf8[i];
12974 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12975 u8 hw8 = hweight8(data[i]);
12977 if ((hw8 & 0x1) && parity[i])
12979 else if (!(hw8 & 0x1) && !parity[i])
12988 /* Bootstrap checksum at offset 0x10 */
12989 csum = calc_crc((unsigned char *) buf, 0x10);
12990 if (csum != le32_to_cpu(buf[0x10/4]))
12993 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12994 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12995 if (csum != le32_to_cpu(buf[0xfc/4]))
13000 buf = tg3_vpd_readblock(tp, &len);
13004 err = pci_vpd_check_csum(buf, len);
13005 /* go on if no checksum found */
13013 #define TG3_SERDES_TIMEOUT_SEC 2
13014 #define TG3_COPPER_TIMEOUT_SEC 6
13016 static int tg3_test_link(struct tg3 *tp)
13020 if (!netif_running(tp->dev))
13023 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13024 max = TG3_SERDES_TIMEOUT_SEC;
13026 max = TG3_COPPER_TIMEOUT_SEC;
13028 for (i = 0; i < max; i++) {
13032 if (msleep_interruptible(1000))
13039 /* Only test the commonly used registers */
13040 static int tg3_test_registers(struct tg3 *tp)
13042 int i, is_5705, is_5750;
13043 u32 offset, read_mask, write_mask, val, save_val, read_val;
13047 #define TG3_FL_5705 0x1
13048 #define TG3_FL_NOT_5705 0x2
13049 #define TG3_FL_NOT_5788 0x4
13050 #define TG3_FL_NOT_5750 0x8
13054 /* MAC Control Registers */
13055 { MAC_MODE, TG3_FL_NOT_5705,
13056 0x00000000, 0x00ef6f8c },
13057 { MAC_MODE, TG3_FL_5705,
13058 0x00000000, 0x01ef6b8c },
13059 { MAC_STATUS, TG3_FL_NOT_5705,
13060 0x03800107, 0x00000000 },
13061 { MAC_STATUS, TG3_FL_5705,
13062 0x03800100, 0x00000000 },
13063 { MAC_ADDR_0_HIGH, 0x0000,
13064 0x00000000, 0x0000ffff },
13065 { MAC_ADDR_0_LOW, 0x0000,
13066 0x00000000, 0xffffffff },
13067 { MAC_RX_MTU_SIZE, 0x0000,
13068 0x00000000, 0x0000ffff },
13069 { MAC_TX_MODE, 0x0000,
13070 0x00000000, 0x00000070 },
13071 { MAC_TX_LENGTHS, 0x0000,
13072 0x00000000, 0x00003fff },
13073 { MAC_RX_MODE, TG3_FL_NOT_5705,
13074 0x00000000, 0x000007fc },
13075 { MAC_RX_MODE, TG3_FL_5705,
13076 0x00000000, 0x000007dc },
13077 { MAC_HASH_REG_0, 0x0000,
13078 0x00000000, 0xffffffff },
13079 { MAC_HASH_REG_1, 0x0000,
13080 0x00000000, 0xffffffff },
13081 { MAC_HASH_REG_2, 0x0000,
13082 0x00000000, 0xffffffff },
13083 { MAC_HASH_REG_3, 0x0000,
13084 0x00000000, 0xffffffff },
13086 /* Receive Data and Receive BD Initiator Control Registers. */
13087 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13088 0x00000000, 0xffffffff },
13089 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13090 0x00000000, 0xffffffff },
13091 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13092 0x00000000, 0x00000003 },
13093 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13094 0x00000000, 0xffffffff },
13095 { RCVDBDI_STD_BD+0, 0x0000,
13096 0x00000000, 0xffffffff },
13097 { RCVDBDI_STD_BD+4, 0x0000,
13098 0x00000000, 0xffffffff },
13099 { RCVDBDI_STD_BD+8, 0x0000,
13100 0x00000000, 0xffff0002 },
13101 { RCVDBDI_STD_BD+0xc, 0x0000,
13102 0x00000000, 0xffffffff },
13104 /* Receive BD Initiator Control Registers. */
13105 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13106 0x00000000, 0xffffffff },
13107 { RCVBDI_STD_THRESH, TG3_FL_5705,
13108 0x00000000, 0x000003ff },
13109 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13110 0x00000000, 0xffffffff },
13112 /* Host Coalescing Control Registers. */
13113 { HOSTCC_MODE, TG3_FL_NOT_5705,
13114 0x00000000, 0x00000004 },
13115 { HOSTCC_MODE, TG3_FL_5705,
13116 0x00000000, 0x000000f6 },
13117 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13118 0x00000000, 0xffffffff },
13119 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13120 0x00000000, 0x000003ff },
13121 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13122 0x00000000, 0xffffffff },
13123 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13124 0x00000000, 0x000003ff },
13125 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13126 0x00000000, 0xffffffff },
13127 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13128 0x00000000, 0x000000ff },
13129 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13130 0x00000000, 0xffffffff },
13131 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13132 0x00000000, 0x000000ff },
13133 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13134 0x00000000, 0xffffffff },
13135 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13136 0x00000000, 0xffffffff },
13137 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13138 0x00000000, 0xffffffff },
13139 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13140 0x00000000, 0x000000ff },
13141 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13142 0x00000000, 0xffffffff },
13143 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13144 0x00000000, 0x000000ff },
13145 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13146 0x00000000, 0xffffffff },
13147 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13148 0x00000000, 0xffffffff },
13149 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13150 0x00000000, 0xffffffff },
13151 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13152 0x00000000, 0xffffffff },
13153 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13154 0x00000000, 0xffffffff },
13155 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13156 0xffffffff, 0x00000000 },
13157 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13158 0xffffffff, 0x00000000 },
13160 /* Buffer Manager Control Registers. */
13161 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13162 0x00000000, 0x007fff80 },
13163 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13164 0x00000000, 0x007fffff },
13165 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13166 0x00000000, 0x0000003f },
13167 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13168 0x00000000, 0x000001ff },
13169 { BUFMGR_MB_HIGH_WATER, 0x0000,
13170 0x00000000, 0x000001ff },
13171 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13172 0xffffffff, 0x00000000 },
13173 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13174 0xffffffff, 0x00000000 },
13176 /* Mailbox Registers */
13177 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13178 0x00000000, 0x000001ff },
13179 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13180 0x00000000, 0x000001ff },
13181 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13182 0x00000000, 0x000007ff },
13183 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13184 0x00000000, 0x000001ff },
13186 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13189 is_5705 = is_5750 = 0;
13190 if (tg3_flag(tp, 5705_PLUS)) {
13192 if (tg3_flag(tp, 5750_PLUS))
13196 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13197 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13200 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13203 if (tg3_flag(tp, IS_5788) &&
13204 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13207 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13210 offset = (u32) reg_tbl[i].offset;
13211 read_mask = reg_tbl[i].read_mask;
13212 write_mask = reg_tbl[i].write_mask;
13214 /* Save the original register content */
13215 save_val = tr32(offset);
13217 /* Determine the read-only value. */
13218 read_val = save_val & read_mask;
13220 /* Write zero to the register, then make sure the read-only bits
13221 * are not changed and the read/write bits are all zeros.
13225 val = tr32(offset);
13227 /* Test the read-only and read/write bits. */
13228 if (((val & read_mask) != read_val) || (val & write_mask))
13231 /* Write ones to all the bits defined by RdMask and WrMask, then
13232 * make sure the read-only bits are not changed and the
13233 * read/write bits are all ones.
13235 tw32(offset, read_mask | write_mask);
13237 val = tr32(offset);
13239 /* Test the read-only bits. */
13240 if ((val & read_mask) != read_val)
13243 /* Test the read/write bits. */
13244 if ((val & write_mask) != write_mask)
13247 tw32(offset, save_val);
13253 if (netif_msg_hw(tp))
13254 netdev_err(tp->dev,
13255 "Register test failed at offset %x\n", offset);
13256 tw32(offset, save_val);
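/* Write each test pattern to every word in the window and read it
 * back; any mismatch fails the memory test.
 */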
13260 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13262 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13266 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13267 for (j = 0; j < len; j += 4) {
13270 tg3_write_mem(tp, offset + j, test_pattern[i]);
13271 tg3_read_mem(tp, offset + j, &val);
13272 if (val != test_pattern[i])
13279 static int tg3_test_memory(struct tg3 *tp)
13281 static struct mem_entry {
13284 } mem_tbl_570x[] = {
13285 { 0x00000000, 0x00b50},
13286 { 0x00002000, 0x1c000},
13287 { 0xffffffff, 0x00000}
13288 }, mem_tbl_5705[] = {
13289 { 0x00000100, 0x0000c},
13290 { 0x00000200, 0x00008},
13291 { 0x00004000, 0x00800},
13292 { 0x00006000, 0x01000},
13293 { 0x00008000, 0x02000},
13294 { 0x00010000, 0x0e000},
13295 { 0xffffffff, 0x00000}
13296 }, mem_tbl_5755[] = {
13297 { 0x00000200, 0x00008},
13298 { 0x00004000, 0x00800},
13299 { 0x00006000, 0x00800},
13300 { 0x00008000, 0x02000},
13301 { 0x00010000, 0x0c000},
13302 { 0xffffffff, 0x00000}
13303 }, mem_tbl_5906[] = {
13304 { 0x00000200, 0x00008},
13305 { 0x00004000, 0x00400},
13306 { 0x00006000, 0x00400},
13307 { 0x00008000, 0x01000},
13308 { 0x00010000, 0x01000},
13309 { 0xffffffff, 0x00000}
13310 }, mem_tbl_5717[] = {
13311 { 0x00000200, 0x00008},
13312 { 0x00010000, 0x0a000},
13313 { 0x00020000, 0x13c00},
13314 { 0xffffffff, 0x00000}
13315 }, mem_tbl_57765[] = {
13316 { 0x00000200, 0x00008},
13317 { 0x00004000, 0x00800},
13318 { 0x00006000, 0x09800},
13319 { 0x00010000, 0x0a000},
13320 { 0xffffffff, 0x00000}
13322 struct mem_entry *mem_tbl;
13326 if (tg3_flag(tp, 5717_PLUS))
13327 mem_tbl = mem_tbl_5717;
13328 else if (tg3_flag(tp, 57765_CLASS) ||
13329 tg3_asic_rev(tp) == ASIC_REV_5762)
13330 mem_tbl = mem_tbl_57765;
13331 else if (tg3_flag(tp, 5755_PLUS))
13332 mem_tbl = mem_tbl_5755;
13333 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13334 mem_tbl = mem_tbl_5906;
13335 else if (tg3_flag(tp, 5705_PLUS))
13336 mem_tbl = mem_tbl_5705;
13338 mem_tbl = mem_tbl_570x;
13340 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13341 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
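/* Template IPv4 + TCP header (including options) used to build the TSO
 * loopback frame below; the addresses and ports are placeholders, only
 * the layout matters.
 */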
13349 #define TG3_TSO_MSS 500
13351 #define TG3_TSO_IP_HDR_LEN 20
13352 #define TG3_TSO_TCP_HDR_LEN 20
13353 #define TG3_TSO_TCP_OPT_LEN 12
13355 static const u8 tg3_tso_header[] = {
13357 0x45, 0x00, 0x00, 0x00,
13358 0x00, 0x00, 0x40, 0x00,
13359 0x40, 0x06, 0x00, 0x00,
13360 0x0a, 0x00, 0x00, 0x01,
13361 0x0a, 0x00, 0x00, 0x02,
13362 0x0d, 0x00, 0xe0, 0x00,
13363 0x00, 0x00, 0x01, 0x00,
13364 0x00, 0x00, 0x02, 0x00,
13365 0x80, 0x10, 0x10, 0x00,
13366 0x14, 0x09, 0x00, 0x00,
13367 0x01, 0x01, 0x08, 0x0a,
13368 0x11, 0x11, 0x11, 0x11,
13369 0x11, 0x11, 0x11, 0x11,
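/* Build a single frame, transmit it through the currently selected
 * loopback path, poll the status block until the TX and RX indices
 * advance, then verify the received copy: length, destination ring,
 * payload bytes, and (for TSO) the checksum-offload result.
 */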
13372 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13374 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13375 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13377 struct sk_buff *skb;
13378 u8 *tx_data, *rx_data;
13380 int num_pkts, tx_len, rx_len, i, err;
13381 struct tg3_rx_buffer_desc *desc;
13382 struct tg3_napi *tnapi, *rnapi;
13383 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13385 tnapi = &tp->napi[0];
13386 rnapi = &tp->napi[0];
13387 if (tp->irq_cnt > 1) {
13388 if (tg3_flag(tp, ENABLE_RSS))
13389 rnapi = &tp->napi[1];
13390 if (tg3_flag(tp, ENABLE_TSS))
13391 tnapi = &tp->napi[1];
13393 coal_now = tnapi->coal_now | rnapi->coal_now;
13398 skb = netdev_alloc_skb(tp->dev, tx_len);
13402 tx_data = skb_put(skb, tx_len);
13403 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13404 memset(tx_data + ETH_ALEN, 0x0, 8);
13406 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13408 if (tso_loopback) {
13409 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13411 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13412 TG3_TSO_TCP_OPT_LEN;
13414 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13415 sizeof(tg3_tso_header));
13418 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13419 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13421 /* Set the total length field in the IP header */
13422 iph->tot_len = htons((u16)(mss + hdr_len));
13424 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13425 TXD_FLAG_CPU_POST_DMA);
13427 if (tg3_flag(tp, HW_TSO_1) ||
13428 tg3_flag(tp, HW_TSO_2) ||
13429 tg3_flag(tp, HW_TSO_3)) {
13431 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13432 th = (struct tcphdr *)&tx_data[val];
13435 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13437 if (tg3_flag(tp, HW_TSO_3)) {
13438 mss |= (hdr_len & 0xc) << 12;
13439 if (hdr_len & 0x10)
13440 base_flags |= 0x00000010;
13441 base_flags |= (hdr_len & 0x3e0) << 5;
13442 } else if (tg3_flag(tp, HW_TSO_2))
13443 mss |= hdr_len << 9;
13444 else if (tg3_flag(tp, HW_TSO_1) ||
13445 tg3_asic_rev(tp) == ASIC_REV_5705) {
13446 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13448 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13451 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13454 data_off = ETH_HLEN;
13456 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13457 tx_len > VLAN_ETH_FRAME_LEN)
13458 base_flags |= TXD_FLAG_JMB_PKT;
13461 for (i = data_off; i < tx_len; i++)
13462 tx_data[i] = (u8) (i & 0xff);
13464 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13465 if (dma_mapping_error(&tp->pdev->dev, map)) {
13466 dev_kfree_skb(skb);
13470 val = tnapi->tx_prod;
13471 tnapi->tx_buffers[val].skb = skb;
13472 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13474 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13479 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13481 budget = tg3_tx_avail(tnapi);
13482 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13483 base_flags | TXD_FLAG_END, mss, 0)) {
13484 tnapi->tx_buffers[val].skb = NULL;
13485 dev_kfree_skb(skb);
13491 /* Sync BD data before updating mailbox */
13494 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13495 tr32_mailbox(tnapi->prodmbox);
13499 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13500 for (i = 0; i < 35; i++) {
13501 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13506 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13507 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13508 if ((tx_idx == tnapi->tx_prod) &&
13509 (rx_idx == (rx_start_idx + num_pkts)))
13513 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13514 dev_kfree_skb(skb);
13516 if (tx_idx != tnapi->tx_prod)
13519 if (rx_idx != rx_start_idx + num_pkts)
13523 while (rx_idx != rx_start_idx) {
13524 desc = &rnapi->rx_rcb[rx_start_idx++];
13525 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13526 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13528 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13529 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13532 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13535 if (!tso_loopback) {
13536 if (rx_len != tx_len)
13539 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13540 if (opaque_key != RXD_OPAQUE_RING_STD)
13543 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13546 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13547 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13548 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13552 if (opaque_key == RXD_OPAQUE_RING_STD) {
13553 rx_data = tpr->rx_std_buffers[desc_idx].data;
13554 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13556 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13557 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13558 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13563 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13566 rx_data += TG3_RX_OFFSET(tp);
13567 for (i = data_off; i < rx_len; i++, val++) {
13568 if (*(rx_data + i) != (u8) (val & 0xff))
13575 /* tg3_free_rings will unmap and free the rx_data */
13580 #define TG3_STD_LOOPBACK_FAILED 1
13581 #define TG3_JMB_LOOPBACK_FAILED 2
13582 #define TG3_TSO_LOOPBACK_FAILED 4
13583 #define TG3_LOOPBACK_FAILED \
13584 (TG3_STD_LOOPBACK_FAILED | \
13585 TG3_JMB_LOOPBACK_FAILED | \
13586 TG3_TSO_LOOPBACK_FAILED)
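/* Run the standard, TSO, and jumbo loopback variants over the MAC,
 * PHY, and (optionally) external loopback paths, accumulating per-path
 * failure bits in data[].  EEE is masked off for the duration and
 * restored afterwards.
 */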
13588 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13592 u32 jmb_pkt_sz = 9000;
13595 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13597 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13598 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13600 if (!netif_running(tp->dev)) {
13601 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13602 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13604 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13608 err = tg3_reset_hw(tp, true);
13610 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13611 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13613 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13617 if (tg3_flag(tp, ENABLE_RSS)) {
13620 /* Reroute all rx packets to the 1st queue */
13621 for (i = MAC_RSS_INDIR_TBL_0;
13622 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13626 /* HW errata - mac loopback fails in some cases on 5780.
13627 * Normal traffic and PHY loopback are not affected by
13628 * errata. Also, the MAC loopback test is deprecated for
13629 * all newer ASIC revisions.
13631 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13632 !tg3_flag(tp, CPMU_PRESENT)) {
13633 tg3_mac_loopback(tp, true);
13635 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13636 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13638 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13639 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13640 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13642 tg3_mac_loopback(tp, false);
13645 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13646 !tg3_flag(tp, USE_PHYLIB)) {
13649 tg3_phy_lpbk_set(tp, 0, false);
13651 /* Wait for link */
13652 for (i = 0; i < 100; i++) {
13653 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13658 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13659 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13660 if (tg3_flag(tp, TSO_CAPABLE) &&
13661 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13662 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13663 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13664 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13665 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13668 tg3_phy_lpbk_set(tp, 0, true);
13670 /* All link indications report up, but the hardware
13671 * isn't really ready for about 20 msec. Double it to be safe.
13676 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13677 data[TG3_EXT_LOOPB_TEST] |=
13678 TG3_STD_LOOPBACK_FAILED;
13679 if (tg3_flag(tp, TSO_CAPABLE) &&
13680 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13681 data[TG3_EXT_LOOPB_TEST] |=
13682 TG3_TSO_LOOPBACK_FAILED;
13683 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13684 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13685 data[TG3_EXT_LOOPB_TEST] |=
13686 TG3_JMB_LOOPBACK_FAILED;
13689 /* Re-enable gphy autopowerdown. */
13690 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13691 tg3_phy_toggle_apd(tp, true);
13694 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13695 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13698 tp->phy_flags |= eee_cap;
13703 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13706 struct tg3 *tp = netdev_priv(dev);
13707 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13709 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13710 if (tg3_power_up(tp)) {
13711 etest->flags |= ETH_TEST_FL_FAILED;
13712 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13715 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13718 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13720 if (tg3_test_nvram(tp) != 0) {
13721 etest->flags |= ETH_TEST_FL_FAILED;
13722 data[TG3_NVRAM_TEST] = 1;
13724 if (!doextlpbk && tg3_test_link(tp)) {
13725 etest->flags |= ETH_TEST_FL_FAILED;
13726 data[TG3_LINK_TEST] = 1;
13728 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13729 int err, err2 = 0, irq_sync = 0;
13731 if (netif_running(dev)) {
13733 tg3_netif_stop(tp);
13737 tg3_full_lock(tp, irq_sync);
13738 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13739 err = tg3_nvram_lock(tp);
13740 tg3_halt_cpu(tp, RX_CPU_BASE);
13741 if (!tg3_flag(tp, 5705_PLUS))
13742 tg3_halt_cpu(tp, TX_CPU_BASE);
13744 tg3_nvram_unlock(tp);
13746 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13749 if (tg3_test_registers(tp) != 0) {
13750 etest->flags |= ETH_TEST_FL_FAILED;
13751 data[TG3_REGISTER_TEST] = 1;
13754 if (tg3_test_memory(tp) != 0) {
13755 etest->flags |= ETH_TEST_FL_FAILED;
13756 data[TG3_MEMORY_TEST] = 1;
13760 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13762 if (tg3_test_loopback(tp, data, doextlpbk))
13763 etest->flags |= ETH_TEST_FL_FAILED;
13765 tg3_full_unlock(tp);
13767 if (tg3_test_interrupt(tp) != 0) {
13768 etest->flags |= ETH_TEST_FL_FAILED;
13769 data[TG3_INTERRUPT_TEST] = 1;
13772 tg3_full_lock(tp, 0);
13774 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13775 if (netif_running(dev)) {
13776 tg3_flag_set(tp, INIT_COMPLETE);
13777 err2 = tg3_restart_hw(tp, true);
13779 tg3_netif_start(tp);
13782 tg3_full_unlock(tp);
13784 if (irq_sync && !err2)
13787 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13788 tg3_power_down_prepare(tp);
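/* SIOCSHWTSTAMP handler: translate the requested RX filter into the
 * chip's TG3_RX_PTP_CTL bits and latch the TX timestamp enable flag.
 */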
13792 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13794 struct tg3 *tp = netdev_priv(dev);
13795 struct hwtstamp_config stmpconf;
13797 if (!tg3_flag(tp, PTP_CAPABLE))
13798 return -EOPNOTSUPP;
13800 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13803 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13804 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13807 switch (stmpconf.rx_filter) {
13808 case HWTSTAMP_FILTER_NONE:
13811 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13812 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13813 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13815 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13816 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13817 TG3_RX_PTP_CTL_SYNC_EVNT;
13819 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13820 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13821 TG3_RX_PTP_CTL_DELAY_REQ;
13823 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13824 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13825 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13827 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13828 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13829 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13831 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13832 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13833 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13835 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13836 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13837 TG3_RX_PTP_CTL_SYNC_EVNT;
13839 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13840 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13841 TG3_RX_PTP_CTL_SYNC_EVNT;
13843 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13844 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13845 TG3_RX_PTP_CTL_SYNC_EVNT;
13847 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13848 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13849 TG3_RX_PTP_CTL_DELAY_REQ;
13851 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13852 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13853 TG3_RX_PTP_CTL_DELAY_REQ;
13855 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13856 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13857 TG3_RX_PTP_CTL_DELAY_REQ;
13863 if (netif_running(dev) && tp->rxptpctl)
13864 tw32(TG3_RX_PTP_CTL,
13865 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13867 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13868 tg3_flag_set(tp, TX_TSTAMP_EN);
13870 tg3_flag_clear(tp, TX_TSTAMP_EN);
13872 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13876 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13878 struct tg3 *tp = netdev_priv(dev);
13879 struct hwtstamp_config stmpconf;
13881 if (!tg3_flag(tp, PTP_CAPABLE))
13882 return -EOPNOTSUPP;
13884 stmpconf.flags = 0;
13885 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13886 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13888 switch (tp->rxptpctl) {
13890 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13892 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13893 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13895 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13896 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13898 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13899 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13901 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13902 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13904 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13905 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13907 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13908 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13910 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13911 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13913 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13914 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13916 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13917 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13919 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13920 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13922 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13923 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13925 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13926 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13933 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
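/* Legacy MII ioctls are serviced under tp->lock (or delegated to
 * phylib when it manages the PHY); hardware timestamp requests are
 * routed to the tg3_hwtstamp_{set,get}() helpers above.
 */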
13937 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13939 struct mii_ioctl_data *data = if_mii(ifr);
13940 struct tg3 *tp = netdev_priv(dev);
13943 if (tg3_flag(tp, USE_PHYLIB)) {
13944 struct phy_device *phydev;
13945 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13947 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13948 return phy_mii_ioctl(phydev, ifr, cmd);
13953 data->phy_id = tp->phy_addr;
13956 case SIOCGMIIREG: {
13959 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13960 break; /* We have no PHY */
13962 if (!netif_running(dev))
13965 spin_lock_bh(&tp->lock);
13966 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13967 data->reg_num & 0x1f, &mii_regval);
13968 spin_unlock_bh(&tp->lock);
13970 data->val_out = mii_regval;
13976 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13977 break; /* We have no PHY */
13979 if (!netif_running(dev))
13982 spin_lock_bh(&tp->lock);
13983 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13984 data->reg_num & 0x1f, data->val_in);
13985 spin_unlock_bh(&tp->lock);
13989 case SIOCSHWTSTAMP:
13990 return tg3_hwtstamp_set(dev, ifr);
13992 case SIOCGHWTSTAMP:
13993 return tg3_hwtstamp_get(dev, ifr);
13999 return -EOPNOTSUPP;
14002 static int tg3_get_coalesce(struct net_device *dev,
14003 struct ethtool_coalesce *ec,
14004 struct kernel_ethtool_coalesce *kernel_coal,
14005 struct netlink_ext_ack *extack)
14007 struct tg3 *tp = netdev_priv(dev);
14009 memcpy(ec, &tp->coal, sizeof(*ec));
static int tg3_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
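
/* Example usage (assumption: interface named eth0): the handler above is
 * what ultimately services a command such as
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * Values outside the MAX_ / MIN_ bounds checked above are rejected with
 * -EINVAL before any register is touched.
 */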
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}

static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
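
/* Example usage (assumption: interface named eth0): this handler runs for
 *
 *	ip link set dev eth0 mtu 9000
 *
 * On jumbo-capable chips an MTU above ETH_DATA_LEN enables the jumbo
 * producer ring via tg3_set_mtu() before the hardware restart.
 */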
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_eth_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
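
/* Worked example of the sizing loop above (hypothetical 128 KiB part):
 * reads are issued at offsets 0x10, 0x20, 0x40, ..., doubling each time.
 * Every offset inside the part returns ordinary data, but once cursize
 * reaches 0x20000 the address wraps back to 0 and the validation
 * signature (magic) is read again, so tp->nvram_size ends up 0x20000.
 */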
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
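
/* Worked example of the swab16() above (hypothetical NVRAM contents): if
 * the 16-bit size word at 0xf2 stores the multiplier 1 (i.e. 1 * 1024
 * bytes), the low 16 bits of val arrive byte-reversed relative to the
 * CPU, e.g. 0x0100, and swab16(0x0100) == 0x0001 recovers the multiplier
 * regardless of host endianness.
 */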
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute
	 * (100 polls at 10 usec apart).
	 */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
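
/* Worked example of the merge above (hypothetical OTP words): with
 * thalf_otp = 0x1234abcd and bhalf_otp = 0x5678ef01, the returned gphy
 * config is (0xabcd << 16) | 0x5678 = 0xabcd5678 -- the low half of the
 * first word glued to the high half of the second, reassembling the
 * 32-bit value that straddles the boundary.
 */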
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
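
		/* Worked example (hypothetical register values): with
		 * MII_PHYSID1 = 0x0020 and MII_PHYSID2 = 0x60a0, the
		 * packing above yields
		 *	(0x0020 << 10)            = 0x00008000
		 *	(0x60a0 & 0xfc00) << 16   = 0x60000000
		 *	(0x60a0 & 0x03ff)         = 0x000000a0
		 * so hw_phy_id = 0x600080a0 before masking with
		 * TG3_PHY_ID_MASK.
		 */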
		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int len, vpdlen;
	int i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (i < 0)
		goto partno;

	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
		goto partno;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
	if (i < 0)
		goto partno;

	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);

partno:
	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
	if (i < 0)
		goto out_not_found;

	if (len > TG3_BPN_SIZE)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0x20000000)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
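
/* Worked example of the build-letter suffix above: a selfboot image with
 * major = 1, minor = 2, build = 3 formats as "sb v1.02" and then appends
 * 'a' + 3 - 1 = 'c', giving "sb v1.02c".  Builds above 26 were rejected
 * earlier because only 'a'..'z' are representable.
 */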
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
16128 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16131 u32 pci_state_reg, grc_misc_cfg;
16136 /* Force memory write invalidate off. If we leave it on,
16137 * then on 5700_BX chips we have to enable a workaround.
16138 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16139 * to match the cacheline size. The Broadcom driver has this
16140 * workaround but turns MWI off all the time and so never uses
16141 * it. This seems to suggest that the workaround is insufficient.
16143 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16144 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16145 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
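/* Note: clearing PCI_COMMAND_INVALIDATE by hand, as above, should be
 * equivalent to the generic pci_clear_mwi(tp->pdev) helper; the driver
 * just open-codes it next to its other PCI_COMMAND fixups.
 */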
16147 /* Important! -- Make sure register accesses are byteswapped
16148 * correctly. Also, for those chips that require it, make
16149 * sure that indirect register accesses are enabled before
16150 * the first operation.
16152 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16154 tp->misc_host_ctrl |= (misc_ctrl_reg &
16155 MISC_HOST_CTRL_CHIPREV);
16156 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16157 tp->misc_host_ctrl);
16159 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16161 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16162 * we need to disable memory and use config. cycles
16163 * only to access all registers. The 5702/03 chips
16164 * can mistakenly decode the special cycles from the
16165 * ICH chipsets as memory write cycles, causing corruption
16166 * of register and memory space. Only certain ICH bridges
16167 * will drive special cycles with non-zero data during the
16168 * address phase which can fall within the 5703's address
16169 * range. This is not an ICH bug as the PCI spec allows
16170 * non-zero address during special cycles. However, only
16171 * these ICH bridges are known to drive non-zero addresses
16172 * during special cycles.
16174 * Since special cycles do not cross PCI bridges, we only
16175 * enable this workaround if the 5703 is on the secondary
16176 * bus of these ICH bridges.
16178 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16179 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16180 static struct tg3_dev_id {
16184 } ich_chipsets[] = {
16185 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16187 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16189 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16191 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16195 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16196 struct pci_dev *bridge = NULL;
16198 while (pci_id->vendor != 0) {
16199 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16205 if (pci_id->rev != PCI_ANY_ID) {
16206 if (bridge->revision > pci_id->rev)
16209 if (bridge->subordinate &&
16210 (bridge->subordinate->number ==
16211 tp->pdev->bus->number)) {
16212 tg3_flag_set(tp, ICH_WORKAROUND);
16213 pci_dev_put(bridge);
16219 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16220 static struct tg3_dev_id {
16223 } bridge_chipsets[] = {
16224 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16225 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16228 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16229 struct pci_dev *bridge = NULL;
16231 while (pci_id->vendor != 0) {
16232 bridge = pci_get_device(pci_id->vendor,
16239 if (bridge->subordinate &&
16240 (bridge->subordinate->number <=
16241 tp->pdev->bus->number) &&
16242 (bridge->subordinate->busn_res.end >=
16243 tp->pdev->bus->number)) {
16244 tg3_flag_set(tp, 5701_DMA_BUG);
16245 pci_dev_put(bridge);
16251 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16252 * DMA addresses > 40-bit. This bridge may have additional
16253 * 57xx devices behind it in some 4-port NIC designs, for example.
16254 * Any tg3 device found behind the bridge will also need the 40-bit
16257 if (tg3_flag(tp, 5780_CLASS)) {
16258 tg3_flag_set(tp, 40BIT_DMA_BUG);
16259 tp->msi_cap = tp->pdev->msi_cap;
16261 struct pci_dev *bridge = NULL;
16264 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16265 PCI_DEVICE_ID_SERVERWORKS_EPB,
16267 if (bridge && bridge->subordinate &&
16268 (bridge->subordinate->number <=
16269 tp->pdev->bus->number) &&
16270 (bridge->subordinate->busn_res.end >=
16271 tp->pdev->bus->number)) {
16272 tg3_flag_set(tp, 40BIT_DMA_BUG);
16273 pci_dev_put(bridge);
16279 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16280 tg3_asic_rev(tp) == ASIC_REV_5714)
16281 tp->pdev_peer = tg3_find_peer(tp);
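/* The bridge scans above all follow one pattern: walk every instance of
 * a given bridge with pci_get_device() and test whether our device sits
 * on (or below) that bridge's secondary bus. A condensed sketch with a
 * hypothetical helper name (the ICH scan wants an exact secondary-bus
 * match rather than a range check):
 *
 *	static bool tg3_behind_bridge(struct tg3 *tp, u16 vendor, u16 dev)
 *	{
 *		struct pci_dev *b = NULL;
 *
 *		while ((b = pci_get_device(vendor, dev, b)) != NULL) {
 *			if (b->subordinate &&
 *			    b->subordinate->number <= tp->pdev->bus->number &&
 *			    b->subordinate->busn_res.end >=
 *						tp->pdev->bus->number) {
 *				pci_dev_put(b);	(drop the ref we hold)
 *				return true;
 *			}
 *		}
 *		return false;
 *	}
 */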
16283 /* Determine TSO capabilities */
16284 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16285 ; /* Do nothing. HW bug. */
16286 else if (tg3_flag(tp, 57765_PLUS))
16287 tg3_flag_set(tp, HW_TSO_3);
16288 else if (tg3_flag(tp, 5755_PLUS) ||
16289 tg3_asic_rev(tp) == ASIC_REV_5906)
16290 tg3_flag_set(tp, HW_TSO_2);
16291 else if (tg3_flag(tp, 5750_PLUS)) {
16292 tg3_flag_set(tp, HW_TSO_1);
16293 tg3_flag_set(tp, TSO_BUG);
16294 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16295 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16296 tg3_flag_clear(tp, TSO_BUG);
16297 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16298 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16299 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16300 tg3_flag_set(tp, FW_TSO);
16301 tg3_flag_set(tp, TSO_BUG);
16302 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16303 tp->fw_needed = FIRMWARE_TG3TSO5;
16305 tp->fw_needed = FIRMWARE_TG3TSO;
16308 /* Selectively allow TSO based on operating conditions */
16309 if (tg3_flag(tp, HW_TSO_1) ||
16310 tg3_flag(tp, HW_TSO_2) ||
16311 tg3_flag(tp, HW_TSO_3) ||
16312 tg3_flag(tp, FW_TSO)) {
16313 /* For firmware TSO, assume ASF is disabled.
16314 * We'll disable TSO later if we discover ASF
16315 * is enabled in tg3_get_eeprom_hw_cfg().
16317 tg3_flag_set(tp, TSO_CAPABLE);
16319 tg3_flag_clear(tp, TSO_CAPABLE);
16320 tg3_flag_clear(tp, TSO_BUG);
16321 tp->fw_needed = NULL;
16324 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16325 tp->fw_needed = FIRMWARE_TG3;
16327 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16328 tp->fw_needed = FIRMWARE_TG357766;
16332 if (tg3_flag(tp, 5750_PLUS)) {
16333 tg3_flag_set(tp, SUPPORT_MSI);
16334 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16335 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16336 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16337 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16338 tp->pdev_peer == tp->pdev))
16339 tg3_flag_clear(tp, SUPPORT_MSI);
16341 if (tg3_flag(tp, 5755_PLUS) ||
16342 tg3_asic_rev(tp) == ASIC_REV_5906) {
16343 tg3_flag_set(tp, 1SHOT_MSI);
16346 if (tg3_flag(tp, 57765_PLUS)) {
16347 tg3_flag_set(tp, SUPPORT_MSIX);
16348 tp->irq_max = TG3_IRQ_MAX_VECS;
16354 if (tp->irq_max > 1) {
16355 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16356 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16358 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16359 tg3_asic_rev(tp) == ASIC_REV_5720)
16360 tp->txq_max = tp->irq_max - 1;
16363 if (tg3_flag(tp, 5755_PLUS) ||
16364 tg3_asic_rev(tp) == ASIC_REV_5906)
16365 tg3_flag_set(tp, SHORT_DMA_BUG);
16367 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16368 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16370 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16371 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16372 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16373 tg3_asic_rev(tp) == ASIC_REV_5762)
16374 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16376 if (tg3_flag(tp, 57765_PLUS) &&
16377 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16378 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16380 if (!tg3_flag(tp, 5705_PLUS) ||
16381 tg3_flag(tp, 5780_CLASS) ||
16382 tg3_flag(tp, USE_JUMBO_BDFLAG))
16383 tg3_flag_set(tp, JUMBO_CAPABLE);
16385 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16388 if (pci_is_pcie(tp->pdev)) {
16391 tg3_flag_set(tp, PCI_EXPRESS);
16393 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16394 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16395 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16396 tg3_flag_clear(tp, HW_TSO_2);
16397 tg3_flag_clear(tp, TSO_CAPABLE);
16399 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16400 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16401 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16402 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16403 tg3_flag_set(tp, CLKREQ_BUG);
16404 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16405 tg3_flag_set(tp, L1PLLPD_EN);
16407 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16408 /* BCM5785 devices are effectively PCIe devices, and should
16409 * follow PCIe codepaths, but do not have a PCIe capabilities section.
16412 tg3_flag_set(tp, PCI_EXPRESS);
16413 } else if (!tg3_flag(tp, 5705_PLUS) ||
16414 tg3_flag(tp, 5780_CLASS)) {
16415 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16416 if (!tp->pcix_cap) {
16417 dev_err(&tp->pdev->dev,
16418 "Cannot find PCI-X capability, aborting\n");
16422 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16423 tg3_flag_set(tp, PCIX_MODE);
16426 /* If we have an AMD 762 or VIA K8T800 chipset, write
16427 * reordering to the mailbox registers done by the host
16428 * controller can cause major troubles. We read back from
16429 * every mailbox register write to force the writes to be
16430 * posted to the chip in order.
16432 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16433 !tg3_flag(tp, PCI_EXPRESS))
16434 tg3_flag_set(tp, MBOX_WRITE_REORDER);
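/* tg3_write_flush_reg32(), defined earlier in this file, is the
 * read-back flush referred to above; in essence:
 *
 *	writel(val, tp->regs + off);
 *	readl(tp->regs + off);
 *
 * The read cannot complete until the write has been posted to the chip,
 * which defeats the host bridge's write reordering.
 */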
16436 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16437 &tp->pci_cacheline_sz);
16438 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16439 &tp->pci_lat_timer);
16440 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16441 tp->pci_lat_timer < 64) {
16442 tp->pci_lat_timer = 64;
16443 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16444 tp->pci_lat_timer);
16447 /* Important! -- It is critical that the PCI-X hw workaround
16448 * situation is decided before the first MMIO register access.
16450 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16451 /* 5700 BX chips need to have their TX producer index
16452 * mailboxes written twice to workaround a bug.
16454 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16456 /* If we are in PCI-X mode, enable register write workaround.
16458 * The workaround is to use indirect register accesses
16459 * for all chip writes not to mailbox registers.
16461 if (tg3_flag(tp, PCIX_MODE)) {
16464 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16466 /* The chip can have its power management PCI config
16467 * space registers clobbered due to this bug.
16468 * So explicitly force the chip into D0 here.
16470 pci_read_config_dword(tp->pdev,
16471 tp->pdev->pm_cap + PCI_PM_CTRL,
16473 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16474 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16475 pci_write_config_dword(tp->pdev,
16476 tp->pdev->pm_cap + PCI_PM_CTRL,
16479 /* Also, force SERR#/PERR# in PCI command. */
16480 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16481 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16482 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16486 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16487 tg3_flag_set(tp, PCI_HIGH_SPEED);
16488 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16489 tg3_flag_set(tp, PCI_32BIT);
16491 /* Chip-specific fixup from Broadcom driver */
16492 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16493 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16494 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16495 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16498 /* Default fast path register access methods */
16499 tp->read32 = tg3_read32;
16500 tp->write32 = tg3_write32;
16501 tp->read32_mbox = tg3_read32;
16502 tp->write32_mbox = tg3_write32;
16503 tp->write32_tx_mbox = tg3_write32;
16504 tp->write32_rx_mbox = tg3_write32;
16506 /* Various workaround register access methods */
16507 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16508 tp->write32 = tg3_write_indirect_reg32;
16509 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16510 (tg3_flag(tp, PCI_EXPRESS) &&
16511 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16513 * Back-to-back register writes can cause problems on these
16514 * chips, the workaround is to read back all reg writes
16515 * except those to mailbox regs.
16517 * See tg3_write_indirect_reg32().
16519 tp->write32 = tg3_write_flush_reg32;
16522 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16523 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16524 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16525 tp->write32_rx_mbox = tg3_write_flush_reg32;
16528 if (tg3_flag(tp, ICH_WORKAROUND)) {
16529 tp->read32 = tg3_read_indirect_reg32;
16530 tp->write32 = tg3_write_indirect_reg32;
16531 tp->read32_mbox = tg3_read_indirect_mbox;
16532 tp->write32_mbox = tg3_write_indirect_mbox;
16533 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16534 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16539 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16540 pci_cmd &= ~PCI_COMMAND_MEMORY;
16541 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
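/* The indirect accessors route register traffic through PCI config
 * space instead of MMIO. Sketch of the scheme, following
 * tg3_write_indirect_reg32() defined earlier in this file:
 *
 *	spin_lock_irqsave(&tp->indirect_lock, flags);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
 *	spin_unlock_irqrestore(&tp->indirect_lock, flags);
 *
 * Config cycles are never decoded as memory cycles, which is what makes
 * this safe on the affected ICH systems.
 */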
16543 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16544 tp->read32_mbox = tg3_read32_mbox_5906;
16545 tp->write32_mbox = tg3_write32_mbox_5906;
16546 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16547 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16550 if (tp->write32 == tg3_write_indirect_reg32 ||
16551 (tg3_flag(tp, PCIX_MODE) &&
16552 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16553 tg3_asic_rev(tp) == ASIC_REV_5701)))
16554 tg3_flag_set(tp, SRAM_USE_CONFIG);
16556 /* The memory arbiter has to be enabled in order for SRAM accesses
16557 * to succeed. Normally on powerup the tg3 chip firmware will make
16558 * sure it is enabled, but other entities such as system netboot
16559 * code might disable it.
16561 val = tr32(MEMARB_MODE);
16562 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16564 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16565 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16566 tg3_flag(tp, 5780_CLASS)) {
16567 if (tg3_flag(tp, PCIX_MODE)) {
16568 pci_read_config_dword(tp->pdev,
16569 tp->pcix_cap + PCI_X_STATUS,
16571 tp->pci_fn = val & 0x7;
16573 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16574 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16575 tg3_asic_rev(tp) == ASIC_REV_5720) {
16576 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16577 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16578 val = tr32(TG3_CPMU_STATUS);
16580 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16581 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16583 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16584 TG3_CPMU_STATUS_FSHFT_5719;
16587 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16588 tp->write32_tx_mbox = tg3_write_flush_reg32;
16589 tp->write32_rx_mbox = tg3_write_flush_reg32;
16592 /* Get eeprom hw config before calling tg3_set_power_state().
16593 * In particular, the TG3_FLAG_IS_NIC flag must be
16594 * determined before calling tg3_set_power_state() so that
16595 * we know whether or not to switch out of Vaux power.
16596 * When the flag is set, it means that GPIO1 is used for eeprom
16597 * write protect and also implies that it is a LOM where GPIOs
16598 * are not used to switch power.
16600 tg3_get_eeprom_hw_cfg(tp);
16602 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16603 tg3_flag_clear(tp, TSO_CAPABLE);
16604 tg3_flag_clear(tp, TSO_BUG);
16605 tp->fw_needed = NULL;
16608 if (tg3_flag(tp, ENABLE_APE)) {
16609 /* Allow reads and writes to the
16610 * APE register and memory space.
16612 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16613 PCISTATE_ALLOW_APE_SHMEM_WR |
16614 PCISTATE_ALLOW_APE_PSPACE_WR;
16615 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16618 tg3_ape_lock_init(tp);
16619 tp->ape_hb_interval =
16620 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16623 /* Set up tp->grc_local_ctrl before calling
16624 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16625 * will bring 5700's external PHY out of reset.
16626 * It is also used as eeprom write protect on LOMs.
16628 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16629 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16630 tg3_flag(tp, EEPROM_WRITE_PROT))
16631 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16632 GRC_LCLCTRL_GPIO_OUTPUT1);
16633 /* Unused GPIO3 must be driven as output on 5752 because there
16634 * are no pull-up resistors on unused GPIO pins.
16636 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16637 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16639 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16640 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16641 tg3_flag(tp, 57765_CLASS))
16642 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16644 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16646 /* Turn off the debug UART. */
16647 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16648 if (tg3_flag(tp, IS_NIC))
16649 /* Keep VMain power. */
16650 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16651 GRC_LCLCTRL_GPIO_OUTPUT0;
16654 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16655 tp->grc_local_ctrl |=
16656 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16658 /* Switch out of Vaux if it is a NIC */
16659 tg3_pwrsrc_switch_to_vmain(tp);
16661 /* Derive initial jumbo mode from MTU assigned in
16662 * ether_setup() via the alloc_etherdev() call
16664 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16665 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16667 /* Determine WakeOnLan speed to use. */
16668 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16669 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16670 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16672 tg3_flag_clear(tp, WOL_SPEED_100MB);
16674 tg3_flag_set(tp, WOL_SPEED_100MB);
16677 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16678 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16680 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16681 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16682 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16683 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16684 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16685 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16686 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16687 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16689 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16690 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16691 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16692 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16693 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16695 if (tg3_flag(tp, 5705_PLUS) &&
16696 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16697 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16698 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16699 !tg3_flag(tp, 57765_PLUS)) {
16700 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16701 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16702 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16703 tg3_asic_rev(tp) == ASIC_REV_5761) {
16704 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16705 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16706 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16707 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16708 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16710 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16713 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16714 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16715 tp->phy_otp = tg3_read_otp_phycfg(tp);
16716 if (tp->phy_otp == 0)
16717 tp->phy_otp = TG3_OTP_DEFAULT;
16720 if (tg3_flag(tp, CPMU_PRESENT))
16721 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16723 tp->mi_mode = MAC_MI_MODE_BASE;
16725 tp->coalesce_mode = 0;
16726 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16727 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16728 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16730 /* Set these bits to enable the statistics workaround. */
16731 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16732 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16733 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16734 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16735 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16736 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16739 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16740 tg3_asic_rev(tp) == ASIC_REV_57780)
16741 tg3_flag_set(tp, USE_PHYLIB);
16743 err = tg3_mdio_init(tp);
16747 /* Initialize data/descriptor byte/word swapping. */
16748 val = tr32(GRC_MODE);
16749 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16750 tg3_asic_rev(tp) == ASIC_REV_5762)
16751 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16752 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16753 GRC_MODE_B2HRX_ENABLE |
16754 GRC_MODE_HTX2B_ENABLE |
16755 GRC_MODE_HOST_STACKUP);
16757 val &= GRC_MODE_HOST_STACKUP;
16759 tw32(GRC_MODE, val | tp->grc_mode);
16761 tg3_switch_clocks(tp);
16763 /* Clear this out for sanity. */
16764 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16766 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16767 tw32(TG3PCI_REG_BASE_ADDR, 0);
16769 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16771 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16772 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16773 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16774 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16775 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16776 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16777 void __iomem *sram_base;
16779 /* Write some dummy words into the SRAM status block
16780 * area, see if it reads back correctly. If the return
16781 * value is bad, force enable the PCIX workaround.
16783 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16785 writel(0x00000000, sram_base);
16786 writel(0x00000000, sram_base + 4);
16787 writel(0xffffffff, sram_base + 4);
16788 if (readl(sram_base) != 0x00000000)
16789 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
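/* The probe above writes a 0/0/0xffffffff pattern into the first two
 * status block words and checks that word 0 still reads back as zero;
 * any corruption means the back-to-back writes were mishandled, so the
 * PCI-X target workaround is forced on.
 */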
16794 tg3_nvram_init(tp);
16796 /* If the device has an NVRAM, no need to load patch firmware */
16797 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16798 !tg3_flag(tp, NO_NVRAM))
16799 tp->fw_needed = NULL;
16801 grc_misc_cfg = tr32(GRC_MISC_CFG);
16802 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16804 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16805 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16806 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16807 tg3_flag_set(tp, IS_5788);
16809 if (!tg3_flag(tp, IS_5788) &&
16810 tg3_asic_rev(tp) != ASIC_REV_5700)
16811 tg3_flag_set(tp, TAGGED_STATUS);
16812 if (tg3_flag(tp, TAGGED_STATUS)) {
16813 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16814 HOSTCC_MODE_CLRTICK_TXBD);
16816 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16817 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16818 tp->misc_host_ctrl);
16821 /* Preserve the APE MAC_MODE bits */
16822 if (tg3_flag(tp, ENABLE_APE))
16823 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16827 if (tg3_10_100_only_device(tp, ent))
16828 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16830 err = tg3_phy_probe(tp);
16832 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16833 /* ... but do not return immediately ... */
16838 tg3_read_fw_ver(tp);
16840 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16841 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16843 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16844 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16846 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16849 /* 5700 {AX,BX} chips have a broken status block link
16850 * change bit implementation, so we must use the
16851 * status register in those cases.
16853 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16854 tg3_flag_set(tp, USE_LINKCHG_REG);
16856 tg3_flag_clear(tp, USE_LINKCHG_REG);
16858 /* The led_ctrl is set during tg3_phy_probe, here we might
16859 * have to force the link status polling mechanism based
16860 * upon subsystem IDs.
16862 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16863 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16864 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16865 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16866 tg3_flag_set(tp, USE_LINKCHG_REG);
16869 /* For all SERDES we poll the MAC status register. */
16870 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16871 tg3_flag_set(tp, POLL_SERDES);
16873 tg3_flag_clear(tp, POLL_SERDES);
16875 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16876 tg3_flag_set(tp, POLL_CPMU_LINK);
16878 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16879 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16880 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16881 tg3_flag(tp, PCIX_MODE)) {
16882 tp->rx_offset = NET_SKB_PAD;
16883 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16884 tp->rx_copy_thresh = ~(u16)0;
16888 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16889 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16890 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16892 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
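/* The ring sizes above are powers of two, so "mask = size - 1" lets the
 * hot path wrap ring indices with a cheap AND instead of a modulo, e.g.:
 *
 *	next = (idx + 1) & tp->rx_std_ring_mask;
 */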
16894 /* Increment the rx prod index on the rx std ring by at most
16895 * 8 for these chips to work around hw errata.
16897 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16898 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16899 tg3_asic_rev(tp) == ASIC_REV_5755)
16900 tp->rx_std_max_post = 8;
16902 if (tg3_flag(tp, ASPM_WORKAROUND))
16903 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16904 PCIE_PWR_MGMT_L1_THRESH_MSK;
16909 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16911 u32 hi, lo, mac_offset;
16915 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16918 if (tg3_flag(tp, IS_SSB_CORE)) {
16919 err = ssb_gige_get_macaddr(tp->pdev, addr);
16920 if (!err && is_valid_ether_addr(addr))
16925 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16926 tg3_flag(tp, 5780_CLASS)) {
16927 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16929 if (tg3_nvram_lock(tp))
16930 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16932 tg3_nvram_unlock(tp);
16933 } else if (tg3_flag(tp, 5717_PLUS)) {
16934 if (tp->pci_fn & 1)
16936 if (tp->pci_fn > 1)
16937 mac_offset += 0x18c;
16938 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16941 /* First try to get it from MAC address mailbox. */
16942 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16943 if ((hi >> 16) == 0x484b) {
16944 addr[0] = (hi >> 8) & 0xff;
16945 addr[1] = (hi >> 0) & 0xff;
16947 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16948 addr[2] = (lo >> 24) & 0xff;
16949 addr[3] = (lo >> 16) & 0xff;
16950 addr[4] = (lo >> 8) & 0xff;
16951 addr[5] = (lo >> 0) & 0xff;
16953 /* Some old bootcode may report a 0 MAC address in SRAM */
16954 addr_ok = is_valid_ether_addr(addr);
16957 /* Next, try NVRAM. */
16958 if (!tg3_flag(tp, NO_NVRAM) &&
16959 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16960 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16961 memcpy(&addr[0], ((char *)&hi) + 2, 2);
16962 memcpy(&addr[2], (char *)&lo, sizeof(lo));
16964 /* Finally just fetch it out of the MAC control regs. */
16966 hi = tr32(MAC_ADDR_0_HIGH);
16967 lo = tr32(MAC_ADDR_0_LOW);
16969 addr[5] = lo & 0xff;
16970 addr[4] = (lo >> 8) & 0xff;
16971 addr[3] = (lo >> 16) & 0xff;
16972 addr[2] = (lo >> 24) & 0xff;
16973 addr[1] = hi & 0xff;
16974 addr[0] = (hi >> 8) & 0xff;
16978 if (!is_valid_ether_addr(addr))
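/* For reference, the byte order used by the MAC_ADDR_0_HIGH/LOW
 * fallback above: the high word carries addr[0..1] and the low word
 * addr[2..5], most significant byte first. A sketch with a hypothetical
 * helper name:
 *
 *	static void tg3_addr_from_hilo(u8 *addr, u32 hi, u32 lo)
 *	{
 *		addr[0] = (hi >> 8) & 0xff;
 *		addr[1] = hi & 0xff;
 *		addr[2] = (lo >> 24) & 0xff;
 *		addr[3] = (lo >> 16) & 0xff;
 *		addr[4] = (lo >> 8) & 0xff;
 *		addr[5] = lo & 0xff;
 *	}
 */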
16983 #define BOUNDARY_SINGLE_CACHELINE 1
16984 #define BOUNDARY_MULTI_CACHELINE 2
16986 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16988 int cacheline_size;
16992 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16994 cacheline_size = 1024;
16996 cacheline_size = (int) byte * 4;
16998 /* On 5703 and later chips, the boundary bits have no
17001 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17002 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17003 !tg3_flag(tp, PCI_EXPRESS))
17006 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17007 goal = BOUNDARY_MULTI_CACHELINE;
17009 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17010 goal = BOUNDARY_SINGLE_CACHELINE;
17016 if (tg3_flag(tp, 57765_PLUS)) {
17017 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17024 /* PCI controllers on most RISC systems tend to disconnect
17025 * when a device tries to burst across a cache-line boundary.
17026 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17028 * Unfortunately, for PCI-E there are only limited
17029 * write-side controls for this, and thus for reads
17030 * we will still get the disconnects. We'll also waste
17031 * these PCI cycles for both read and write for chips
17032 * other than 5700 and 5701 which do not implement the boundary bits.
17035 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17036 switch (cacheline_size) {
17041 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17042 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17043 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17045 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17046 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17051 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17052 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17056 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17057 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17060 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17061 switch (cacheline_size) {
17065 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17066 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17067 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17073 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17074 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17078 switch (cacheline_size) {
17080 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17081 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17082 DMA_RWCTRL_WRITE_BNDRY_16);
17087 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17088 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17089 DMA_RWCTRL_WRITE_BNDRY_32);
17094 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17095 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17096 DMA_RWCTRL_WRITE_BNDRY_64);
17101 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17102 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17103 DMA_RWCTRL_WRITE_BNDRY_128);
17108 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17109 DMA_RWCTRL_WRITE_BNDRY_256);
17112 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17113 DMA_RWCTRL_WRITE_BNDRY_512);
17117 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17118 DMA_RWCTRL_WRITE_BNDRY_1024);
17127 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17128 int size, bool to_device)
17130 struct tg3_internal_buffer_desc test_desc;
17131 u32 sram_dma_descs;
17134 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17136 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17137 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17138 tw32(RDMAC_STATUS, 0);
17139 tw32(WDMAC_STATUS, 0);
17141 tw32(BUFMGR_MODE, 0);
17142 tw32(FTQ_RESET, 0);
17144 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17145 test_desc.addr_lo = buf_dma & 0xffffffff;
17146 test_desc.nic_mbuf = 0x00002100;
17147 test_desc.len = size;
17150 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17151 * the *second* time the tg3 driver was getting loaded after an initial scan.
17154 * Broadcom tells me:
17155 * ...the DMA engine is connected to the GRC block and a DMA
17156 * reset may affect the GRC block in some unpredictable way...
17157 * The behavior of resets to individual blocks has not been tested.
17159 * Broadcom noted the GRC reset will also reset all sub-components.
17162 test_desc.cqid_sqid = (13 << 8) | 2;
17164 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17167 test_desc.cqid_sqid = (16 << 8) | 7;
17169 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17172 test_desc.flags = 0x00000005;
17174 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17177 val = *(((u32 *)&test_desc) + i);
17178 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17179 sram_dma_descs + (i * sizeof(u32)));
17180 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17182 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17185 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17187 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17190 for (i = 0; i < 40; i++) {
17194 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17196 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17197 if ((val & 0xffff) == sram_dma_descs) {
17208 #define TEST_BUFFER_SIZE 0x2000
17210 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17211 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17215 static int tg3_test_dma(struct tg3 *tp)
17217 dma_addr_t buf_dma;
17218 u32 *buf, saved_dma_rwctrl;
17221 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17222 &buf_dma, GFP_KERNEL);
17228 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17229 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17231 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17233 if (tg3_flag(tp, 57765_PLUS))
17236 if (tg3_flag(tp, PCI_EXPRESS)) {
17237 /* DMA read watermark not used on PCIE */
17238 tp->dma_rwctrl |= 0x00180000;
17239 } else if (!tg3_flag(tp, PCIX_MODE)) {
17240 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17241 tg3_asic_rev(tp) == ASIC_REV_5750)
17242 tp->dma_rwctrl |= 0x003f0000;
17244 tp->dma_rwctrl |= 0x003f000f;
17246 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17247 tg3_asic_rev(tp) == ASIC_REV_5704) {
17248 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17249 u32 read_water = 0x7;
17251 /* If the 5704 is behind the EPB bridge, we can
17252 * do the less restrictive ONE_DMA workaround for
17253 * better performance.
17255 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17256 tg3_asic_rev(tp) == ASIC_REV_5704)
17257 tp->dma_rwctrl |= 0x8000;
17258 else if (ccval == 0x6 || ccval == 0x7)
17259 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17261 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17263 /* Set bit 23 to enable PCIX hw bug fix */
17265 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17266 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17268 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17269 /* 5780 always in PCIX mode */
17270 tp->dma_rwctrl |= 0x00144000;
17271 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17272 /* 5714 always in PCIX mode */
17273 tp->dma_rwctrl |= 0x00148000;
17275 tp->dma_rwctrl |= 0x001b000f;
17278 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17279 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17281 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17282 tg3_asic_rev(tp) == ASIC_REV_5704)
17283 tp->dma_rwctrl &= 0xfffffff0;
17285 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17286 tg3_asic_rev(tp) == ASIC_REV_5701) {
17287 /* Remove this if it causes problems for some boards. */
17288 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17290 /* On 5700/5701 chips, we need to set this bit.
17291 * Otherwise the chip will issue cacheline transactions
17292 * to streamable DMA memory with not all the byte
17293 * enables turned on. This is an error on several
17294 * RISC PCI controllers, in particular sparc64.
17296 * On 5703/5704 chips, this bit has been reassigned
17297 * a different meaning. In particular, it is used
17298 * on those chips to enable a PCI-X workaround.
17300 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17303 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17306 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17307 tg3_asic_rev(tp) != ASIC_REV_5701)
17310 /* It is best to perform DMA test with maximum write burst size
17311 * to expose the 5700/5701 write DMA bug.
17313 saved_dma_rwctrl = tp->dma_rwctrl;
17314 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17315 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17320 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17323 /* Send the buffer to the chip. */
17324 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17326 dev_err(&tp->pdev->dev,
17327 "%s: Buffer write failed. err = %d\n",
17332 /* Now read it back. */
17333 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17335 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17336 "err = %d\n", __func__, ret);
17341 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17345 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17346 DMA_RWCTRL_WRITE_BNDRY_16) {
17347 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17348 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17349 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17352 dev_err(&tp->pdev->dev,
17353 "%s: Buffer corrupted on read back! "
17354 "(%d != %d)\n", __func__, p[i], i);
17360 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17366 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17367 DMA_RWCTRL_WRITE_BNDRY_16) {
17368 /* DMA test passed without adjusting DMA boundary,
17369 * now look for chipsets that are known to expose the
17370 * DMA bug without failing the test.
17372 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17373 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17374 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17376 /* Safe to use the calculated DMA boundary. */
17377 tp->dma_rwctrl = saved_dma_rwctrl;
17380 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17384 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17389 static void tg3_init_bufmgr_config(struct tg3 *tp)
17391 if (tg3_flag(tp, 57765_PLUS)) {
17392 tp->bufmgr_config.mbuf_read_dma_low_water =
17393 DEFAULT_MB_RDMA_LOW_WATER_5705;
17394 tp->bufmgr_config.mbuf_mac_rx_low_water =
17395 DEFAULT_MB_MACRX_LOW_WATER_57765;
17396 tp->bufmgr_config.mbuf_high_water =
17397 DEFAULT_MB_HIGH_WATER_57765;
17399 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17400 DEFAULT_MB_RDMA_LOW_WATER_5705;
17401 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17402 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17403 tp->bufmgr_config.mbuf_high_water_jumbo =
17404 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17405 } else if (tg3_flag(tp, 5705_PLUS)) {
17406 tp->bufmgr_config.mbuf_read_dma_low_water =
17407 DEFAULT_MB_RDMA_LOW_WATER_5705;
17408 tp->bufmgr_config.mbuf_mac_rx_low_water =
17409 DEFAULT_MB_MACRX_LOW_WATER_5705;
17410 tp->bufmgr_config.mbuf_high_water =
17411 DEFAULT_MB_HIGH_WATER_5705;
17412 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17413 tp->bufmgr_config.mbuf_mac_rx_low_water =
17414 DEFAULT_MB_MACRX_LOW_WATER_5906;
17415 tp->bufmgr_config.mbuf_high_water =
17416 DEFAULT_MB_HIGH_WATER_5906;
17419 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17420 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17421 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17422 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17423 tp->bufmgr_config.mbuf_high_water_jumbo =
17424 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17426 tp->bufmgr_config.mbuf_read_dma_low_water =
17427 DEFAULT_MB_RDMA_LOW_WATER;
17428 tp->bufmgr_config.mbuf_mac_rx_low_water =
17429 DEFAULT_MB_MACRX_LOW_WATER;
17430 tp->bufmgr_config.mbuf_high_water =
17431 DEFAULT_MB_HIGH_WATER;
17433 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17434 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17435 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17436 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17437 tp->bufmgr_config.mbuf_high_water_jumbo =
17438 DEFAULT_MB_HIGH_WATER_JUMBO;
17441 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17442 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17445 static char *tg3_phy_string(struct tg3 *tp)
17447 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17448 case TG3_PHY_ID_BCM5400: return "5400";
17449 case TG3_PHY_ID_BCM5401: return "5401";
17450 case TG3_PHY_ID_BCM5411: return "5411";
17451 case TG3_PHY_ID_BCM5701: return "5701";
17452 case TG3_PHY_ID_BCM5703: return "5703";
17453 case TG3_PHY_ID_BCM5704: return "5704";
17454 case TG3_PHY_ID_BCM5705: return "5705";
17455 case TG3_PHY_ID_BCM5750: return "5750";
17456 case TG3_PHY_ID_BCM5752: return "5752";
17457 case TG3_PHY_ID_BCM5714: return "5714";
17458 case TG3_PHY_ID_BCM5780: return "5780";
17459 case TG3_PHY_ID_BCM5755: return "5755";
17460 case TG3_PHY_ID_BCM5787: return "5787";
17461 case TG3_PHY_ID_BCM5784: return "5784";
17462 case TG3_PHY_ID_BCM5756: return "5722/5756";
17463 case TG3_PHY_ID_BCM5906: return "5906";
17464 case TG3_PHY_ID_BCM5761: return "5761";
17465 case TG3_PHY_ID_BCM5718C: return "5718C";
17466 case TG3_PHY_ID_BCM5718S: return "5718S";
17467 case TG3_PHY_ID_BCM57765: return "57765";
17468 case TG3_PHY_ID_BCM5719C: return "5719C";
17469 case TG3_PHY_ID_BCM5720C: return "5720C";
17470 case TG3_PHY_ID_BCM5762: return "5762C";
17471 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17472 case 0: return "serdes";
17473 default: return "unknown";
17477 static char *tg3_bus_string(struct tg3 *tp, char *str)
17479 if (tg3_flag(tp, PCI_EXPRESS)) {
17480 strcpy(str, "PCI Express");
17482 } else if (tg3_flag(tp, PCIX_MODE)) {
17483 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17485 strcpy(str, "PCIX:");
17487 if ((clock_ctrl == 7) ||
17488 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17489 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17490 strcat(str, "133MHz");
17491 else if (clock_ctrl == 0)
17492 strcat(str, "33MHz");
17493 else if (clock_ctrl == 2)
17494 strcat(str, "50MHz");
17495 else if (clock_ctrl == 4)
17496 strcat(str, "66MHz");
17497 else if (clock_ctrl == 6)
17498 strcat(str, "100MHz");
17500 strcpy(str, "PCI:");
17501 if (tg3_flag(tp, PCI_HIGH_SPEED))
17502 strcat(str, "66MHz");
17504 strcat(str, "33MHz");
17506 if (tg3_flag(tp, PCI_32BIT))
17507 strcat(str, ":32-bit");
17509 strcat(str, ":64-bit");
17513 static void tg3_init_coal(struct tg3 *tp)
17515 struct ethtool_coalesce *ec = &tp->coal;
17517 memset(ec, 0, sizeof(*ec));
17518 ec->cmd = ETHTOOL_GCOALESCE;
17519 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17520 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17521 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17522 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17523 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17524 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17525 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17526 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17527 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17529 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17530 HOSTCC_MODE_CLRTICK_TXBD)) {
17531 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17532 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17533 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17534 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17537 if (tg3_flag(tp, 5705_PLUS)) {
17538 ec->rx_coalesce_usecs_irq = 0;
17539 ec->tx_coalesce_usecs_irq = 0;
17540 ec->stats_block_coalesce_usecs = 0;
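/* These defaults are what "ethtool -c <iface>" reports on a freshly
 * probed device; user overrides come in through tg3_set_coalesce(),
 * the ETHTOOL_SCOALESCE handler elsewhere in this file.
 */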
17544 static int tg3_init_one(struct pci_dev *pdev,
17545 const struct pci_device_id *ent)
17547 struct net_device *dev;
17550 u32 sndmbx, rcvmbx, intmbx;
17552 u64 dma_mask, persist_dma_mask;
17553 netdev_features_t features = 0;
17554 u8 addr[ETH_ALEN] __aligned(2);
17556 err = pci_enable_device(pdev);
17558 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17562 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17564 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17565 goto err_out_disable_pdev;
17568 pci_set_master(pdev);
17570 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17573 goto err_out_free_res;
17576 SET_NETDEV_DEV(dev, &pdev->dev);
17578 tp = netdev_priv(dev);
17581 tp->rx_mode = TG3_DEF_RX_MODE;
17582 tp->tx_mode = TG3_DEF_TX_MODE;
17584 tp->pcierr_recovery = false;
17587 tp->msg_enable = tg3_debug;
17589 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17591 if (pdev_is_ssb_gige_core(pdev)) {
17592 tg3_flag_set(tp, IS_SSB_CORE);
17593 if (ssb_gige_must_flush_posted_writes(pdev))
17594 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17595 if (ssb_gige_one_dma_at_once(pdev))
17596 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17597 if (ssb_gige_have_roboswitch(pdev)) {
17598 tg3_flag_set(tp, USE_PHYLIB);
17599 tg3_flag_set(tp, ROBOSWITCH);
17601 if (ssb_gige_is_rgmii(pdev))
17602 tg3_flag_set(tp, RGMII_MODE);
17605 /* The word/byte swap controls here govern register access byte
17606 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17609 tp->misc_host_ctrl =
17610 MISC_HOST_CTRL_MASK_PCI_INT |
17611 MISC_HOST_CTRL_WORD_SWAP |
17612 MISC_HOST_CTRL_INDIR_ACCESS |
17613 MISC_HOST_CTRL_PCISTATE_RW;
17615 /* The NONFRM (non-frame) byte/word swap controls take effect
17616 * on descriptor entries, anything which isn't packet data.
17618 * The StrongARM chips on the board (one for tx, one for rx)
17619 * are running in big-endian mode.
17621 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17622 GRC_MODE_WSWAP_NONFRM_DATA);
17623 #ifdef __BIG_ENDIAN
17624 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17626 spin_lock_init(&tp->lock);
17627 spin_lock_init(&tp->indirect_lock);
17628 INIT_WORK(&tp->reset_task, tg3_reset_task);
17630 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17632 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17634 goto err_out_free_dev;
17637 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17638 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17639 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17640 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17641 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17642 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17651 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17652 tg3_flag_set(tp, ENABLE_APE);
17653 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17654 if (!tp->aperegs) {
17655 dev_err(&pdev->dev,
17656 "Cannot map APE registers, aborting\n");
17658 goto err_out_iounmap;
17662 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17663 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17665 dev->ethtool_ops = &tg3_ethtool_ops;
17666 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17667 dev->netdev_ops = &tg3_netdev_ops;
17668 dev->irq = pdev->irq;
17670 err = tg3_get_invariants(tp, ent);
17672 dev_err(&pdev->dev,
17673 "Problem fetching invariants of chip, aborting\n");
17674 goto err_out_apeunmap;
17677 /* The EPB bridge inside 5714, 5715, and 5780 and any
17678 * device behind the EPB cannot support DMA addresses > 40-bit.
17679 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17680 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17681 * do DMA address check in tg3_start_xmit().
17683 if (tg3_flag(tp, IS_5788))
17684 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17685 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17686 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17687 #ifdef CONFIG_HIGHMEM
17688 dma_mask = DMA_BIT_MASK(64);
17691 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17693 /* Configure DMA attributes. */
17694 if (dma_mask > DMA_BIT_MASK(32)) {
17695 err = dma_set_mask(&pdev->dev, dma_mask);
17697 features |= NETIF_F_HIGHDMA;
17698 err = dma_set_coherent_mask(&pdev->dev,
17701 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17702 "DMA for consistent allocations\n");
17703 goto err_out_apeunmap;
17707 if (err || dma_mask == DMA_BIT_MASK(32)) {
17708 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17710 dev_err(&pdev->dev,
17711 "No usable DMA configuration, aborting\n");
17712 goto err_out_apeunmap;
17716 tg3_init_bufmgr_config(tp);
17718 /* 5700 B0 chips do not support checksumming correctly due
17719 * to hardware bugs.
17721 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17722 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17724 if (tg3_flag(tp, 5755_PLUS))
17725 features |= NETIF_F_IPV6_CSUM;
17728 /* TSO is on by default on chips that support hardware TSO.
17729 * Firmware TSO on older chips gives lower performance, so it
17730 * is off by default, but can be enabled using ethtool.
17732 if ((tg3_flag(tp, HW_TSO_1) ||
17733 tg3_flag(tp, HW_TSO_2) ||
17734 tg3_flag(tp, HW_TSO_3)) &&
17735 (features & NETIF_F_IP_CSUM))
17736 features |= NETIF_F_TSO;
17737 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17738 if (features & NETIF_F_IPV6_CSUM)
17739 features |= NETIF_F_TSO6;
17740 if (tg3_flag(tp, HW_TSO_3) ||
17741 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17742 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17743 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17744 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17745 tg3_asic_rev(tp) == ASIC_REV_57780)
17746 features |= NETIF_F_TSO_ECN;
17749 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17750 NETIF_F_HW_VLAN_CTAG_RX;
17751 dev->vlan_features |= features;
17754 * Add loopback capability only for a subset of devices that support
17755 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17756 * loopback for the remaining devices.
17758 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17759 !tg3_flag(tp, CPMU_PRESENT))
17760 /* Add the loopback capability */
17761 features |= NETIF_F_LOOPBACK;
17763 dev->hw_features |= features;
17764 dev->priv_flags |= IFF_UNICAST_FLT;
17766 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17767 dev->min_mtu = TG3_MIN_MTU;
17768 dev->max_mtu = TG3_MAX_MTU(tp);
17770 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17771 !tg3_flag(tp, TSO_CAPABLE) &&
17772 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17773 tg3_flag_set(tp, MAX_RXPEND_64);
17774 tp->rx_pending = 63;
17777 err = tg3_get_device_address(tp, addr);
17779 dev_err(&pdev->dev,
17780 "Could not obtain valid ethernet address, aborting\n");
17781 goto err_out_apeunmap;
17783 eth_hw_addr_set(dev, addr);
17785 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17786 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17787 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17788 for (i = 0; i < tp->irq_max; i++) {
17789 struct tg3_napi *tnapi = &tp->napi[i];
17792 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17794 tnapi->int_mbox = intmbx;
17800 tnapi->consmbox = rcvmbx;
17801 tnapi->prodmbox = sndmbx;
17804 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17806 tnapi->coal_now = HOSTCC_MODE_NOW;
17808 if (!tg3_flag(tp, SUPPORT_MSIX))
17812 * If we support MSIX, we'll be using RSS. If we're using
17813 * RSS, the first vector only handles link interrupts and the
17814 * remaining vectors handle rx and tx interrupts. Reuse the
17815 * mailbox values for the next iteration. The values we set up
17816 * above are still useful for the single vectored mode.
17830 * Reset chip in case UNDI or EFI driver did not shut down.
17831 * The DMA self test will enable WDMAC and we'll see (spurious)
17832 * pending DMA on the PCI bus at that point.
17834 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17835 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17836 tg3_full_lock(tp, 0);
17837 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17838 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17839 tg3_full_unlock(tp);
17842 err = tg3_test_dma(tp);
17844 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17845 goto err_out_apeunmap;
17850 pci_set_drvdata(pdev, dev);
17852 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17853 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17854 tg3_asic_rev(tp) == ASIC_REV_5762)
17855 tg3_flag_set(tp, PTP_CAPABLE);
17857 tg3_timer_init(tp);
17859 tg3_carrier_off(tp);
17861 err = register_netdev(dev);
17863 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17864 goto err_out_apeunmap;
17867 if (tg3_flag(tp, PTP_CAPABLE)) {
17869 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17871 if (IS_ERR(tp->ptp_clock))
17872 tp->ptp_clock = NULL;
17875 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17876 tp->board_part_number,
17877 tg3_chip_rev_id(tp),
17878 tg3_bus_string(tp, str),
17881 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17884 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17885 ethtype = "10/100Base-TX";
17886 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17887 ethtype = "1000Base-SX";
17889 ethtype = "10/100/1000Base-T";
17891 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17892 "(WireSpeed[%d], EEE[%d])\n",
17893 tg3_phy_string(tp), ethtype,
17894 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17895 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17898 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17899 (dev->features & NETIF_F_RXCSUM) != 0,
17900 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17901 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17902 tg3_flag(tp, ENABLE_ASF) != 0,
17903 tg3_flag(tp, TSO_CAPABLE) != 0);
17904 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17906 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17907 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17909 pci_save_state(pdev);
17915 iounmap(tp->aperegs);
17916 tp->aperegs = NULL;
17929 pci_release_regions(pdev);
17931 err_out_disable_pdev:
17932 if (pci_is_enabled(pdev))
17933 pci_disable_device(pdev);
17937 static void tg3_remove_one(struct pci_dev *pdev)
17939 struct net_device *dev = pci_get_drvdata(pdev);
17942 struct tg3 *tp = netdev_priv(dev);
17946 release_firmware(tp->fw);
17948 tg3_reset_task_cancel(tp);
17950 if (tg3_flag(tp, USE_PHYLIB)) {
17955 unregister_netdev(dev);
17957 iounmap(tp->aperegs);
17958 tp->aperegs = NULL;
17965 pci_release_regions(pdev);
17966 pci_disable_device(pdev);
17970 #ifdef CONFIG_PM_SLEEP
17971 static int tg3_suspend(struct device *device)
17973 struct net_device *dev = dev_get_drvdata(device);
17974 struct tg3 *tp = netdev_priv(dev);
17979 if (!netif_running(dev))
17982 tg3_reset_task_cancel(tp);
17984 tg3_netif_stop(tp);
17986 tg3_timer_stop(tp);
17988 tg3_full_lock(tp, 1);
17989 tg3_disable_ints(tp);
17990 tg3_full_unlock(tp);
17992 netif_device_detach(dev);
17994 tg3_full_lock(tp, 0);
17995 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17996 tg3_flag_clear(tp, INIT_COMPLETE);
17997 tg3_full_unlock(tp);
17999 err = tg3_power_down_prepare(tp);
18003 tg3_full_lock(tp, 0);
18005 tg3_flag_set(tp, INIT_COMPLETE);
18006 err2 = tg3_restart_hw(tp, true);
18010 tg3_timer_start(tp);
18012 netif_device_attach(dev);
18013 tg3_netif_start(tp);
18016 tg3_full_unlock(tp);
18027 static int tg3_resume(struct device *device)
18029 struct net_device *dev = dev_get_drvdata(device);
18030 struct tg3 *tp = netdev_priv(dev);
18035 if (!netif_running(dev))
18038 netif_device_attach(dev);
18040 tg3_full_lock(tp, 0);
18042 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18044 tg3_flag_set(tp, INIT_COMPLETE);
18045 err = tg3_restart_hw(tp,
18046 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18050 tg3_timer_start(tp);
18052 tg3_netif_start(tp);
18055 tg3_full_unlock(tp);
18064 #endif /* CONFIG_PM_SLEEP */
18066 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18068 static void tg3_shutdown(struct pci_dev *pdev)
18070 struct net_device *dev = pci_get_drvdata(pdev);
18071 struct tg3 *tp = netdev_priv(dev);
18073 tg3_reset_task_cancel(tp);
18077 netif_device_detach(dev);
18079 if (netif_running(dev))
18082 tg3_power_down(tp);
18086 pci_disable_device(pdev);
18090 * tg3_io_error_detected - called when PCI error is detected
18091 * @pdev: Pointer to PCI device
18092 * @state: The current pci connection state
18094 * This function is called after a PCI bus error affecting
18095 * this device has been detected.
18097 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18098 pci_channel_state_t state)
18100 struct net_device *netdev = pci_get_drvdata(pdev);
18101 struct tg3 *tp = netdev_priv(netdev);
18102 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18104 netdev_info(netdev, "PCI I/O error detected\n");
18106 /* Want to make sure that the reset task doesn't run */
18107 tg3_reset_task_cancel(tp);
18111 /* Could be second call or maybe we don't have netdev yet */
18112 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18115 /* We needn't recover from permanent error */
18116 if (state == pci_channel_io_frozen)
18117 tp->pcierr_recovery = true;
18121 tg3_netif_stop(tp);
18123 tg3_timer_stop(tp);
18125 netif_device_detach(netdev);
18127 /* Clean up software state, even if MMIO is blocked */
18128 tg3_full_lock(tp, 0);
18129 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18130 tg3_full_unlock(tp);
18133 if (state == pci_channel_io_perm_failure) {
18135 tg3_napi_enable(tp);
18138 err = PCI_ERS_RESULT_DISCONNECT;
18140 pci_disable_device(pdev);
18149 * tg3_io_slot_reset - called after the pci bus has been reset.
18150 * @pdev: Pointer to PCI device
18152 * Restart the card from scratch, as if from a cold-boot.
18153 * At this point, the card has experienced a hard reset,
18154 * followed by fixups by BIOS, and has its config space
18155 * set up identically to what it was at cold boot.
18157 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18159 struct net_device *netdev = pci_get_drvdata(pdev);
18160 struct tg3 *tp = netdev_priv(netdev);
18161 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18166 if (pci_enable_device(pdev)) {
18167 dev_err(&pdev->dev,
18168 "Cannot re-enable PCI device after reset.\n");
18172 pci_set_master(pdev);
18173 pci_restore_state(pdev);
18174 pci_save_state(pdev);
18176 if (!netdev || !netif_running(netdev)) {
18177 rc = PCI_ERS_RESULT_RECOVERED;
18181 err = tg3_power_up(tp);
18185 rc = PCI_ERS_RESULT_RECOVERED;
18188 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18189 tg3_napi_enable(tp);
18198 * tg3_io_resume - called when traffic can start flowing again.
18199 * @pdev: Pointer to PCI device
18201 * This callback is called when the error recovery driver tells
18202 * us that it's OK to resume normal operation.
18204 static void tg3_io_resume(struct pci_dev *pdev)
18206 struct net_device *netdev = pci_get_drvdata(pdev);
18207 struct tg3 *tp = netdev_priv(netdev);
18212 if (!netdev || !netif_running(netdev))
18215 tg3_full_lock(tp, 0);
18216 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18217 tg3_flag_set(tp, INIT_COMPLETE);
18218 err = tg3_restart_hw(tp, true);
18220 tg3_full_unlock(tp);
18221 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18225 netif_device_attach(netdev);
18227 tg3_timer_start(tp);
18229 tg3_netif_start(tp);
18231 tg3_full_unlock(tp);
18236 tp->pcierr_recovery = false;
18240 static const struct pci_error_handlers tg3_err_handler = {
18241 .error_detected = tg3_io_error_detected,
18242 .slot_reset = tg3_io_slot_reset,
18243 .resume = tg3_io_resume
18246 static struct pci_driver tg3_driver = {
18247 .name = DRV_MODULE_NAME,
18248 .id_table = tg3_pci_tbl,
18249 .probe = tg3_init_one,
18250 .remove = tg3_remove_one,
18251 .err_handler = &tg3_err_handler,
18252 .driver.pm = &tg3_pm_ops,
18253 .shutdown = tg3_shutdown,
18256 module_pci_driver(tg3_driver);
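/* module_pci_driver() generates the module init/exit boilerplate; it
 * expands to roughly the following (see <linux/pci.h>):
 *
 *	static int __init tg3_driver_init(void)
 *	{
 *		return pci_register_driver(&tg3_driver);
 *	}
 *	module_init(tg3_driver_init);
 *
 *	static void __exit tg3_driver_exit(void)
 *	{
 *		pci_unregister_driver(&tg3_driver);
 *	}
 *	module_exit(tg3_driver_exit);
 */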