/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
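
/* Usage sketch (illustrative, not part of the original file): the token
 * pasting above means flag names are validated against enum TG3_FLAGS at
 * compile time, e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))		expands to test_bit(TG3_FLAG_ENABLE_APE, ...)
 *		tg3_flag_set(tp, TAGGED_STATUS);	expands to set_bit(TG3_FLAG_TAGGED_STATUS, ...)
 *
 * so a misspelled flag name fails to build instead of silently testing
 * the wrong bit.
 */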
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
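
/* Worked example (illustrative): because TG3_TX_RING_SIZE is a power of
 * two (512), NEXT_TX computes '(N + 1) % 512' with a simple AND:
 * NEXT_TX(510) == 511 and NEXT_TX(511) == (512 & 511) == 0, wrapping the
 * producer index without a hardware divide, which is exactly the
 * '& (foo - 1)' trick described in the comment above.
 */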
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
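
/* Illustrative note (not in the original source): on an architecture such
 * as x86, which selects CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS,
 * TG3_RX_COPY_THRESH(tp) folds to the constant 256, so the rx path
 * compares the packet length against an immediate instead of
 * dereferencing tp->rx_copy_thresh on every frame.
 */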
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
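
/* Usage sketch (illustrative): the debug value is a bitmap of NETIF_MSG_*
 * flags, so loading the module with, say,
 *
 *	modprobe tg3 tg3_debug=0x7	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 *
 * enables only driver, probe and link messages, while the default of -1
 * selects TG3_DEF_MSG_ENABLE above.
 */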
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
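
/* Usage sketch (illustrative): tw32_f() flushes a posted write by reading
 * the register back, while tw32_wait_f() additionally enforces a settle
 * time, e.g. when toggling GPIO power switches through GRC_LOCAL_CTRL:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */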
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
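
/* Pairing sketch (illustrative): callers bracket shared-resource accesses
 * with the lock/unlock pair, e.g. the PHY helpers below do
 *
 *	tg3_ape_lock(tp, tp->phy_ape_lock);
 *	... MDIO access ...
 *	tg3_ape_unlock(tp, tp->phy_ape_lock);
 *
 * so the APE firmware and the driver never drive the MDIO bus at once.
 */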
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}
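
/* Usage note (illustrative, assuming the periodic driver timer): because
 * the heartbeat is rate-limited by 'interval', a caller such as the
 * driver's timer routine can invoke it unconditionally, e.g.
 *
 *	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
 *
 * and the write to TG3_APE_HOST_HEARTBEAT_COUNT only happens once the
 * interval has elapsed.
 */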
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
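
/* Timing sketch (illustrative): PHY_BUSY_LOOPS (5000) polls with a short
 * per-pass delay bound a single MDIO transaction to roughly 50 ms before
 * __tg3_readphy()/__tg3_writephy() give up and return -EBUSY.
 */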
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;
		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
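
/* Worked example (illustrative): with the full 2500 usec budget pending,
 * delay_cnt = (2500 >> 3) + 1 = 313 polls of 8 usec each, i.e. about
 * 2.5 ms, so the shortened wait never exceeds the firmware event timeout.
 */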
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
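
/* Worked example (illustrative): if the local side advertises symmetric
 * pause (ADVERTISE_1000XPAUSE) and the link partner advertises both
 * ADVERTISE_1000XPAUSE and ADVERTISE_1000XPSE_ASYM, the AND of both
 * advertisements keeps ADVERTISE_1000XPAUSE set, so
 * tg3_resolve_flowctrl_1000X() returns FLOW_CTRL_TX | FLOW_CTRL_RX and
 * pause frames flow in both directions.
 */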
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2253 if (!tg3_flag(tp, 5705_PLUS) ||
2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2260 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2261 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2263 tg3_writephy(tp, MII_TG3_FET_TEST,
2264 ephy | MII_TG3_FET_SHADOW_EN);
2265 if (!tg3_readphy(tp, reg, &phy)) {
2267 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270 tg3_writephy(tp, reg, phy);
2272 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2277 ret = tg3_phy_auxctl_read(tp,
2278 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2281 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284 tg3_phy_auxctl_write(tp,
2285 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
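/* Enable Ethernet@WireSpeed so the link can come up at a reduced
 * speed when the cabling cannot sustain the highest advertised rate.
 */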
2290 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2304 static void tg3_phy_apply_otp(struct tg3 *tp)
2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2338 tg3_phy_toggle_auxctl_smdsp(tp, false);
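/* Read the live EEE state (partner ability, local advertisement, LPI
 * mode and timer) back from the PHY and CPMU into tp->eee or the
 * caller-supplied buffer.
 */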
2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2344 struct ethtool_eee *dest = &tp->eee;
2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2355 /* Pull eee_active */
2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2358 dest->eee_active = 1;
2360 dest->eee_active = 0;
2362 /* Pull lp advertised settings */
2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2365 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2367 /* Pull advertised and eee_enabled settings */
2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2370 dest->eee_enabled = !!val;
2371 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373 /* Pull tx_lpi_enabled */
2374 val = tr32(TG3_CPMU_EEE_MODE);
2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2377 /* Pull lpi timer value */
2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2390 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2392 tp->link_config.active_duplex == DUPLEX_FULL &&
2393 (tp->link_config.active_speed == SPEED_100 ||
2394 tp->link_config.active_speed == SPEED_1000)) {
2397 if (tp->link_config.active_speed == SPEED_1000)
2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2402 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2404 tg3_eee_pull_config(tp, NULL);
2405 if (tp->eee.eee_active)
2409 if (!tp->setlpicnt) {
2410 if (current_link_up &&
2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2413 tg3_phy_toggle_auxctl_smdsp(tp, false);
2416 val = tr32(TG3_CPMU_EEE_MODE);
2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2421 static void tg3_phy_eee_enable(struct tg3 *tp)
2425 if (tp->link_config.active_speed == SPEED_1000 &&
2426 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2427 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2428 tg3_flag(tp, 57765_CLASS)) &&
2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2430 val = MII_TG3_DSP_TAP26_ALNOKO |
2431 MII_TG3_DSP_TAP26_RMRXSTO;
2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2433 tg3_phy_toggle_auxctl_smdsp(tp, false);
2436 val = tr32(TG3_CPMU_EEE_MODE);
2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2440 static int tg3_wait_macro_done(struct tg3 *tp)
2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2448 if ((tmp32 & 0x1000) == 0)
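/* Write the per-channel test patterns into the DSP and read them
 * back; on a macro timeout the PHY is flagged for another reset via
 * *resetp.
 */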
2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2460 static const u32 test_pat[4][6] = {
2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2468 for (chan = 0; chan < 4; chan++) {
2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2472 (chan * 0x2000) | 0x0200);
2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2475 for (i = 0; i < 6; i++)
2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2477 test_pat[chan][i]);
2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2480 if (tg3_wait_macro_done(tp)) {
2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486 (chan * 0x2000) | 0x0200);
2487 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2488 if (tg3_wait_macro_done(tp)) {
2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2494 if (tg3_wait_macro_done(tp)) {
2499 for (i = 0; i < 6; i += 2) {
2502 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2503 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2504 tg3_wait_macro_done(tp)) {
2510 if (low != test_pat[chan][i] ||
2511 high != test_pat[chan][i+1]) {
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2524 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2528 for (chan = 0; chan < 4; chan++) {
2531 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2532 (chan * 0x2000) | 0x0200);
2533 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2534 for (i = 0; i < 6; i++)
2535 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2536 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2537 if (tg3_wait_macro_done(tp))
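/* PHY reset workaround for 5703/5704/5705: force 1000 Mbps
 * full-duplex master mode, rewrite the channel test patterns until
 * they verify, then restore the original PHY register state.
 */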
2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2546 u32 reg32, phy9_orig;
2547 int retries, do_phy_reset, err;
2553 err = tg3_bmcr_reset(tp);
2559 /* Disable transmitter and interrupt. */
2560 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2564 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2566 /* Set full-duplex, 1000 mbps. */
2567 tg3_writephy(tp, MII_BMCR,
2568 BMCR_FULLDPLX | BMCR_SPEED1000);
2570 /* Set to master mode. */
2571 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2574 tg3_writephy(tp, MII_CTRL1000,
2575 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2577 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2581 /* Block the PHY control access. */
2582 tg3_phydsp_write(tp, 0x8005, 0x0800);
2584 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2587 } while (--retries);
2589 err = tg3_phy_reset_chanpat(tp);
2593 tg3_phydsp_write(tp, 0x8005, 0x0000);
2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2596 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2598 tg3_phy_toggle_auxctl_smdsp(tp, false);
2600 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2602 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2607 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612 static void tg3_carrier_off(struct tg3 *tp)
2614 netif_carrier_off(tp->dev);
2615 tp->link_up = false;
2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2620 if (tg3_flag(tp, ENABLE_ASF))
2621 netdev_warn(tp->dev,
2622 "Management side-band traffic will be interrupted during phy settings change\n");
2625 /* This will reset the tigon3 PHY if there is no valid
2626 * link unless the FORCE argument is non-zero.
2627 */
2628 static int tg3_phy_reset(struct tg3 *tp)
2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2634 val = tr32(GRC_MISC_CFG);
2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2638 err = tg3_readphy(tp, MII_BMSR, &val);
2639 err |= tg3_readphy(tp, MII_BMSR, &val);
2643 if (netif_running(tp->dev) && tp->link_up) {
2644 netif_carrier_off(tp->dev);
2645 tg3_link_report(tp);
2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2649 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2650 tg3_asic_rev(tp) == ASIC_REV_5705) {
2651 err = tg3_phy_reset_5703_4_5(tp);
2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2660 cpmuctrl = tr32(TG3_CPMU_CTRL);
2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2666 err = tg3_bmcr_reset(tp);
2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2674 tw32(TG3_CPMU_CTRL, cpmuctrl);
2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2681 CPMU_LSPD_1000MB_MACCLK_12_5) {
2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2688 if (tg3_flag(tp, 5717_PLUS) &&
2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2692 tg3_phy_apply_otp(tp);
2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2695 tg3_phy_toggle_apd(tp, true);
2697 tg3_phy_toggle_apd(tp, false);
2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2703 tg3_phydsp_write(tp, 0x000a, 0x0323);
2704 tg3_phy_toggle_auxctl_smdsp(tp, false);
2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714 tg3_phydsp_write(tp, 0x000a, 0x310b);
2715 tg3_phydsp_write(tp, 0x201f, 0x9506);
2716 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2717 tg3_phy_toggle_auxctl_smdsp(tp, false);
2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2724 tg3_writephy(tp, MII_TG3_TEST1,
2725 MII_TG3_TEST1_TRIM_EN | 0x4);
2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2729 tg3_phy_toggle_auxctl_smdsp(tp, false);
2733 /* Set the Extended packet length bit (bit 14) on all chips
2734 * that support jumbo frames. */
2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2736 /* Cannot do read-modify-write on 5401 */
2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2739 /* Set bit 14 with read-modify-write to preserve other bits */
2740 err = tg3_phy_auxctl_read(tp,
2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2748 * jumbo frames transmission.
2749 */
2750 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2752 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2757 /* adjust output voltage */
2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2762 tg3_phydsp_write(tp, 0xffb, 0x4000);
2764 tg3_phy_toggle_automdix(tp, true);
2765 tg3_phy_set_wirespeed(tp);
2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2772 TG3_GPIO_MSG_NEED_VAUX)
2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 12))
2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 12))
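/* Update this PCI function's bits in the shared GPIO message word
 * (kept in the APE scratchpad on 5717/5719, in CPMU_DRV_STATUS
 * otherwise) and return the combined status of all functions.
 */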
2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2790 tg3_asic_rev(tp) == ASIC_REV_5719)
2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2793 status = tr32(TG3_CPMU_DRV_STATUS);
2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2796 status &= ~(TG3_GPIO_MSG_MASK << shift);
2797 status |= (newstat << shift);
2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800 tg3_asic_rev(tp) == ASIC_REV_5719)
2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2803 tw32(TG3_CPMU_DRV_STATUS, status);
2805 return status >> TG3_APE_GPIO_MSG_SHIFT;
2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2810 if (!tg3_flag(tp, IS_NIC))
2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2815 tg3_asic_rev(tp) == ASIC_REV_5720) {
2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822 TG3_GRC_LCLCTL_PWRSW_DELAY);
2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827 TG3_GRC_LCLCTL_PWRSW_DELAY);
2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2837 if (!tg3_flag(tp, IS_NIC) ||
2838 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2839 tg3_asic_rev(tp) == ASIC_REV_5701)
2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 tw32_wait_f(GRC_LOCAL_CTRL,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2852 tw32_wait_f(GRC_LOCAL_CTRL,
2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2859 if (!tg3_flag(tp, IS_NIC))
2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2863 tg3_asic_rev(tp) == ASIC_REV_5701) {
2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2865 (GRC_LCLCTRL_GPIO_OE0 |
2866 GRC_LCLCTRL_GPIO_OE1 |
2867 GRC_LCLCTRL_GPIO_OE2 |
2868 GRC_LCLCTRL_GPIO_OUTPUT0 |
2869 GRC_LCLCTRL_GPIO_OUTPUT1),
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2875 GRC_LCLCTRL_GPIO_OE1 |
2876 GRC_LCLCTRL_GPIO_OE2 |
2877 GRC_LCLCTRL_GPIO_OUTPUT0 |
2878 GRC_LCLCTRL_GPIO_OUTPUT1 |
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 TG3_GRC_LCLCTL_PWRSW_DELAY);
2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 u32 grc_local_ctrl = 0;
2894 /* Workaround to prevent overdrawing Amps. */
2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2899 TG3_GRC_LCLCTL_PWRSW_DELAY);
2902 /* On 5753 and variants, GPIO2 cannot be used. */
2903 no_gpio2 = tp->nic_sram_data_cfg &
2904 NIC_SRAM_DATA_CFG_NO_GPIO2;
2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2907 GRC_LCLCTRL_GPIO_OE1 |
2908 GRC_LCLCTRL_GPIO_OE2 |
2909 GRC_LCLCTRL_GPIO_OUTPUT1 |
2910 GRC_LCLCTRL_GPIO_OUTPUT2;
2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2913 GRC_LCLCTRL_GPIO_OUTPUT2);
2915 tw32_wait_f(GRC_LOCAL_CTRL,
2916 tp->grc_local_ctrl | grc_local_ctrl,
2917 TG3_GRC_LCLCTL_PWRSW_DELAY);
2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2921 tw32_wait_f(GRC_LOCAL_CTRL,
2922 tp->grc_local_ctrl | grc_local_ctrl,
2923 TG3_GRC_LCLCTL_PWRSW_DELAY);
2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2927 tw32_wait_f(GRC_LOCAL_CTRL,
2928 tp->grc_local_ctrl | grc_local_ctrl,
2929 TG3_GRC_LCLCTL_PWRSW_DELAY);
2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2938 /* Serialize power state transitions */
2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2943 msg = TG3_GPIO_MSG_NEED_VAUX;
2945 msg = tg3_set_function_status(tp, msg);
2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2951 tg3_pwrsrc_switch_to_vaux(tp);
2953 tg3_pwrsrc_die_with_vmain(tp);
2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2961 bool need_vaux = false;
2963 /* The GPIOs do something completely different on 57765. */
2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2968 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2969 tg3_asic_rev(tp) == ASIC_REV_5720) {
2970 tg3_frob_aux_power_5717(tp, include_wol ?
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2976 struct net_device *dev_peer;
2978 dev_peer = pci_get_drvdata(tp->pdev_peer);
2980 /* remove_one() may have been run on the peer. */
2982 struct tg3 *tp_peer = netdev_priv(dev_peer);
2984 if (tg3_flag(tp_peer, INIT_COMPLETE))
2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2988 tg3_flag(tp_peer, ENABLE_ASF))
2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2994 tg3_flag(tp, ENABLE_ASF))
2998 tg3_pwrsrc_switch_to_vaux(tp);
3000 tg3_pwrsrc_die_with_vmain(tp);
3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3008 if (speed != SPEED_10)
3010 } else if (speed == SPEED_10)
3016 static bool tg3_phy_power_bug(struct tg3 *tp)
3018 switch (tg3_asic_rev(tp)) {
3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3041 static bool tg3_phy_led_bug(struct tg3 *tp)
3043 switch (tg3_asic_rev(tp)) {
3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3069 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3077 val = tr32(GRC_MISC_CFG);
3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3086 tg3_writephy(tp, MII_ADVERTISE, 0);
3087 tg3_writephy(tp, MII_BMCR,
3088 BMCR_ANENABLE | BMCR_ANRESTART);
3090 tg3_writephy(tp, MII_TG3_FET_TEST,
3091 phytest | MII_TG3_FET_SHADOW_EN);
3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3095 MII_TG3_FET_SHDW_AUXMODE4,
3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3101 } else if (do_low_power) {
3102 if (!tg3_phy_led_bug(tp))
3103 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3108 MII_TG3_AUXCTL_PCTL_VREG_11V;
3109 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3112 /* The PHY should not be powered down on some chips because
3113 * of bugs.
3114 */
3115 if (tg3_phy_power_bug(tp))
3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3129 /* tp->lock is held. */
3130 static int tg3_nvram_lock(struct tg3 *tp)
3132 if (tg3_flag(tp, NVRAM)) {
3135 if (tp->nvram_lock_cnt == 0) {
3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3137 for (i = 0; i < 8000; i++) {
3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3147 tp->nvram_lock_cnt++;
3152 /* tp->lock is held. */
3153 static void tg3_nvram_unlock(struct tg3 *tp)
3155 if (tg3_flag(tp, NVRAM)) {
3156 if (tp->nvram_lock_cnt > 0)
3157 tp->nvram_lock_cnt--;
3158 if (tp->nvram_lock_cnt == 0)
3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3163 /* tp->lock is held. */
3164 static void tg3_enable_nvram_access(struct tg3 *tp)
3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3167 u32 nvaccess = tr32(NVRAM_ACCESS);
3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3173 /* tp->lock is held. */
3174 static void tg3_disable_nvram_access(struct tg3 *tp)
3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177 u32 nvaccess = tr32(NVRAM_ACCESS);
3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3184 u32 offset, u32 *val)
3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3193 EEPROM_ADDR_DEVID_MASK |
3195 tw32(GRC_EEPROM_ADDR,
3197 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3199 EEPROM_ADDR_ADDR_MASK) |
3200 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3202 for (i = 0; i < 1000; i++) {
3203 tmp = tr32(GRC_EEPROM_ADDR);
3205 if (tmp & EEPROM_ADDR_COMPLETE)
3209 if (!(tmp & EEPROM_ADDR_COMPLETE))
3212 tmp = tr32(GRC_EEPROM_DATA);
3214 /*
3215 * The data will always be opposite the native endian
3216 * format. Perform a blind byteswap to compensate.
3217 */
3218 *val = swab32(tmp);
3220 return 0;
3221 }
3223 #define NVRAM_CMD_TIMEOUT 10000
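/* Issue a command to the NVRAM interface and poll up to
 * NVRAM_CMD_TIMEOUT times for NVRAM_CMD_DONE. Returns -EBUSY if the
 * command never completes.
 */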
3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3229 tw32(NVRAM_CMD, nvram_cmd);
3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3231 usleep_range(10, 40);
3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3238 if (i == NVRAM_CMD_TIMEOUT)
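/* Translate a linear NVRAM offset into the physical address expected
 * by Atmel AT45DB0X1B-style flash, whose page size is not a power of
 * two; tg3_nvram_logical_addr() below performs the inverse mapping.
 */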
3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3246 if (tg3_flag(tp, NVRAM) &&
3247 tg3_flag(tp, NVRAM_BUFFERED) &&
3248 tg3_flag(tp, FLASH) &&
3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3250 (tp->nvram_jedecnum == JEDEC_ATMEL))
3252 addr = ((addr / tp->nvram_pagesize) <<
3253 ATMEL_AT45DB0X1B_PAGE_POS) +
3254 (addr % tp->nvram_pagesize);
3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3261 if (tg3_flag(tp, NVRAM) &&
3262 tg3_flag(tp, NVRAM_BUFFERED) &&
3263 tg3_flag(tp, FLASH) &&
3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265 (tp->nvram_jedecnum == JEDEC_ATMEL))
3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3268 tp->nvram_pagesize) +
3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3274 /* NOTE: Data read in from NVRAM is byteswapped according to
3275 * the byteswapping settings for all other register accesses.
3276 * tg3 devices are BE devices, so on a BE machine, the data
3277 * returned will be exactly as it is seen in NVRAM. On a LE
3278 * machine, the 32-bit value will be byteswapped.
3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3284 if (!tg3_flag(tp, NVRAM))
3285 return tg3_nvram_read_using_eeprom(tp, offset, val);
3287 offset = tg3_nvram_phys_addr(tp, offset);
3289 if (offset > NVRAM_ADDR_MSK)
3292 ret = tg3_nvram_lock(tp);
3296 tg3_enable_nvram_access(tp);
3298 tw32(NVRAM_ADDR, offset);
3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3303 *val = tr32(NVRAM_RDDATA);
3305 tg3_disable_nvram_access(tp);
3307 tg3_nvram_unlock(tp);
3312 /* Ensures NVRAM data is in bytestream format. */
3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3316 int res = tg3_nvram_read(tp, offset, &v);
3318 *val = cpu_to_be32(v);
3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3323 u32 offset, u32 len, u8 *buf)
3328 for (i = 0; i < len; i += 4) {
3334 memcpy(&data, buf + i, 4);
3337 * The SEEPROM interface expects the data to always be opposite
3338 * the native endian format. We accomplish this by reversing
3339 * all the operations that would have been performed on the
3340 * data from a call to tg3_nvram_read_be32().
3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3344 val = tr32(GRC_EEPROM_ADDR);
3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3349 tw32(GRC_EEPROM_ADDR, val |
3350 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3351 (addr & EEPROM_ADDR_ADDR_MASK) |
3355 for (j = 0; j < 1000; j++) {
3356 val = tr32(GRC_EEPROM_ADDR);
3358 if (val & EEPROM_ADDR_COMPLETE)
3362 if (!(val & EEPROM_ADDR_COMPLETE)) {
3371 /* offset and length are dword aligned */
3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3376 u32 pagesize = tp->nvram_pagesize;
3377 u32 pagemask = pagesize - 1;
3381 tmp = kmalloc(pagesize, GFP_KERNEL);
3387 u32 phy_addr, page_off, size;
3389 phy_addr = offset & ~pagemask;
3391 for (j = 0; j < pagesize; j += 4) {
3392 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3393 (__be32 *) (tmp + j));
3400 page_off = offset & pagemask;
3407 memcpy(tmp + page_off, buf, size);
3409 offset = offset + (pagesize - page_off);
3411 tg3_enable_nvram_access(tp);
3413 /*
3414 * Before we can erase the flash page, we need
3415 * to issue a special "write enable" command.
3416 */
3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3422 /* Erase the target page */
3423 tw32(NVRAM_ADDR, phy_addr);
3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3431 /* Issue another write enable to start the write. */
3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3437 for (j = 0; j < pagesize; j += 4) {
3440 data = *((__be32 *) (tmp + j));
3442 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3444 tw32(NVRAM_ADDR, phy_addr + j);
3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3450 nvram_cmd |= NVRAM_CMD_FIRST;
3451 else if (j == (pagesize - 4))
3452 nvram_cmd |= NVRAM_CMD_LAST;
3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3463 tg3_nvram_exec_cmd(tp, nvram_cmd);
3470 /* offset and length are dword aligned */
3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3476 for (i = 0; i < len; i += 4, offset += 4) {
3477 u32 page_off, phy_addr, nvram_cmd;
3480 memcpy(&data, buf + i, 4);
3481 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3483 page_off = offset % tp->nvram_pagesize;
3485 phy_addr = tg3_nvram_phys_addr(tp, offset);
3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3489 if (page_off == 0 || i == 0)
3490 nvram_cmd |= NVRAM_CMD_FIRST;
3491 if (page_off == (tp->nvram_pagesize - 4))
3492 nvram_cmd |= NVRAM_CMD_LAST;
3495 nvram_cmd |= NVRAM_CMD_LAST;
3497 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3498 !tg3_flag(tp, FLASH) ||
3499 !tg3_flag(tp, 57765_PLUS))
3500 tw32(NVRAM_ADDR, phy_addr);
3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3503 !tg3_flag(tp, 5755_PLUS) &&
3504 (tp->nvram_jedecnum == JEDEC_ST) &&
3505 (nvram_cmd & NVRAM_CMD_FIRST)) {
3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3509 ret = tg3_nvram_exec_cmd(tp, cmd);
3513 if (!tg3_flag(tp, FLASH)) {
3514 /* We always do complete word writes to eeprom. */
3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3525 /* offset and length are dword aligned */
3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3532 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3536 if (!tg3_flag(tp, NVRAM)) {
3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3541 ret = tg3_nvram_lock(tp);
3545 tg3_enable_nvram_access(tp);
3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3547 tw32(NVRAM_WRITE1, 0x406);
3549 grc_mode = tr32(GRC_MODE);
3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3553 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3560 grc_mode = tr32(GRC_MODE);
3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3563 tg3_disable_nvram_access(tp);
3564 tg3_nvram_unlock(tp);
3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3575 #define RX_CPU_SCRATCH_BASE 0x30000
3576 #define RX_CPU_SCRATCH_SIZE 0x04000
3577 #define TX_CPU_SCRATCH_BASE 0x34000
3578 #define TX_CPU_SCRATCH_SIZE 0x04000
3580 /* tp->lock is held. */
3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3584 const int iters = 10000;
3586 for (i = 0; i < iters; i++) {
3587 tw32(cpu_base + CPU_STATE, 0xffffffff);
3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3591 if (pci_channel_offline(tp->pdev))
3595 return (i == iters) ? -EBUSY : 0;
3598 /* tp->lock is held. */
3599 static int tg3_rxcpu_pause(struct tg3 *tp)
3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3610 /* tp->lock is held. */
3611 static int tg3_txcpu_pause(struct tg3 *tp)
3613 return tg3_pause_cpu(tp, TX_CPU_BASE);
3616 /* tp->lock is held. */
3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3619 tw32(cpu_base + CPU_STATE, 0xffffffff);
3620 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3623 /* tp->lock is held. */
3624 static void tg3_rxcpu_resume(struct tg3 *tp)
3626 tg3_resume_cpu(tp, RX_CPU_BASE);
3629 /* tp->lock is held. */
3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3637 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3642 if (cpu_base == RX_CPU_BASE) {
3643 rc = tg3_rxcpu_pause(tp);
3644 } else {
3645 /*
3646 * There is only an Rx CPU for the 5750 derivative in the
3647 * BCM4785.
3648 */
3649 if (tg3_flag(tp, IS_SSB_CORE))
3652 rc = tg3_txcpu_pause(tp);
3656 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3661 /* Clear firmware's nvram arbitration. */
3662 if (tg3_flag(tp, NVRAM))
3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3667 static int tg3_fw_data_len(struct tg3 *tp,
3668 const struct tg3_firmware_hdr *fw_hdr)
3672 /* Non-fragmented firmware has one firmware header followed by a
3673 * contiguous chunk of data to be written. The length field in that
3674 * header is not the length of the data to be written but the complete
3675 * length of the bss. The data length is determined based on
3676 * tp->fw->size minus the headers.
3677 *
3678 * Fragmented firmware has a main header followed by multiple
3679 * fragments. Each fragment is identical to non-fragmented firmware,
3680 * with a firmware header followed by a contiguous chunk of data. In
3681 * the main header, the length field is unused and set to 0xffffffff.
3682 * In each fragment header the length is the entire size of that
3683 * fragment, i.e. fragment data + header length. The data length is
3684 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3685 */
3686 if (tp->fw_len == 0xffffffff)
3687 fw_len = be32_to_cpu(fw_hdr->len);
3689 fw_len = tp->fw->size;
3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3694 /* tp->lock is held. */
3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696 u32 cpu_scratch_base, int cpu_scratch_size,
3697 const struct tg3_firmware_hdr *fw_hdr)
3700 void (*write_op)(struct tg3 *, u32, u32);
3701 int total_len = tp->fw->size;
3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3705 "%s: Trying to load TX cpu firmware which is 5705\n",
3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711 write_op = tg3_write_mem;
3713 write_op = tg3_write_indirect_reg32;
3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716 /* It is possible that bootcode is still loading at this point.
3717 * Get the nvram lock first before halting the cpu.
3718 */
3719 int lock_err = tg3_nvram_lock(tp);
3720 err = tg3_halt_cpu(tp, cpu_base);
3722 tg3_nvram_unlock(tp);
3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727 write_op(tp, cpu_scratch_base + i, 0);
3728 tw32(cpu_base + CPU_STATE, 0xffffffff);
3729 tw32(cpu_base + CPU_MODE,
3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3732 /* Subtract additional main header for fragmented firmware and
3733 * advance to the first fragment.
3734 */
3735 total_len -= TG3_FW_HDR_LEN;
3740 u32 *fw_data = (u32 *)(fw_hdr + 1);
3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742 write_op(tp, cpu_scratch_base +
3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 (i * sizeof(u32)),
3745 be32_to_cpu(fw_data[i]));
3747 total_len -= be32_to_cpu(fw_hdr->len);
3749 /* Advance to next fragment */
3750 fw_hdr = (struct tg3_firmware_hdr *)
3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752 } while (total_len > 0);
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3764 const int iters = 5;
3766 tw32(cpu_base + CPU_STATE, 0xffffffff);
3767 tw32_f(cpu_base + CPU_PC, pc);
3769 for (i = 0; i < iters; i++) {
3770 if (tr32(cpu_base + CPU_PC) == pc)
3772 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3774 tw32_f(cpu_base + CPU_PC, pc);
3778 return (i == iters) ? -EBUSY : 0;
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3784 const struct tg3_firmware_hdr *fw_hdr;
3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3789 /* Firmware blob starts with version numbers, followed by
3790 * start address and length. We are setting complete length.
3791 * length = end_address_of_bss - start_address_of_text.
3792 * Remainder is the blob to be loaded contiguously
3793 * from start address. */
3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3807 /* Now startup only the RX cpu. */
3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809 be32_to_cpu(fw_hdr->base_addr));
3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3812 "should be %08x\n", __func__,
3813 tr32(RX_CPU_BASE + CPU_PC),
3814 be32_to_cpu(fw_hdr->base_addr));
3818 tg3_rxcpu_resume(tp);
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3825 const int iters = 1000;
3829 /* Wait for boot code to complete initialization and enter service
3830 * loop. It is then safe to download service patches.
3831 */
3832 for (i = 0; i < iters; i++) {
3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3840 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3846 netdev_warn(tp->dev,
3847 "Other patches exist. Not downloading EEE patch\n");
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3857 struct tg3_firmware_hdr *fw_hdr;
3859 if (!tg3_flag(tp, NO_NVRAM))
3862 if (tg3_validate_rxcpu_state(tp))
3868 /* This firmware blob has a different format than older firmware
3869 * releases, as described below. The main difference is that we have
3870 * fragmented data to be written to non-contiguous locations.
3871 *
3872 * In the beginning we have a firmware header identical to other
3873 * firmware, consisting of version, base addr and length. The length
3874 * here is unused and set to 0xffffffff.
3875 *
3876 * This is followed by a series of firmware fragments which are
3877 * individually identical to previous firmware, i.e. they have the
3878 * firmware header followed by the data for that fragment. The version
3879 * field of the individual fragment header is unused.
3880 */
3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3886 if (tg3_rxcpu_pause(tp))
3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3892 tg3_rxcpu_resume(tp);
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3898 const struct tg3_firmware_hdr *fw_hdr;
3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3902 if (!tg3_flag(tp, FW_TSO))
3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3907 /* Firmware blob starts with version numbers, followed by
3908 * start address and length. We are setting complete length.
3909 * length = end_address_of_bss - start_address_of_text.
3910 * Remainder is the blob to be loaded contiguously
3911 * from start address. */
3913 cpu_scratch_size = tp->fw_len;
3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916 cpu_base = RX_CPU_BASE;
3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3919 cpu_base = TX_CPU_BASE;
3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3924 err = tg3_load_firmware_cpu(tp, cpu_base,
3925 cpu_scratch_base, cpu_scratch_size,
3930 /* Now startup the cpu. */
3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932 be32_to_cpu(fw_hdr->base_addr));
3935 "%s fails to set CPU PC, is %08x should be %08x\n",
3936 __func__, tr32(cpu_base + CPU_PC),
3937 be32_to_cpu(fw_hdr->base_addr));
3941 tg3_resume_cpu(tp, cpu_base);
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3949 u32 addr_high, addr_low;
3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953 (mac_addr[4] << 8) | mac_addr[5]);
3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3971 for (i = 0; i < 4; i++) {
3972 if (i == 1 && skip_mac_1)
3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978 tg3_asic_rev(tp) == ASIC_REV_5704) {
3979 for (i = 4; i < 16; i++)
3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3983 addr_high = (tp->dev->dev_addr[0] +
3984 tp->dev->dev_addr[1] +
3985 tp->dev->dev_addr[2] +
3986 tp->dev->dev_addr[3] +
3987 tp->dev->dev_addr[4] +
3988 tp->dev->dev_addr[5]) &
3989 TX_BACKOFF_SEED_MASK;
3990 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3993 static void tg3_enable_register_access(struct tg3 *tp)
3995 /*
3996 * Make sure register accesses (indirect or otherwise) will function
3997 * correctly.
3998 */
3999 pci_write_config_dword(tp->pdev,
4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4003 static int tg3_power_up(struct tg3 *tp)
4007 tg3_enable_register_access(tp);
4009 err = pci_set_power_state(tp->pdev, PCI_D0);
4011 /* Switch out of Vaux if it is a NIC */
4012 tg3_pwrsrc_switch_to_vmain(tp);
4014 netdev_err(tp->dev, "Transition to D0 failed\n");
4020 static int tg3_setup_phy(struct tg3 *, bool);
4022 static int tg3_power_down_prepare(struct tg3 *tp)
4025 bool device_should_wake, do_low_power;
4027 tg3_enable_register_access(tp);
4029 /* Restore the CLKREQ setting. */
4030 if (tg3_flag(tp, CLKREQ_BUG))
4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032 PCI_EXP_LNKCTL_CLKREQ_EN);
4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035 tw32(TG3PCI_MISC_HOST_CTRL,
4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039 tg3_flag(tp, WOL_ENABLE);
4041 if (tg3_flag(tp, USE_PHYLIB)) {
4042 do_low_power = false;
4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046 struct phy_device *phydev;
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053 tp->link_config.speed = phydev->speed;
4054 tp->link_config.duplex = phydev->duplex;
4055 tp->link_config.autoneg = phydev->autoneg;
4056 ethtool_convert_link_mode_to_legacy_u32(
4057 &tp->link_config.advertising,
4058 phydev->advertising);
4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4082 linkmode_copy(phydev->advertising, advertising);
4083 phy_start_aneg(phydev);
4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086 if (phyid != PHY_ID_BCMAC131) {
4087 phyid &= PHY_BCM_OUI_MASK;
4088 if (phyid == PHY_BCM_OUI_1 ||
4089 phyid == PHY_BCM_OUI_2 ||
4090 phyid == PHY_BCM_OUI_3)
4091 do_low_power = true;
4095 do_low_power = true;
4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101 tg3_setup_phy(tp, false);
4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4107 val = tr32(GRC_VCPU_EXT_CTRL);
4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4113 for (i = 0; i < 200; i++) {
4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4120 if (tg3_flag(tp, WOL_CAP))
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122 WOL_DRV_STATE_SHUTDOWN |
4126 if (device_should_wake) {
4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132 tg3_phy_auxctl_write(tp,
4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134 MII_TG3_AUXCTL_PCTL_WOL_EN |
4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142 else if (tp->phy_flags &
4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144 if (tp->link_config.active_speed == SPEED_1000)
4145 mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 mac_mode = MAC_MODE_PORT_MODE_MII;
4149 mac_mode = MAC_MODE_PORT_MODE_MII;
4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154 SPEED_100 : SPEED_10;
4155 if (tg3_5700_link_polarity(tp, speed))
4156 mac_mode |= MAC_MODE_LINK_POLARITY;
4158 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4161 mac_mode = MAC_MODE_PORT_MODE_TBI;
4164 if (!tg3_flag(tp, 5750_PLUS))
4165 tw32(MAC_LED_CTRL, tp->led_ctrl);
4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4172 if (tg3_flag(tp, ENABLE_APE))
4173 mac_mode |= MAC_MODE_APE_TX_EN |
4174 MAC_MODE_APE_RX_EN |
4175 MAC_MODE_TDE_ENABLE;
4177 tw32_f(MAC_MODE, mac_mode);
4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4184 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4189 base_val = tp->pci_clock_ctrl;
4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191 CLOCK_CTRL_TXCLK_DISABLE);
4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195 } else if (tg3_flag(tp, 5780_CLASS) ||
4196 tg3_flag(tp, CPMU_PRESENT) ||
4197 tg3_asic_rev(tp) == ASIC_REV_5906) {
4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200 u32 newbits1, newbits2;
4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203 tg3_asic_rev(tp) == ASIC_REV_5701) {
4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205 CLOCK_CTRL_TXCLK_DISABLE |
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 } else if (tg3_flag(tp, 5705_PLUS)) {
4209 newbits1 = CLOCK_CTRL_625_CORE;
4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4212 newbits1 = CLOCK_CTRL_ALTCLK;
4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4222 if (!tg3_flag(tp, 5705_PLUS)) {
4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226 tg3_asic_rev(tp) == ASIC_REV_5701) {
4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228 CLOCK_CTRL_TXCLK_DISABLE |
4229 CLOCK_CTRL_44MHZ_CORE);
4231 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4234 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235 tp->pci_clock_ctrl | newbits3, 40);
4239 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240 tg3_power_down_phy(tp, do_low_power);
4242 tg3_frob_aux_power(tp, true);
4244 /* Workaround for unstable PLL clock */
4245 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248 u32 val = tr32(0x7d00);
4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4252 if (!tg3_flag(tp, ENABLE_ASF)) {
4255 err = tg3_nvram_lock(tp);
4256 tg3_halt_cpu(tp, RX_CPU_BASE);
4258 tg3_nvram_unlock(tp);
4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4269 static void tg3_power_down(struct tg3 *tp)
4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272 pci_set_power_state(tp->pdev, PCI_D3hot);
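/* Decode the PHY auxiliary status register into speed and duplex,
 * falling back to the FET status bits where applicable, and reporting
 * unknown otherwise.
 */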
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278 case MII_TG3_AUX_STAT_10HALF:
4280 *duplex = DUPLEX_HALF;
4283 case MII_TG3_AUX_STAT_10FULL:
4285 *duplex = DUPLEX_FULL;
4288 case MII_TG3_AUX_STAT_100HALF:
4290 *duplex = DUPLEX_HALF;
4293 case MII_TG3_AUX_STAT_100FULL:
4295 *duplex = DUPLEX_FULL;
4298 case MII_TG3_AUX_STAT_1000HALF:
4299 *speed = SPEED_1000;
4300 *duplex = DUPLEX_HALF;
4303 case MII_TG3_AUX_STAT_1000FULL:
4304 *speed = SPEED_1000;
4305 *duplex = DUPLEX_FULL;
4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4316 *speed = SPEED_UNKNOWN;
4317 *duplex = DUPLEX_UNKNOWN;
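/* Program the autoneg advertisement registers: 10/100 and flow
 * control via MII_ADVERTISE, 1000BASE-T via MII_CTRL1000, and the EEE
 * advertisement via the Clause 45 AN MMD.
 */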
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4327 new_adv = ADVERTISE_CSMA;
4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329 new_adv |= mii_advertise_flowctrl(flowctrl);
4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4350 tw32(TG3_CPMU_EEE_MODE,
4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4358 /* Advertise 100-BaseTX EEE ability */
4359 if (advertise & ADVERTISED_100baseT_Full)
4360 val |= MDIO_AN_EEE_ADV_100TX;
4361 /* Advertise 1000-BaseT EEE ability */
4362 if (advertise & ADVERTISED_1000baseT_Full)
4363 val |= MDIO_AN_EEE_ADV_1000T;
4365 if (!tp->eee.eee_enabled) {
4367 tp->eee.advertised = 0;
4369 tp->eee.advertised = advertise &
4370 (ADVERTISED_100baseT_Full |
4371 ADVERTISED_1000baseT_Full);
4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4378 switch (tg3_asic_rev(tp)) {
4380 case ASIC_REV_57765:
4381 case ASIC_REV_57766:
4383 /* If we advertised any eee advertisements above... */
4385 val = MII_TG3_DSP_TAP26_ALNOKO |
4386 MII_TG3_DSP_TAP26_RMRXSTO |
4387 MII_TG3_DSP_TAP26_OPCSINPT;
4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4394 MII_TG3_DSP_CH34TP2_HIBW01);
4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
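/* Begin copper link bring-up: restart autonegotiation with the
 * configured (or low-power) advertisement, or force speed/duplex
 * through MII_BMCR when autoneg is disabled.
 */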
4406 static void tg3_phy_copper_begin(struct tg3 *tp)
4408 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4414 adv = ADVERTISED_10baseT_Half |
4415 ADVERTISED_10baseT_Full;
4416 if (tg3_flag(tp, WOL_SPEED_100MB))
4417 adv |= ADVERTISED_100baseT_Half |
4418 ADVERTISED_100baseT_Full;
4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4420 if (!(tp->phy_flags &
4421 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4422 adv |= ADVERTISED_1000baseT_Half;
4423 adv |= ADVERTISED_1000baseT_Full;
4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4428 adv = tp->link_config.advertising;
4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4430 adv &= ~(ADVERTISED_1000baseT_Half |
4431 ADVERTISED_1000baseT_Full);
4433 fc = tp->link_config.flowctrl;
4436 tg3_phy_autoneg_cfg(tp, adv, fc);
4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4440 /* Normally during power down we want to autonegotiate
4441 * the lowest possible speed for WOL. However, to avoid
4442 * link flap, we leave it untouched.
4443 */
4447 tg3_writephy(tp, MII_BMCR,
4448 BMCR_ANENABLE | BMCR_ANRESTART);
4451 u32 bmcr, orig_bmcr;
4453 tp->link_config.active_speed = tp->link_config.speed;
4454 tp->link_config.active_duplex = tp->link_config.duplex;
4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4457 /* With autoneg disabled, 5715 only links up when the
4458 * advertisement register has the configured speed
4459 * enabled.
4460 */
4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4465 switch (tp->link_config.speed) {
4471 bmcr |= BMCR_SPEED100;
4475 bmcr |= BMCR_SPEED1000;
4479 if (tp->link_config.duplex == DUPLEX_FULL)
4480 bmcr |= BMCR_FULLDPLX;
4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4483 (bmcr != orig_bmcr)) {
4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4485 for (i = 0; i < 1500; i++) {
4489 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4490 tg3_readphy(tp, MII_BMSR, &tmp))
4492 if (!(tmp & BMSR_LSTATUS)) {
4497 tg3_writephy(tp, MII_BMCR, bmcr);
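/* Recover the current link configuration from the PHY registers so
 * that driver state matches whatever the PHY (possibly configured by
 * firmware) is already doing.
 */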
4503 static int tg3_phy_pull_config(struct tg3 *tp)
4508 err = tg3_readphy(tp, MII_BMCR, &val);
4512 if (!(val & BMCR_ANENABLE)) {
4513 tp->link_config.autoneg = AUTONEG_DISABLE;
4514 tp->link_config.advertising = 0;
4515 tg3_flag_clear(tp, PAUSE_AUTONEG);
4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4524 tp->link_config.speed = SPEED_10;
4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4530 tp->link_config.speed = SPEED_100;
4532 case BMCR_SPEED1000:
4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4534 tp->link_config.speed = SPEED_1000;
4542 if (val & BMCR_FULLDPLX)
4543 tp->link_config.duplex = DUPLEX_FULL;
4545 tp->link_config.duplex = DUPLEX_HALF;
4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4553 tp->link_config.autoneg = AUTONEG_ENABLE;
4554 tp->link_config.advertising = ADVERTISED_Autoneg;
4555 tg3_flag_set(tp, PAUSE_AUTONEG);
4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4560 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4565 tp->link_config.advertising |= adv | ADVERTISED_TP;
4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4569 tp->link_config.advertising |= ADVERTISED_FIBRE;
4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4576 err = tg3_readphy(tp, MII_CTRL1000, &val);
4580 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4582 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4586 adv = tg3_decode_flowctrl_1000X(val);
4587 tp->link_config.flowctrl = adv;
4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4590 adv = mii_adv_to_ethtool_adv_x(val);
4593 tp->link_config.advertising |= adv;
4600 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4604 /* Turn off tap power management. */
4605 /* Set Extended packet length bit */
4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
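/* Compare the EEE configuration programmed in the PHY against
 * tp->eee; returns false when a PHY reset is required to apply
 * pending changes.
 */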
4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4621 struct ethtool_eee eee;
4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4626 tg3_eee_pull_config(tp, &eee);
4628 if (tp->eee.eee_enabled) {
4629 if (tp->eee.advertised != eee.advertised ||
4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4634 /* EEE is disabled but we're advertising */
4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4644 u32 advmsk, tgtadv, advertising;
4646 advertising = tp->link_config.advertising;
4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4649 advmsk = ADVERTISE_ALL;
4650 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4658 if ((*lcladv & advmsk) != tgtadv)
4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4679 if (tg3_ctrl != tgtadv)
4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4693 if (tg3_readphy(tp, MII_STAT1000, &val))
4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4699 if (tg3_readphy(tp, MII_LPA, rmtadv))
4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4703 tp->link_config.rmt_adv = lpeth;
4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4710 if (curr_link_up != tp->link_up) {
4712 netif_carrier_on(tp->dev);
4714 netif_carrier_off(tp->dev);
4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4719 tg3_link_report(tp);
4726 static void tg3_clear_mac_status(struct tg3 *tp)
4731 MAC_STATUS_SYNC_CHANGED |
4732 MAC_STATUS_CFG_CHANGED |
4733 MAC_STATUS_MI_COMPLETION |
4734 MAC_STATUS_LNKSTATE_CHANGED);
4738 static void tg3_setup_eee(struct tg3 *tp)
4742 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4743 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4749 tw32_f(TG3_CPMU_EEE_CTRL,
4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4754 TG3_CPMU_EEEMD_LPI_IN_RX |
4755 TG3_CPMU_EEEMD_EEE_ENABLE;
4757 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4760 if (tg3_flag(tp, ENABLE_APE))
4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4765 tw32_f(TG3_CPMU_EEE_DBTMR1,
4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4767 (tp->eee.tx_lpi_timer & 0xffff));
4769 tw32_f(TG3_CPMU_EEE_DBTMR2,
4770 TG3_CPMU_DBTMR2_APE_TX_2047US |
4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
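/* Main copper link setup path: clear stale MAC status, reset the PHY
 * if requested or required by errata, wait for link, validate the
 * negotiated parameters, and program the MAC to match.
 */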
4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4776 bool current_link_up;
4778 u32 lcl_adv, rmt_adv;
4783 tg3_clear_mac_status(tp);
4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4793 /* Some third-party PHYs need to be reset on link going
4794 * down.
4795 */
4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4797 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4798 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4800 tg3_readphy(tp, MII_BMSR, &bmsr);
4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4802 !(bmsr & BMSR_LSTATUS))
4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4809 tg3_readphy(tp, MII_BMSR, &bmsr);
4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4811 !tg3_flag(tp, INIT_COMPLETE))
4814 if (!(bmsr & BMSR_LSTATUS)) {
4815 err = tg3_init_5401phy_dsp(tp);
4819 tg3_readphy(tp, MII_BMSR, &bmsr);
4820 for (i = 0; i < 1000; i++) {
4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823 (bmsr & BMSR_LSTATUS)) {
4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4830 TG3_PHY_REV_BCM5401_B0 &&
4831 !(bmsr & BMSR_LSTATUS) &&
4832 tp->link_config.active_speed == SPEED_1000) {
4833 err = tg3_phy_reset(tp);
4835 err = tg3_init_5401phy_dsp(tp);
4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4842 /* 5701 {A0,B0} CRC bug workaround */
4843 tg3_writephy(tp, 0x15, 0x0a75);
4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4849 /* Clear pending interrupts... */
4850 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4856 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4859 tg3_asic_rev(tp) == ASIC_REV_5701) {
4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4861 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4867 current_link_up = false;
4868 current_speed = SPEED_UNKNOWN;
4869 current_duplex = DUPLEX_UNKNOWN;
4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4871 tp->link_config.rmt_adv = 0;
4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4874 err = tg3_phy_auxctl_read(tp,
4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4877 if (!err && !(val & (1 << 10))) {
4878 tg3_phy_auxctl_write(tp,
4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4886 for (i = 0; i < 100; i++) {
4887 tg3_readphy(tp, MII_BMSR, &bmsr);
4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4889 (bmsr & BMSR_LSTATUS))
4894 if (bmsr & BMSR_LSTATUS) {
4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4898 for (i = 0; i < 2000; i++) {
4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4910 for (i = 0; i < 200; i++) {
4911 tg3_readphy(tp, MII_BMCR, &bmcr);
4912 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4914 if (bmcr && bmcr != 0x7fff)
4922 tp->link_config.active_speed = current_speed;
4923 tp->link_config.active_duplex = current_duplex;
4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4928 if ((bmcr & BMCR_ANENABLE) &&
4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4932 current_link_up = true;
4934 /* EEE settings changes take effect only after a phy
4935 * reset. If we have skipped a reset due to Link Flap
4936 * Avoidance being enabled, do it now. */
4938 if (!eee_config_ok &&
4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4945 if (!(bmcr & BMCR_ANENABLE) &&
4946 tp->link_config.speed == current_speed &&
4947 tp->link_config.duplex == current_duplex) {
4948 current_link_up = true;
4952 if (current_link_up &&
4953 tp->link_config.active_duplex == DUPLEX_FULL) {
4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4957 reg = MII_TG3_FET_GEN_STAT;
4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4960 reg = MII_TG3_EXT_STAT;
4961 bit = MII_TG3_EXT_STAT_MDIX;
4964 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4973 tg3_phy_copper_begin(tp);
4975 if (tg3_flag(tp, ROBOSWITCH)) {
4976 current_link_up = true;
4977 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4978 current_speed = SPEED_1000;
4979 current_duplex = DUPLEX_FULL;
4980 tp->link_config.active_speed = current_speed;
4981 tp->link_config.active_duplex = current_duplex;
4984 tg3_readphy(tp, MII_BMSR, &bmsr);
4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4987 current_link_up = true;
4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4991 if (current_link_up) {
4992 if (tp->link_config.active_speed == SPEED_100 ||
4993 tp->link_config.active_speed == SPEED_10)
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5002 /* In order for the 5750 core in BCM4785 chip to work properly
5003 * in RGMII mode, the LED Control Register must be set up. */
5005 if (tg3_flag(tp, RGMII_MODE)) {
5006 u32 led_ctrl = tr32(MAC_LED_CTRL);
5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5009 if (tp->link_config.active_speed == SPEED_10)
5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5011 else if (tp->link_config.active_speed == SPEED_100)
5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013 LED_CTRL_100MBPS_ON);
5014 else if (tp->link_config.active_speed == SPEED_1000)
5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016 LED_CTRL_1000MBPS_ON);
5018 tw32(MAC_LED_CTRL, led_ctrl);
5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5023 if (tp->link_config.active_duplex == DUPLEX_HALF)
5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5027 if (current_link_up &&
5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5034 /* ??? Without this setting Netgear GA302T PHY does not
5035 * ??? send/receive packets... */
5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5040 tw32_f(MAC_MI_MODE, tp->mi_mode);
5044 tw32_f(MAC_MODE, tp->mac_mode);
5047 tg3_phy_eee_adjust(tp, current_link_up);
5049 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5050 /* Polled via timer. */
5051 tw32_f(MAC_EVENT, 0);
5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5059 tp->link_config.active_speed == SPEED_1000 &&
5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5063 (MAC_STATUS_SYNC_CHANGED |
5064 MAC_STATUS_CFG_CHANGED));
5067 NIC_SRAM_FIRMWARE_MBOX,
5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5071 /* Prevent send BD corruption. */
5072 if (tg3_flag(tp, CLKREQ_BUG)) {
5073 if (tp->link_config.active_speed == SPEED_100 ||
5074 tp->link_config.active_speed == SPEED_10)
5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5076 PCI_EXP_LNKCTL_CLKREQ_EN);
5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5079 PCI_EXP_LNKCTL_CLKREQ_EN);
5082 tg3_test_and_report_link_chg(tp, current_link_up);
5087 struct tg3_fiber_aneginfo {
5089 #define ANEG_STATE_UNKNOWN 0
5090 #define ANEG_STATE_AN_ENABLE 1
5091 #define ANEG_STATE_RESTART_INIT 2
5092 #define ANEG_STATE_RESTART 3
5093 #define ANEG_STATE_DISABLE_LINK_OK 4
5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5095 #define ANEG_STATE_ABILITY_DETECT 6
5096 #define ANEG_STATE_ACK_DETECT_INIT 7
5097 #define ANEG_STATE_ACK_DETECT 8
5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5099 #define ANEG_STATE_COMPLETE_ACK 10
5100 #define ANEG_STATE_IDLE_DETECT_INIT 11
5101 #define ANEG_STATE_IDLE_DETECT 12
5102 #define ANEG_STATE_LINK_OK 13
5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5107 #define MR_AN_ENABLE 0x00000001
5108 #define MR_RESTART_AN 0x00000002
5109 #define MR_AN_COMPLETE 0x00000004
5110 #define MR_PAGE_RX 0x00000008
5111 #define MR_NP_LOADED 0x00000010
5112 #define MR_TOGGLE_TX 0x00000020
5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5120 #define MR_TOGGLE_RX 0x00002000
5121 #define MR_NP_RX 0x00004000
5123 #define MR_LINK_OK 0x80000000
5125 unsigned long link_time, cur_time;
5127 u32 ability_match_cfg;
5128 int ability_match_count;
5130 char ability_match, idle_match, ack_match;
5132 u32 txconfig, rxconfig;
5133 #define ANEG_CFG_NP 0x00000080
5134 #define ANEG_CFG_ACK 0x00000040
5135 #define ANEG_CFG_RF2 0x00000020
5136 #define ANEG_CFG_RF1 0x00000010
5137 #define ANEG_CFG_PS2 0x00000001
5138 #define ANEG_CFG_PS1 0x00008000
5139 #define ANEG_CFG_HD 0x00004000
5140 #define ANEG_CFG_FD 0x00002000
5141 #define ANEG_CFG_INVAL 0x00001f06
5146 #define ANEG_TIMER_ENAB 2
5147 #define ANEG_FAILED -1
5149 #define ANEG_STATE_SETTLE_TIME 10000
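/* Software 1000BASE-X autonegotiation state machine, used when the
 * hardware autoneg engine is not in play. Driven repeatedly from
 * fiber_autoneg(); returns an ANEG_* status such as ANEG_DONE,
 * ANEG_TIMER_ENAB (keep polling) or ANEG_FAILED.
 */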
5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5152 struct tg3_fiber_aneginfo *ap)
5155 unsigned long delta;
5159 if (ap->state == ANEG_STATE_UNKNOWN) {
5163 ap->ability_match_cfg = 0;
5164 ap->ability_match_count = 0;
5165 ap->ability_match = 0;
5171 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5174 if (rx_cfg_reg != ap->ability_match_cfg) {
5175 ap->ability_match_cfg = rx_cfg_reg;
5176 ap->ability_match = 0;
5177 ap->ability_match_count = 0;
5179 if (++ap->ability_match_count > 1) {
5180 ap->ability_match = 1;
5181 ap->ability_match_cfg = rx_cfg_reg;
5184 if (rx_cfg_reg & ANEG_CFG_ACK)
5192 ap->ability_match_cfg = 0;
5193 ap->ability_match_count = 0;
5194 ap->ability_match = 0;
5200 ap->rxconfig = rx_cfg_reg;
5203 switch (ap->state) {
5204 case ANEG_STATE_UNKNOWN:
5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5206 ap->state = ANEG_STATE_AN_ENABLE;
5209 case ANEG_STATE_AN_ENABLE:
5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5211 if (ap->flags & MR_AN_ENABLE) {
5214 ap->ability_match_cfg = 0;
5215 ap->ability_match_count = 0;
5216 ap->ability_match = 0;
5220 ap->state = ANEG_STATE_RESTART_INIT;
5222 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5226 case ANEG_STATE_RESTART_INIT:
5227 ap->link_time = ap->cur_time;
5228 ap->flags &= ~(MR_NP_LOADED);
5230 tw32(MAC_TX_AUTO_NEG, 0);
5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5232 tw32_f(MAC_MODE, tp->mac_mode);
5235 ret = ANEG_TIMER_ENAB;
5236 ap->state = ANEG_STATE_RESTART;
5239 case ANEG_STATE_RESTART:
5240 delta = ap->cur_time - ap->link_time;
5241 if (delta > ANEG_STATE_SETTLE_TIME)
5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5244 ret = ANEG_TIMER_ENAB;
5247 case ANEG_STATE_DISABLE_LINK_OK:
5251 case ANEG_STATE_ABILITY_DETECT_INIT:
5252 ap->flags &= ~(MR_TOGGLE_TX);
5253 ap->txconfig = ANEG_CFG_FD;
5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5255 if (flowctrl & ADVERTISE_1000XPAUSE)
5256 ap->txconfig |= ANEG_CFG_PS1;
5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5258 ap->txconfig |= ANEG_CFG_PS2;
5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5261 tw32_f(MAC_MODE, tp->mac_mode);
5264 ap->state = ANEG_STATE_ABILITY_DETECT;
5267 case ANEG_STATE_ABILITY_DETECT:
5268 if (ap->ability_match != 0 && ap->rxconfig != 0)
5269 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5272 case ANEG_STATE_ACK_DETECT_INIT:
5273 ap->txconfig |= ANEG_CFG_ACK;
5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5276 tw32_f(MAC_MODE, tp->mac_mode);
5279 ap->state = ANEG_STATE_ACK_DETECT;
5282 case ANEG_STATE_ACK_DETECT:
5283 if (ap->ack_match != 0) {
5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5288 ap->state = ANEG_STATE_AN_ENABLE;
5290 } else if (ap->ability_match != 0 &&
5291 ap->rxconfig == 0) {
5292 ap->state = ANEG_STATE_AN_ENABLE;
5296 case ANEG_STATE_COMPLETE_ACK_INIT:
5297 if (ap->rxconfig & ANEG_CFG_INVAL) {
5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5302 MR_LP_ADV_HALF_DUPLEX |
5303 MR_LP_ADV_SYM_PAUSE |
5304 MR_LP_ADV_ASYM_PAUSE |
5305 MR_LP_ADV_REMOTE_FAULT1 |
5306 MR_LP_ADV_REMOTE_FAULT2 |
5307 MR_LP_ADV_NEXT_PAGE |
5310 if (ap->rxconfig & ANEG_CFG_FD)
5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5312 if (ap->rxconfig & ANEG_CFG_HD)
5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5314 if (ap->rxconfig & ANEG_CFG_PS1)
5315 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5316 if (ap->rxconfig & ANEG_CFG_PS2)
5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5318 if (ap->rxconfig & ANEG_CFG_RF1)
5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5320 if (ap->rxconfig & ANEG_CFG_RF2)
5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5322 if (ap->rxconfig & ANEG_CFG_NP)
5323 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5325 ap->link_time = ap->cur_time;
5327 ap->flags ^= (MR_TOGGLE_TX);
5328 if (ap->rxconfig & 0x0008)
5329 ap->flags |= MR_TOGGLE_RX;
5330 if (ap->rxconfig & ANEG_CFG_NP)
5331 ap->flags |= MR_NP_RX;
5332 ap->flags |= MR_PAGE_RX;
5334 ap->state = ANEG_STATE_COMPLETE_ACK;
5335 ret = ANEG_TIMER_ENAB;
5338 case ANEG_STATE_COMPLETE_ACK:
5339 if (ap->ability_match != 0 &&
5340 ap->rxconfig == 0) {
5341 ap->state = ANEG_STATE_AN_ENABLE;
5344 delta = ap->cur_time - ap->link_time;
5345 if (delta > ANEG_STATE_SETTLE_TIME) {
5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5350 !(ap->flags & MR_NP_RX)) {
5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5359 case ANEG_STATE_IDLE_DETECT_INIT:
5360 ap->link_time = ap->cur_time;
5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5362 tw32_f(MAC_MODE, tp->mac_mode);
5365 ap->state = ANEG_STATE_IDLE_DETECT;
5366 ret = ANEG_TIMER_ENAB;
5369 case ANEG_STATE_IDLE_DETECT:
5370 if (ap->ability_match != 0 &&
5371 ap->rxconfig == 0) {
5372 ap->state = ANEG_STATE_AN_ENABLE;
5375 delta = ap->cur_time - ap->link_time;
5376 if (delta > ANEG_STATE_SETTLE_TIME) {
5377 /* XXX another gem from the Broadcom driver :( */
5378 ap->state = ANEG_STATE_LINK_OK;
5382 case ANEG_STATE_LINK_OK:
5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5388 /* ??? unimplemented */
5391 case ANEG_STATE_NEXT_PAGE_WAIT:
5392 /* ??? unimplemented */
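/* Run the software autoneg state machine to completion (or until
 * the tick budget runs out) and hand the negotiated tx/rx config
 * words back to the caller.
 */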
5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5406 struct tg3_fiber_aneginfo aninfo;
5407 int status = ANEG_FAILED;
5411 tw32_f(MAC_TX_AUTO_NEG, 0);
5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5420 memset(&aninfo, 0, sizeof(aninfo));
5421 aninfo.flags |= MR_AN_ENABLE;
5422 aninfo.state = ANEG_STATE_UNKNOWN;
5423 aninfo.cur_time = 0;
5425 while (++tick < 195000) {
5426 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5427 if (status == ANEG_DONE || status == ANEG_FAILED)
5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5434 tw32_f(MAC_MODE, tp->mac_mode);
5437 *txflags = aninfo.txconfig;
5438 *rxflags = aninfo.flags;
5440 if (status == ANEG_DONE &&
5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5442 MR_LP_ADV_FULL_DUPLEX)))
5448 static void tg3_init_bcm8002(struct tg3 *tp)
5450 u32 mac_status = tr32(MAC_STATUS);
5453 /* Reset when initializing for the first time or when we have a link. */
5454 if (tg3_flag(tp, INIT_COMPLETE) &&
5455 !(mac_status & MAC_STATUS_PCS_SYNCED))
5458 /* Set PLL lock range. */
5459 tg3_writephy(tp, 0x16, 0x8007);
5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5464 /* Wait for reset to complete. */
5465 /* XXX schedule_timeout() ... */
5466 for (i = 0; i < 500; i++)
5469 /* Config mode; select PMA/Ch 1 regs. */
5470 tg3_writephy(tp, 0x10, 0x8411);
5472 /* Enable auto-lock and comdet, select txclk for tx. */
5473 tg3_writephy(tp, 0x11, 0x0a10);
5475 tg3_writephy(tp, 0x18, 0x00a0);
5476 tg3_writephy(tp, 0x16, 0x41ff);
5478 /* Assert and deassert POR. */
5479 tg3_writephy(tp, 0x13, 0x0400);
5481 tg3_writephy(tp, 0x13, 0x0000);
5483 tg3_writephy(tp, 0x11, 0x0a50);
5485 tg3_writephy(tp, 0x11, 0x0a10);
5487 /* Wait for signal to stabilize */
5488 /* XXX schedule_timeout() ... */
5489 for (i = 0; i < 15000; i++)
5492 /* Deselect the channel register so we can read the PHYID later. */
5495 tg3_writephy(tp, 0x10, 0x8011);
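/* Fiber link setup when the SG-DIG hardware autoneg block is in
 * use (5704S-style serdes). Returns the new link-up state.
 */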
5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5501 bool current_link_up;
5502 u32 sg_dig_ctrl, sg_dig_status;
5503 u32 serdes_cfg, expected_sg_dig_ctrl;
5504 int workaround, port_a;
5509 current_link_up = false;
5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5518 /* preserve bits 20-23 for voltage regulator */
5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5522 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5527 u32 val = serdes_cfg;
5533 tw32_f(MAC_SERDES_CFG, val);
5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5538 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5539 tg3_setup_flow_control(tp, 0, 0);
5540 current_link_up = true;
5545 /* Want auto-negotiation. */
5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5549 if (flowctrl & ADVERTISE_1000XPAUSE)
5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5556 tp->serdes_counter &&
5557 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5558 MAC_STATUS_RCVD_CFG)) ==
5559 MAC_STATUS_PCS_SYNCED)) {
5560 tp->serdes_counter--;
5561 current_link_up = true;
5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5574 MAC_STATUS_SIGNAL_DET)) {
5575 sg_dig_status = tr32(SG_DIG_STATUS);
5576 mac_status = tr32(MAC_STATUS);
5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5579 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5580 u32 local_adv = 0, remote_adv = 0;
5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5583 local_adv |= ADVERTISE_1000XPAUSE;
5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5585 local_adv |= ADVERTISE_1000XPSE_ASYM;
5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5588 remote_adv |= LPA_1000XPAUSE;
5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5590 remote_adv |= LPA_1000XPAUSE_ASYM;
5592 tp->link_config.rmt_adv =
5593 mii_adv_to_ethtool_adv_x(remote_adv);
5595 tg3_setup_flow_control(tp, local_adv, remote_adv);
5596 current_link_up = true;
5597 tp->serdes_counter = 0;
5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5600 if (tp->serdes_counter)
5601 tp->serdes_counter--;
5604 u32 val = serdes_cfg;
5611 tw32_f(MAC_SERDES_CFG, val);
5614 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5617 /* Link parallel detection - link is up */
5618 /* only if we have PCS_SYNC and not */
5619 /* receiving config code words */
5620 mac_status = tr32(MAC_STATUS);
5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5622 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5623 tg3_setup_flow_control(tp, 0, 0);
5624 current_link_up = true;
5626 TG3_PHYFLG_PARALLEL_DETECT;
5627 tp->serdes_counter =
5628 SERDES_PARALLEL_DET_TIMEOUT;
5630 goto restart_autoneg;
5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5639 return current_link_up;
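/* Fiber link setup by hand: run the software autoneg state machine
 * when autoneg is enabled, otherwise force a 1000FD link.
 */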
5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5644 bool current_link_up = false;
5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5650 u32 txflags, rxflags;
5653 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5654 u32 local_adv = 0, remote_adv = 0;
5656 if (txflags & ANEG_CFG_PS1)
5657 local_adv |= ADVERTISE_1000XPAUSE;
5658 if (txflags & ANEG_CFG_PS2)
5659 local_adv |= ADVERTISE_1000XPSE_ASYM;
5661 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5662 remote_adv |= LPA_1000XPAUSE;
5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5664 remote_adv |= LPA_1000XPAUSE_ASYM;
5666 tp->link_config.rmt_adv =
5667 mii_adv_to_ethtool_adv_x(remote_adv);
5669 tg3_setup_flow_control(tp, local_adv, remote_adv);
5671 current_link_up = true;
5673 for (i = 0; i < 30; i++) {
5676 (MAC_STATUS_SYNC_CHANGED |
5677 MAC_STATUS_CFG_CHANGED));
5679 if ((tr32(MAC_STATUS) &
5680 (MAC_STATUS_SYNC_CHANGED |
5681 MAC_STATUS_CFG_CHANGED)) == 0)
5685 mac_status = tr32(MAC_STATUS);
5686 if (!current_link_up &&
5687 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5688 !(mac_status & MAC_STATUS_RCVD_CFG))
5689 current_link_up = true;
5691 tg3_setup_flow_control(tp, 0, 0);
5693 /* Forcing 1000FD link up. */
5694 current_link_up = true;
5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5699 tw32_f(MAC_MODE, tp->mac_mode);
5704 return current_link_up;
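/* Top-level link setup for TBI/fiber ports: select hardware or
 * software autoneg, then program the MAC mode and LEDs to match
 * the result.
 */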
5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5710 u32 orig_active_speed;
5711 u8 orig_active_duplex;
5713 bool current_link_up;
5716 orig_pause_cfg = tp->link_config.active_flowctrl;
5717 orig_active_speed = tp->link_config.active_speed;
5718 orig_active_duplex = tp->link_config.active_duplex;
5720 if (!tg3_flag(tp, HW_AUTONEG) &&
5722 tg3_flag(tp, INIT_COMPLETE)) {
5723 mac_status = tr32(MAC_STATUS);
5724 mac_status &= (MAC_STATUS_PCS_SYNCED |
5725 MAC_STATUS_SIGNAL_DET |
5726 MAC_STATUS_CFG_CHANGED |
5727 MAC_STATUS_RCVD_CFG);
5728 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5729 MAC_STATUS_SIGNAL_DET)) {
5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5731 MAC_STATUS_CFG_CHANGED));
5736 tw32_f(MAC_TX_AUTO_NEG, 0);
5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5740 tw32_f(MAC_MODE, tp->mac_mode);
5743 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5744 tg3_init_bcm8002(tp);
5746 /* Enable link change event even when serdes polling. */
5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5750 tp->link_config.rmt_adv = 0;
5751 mac_status = tr32(MAC_STATUS);
5753 if (tg3_flag(tp, HW_AUTONEG))
5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5758 tp->napi[0].hw_status->status =
5759 (SD_STATUS_UPDATED |
5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5762 for (i = 0; i < 100; i++) {
5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764 MAC_STATUS_CFG_CHANGED));
5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767 MAC_STATUS_CFG_CHANGED |
5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5772 mac_status = tr32(MAC_STATUS);
5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774 current_link_up = false;
5775 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776 tp->serdes_counter == 0) {
5777 tw32_f(MAC_MODE, (tp->mac_mode |
5778 MAC_MODE_SEND_CONFIGS));
5780 tw32_f(MAC_MODE, tp->mac_mode);
5784 if (current_link_up) {
5785 tp->link_config.active_speed = SPEED_1000;
5786 tp->link_config.active_duplex = DUPLEX_FULL;
5787 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788 LED_CTRL_LNKLED_OVERRIDE |
5789 LED_CTRL_1000MBPS_ON));
5791 tp->link_config.active_speed = SPEED_UNKNOWN;
5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794 LED_CTRL_LNKLED_OVERRIDE |
5795 LED_CTRL_TRAFFIC_OVERRIDE));
5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800 if (orig_pause_cfg != now_pause_cfg ||
5801 orig_active_speed != tp->link_config.active_speed ||
5802 orig_active_duplex != tp->link_config.active_duplex)
5803 tg3_link_report(tp);
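/* Link setup for MII-attached serdes (5714S/5780 class, plus the
 * 5719/5720 SGMII mode), mirroring the copper flow but using the
 * 1000BASE-X advertisement bits.
 */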
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5813 u32 current_speed = SPEED_UNKNOWN;
5814 u8 current_duplex = DUPLEX_UNKNOWN;
5815 bool current_link_up = false;
5816 u32 local_adv, remote_adv, sgsr;
5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821 (sgsr & SERDES_TG3_SGMII_MODE)) {
5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5828 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831 current_link_up = true;
5832 if (sgsr & SERDES_TG3_SPEED_1000) {
5833 current_speed = SPEED_1000;
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835 } else if (sgsr & SERDES_TG3_SPEED_100) {
5836 current_speed = SPEED_100;
5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5839 current_speed = SPEED_10;
5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5843 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844 current_duplex = DUPLEX_FULL;
5846 current_duplex = DUPLEX_HALF;
5849 tw32_f(MAC_MODE, tp->mac_mode);
5852 tg3_clear_mac_status(tp);
5854 goto fiber_setup_done;
5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858 tw32_f(MAC_MODE, tp->mac_mode);
5861 tg3_clear_mac_status(tp);
5866 tp->link_config.rmt_adv = 0;
5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872 bmsr |= BMSR_LSTATUS;
5874 bmsr &= ~BMSR_LSTATUS;
5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881 /* do nothing, just check for link up at the end */
5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887 ADVERTISE_1000XPAUSE |
5888 ADVERTISE_1000XPSE_ASYM |
5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895 tg3_writephy(tp, MII_ADVERTISE, newadv);
5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897 tg3_writephy(tp, MII_BMCR, bmcr);
5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5908 bmcr &= ~BMCR_SPEED1000;
5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5911 if (tp->link_config.duplex == DUPLEX_FULL)
5912 new_bmcr |= BMCR_FULLDPLX;
5914 if (new_bmcr != bmcr) {
5915 /* BMCR_SPEED1000 is a reserved bit that needs
5916 * to be set on write. */
5918 new_bmcr |= BMCR_SPEED1000;
5920 /* Force a linkdown */
5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925 adv &= ~(ADVERTISE_1000XFULL |
5926 ADVERTISE_1000XHALF |
5928 tg3_writephy(tp, MII_ADVERTISE, adv);
5929 tg3_writephy(tp, MII_BMCR, bmcr |
5933 tg3_carrier_off(tp);
5935 tg3_writephy(tp, MII_BMCR, new_bmcr);
5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941 bmsr |= BMSR_LSTATUS;
5943 bmsr &= ~BMSR_LSTATUS;
5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5949 if (bmsr & BMSR_LSTATUS) {
5950 current_speed = SPEED_1000;
5951 current_link_up = true;
5952 if (bmcr & BMCR_FULLDPLX)
5953 current_duplex = DUPLEX_FULL;
5955 current_duplex = DUPLEX_HALF;
5960 if (bmcr & BMCR_ANENABLE) {
5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965 common = local_adv & remote_adv;
5966 if (common & (ADVERTISE_1000XHALF |
5967 ADVERTISE_1000XFULL)) {
5968 if (common & ADVERTISE_1000XFULL)
5969 current_duplex = DUPLEX_FULL;
5971 current_duplex = DUPLEX_HALF;
5973 tp->link_config.rmt_adv =
5974 mii_adv_to_ethtool_adv_x(remote_adv);
5975 } else if (!tg3_flag(tp, 5780_CLASS)) {
5976 /* Link is up via parallel detect */
5978 current_link_up = false;
5984 if (current_link_up && current_duplex == DUPLEX_FULL)
5985 tg3_setup_flow_control(tp, local_adv, remote_adv);
5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988 if (tp->link_config.active_duplex == DUPLEX_HALF)
5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5991 tw32_f(MAC_MODE, tp->mac_mode);
5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5996 tp->link_config.active_speed = current_speed;
5997 tp->link_config.active_duplex = current_duplex;
5999 tg3_test_and_report_link_chg(tp, current_link_up);
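/* Poll for parallel detection on serdes links: once autoneg has had
 * time to complete, force the link up if we see signal detect but
 * no config code words, and drop back to autoneg as soon as config
 * code words reappear.
 */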
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6005 if (tp->serdes_counter) {
6006 /* Give autoneg time to complete. */
6007 tp->serdes_counter--;
6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6015 tg3_readphy(tp, MII_BMCR, &bmcr);
6016 if (bmcr & BMCR_ANENABLE) {
6019 /* Select shadow register 0x1f */
6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6023 /* Select expansion interrupt status register */
6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025 MII_TG3_DSP_EXP1_INT_STAT);
6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030 /* We have signal detect and not receiving
6031 * config code words, link is up by parallel detection. */
6035 bmcr &= ~BMCR_ANENABLE;
6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037 tg3_writephy(tp, MII_BMCR, bmcr);
6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6041 } else if (tp->link_up &&
6042 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6046 /* Select expansion interrupt status register */
6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048 MII_TG3_DSP_EXP1_INT_STAT);
6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6053 /* Config code words received, turn on autoneg. */
6054 tg3_readphy(tp, MII_BMCR, &bmcr);
6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
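/* Dispatch to the PHY-type specific link setup routine, then fix up
 * the clock prescaler, TX slot time and statistics coalescing to
 * match the negotiated speed and duplex.
 */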
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069 err = tg3_setup_fiber_phy(tp, force_reset);
6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6073 err = tg3_setup_copper_phy(tp, force_reset);
6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088 tw32(GRC_MISC_CFG, val);
6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092 (6 << TX_LENGTHS_IPG_SHIFT);
6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094 tg3_asic_rev(tp) == ASIC_REV_5762)
6095 val |= tr32(MAC_TX_LENGTHS) &
6096 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097 TX_LENGTHS_CNT_DWN_VAL_MSK);
6099 if (tp->link_config.active_speed == SPEED_1000 &&
6100 tp->link_config.active_duplex == DUPLEX_HALF)
6101 tw32(MAC_TX_LENGTHS, val |
6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6104 tw32(MAC_TX_LENGTHS, val |
6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6107 if (!tg3_flag(tp, 5705_PLUS)) {
6109 tw32(HOSTCC_STAT_COAL_TICKS,
6110 tp->coal.stats_block_coalesce_usecs);
6112 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6116 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117 val = tr32(PCIE_PWR_MGMT_THRESH);
6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123 tw32(PCIE_PWR_MGMT_THRESH, val);
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6134 ptp_read_system_prets(sts);
6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6136 ptp_read_system_postts(sts);
6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6157 struct tg3 *tp = netdev_priv(dev);
6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160 SOF_TIMESTAMPING_RX_SOFTWARE |
6161 SOF_TIMESTAMPING_SOFTWARE;
6163 if (tg3_flag(tp, PTP_CAPABLE)) {
6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165 SOF_TIMESTAMPING_RX_HARDWARE |
6166 SOF_TIMESTAMPING_RAW_HARDWARE;
6170 info->phc_index = ptp_clock_index(tp->ptp_clock);
6172 info->phc_index = -1;
6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6183 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6189 /* Frequency adjustment is performed using hardware with a 24 bit
6190 * accumulator and a programmable correction value. On each clk, the
6191 * correction value gets added to the accumulator and when it
6192 * overflows, the time counter is incremented/decremented. */
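/* Rough sketch of the math: with a 24 bit accumulator, a 1 ppm
 * adjustment (scaled_ppm == 65536) works out to a correction value
 * of roughly (1 << 24) / 1000000, i.e. about 16.
 */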
6194 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6196 tg3_full_lock(tp, 0);
6199 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6200 TG3_EAV_REF_CLK_CORRECT_EN |
6201 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6202 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6206 tg3_full_unlock(tp);
6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215 tg3_full_lock(tp, 0);
6216 tp->ptp_adjust += delta;
6217 tg3_full_unlock(tp);
6222 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6223 struct ptp_system_timestamp *sts)
6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228 tg3_full_lock(tp, 0);
6229 ns = tg3_refclk_read(tp, sts);
6230 ns += tp->ptp_adjust;
6231 tg3_full_unlock(tp);
6233 *ts = ns_to_timespec64(ns);
6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6239 const struct timespec64 *ts)
6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244 ns = timespec64_to_ns(ts);
6246 tg3_full_lock(tp, 0);
6247 tg3_refclk_write(tp, ns);
6249 tg3_full_unlock(tp);
6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6255 struct ptp_clock_request *rq, int on)
6257 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6262 case PTP_CLK_REQ_PEROUT:
6263 /* Reject requests with unsupported flags */
6264 if (rq->perout.flags)
6267 if (rq->perout.index != 0)
6270 tg3_full_lock(tp, 0);
6271 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6272 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6277 nsec = rq->perout.start.sec * 1000000000ULL +
6278 rq->perout.start.nsec;
6280 if (rq->perout.period.sec || rq->perout.period.nsec) {
6281 netdev_warn(tp->dev,
6282 "Device supports only a one-shot timesync output, period must be 0\n");
6287 if (nsec & (1ULL << 63)) {
6288 netdev_warn(tp->dev,
6289 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6294 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6295 tw32(TG3_EAV_WATCHDOG0_MSB,
6296 TG3_EAV_WATCHDOG0_EN |
6297 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6299 tw32(TG3_EAV_REF_CLCK_CTL,
6300 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6302 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6303 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6307 tg3_full_unlock(tp);
6317 static const struct ptp_clock_info tg3_ptp_caps = {
6318 .owner = THIS_MODULE,
6319 .name = "tg3 clock",
6320 .max_adj = 250000000,
6326 .adjfine = tg3_ptp_adjfine,
6327 .adjtime = tg3_ptp_adjtime,
6328 .gettimex64 = tg3_ptp_gettimex,
6329 .settime64 = tg3_ptp_settime,
6330 .enable = tg3_ptp_enable,
6333 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6334 struct skb_shared_hwtstamps *timestamp)
6336 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6337 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6341 /* tp->lock must be held */
6342 static void tg3_ptp_init(struct tg3 *tp)
6344 if (!tg3_flag(tp, PTP_CAPABLE))
6347 /* Initialize the hardware clock to the system time. */
6348 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6350 tp->ptp_info = tg3_ptp_caps;
6353 /* tp->lock must be held */
6354 static void tg3_ptp_resume(struct tg3 *tp)
6356 if (!tg3_flag(tp, PTP_CAPABLE))
6359 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6363 static void tg3_ptp_fini(struct tg3 *tp)
6365 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6368 ptp_clock_unregister(tp->ptp_clock);
6369 tp->ptp_clock = NULL;
6373 static inline int tg3_irq_sync(struct tg3 *tp)
6375 return tp->irq_sync;
6378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6382 dst = (u32 *)((u8 *)dst + off);
6383 for (i = 0; i < len; i += sizeof(u32))
6384 *dst++ = tr32(off + i);
6387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6389 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6390 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6391 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6392 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6393 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6394 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6395 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6396 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6397 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6398 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6399 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6400 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6401 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6402 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6403 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6404 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6405 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6406 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6407 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6409 if (tg3_flag(tp, SUPPORT_MSIX))
6410 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6412 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6413 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6414 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6415 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6416 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6417 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6418 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6419 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6421 if (!tg3_flag(tp, 5705_PLUS)) {
6422 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6423 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6424 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6427 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6428 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6429 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6430 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6431 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6433 if (tg3_flag(tp, NVRAM))
6434 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6437 static void tg3_dump_state(struct tg3 *tp)
6442 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6446 if (tg3_flag(tp, PCI_EXPRESS)) {
6447 /* Read up to but not including private PCI registers */
6448 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6449 regs[i / sizeof(u32)] = tr32(i);
6451 tg3_dump_legacy_regs(tp, regs);
6453 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6454 if (!regs[i + 0] && !regs[i + 1] &&
6455 !regs[i + 2] && !regs[i + 3])
6458 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6460 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6465 for (i = 0; i < tp->irq_cnt; i++) {
6466 struct tg3_napi *tnapi = &tp->napi[i];
6468 /* SW status block */
6470 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6472 tnapi->hw_status->status,
6473 tnapi->hw_status->status_tag,
6474 tnapi->hw_status->rx_jumbo_consumer,
6475 tnapi->hw_status->rx_consumer,
6476 tnapi->hw_status->rx_mini_consumer,
6477 tnapi->hw_status->idx[0].rx_producer,
6478 tnapi->hw_status->idx[0].tx_consumer);
6481 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6483 tnapi->last_tag, tnapi->last_irq_tag,
6484 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6486 tnapi->prodring.rx_std_prod_idx,
6487 tnapi->prodring.rx_std_cons_idx,
6488 tnapi->prodring.rx_jmb_prod_idx,
6489 tnapi->prodring.rx_jmb_cons_idx);
6493 /* This is called whenever we suspect that the system chipset is re-
6494 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6495 * is bogus tx completions. We try to recover by setting the
6496 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later when this happens. */
6499 static void tg3_tx_recover(struct tg3 *tp)
6501 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6502 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6504 netdev_warn(tp->dev,
6505 "The system may be re-ordering memory-mapped I/O "
6506 "cycles to the network device, attempting to recover. "
6507 "Please report the problem to the driver maintainer "
6508 "and include system chipset information.\n");
6510 tg3_flag_set(tp, TX_RECOVERY_PENDING);
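/* Number of free TX descriptors: tx_pending minus the descriptors
 * still in flight, with the in-flight count computed modulo the
 * ring size via the (TG3_TX_RING_SIZE - 1) mask.
 */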
6513 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6515 /* Tell compiler to fetch tx indices from memory. */
6517 return tnapi->tx_pending -
6518 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6521 /* Tigon3 never reports partial packet sends. So we do not
6522 * need special logic to handle SKBs that have not had all
6523 * of their frags sent yet, like SunGEM does. */
6525 static void tg3_tx(struct tg3_napi *tnapi)
6527 struct tg3 *tp = tnapi->tp;
6528 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6529 u32 sw_idx = tnapi->tx_cons;
6530 struct netdev_queue *txq;
6531 int index = tnapi - tp->napi;
6532 unsigned int pkts_compl = 0, bytes_compl = 0;
6534 if (tg3_flag(tp, ENABLE_TSS))
6537 txq = netdev_get_tx_queue(tp->dev, index);
6539 while (sw_idx != hw_idx) {
6540 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6541 struct sk_buff *skb = ri->skb;
6544 if (unlikely(skb == NULL)) {
6549 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6550 struct skb_shared_hwtstamps timestamp;
6551 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6552 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6554 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6556 skb_tstamp_tx(skb, &timestamp);
6559 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6560 skb_headlen(skb), DMA_TO_DEVICE);
6564 while (ri->fragmented) {
6565 ri->fragmented = false;
6566 sw_idx = NEXT_TX(sw_idx);
6567 ri = &tnapi->tx_buffers[sw_idx];
6570 sw_idx = NEXT_TX(sw_idx);
6572 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6573 ri = &tnapi->tx_buffers[sw_idx];
6574 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6577 dma_unmap_page(&tp->pdev->dev,
6578 dma_unmap_addr(ri, mapping),
6579 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6582 while (ri->fragmented) {
6583 ri->fragmented = false;
6584 sw_idx = NEXT_TX(sw_idx);
6585 ri = &tnapi->tx_buffers[sw_idx];
6588 sw_idx = NEXT_TX(sw_idx);
6592 bytes_compl += skb->len;
6594 dev_consume_skb_any(skb);
6596 if (unlikely(tx_bug)) {
6602 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6604 tnapi->tx_cons = sw_idx;
6606 /* Need to make the tx_cons update visible to tg3_start_xmit()
6607 * before checking for netif_queue_stopped(). Without the
6608 * memory barrier, there is a small possibility that tg3_start_xmit()
6609 * will miss it and cause the queue to be stopped forever. */
6613 if (unlikely(netif_tx_queue_stopped(txq) &&
6614 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6615 __netif_tx_lock(txq, smp_processor_id());
6616 if (netif_tx_queue_stopped(txq) &&
6617 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6618 netif_tx_wake_queue(txq);
6619 __netif_tx_unlock(txq);
6623 static void tg3_frag_free(bool is_frag, void *data)
6626 skb_free_frag(data);
6631 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6633 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6634 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6639 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6641 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6646 /* Returns size of skb allocated or < 0 on error.
6648 * We only need to fill in the address because the other members
6649 * of the RX descriptor are invariant, see tg3_init_rings.
6651 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6652 * posting buffers we only dirty the first cache line of the RX
6653 * descriptor (containing the address). Whereas for the RX status
6654 * buffers the cpu only reads the last cacheline of the RX descriptor
6655 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */
6657 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6658 u32 opaque_key, u32 dest_idx_unmasked,
6659 unsigned int *frag_size)
6661 struct tg3_rx_buffer_desc *desc;
6662 struct ring_info *map;
6665 int skb_size, data_size, dest_idx;
6667 switch (opaque_key) {
6668 case RXD_OPAQUE_RING_STD:
6669 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6670 desc = &tpr->rx_std[dest_idx];
6671 map = &tpr->rx_std_buffers[dest_idx];
6672 data_size = tp->rx_pkt_map_sz;
6675 case RXD_OPAQUE_RING_JUMBO:
6676 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6677 desc = &tpr->rx_jmb[dest_idx].std;
6678 map = &tpr->rx_jmb_buffers[dest_idx];
6679 data_size = TG3_RX_JMB_MAP_SZ;
6686 /* Do not overwrite any of the map or rp information
6687 * until we are sure we can commit to a new buffer.
6689 * Callers depend upon this behavior and assume that
6690 * we leave everything unchanged if we fail. */
6692 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6693 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6694 if (skb_size <= PAGE_SIZE) {
6695 data = napi_alloc_frag(skb_size);
6696 *frag_size = skb_size;
6698 data = kmalloc(skb_size, GFP_ATOMIC);
6704 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6705 data_size, DMA_FROM_DEVICE);
6706 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6707 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6712 dma_unmap_addr_set(map, mapping, mapping);
6714 desc->addr_hi = ((u64)mapping >> 32);
6715 desc->addr_lo = ((u64)mapping & 0xffffffff);
6720 /* We only need to move over in the address because the other
6721 * members of the RX descriptor are invariant. See notes above
6722 * tg3_alloc_rx_data for full details. */
6724 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6725 struct tg3_rx_prodring_set *dpr,
6726 u32 opaque_key, int src_idx,
6727 u32 dest_idx_unmasked)
6729 struct tg3 *tp = tnapi->tp;
6730 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6731 struct ring_info *src_map, *dest_map;
6732 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6735 switch (opaque_key) {
6736 case RXD_OPAQUE_RING_STD:
6737 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6738 dest_desc = &dpr->rx_std[dest_idx];
6739 dest_map = &dpr->rx_std_buffers[dest_idx];
6740 src_desc = &spr->rx_std[src_idx];
6741 src_map = &spr->rx_std_buffers[src_idx];
6744 case RXD_OPAQUE_RING_JUMBO:
6745 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6746 dest_desc = &dpr->rx_jmb[dest_idx].std;
6747 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6748 src_desc = &spr->rx_jmb[src_idx].std;
6749 src_map = &spr->rx_jmb_buffers[src_idx];
6756 dest_map->data = src_map->data;
6757 dma_unmap_addr_set(dest_map, mapping,
6758 dma_unmap_addr(src_map, mapping));
6759 dest_desc->addr_hi = src_desc->addr_hi;
6760 dest_desc->addr_lo = src_desc->addr_lo;
6762 /* Ensure that the update to the skb happens after the physical
6763 * addresses have been transferred to the new BD location. */
6767 src_map->data = NULL;
6770 /* The RX ring scheme is composed of multiple rings which post fresh
6771 * buffers to the chip, and one special ring the chip uses to report
6772 * status back to the host.
6774 * The special ring reports the status of received packets to the
6775 * host. The chip does not write into the original descriptor the
6776 * RX buffer was obtained from. The chip simply takes the original
6777 * descriptor as provided by the host, updates the status and length
6778 * field, then writes this into the next status ring entry.
6780 * Each ring the host uses to post buffers to the chip is described
6781 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6782 * it is first placed into the on-chip ram. When the packet's length
6783 * is known, it walks down the TG3_BDINFO entries to select the ring.
6784 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6785 * which is within the range of the new packet's length is chosen.
6787 * The "separate ring for rx status" scheme may sound queer, but it makes
6788 * sense from a cache coherency perspective. If only the host writes
6789 * to the buffer post rings, and only the chip writes to the rx status
6790 * rings, then cache lines never move beyond shared-modified state.
6791 * If both the host and chip were to write into the same ring, cache line
6792 * eviction could occur since both entities want it in an exclusive state. */
6794 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6796 struct tg3 *tp = tnapi->tp;
6797 u32 work_mask, rx_std_posted = 0;
6798 u32 std_prod_idx, jmb_prod_idx;
6799 u32 sw_idx = tnapi->rx_rcb_ptr;
6802 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6804 hw_idx = *(tnapi->rx_rcb_prod_idx);
6806 /* We need to order the read of hw_idx and the read of
6807 * the opaque cookie. */
6812 std_prod_idx = tpr->rx_std_prod_idx;
6813 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6814 while (sw_idx != hw_idx && budget > 0) {
6815 struct ring_info *ri;
6816 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6818 struct sk_buff *skb;
6819 dma_addr_t dma_addr;
6820 u32 opaque_key, desc_idx, *post_ptr;
6824 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6825 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6826 if (opaque_key == RXD_OPAQUE_RING_STD) {
6827 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6828 dma_addr = dma_unmap_addr(ri, mapping);
6830 post_ptr = &std_prod_idx;
6832 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6833 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6834 dma_addr = dma_unmap_addr(ri, mapping);
6836 post_ptr = &jmb_prod_idx;
6838 goto next_pkt_nopost;
6840 work_mask |= opaque_key;
6842 if (desc->err_vlan & RXD_ERR_MASK) {
6844 tg3_recycle_rx(tnapi, tpr, opaque_key,
6845 desc_idx, *post_ptr);
6847 /* Other statistics kept track of by card. */
6852 prefetch(data + TG3_RX_OFFSET(tp));
6853 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6856 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6857 RXD_FLAG_PTPSTAT_PTPV1 ||
6858 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6859 RXD_FLAG_PTPSTAT_PTPV2) {
6860 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6861 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6864 if (len > TG3_RX_COPY_THRESH(tp)) {
6866 unsigned int frag_size;
6868 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6869 *post_ptr, &frag_size);
6873 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6876 /* Ensure that the update to the data happens
6877 * after the usage of the old DMA mapping. */
6884 skb = build_skb(data, frag_size);
6886 skb = slab_build_skb(data);
6888 tg3_frag_free(frag_size != 0, data);
6889 goto drop_it_no_recycle;
6891 skb_reserve(skb, TG3_RX_OFFSET(tp));
6893 tg3_recycle_rx(tnapi, tpr, opaque_key,
6894 desc_idx, *post_ptr);
6896 skb = netdev_alloc_skb(tp->dev,
6897 len + TG3_RAW_IP_ALIGN);
6899 goto drop_it_no_recycle;
6901 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6902 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6905 data + TG3_RX_OFFSET(tp),
6907 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6908 len, DMA_FROM_DEVICE);
6913 tg3_hwclock_to_timestamp(tp, tstamp,
6914 skb_hwtstamps(skb));
6916 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6917 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6918 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6919 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6920 skb->ip_summed = CHECKSUM_UNNECESSARY;
6922 skb_checksum_none_assert(skb);
6924 skb->protocol = eth_type_trans(skb, tp->dev);
6926 if (len > (tp->dev->mtu + ETH_HLEN) &&
6927 skb->protocol != htons(ETH_P_8021Q) &&
6928 skb->protocol != htons(ETH_P_8021AD)) {
6929 dev_kfree_skb_any(skb);
6930 goto drop_it_no_recycle;
6933 if (desc->type_flags & RXD_FLAG_VLAN &&
6934 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6935 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6936 desc->err_vlan & RXD_VLAN_MASK);
6938 napi_gro_receive(&tnapi->napi, skb);
6946 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6947 tpr->rx_std_prod_idx = std_prod_idx &
6948 tp->rx_std_ring_mask;
6949 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6950 tpr->rx_std_prod_idx);
6951 work_mask &= ~RXD_OPAQUE_RING_STD;
6956 sw_idx &= tp->rx_ret_ring_mask;
6958 /* Refresh hw_idx to see if there is new work */
6959 if (sw_idx == hw_idx) {
6960 hw_idx = *(tnapi->rx_rcb_prod_idx);
6965 /* ACK the status ring. */
6966 tnapi->rx_rcb_ptr = sw_idx;
6967 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6969 /* Refill RX ring(s). */
6970 if (!tg3_flag(tp, ENABLE_RSS)) {
6971 /* Sync BD data before updating mailbox */
6974 if (work_mask & RXD_OPAQUE_RING_STD) {
6975 tpr->rx_std_prod_idx = std_prod_idx &
6976 tp->rx_std_ring_mask;
6977 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6978 tpr->rx_std_prod_idx);
6980 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6981 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6982 tp->rx_jmb_ring_mask;
6983 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6984 tpr->rx_jmb_prod_idx);
6986 } else if (work_mask) {
6987 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6988 * updated before the producer indices can be updated. */
6992 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6993 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6995 if (tnapi != &tp->napi[1]) {
6996 tp->rx_refill = true;
6997 napi_schedule(&tp->napi[1].napi);
7004 static void tg3_poll_link(struct tg3 *tp)
7006 /* handle link change and other phy events */
7007 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7008 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7010 if (sblk->status & SD_STATUS_LINK_CHG) {
7011 sblk->status = SD_STATUS_UPDATED |
7012 (sblk->status & ~SD_STATUS_LINK_CHG);
7013 spin_lock(&tp->lock);
7014 if (tg3_flag(tp, USE_PHYLIB)) {
7016 (MAC_STATUS_SYNC_CHANGED |
7017 MAC_STATUS_CFG_CHANGED |
7018 MAC_STATUS_MI_COMPLETION |
7019 MAC_STATUS_LNKSTATE_CHANGED));
7022 tg3_setup_phy(tp, false);
7023 spin_unlock(&tp->lock);
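/* Move fresh RX buffers from a per-queue producer ring (spr) to the
 * ring the hardware actually consumes (dpr), copying both the
 * ring_info entries and the buffer descriptor addresses.
 */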
7028 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7029 struct tg3_rx_prodring_set *dpr,
7030 struct tg3_rx_prodring_set *spr)
7032 u32 si, di, cpycnt, src_prod_idx;
7036 src_prod_idx = spr->rx_std_prod_idx;
7038 /* Make sure updates to the rx_std_buffers[] entries and the
7039 * standard producer index are seen in the correct order. */
7043 if (spr->rx_std_cons_idx == src_prod_idx)
7046 if (spr->rx_std_cons_idx < src_prod_idx)
7047 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7049 cpycnt = tp->rx_std_ring_mask + 1 -
7050 spr->rx_std_cons_idx;
7052 cpycnt = min(cpycnt,
7053 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7055 si = spr->rx_std_cons_idx;
7056 di = dpr->rx_std_prod_idx;
7058 for (i = di; i < di + cpycnt; i++) {
7059 if (dpr->rx_std_buffers[i].data) {
7069 /* Ensure that updates to the rx_std_buffers ring and the
7070 * shadowed hardware producer ring from tg3_recycle_skb() are
7071 * ordered correctly WRT the skb check above. */
7075 memcpy(&dpr->rx_std_buffers[di],
7076 &spr->rx_std_buffers[si],
7077 cpycnt * sizeof(struct ring_info));
7079 for (i = 0; i < cpycnt; i++, di++, si++) {
7080 struct tg3_rx_buffer_desc *sbd, *dbd;
7081 sbd = &spr->rx_std[si];
7082 dbd = &dpr->rx_std[di];
7083 dbd->addr_hi = sbd->addr_hi;
7084 dbd->addr_lo = sbd->addr_lo;
7087 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7088 tp->rx_std_ring_mask;
7089 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7090 tp->rx_std_ring_mask;
7094 src_prod_idx = spr->rx_jmb_prod_idx;
7096 /* Make sure updates to the rx_jmb_buffers[] entries and
7097 * the jumbo producer index are seen in the correct order. */
7101 if (spr->rx_jmb_cons_idx == src_prod_idx)
7104 if (spr->rx_jmb_cons_idx < src_prod_idx)
7105 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7107 cpycnt = tp->rx_jmb_ring_mask + 1 -
7108 spr->rx_jmb_cons_idx;
7110 cpycnt = min(cpycnt,
7111 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7113 si = spr->rx_jmb_cons_idx;
7114 di = dpr->rx_jmb_prod_idx;
7116 for (i = di; i < di + cpycnt; i++) {
7117 if (dpr->rx_jmb_buffers[i].data) {
7127 /* Ensure that updates to the rx_jmb_buffers ring and the
7128 * shadowed hardware producer ring from tg3_recycle_skb() are
7129 * ordered correctly WRT the skb check above. */
7133 memcpy(&dpr->rx_jmb_buffers[di],
7134 &spr->rx_jmb_buffers[si],
7135 cpycnt * sizeof(struct ring_info));
7137 for (i = 0; i < cpycnt; i++, di++, si++) {
7138 struct tg3_rx_buffer_desc *sbd, *dbd;
7139 sbd = &spr->rx_jmb[si].std;
7140 dbd = &dpr->rx_jmb[di].std;
7141 dbd->addr_hi = sbd->addr_hi;
7142 dbd->addr_lo = sbd->addr_lo;
7145 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7146 tp->rx_jmb_ring_mask;
7147 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7148 tp->rx_jmb_ring_mask;
7154 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7156 struct tg3 *tp = tnapi->tp;
7158 /* run TX completion thread */
7159 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7161 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7165 if (!tnapi->rx_rcb_prod_idx)
7168 /* run RX thread, within the bounds set by NAPI.
7169 * All RX "locking" is done by ensuring outside
7170 * code synchronizes with tg3->napi.poll()
7172 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7173 work_done += tg3_rx(tnapi, budget - work_done);
7175 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7176 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7178 u32 std_prod_idx = dpr->rx_std_prod_idx;
7179 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7181 tp->rx_refill = false;
7182 for (i = 1; i <= tp->rxq_cnt; i++)
7183 err |= tg3_rx_prodring_xfer(tp, dpr,
7184 &tp->napi[i].prodring);
7188 if (std_prod_idx != dpr->rx_std_prod_idx)
7189 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7190 dpr->rx_std_prod_idx);
7192 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7193 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7194 dpr->rx_jmb_prod_idx);
7197 tw32_f(HOSTCC_MODE, tp->coal_now);
7203 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7205 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7206 schedule_work(&tp->reset_task);
7209 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7211 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7212 cancel_work_sync(&tp->reset_task);
7213 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
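/* Illustrative note (not in the original source): the atomic
 * test_and_set_bit()/test_and_clear_bit() pair above makes reset
 * scheduling idempotent -- only the caller that actually flips
 * TG3_FLAG_RESET_TASK_PENDING touches the workqueue, so the reset
 * task can never be double-scheduled or cancelled twice.
 */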
7216 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7218 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7219 struct tg3 *tp = tnapi->tp;
7221 struct tg3_hw_status *sblk = tnapi->hw_status;
7224 work_done = tg3_poll_work(tnapi, work_done, budget);
7226 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7229 if (unlikely(work_done >= budget))
7232 /* tp->last_tag is used in tg3_int_reenable() below
7233 * to tell the hw how much work has been processed,
7234 * so we must read it before checking for more work.
7236 tnapi->last_tag = sblk->status_tag;
7237 tnapi->last_irq_tag = tnapi->last_tag;
7240 /* check for RX/TX work to do */
7241 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7242 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7244 /* This test here is not race free, but will reduce
7245 * the number of interrupts by looping again.
7247 if (tnapi == &tp->napi[1] && tp->rx_refill)
7250 napi_complete_done(napi, work_done);
7251 /* Reenable interrupts. */
7252 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7254 /* This test here is synchronized by napi_schedule()
7255 * and napi_complete() to close the race condition.
7257 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7258 tw32(HOSTCC_MODE, tp->coalesce_mode |
7259 HOSTCC_MODE_ENABLE |
7266 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7270 /* work_done is guaranteed to be less than budget. */
7271 napi_complete(napi);
7272 tg3_reset_task_schedule(tp);
7276 static void tg3_process_error(struct tg3 *tp)
7279 bool real_error = false;
7281 if (tg3_flag(tp, ERROR_PROCESSED))
7284 /* Check Flow Attention register */
7285 val = tr32(HOSTCC_FLOW_ATTN);
7286 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7287 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7291 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7292 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7296 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7297 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7306 tg3_flag_set(tp, ERROR_PROCESSED);
7307 tg3_reset_task_schedule(tp);
7310 static int tg3_poll(struct napi_struct *napi, int budget)
7312 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7313 struct tg3 *tp = tnapi->tp;
7315 struct tg3_hw_status *sblk = tnapi->hw_status;
7318 if (sblk->status & SD_STATUS_ERROR)
7319 tg3_process_error(tp);
7323 work_done = tg3_poll_work(tnapi, work_done, budget);
7325 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7328 if (unlikely(work_done >= budget))
7331 if (tg3_flag(tp, TAGGED_STATUS)) {
7332 /* tp->last_tag is used in tg3_int_reenable() below
7333 * to tell the hw how much work has been processed,
7334 * so we must read it before checking for more work.
7336 tnapi->last_tag = sblk->status_tag;
7337 tnapi->last_irq_tag = tnapi->last_tag;
7340 sblk->status &= ~SD_STATUS_UPDATED;
7342 if (likely(!tg3_has_work(tnapi))) {
7343 napi_complete_done(napi, work_done);
7344 tg3_int_reenable(tnapi);
7349 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7353 /* work_done is guaranteed to be less than budget. */
7354 napi_complete(napi);
7355 tg3_reset_task_schedule(tp);
7359 static void tg3_napi_disable(struct tg3 *tp)
7363 for (i = tp->irq_cnt - 1; i >= 0; i--)
7364 napi_disable(&tp->napi[i].napi);
7367 static void tg3_napi_enable(struct tg3 *tp)
7371 for (i = 0; i < tp->irq_cnt; i++)
7372 napi_enable(&tp->napi[i].napi);
7375 static void tg3_napi_init(struct tg3 *tp)
7379 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7380 for (i = 1; i < tp->irq_cnt; i++)
7381 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7384 static void tg3_napi_fini(struct tg3 *tp)
7388 for (i = 0; i < tp->irq_cnt; i++)
7389 netif_napi_del(&tp->napi[i].napi);
7392 static inline void tg3_netif_stop(struct tg3 *tp)
7394 netif_trans_update(tp->dev); /* prevent tx timeout */
7395 tg3_napi_disable(tp);
7396 netif_carrier_off(tp->dev);
7397 netif_tx_disable(tp->dev);
7400 /* tp->lock must be held */
7401 static inline void tg3_netif_start(struct tg3 *tp)
7405 /* NOTE: unconditional netif_tx_wake_all_queues is only
7406 * appropriate so long as all callers are assured to
7407 * have free tx slots (such as after tg3_init_hw)
7409 netif_tx_wake_all_queues(tp->dev);
7412 netif_carrier_on(tp->dev);
7414 tg3_napi_enable(tp);
7415 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7416 tg3_enable_ints(tp);
7419 static void tg3_irq_quiesce(struct tg3 *tp)
7420 __releases(tp->lock)
7421 __acquires(tp->lock)
7425 BUG_ON(tp->irq_sync);
7430 spin_unlock_bh(&tp->lock);
7432 for (i = 0; i < tp->irq_cnt; i++)
7433 synchronize_irq(tp->napi[i].irq_vec);
7435 spin_lock_bh(&tp->lock);
7438 /* Fully shut down all tg3 driver activity elsewhere in the system.
7439 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7440 * as well.  Most of the time this is not necessary, except when
7441 * shutting down the device.
7443 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7445 spin_lock_bh(&tp->lock);
7447 tg3_irq_quiesce(tp);
7450 static inline void tg3_full_unlock(struct tg3 *tp)
7452 spin_unlock_bh(&tp->lock);
7455 /* One-shot MSI handler - the chip automatically disables the
7456 * interrupt after sending the MSI, so the driver doesn't have to.
7458 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7460 struct tg3_napi *tnapi = dev_id;
7461 struct tg3 *tp = tnapi->tp;
7463 prefetch(tnapi->hw_status);
7465 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7467 if (likely(!tg3_irq_sync(tp)))
7468 napi_schedule(&tnapi->napi);
7473 /* MSI ISR - No need to check for interrupt sharing and no need to
7474 * flush status block and interrupt mailbox. PCI ordering rules
7475 * guarantee that MSI will arrive after the status block.
7477 static irqreturn_t tg3_msi(int irq, void *dev_id)
7479 struct tg3_napi *tnapi = dev_id;
7480 struct tg3 *tp = tnapi->tp;
7482 prefetch(tnapi->hw_status);
7484 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7486 * Writing any value to intr-mbox-0 clears PCI INTA# and
7487 * chip-internal interrupt pending events.
7488 * Writing non-zero to intr-mbox-0 additionally tells the
7489 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
7492 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7493 if (likely(!tg3_irq_sync(tp)))
7494 napi_schedule(&tnapi->napi);
7496 return IRQ_RETVAL(1);
7499 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7501 struct tg3_napi *tnapi = dev_id;
7502 struct tg3 *tp = tnapi->tp;
7503 struct tg3_hw_status *sblk = tnapi->hw_status;
7504 unsigned int handled = 1;
7506 /* In INTx mode, it is possible for the interrupt to arrive at
7507 * the CPU before the status block that was posted prior to it.
7508 * Reading the PCI State register will confirm whether the
7509 * interrupt is ours and will flush the status block.
7511 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7512 if (tg3_flag(tp, CHIP_RESETTING) ||
7513 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7520 * Writing any value to intr-mbox-0 clears PCI INTA# and
7521 * chip-internal interrupt pending events.
7522 * Writing non-zero to intr-mbox-0 additionally tells the
7523 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
7526 * Flush the mailbox to de-assert the IRQ immediately to prevent
7527 * spurious interrupts. The flush impacts performance but
7528 * excessive spurious interrupts can be worse in some cases.
7530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7531 if (tg3_irq_sync(tp))
7533 sblk->status &= ~SD_STATUS_UPDATED;
7534 if (likely(tg3_has_work(tnapi))) {
7535 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7536 napi_schedule(&tnapi->napi);
7538 /* No work, shared interrupt perhaps? re-enable
7539 * interrupts, and flush that PCI write
7541 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7545 return IRQ_RETVAL(handled);
7548 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7550 struct tg3_napi *tnapi = dev_id;
7551 struct tg3 *tp = tnapi->tp;
7552 struct tg3_hw_status *sblk = tnapi->hw_status;
7553 unsigned int handled = 1;
7555 /* In INTx mode, it is possible for the interrupt to arrive at
7556 * the CPU before the status block that was posted prior to it.
7557 * Reading the PCI State register will confirm whether the
7558 * interrupt is ours and will flush the status block.
7560 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7561 if (tg3_flag(tp, CHIP_RESETTING) ||
7562 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7569 * writing any value to intr-mbox-0 clears PCI INTA# and
7570 * chip-internal interrupt pending events.
7571 * Writing non-zero to intr-mbox-0 additionally tells the
7572 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
7575 * Flush the mailbox to de-assert the IRQ immediately to prevent
7576 * spurious interrupts. The flush impacts performance but
7577 * excessive spurious interrupts can be worse in some cases.
7579 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7582 * In a shared interrupt configuration, sometimes other devices'
7583 * interrupts will scream. We record the current status tag here
7584 * so that the above check can report that the screaming interrupts
7585 * are unhandled. Eventually they will be silenced.
7587 tnapi->last_irq_tag = sblk->status_tag;
7589 if (tg3_irq_sync(tp))
7592 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7594 napi_schedule(&tnapi->napi);
7597 return IRQ_RETVAL(handled);
7600 /* ISR for interrupt test */
7601 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7603 struct tg3_napi *tnapi = dev_id;
7604 struct tg3 *tp = tnapi->tp;
7605 struct tg3_hw_status *sblk = tnapi->hw_status;
7607 if ((sblk->status & SD_STATUS_UPDATED) ||
7608 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7609 tg3_disable_ints(tp);
7610 return IRQ_RETVAL(1);
7612 return IRQ_RETVAL(0);
7615 #ifdef CONFIG_NET_POLL_CONTROLLER
7616 static void tg3_poll_controller(struct net_device *dev)
7619 struct tg3 *tp = netdev_priv(dev);
7621 if (tg3_irq_sync(tp))
7624 for (i = 0; i < tp->irq_cnt; i++)
7625 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7629 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7631 struct tg3 *tp = netdev_priv(dev);
7633 if (netif_msg_tx_err(tp)) {
7634 netdev_err(dev, "transmit timed out, resetting\n");
7638 tg3_reset_task_schedule(tp);
7641 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7642 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7644 u32 base = (u32) mapping & 0xffffffff;
7646 return base + len + 8 < base;
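/* Worked example (illustrative, not in the original source): for
 * mapping == 0xfffffff0 and len == 16, the u32 sum
 * 0xfffffff0 + 16 + 8 wraps to 0x00000008, which is less than base,
 * so the test reports that the buffer straddles a 4GB boundary and
 * the bounce workaround must run.  The "+ 8" presumably covers bytes
 * the DMA engine may read past the end of the buffer.
 */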
7649 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7650 * of any 4GB boundaries: 4G, 8G, etc
7652 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7655 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7656 u32 base = (u32) mapping & 0xffffffff;
7658 return ((base + len + (mss & 0x3fff)) < base);
7663 /* Test for DMA addresses > 40-bit */
7664 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7667 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7668 if (tg3_flag(tp, 40BIT_DMA_BUG))
7669 return ((u64) mapping + len) > DMA_BIT_MASK(40);
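/* Worked example (illustrative, not in the original source):
 * DMA_BIT_MASK(40) is 0xffffffffff, so a mapping of 0xfffffffff8
 * with len == 16 ends at 0x10000000008, exceeds the mask, and the
 * test fires, forcing the workaround on parts with the 40-bit DMA
 * bug.
 */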
7676 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7677 dma_addr_t mapping, u32 len, u32 flags,
7680 txbd->addr_hi = ((u64) mapping >> 32);
7681 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7682 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7683 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7686 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7687 dma_addr_t map, u32 len, u32 flags,
7690 struct tg3 *tp = tnapi->tp;
7693 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7696 if (tg3_4g_overflow_test(map, len))
7699 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7702 if (tg3_40bit_overflow_test(tp, map, len))
7705 if (tp->dma_limit) {
7706 u32 prvidx = *entry;
7707 u32 tmp_flag = flags & ~TXD_FLAG_END;
7708 while (len > tp->dma_limit && *budget) {
7709 u32 frag_len = tp->dma_limit;
7710 len -= tp->dma_limit;
7712 /* Avoid the 8-byte DMA problem */
7714 len += tp->dma_limit / 2;
7715 frag_len = tp->dma_limit / 2;
7718 tnapi->tx_buffers[*entry].fragmented = true;
7720 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7721 frag_len, tmp_flag, mss, vlan);
7724 *entry = NEXT_TX(*entry);
7731 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7732 len, flags, mss, vlan);
7734 *entry = NEXT_TX(*entry);
7737 tnapi->tx_buffers[prvidx].fragmented = false;
7741 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7742 len, flags, mss, vlan);
7743 *entry = NEXT_TX(*entry);
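/* Worked example (illustrative, not in the original source): with a
 * hypothetical dma_limit of 4096, a 4100-byte buffer would naively
 * split into 4096 + 4, and the 4-byte tail would trip the short-DMA
 * erratum.  The loop above instead emits a half-limit 2048-byte
 * descriptor first, so the split becomes 2048 + 2052 and every
 * descriptor stays longer than 8 bytes.
 */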
7749 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7752 struct sk_buff *skb;
7753 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7758 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7759 skb_headlen(skb), DMA_TO_DEVICE);
7761 while (txb->fragmented) {
7762 txb->fragmented = false;
7763 entry = NEXT_TX(entry);
7764 txb = &tnapi->tx_buffers[entry];
7767 for (i = 0; i <= last; i++) {
7768 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7770 entry = NEXT_TX(entry);
7771 txb = &tnapi->tx_buffers[entry];
7773 dma_unmap_page(&tnapi->tp->pdev->dev,
7774 dma_unmap_addr(txb, mapping),
7775 skb_frag_size(frag), DMA_TO_DEVICE);
7777 while (txb->fragmented) {
7778 txb->fragmented = false;
7779 entry = NEXT_TX(entry);
7780 txb = &tnapi->tx_buffers[entry];
7785 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7786 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7787 struct sk_buff **pskb,
7788 u32 *entry, u32 *budget,
7789 u32 base_flags, u32 mss, u32 vlan)
7791 struct tg3 *tp = tnapi->tp;
7792 struct sk_buff *new_skb, *skb = *pskb;
7793 dma_addr_t new_addr = 0;
7796 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7797 new_skb = skb_copy(skb, GFP_ATOMIC);
7799 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7801 new_skb = skb_copy_expand(skb,
7802 skb_headroom(skb) + more_headroom,
7803 skb_tailroom(skb), GFP_ATOMIC);
7809 /* New SKB is guaranteed to be linear. */
7810 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7811 new_skb->len, DMA_TO_DEVICE);
7812 /* Make sure the mapping succeeded */
7813 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7814 dev_kfree_skb_any(new_skb);
7817 u32 save_entry = *entry;
7819 base_flags |= TXD_FLAG_END;
7821 tnapi->tx_buffers[*entry].skb = new_skb;
7822 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7825 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7826 new_skb->len, base_flags,
7828 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7829 dev_kfree_skb_any(new_skb);
7835 dev_consume_skb_any(skb);
7840 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7842 /* Check if we will never have enough descriptors,
7843 * as gso_segs can be more than the current ring size.
7845 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
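/* Worked example (illustrative, not in the original source): the
 * divisor 3 assumes up to three descriptors per GSO segment after
 * the hardware-bug splits, so with tx_pending == 511 only skbs with
 * gso_segs < 170 qualify for the tg3_tso_bug() fallback; anything
 * larger could never fit even in an empty ring.
 */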
7848 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7850 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7851 * indicated in tg3_tx_frag_set()
7853 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7854 struct netdev_queue *txq, struct sk_buff *skb)
7856 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7857 struct sk_buff *segs, *seg, *next;
7859 /* Estimate the number of fragments in the worst case */
7860 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7861 netif_tx_stop_queue(txq);
7863 /* netif_tx_stop_queue() must be done before checking
7864 * tx index in tg3_tx_avail() below, because in
7865 * tg3_tx(), we update tx index before checking for
7866 * netif_tx_queue_stopped().
7869 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7870 return NETDEV_TX_BUSY;
7872 netif_tx_wake_queue(txq);
7875 segs = skb_gso_segment(skb, tp->dev->features &
7876 ~(NETIF_F_TSO | NETIF_F_TSO6));
7877 if (IS_ERR(segs) || !segs)
7878 goto tg3_tso_bug_end;
7880 skb_list_walk_safe(segs, seg, next) {
7881 skb_mark_not_on_list(seg);
7882 tg3_start_xmit(seg, tp->dev);
7886 dev_consume_skb_any(skb);
7888 return NETDEV_TX_OK;
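/* Illustrative note (not in the original source): the fallback above
 * segments the oversized TSO skb in software -- skb_gso_segment()
 * with the TSO feature bits masked off -- and requeues each
 * resulting MTU-sized frame through tg3_start_xmit(), so frames that
 * would trip the hardware TSO errata still go out, just without
 * offload.
 */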
7891 /* hard_start_xmit for all devices */
7892 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7894 struct tg3 *tp = netdev_priv(dev);
7895 u32 len, entry, base_flags, mss, vlan = 0;
7897 int i = -1, would_hit_hwbug;
7899 struct tg3_napi *tnapi;
7900 struct netdev_queue *txq;
7902 struct iphdr *iph = NULL;
7903 struct tcphdr *tcph = NULL;
7904 __sum16 tcp_csum = 0, ip_csum = 0;
7905 __be16 ip_tot_len = 0;
7907 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7908 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7909 if (tg3_flag(tp, ENABLE_TSS))
7912 budget = tg3_tx_avail(tnapi);
7914 /* We are running in BH disabled context with netif_tx_lock
7915 * and TX reclaim runs via tp->napi.poll inside of a software
7916 * interrupt. Furthermore, IRQ processing runs lockless so we have
7917 * no IRQ context deadlocks to worry about either. Rejoice!
7919 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7920 if (!netif_tx_queue_stopped(txq)) {
7921 netif_tx_stop_queue(txq);
7923 /* This is a hard error, log it. */
7925 "BUG! Tx Ring full when queue awake!\n");
7927 return NETDEV_TX_BUSY;
7930 entry = tnapi->tx_prod;
7933 mss = skb_shinfo(skb)->gso_size;
7935 u32 tcp_opt_len, hdr_len;
7937 if (skb_cow_head(skb, 0))
7941 tcp_opt_len = tcp_optlen(skb);
7943 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7945 /* HW/FW can not correctly segment packets that have been
7946 * vlan encapsulated.
7948 if (skb->protocol == htons(ETH_P_8021Q) ||
7949 skb->protocol == htons(ETH_P_8021AD)) {
7950 if (tg3_tso_bug_gso_check(tnapi, skb))
7951 return tg3_tso_bug(tp, tnapi, txq, skb);
7955 if (!skb_is_gso_v6(skb)) {
7956 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7957 tg3_flag(tp, TSO_BUG)) {
7958 if (tg3_tso_bug_gso_check(tnapi, skb))
7959 return tg3_tso_bug(tp, tnapi, txq, skb);
7962 ip_csum = iph->check;
7963 ip_tot_len = iph->tot_len;
7965 iph->tot_len = htons(mss + hdr_len);
7968 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7969 TXD_FLAG_CPU_POST_DMA);
7971 tcph = tcp_hdr(skb);
7972 tcp_csum = tcph->check;
7974 if (tg3_flag(tp, HW_TSO_1) ||
7975 tg3_flag(tp, HW_TSO_2) ||
7976 tg3_flag(tp, HW_TSO_3)) {
7978 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7980 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7984 if (tg3_flag(tp, HW_TSO_3)) {
7985 mss |= (hdr_len & 0xc) << 12;
7987 base_flags |= 0x00000010;
7988 base_flags |= (hdr_len & 0x3e0) << 5;
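/* Illustrative note (not in the original source): HW_TSO_3 parts
 * split the header length across two fields -- bits 2-3 of hdr_len
 * ride in the mss word (<< 12) and bits 5-9 in base_flags (<< 5).
 * E.g. hdr_len == 0x36 contributes 0x4 << 12 to mss and
 * 0x20 << 5 to base_flags.
 */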
7989 } else if (tg3_flag(tp, HW_TSO_2))
7990 mss |= hdr_len << 9;
7991 else if (tg3_flag(tp, HW_TSO_1) ||
7992 tg3_asic_rev(tp) == ASIC_REV_5705) {
7993 if (tcp_opt_len || iph->ihl > 5) {
7996 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7997 mss |= (tsflags << 11);
8000 if (tcp_opt_len || iph->ihl > 5) {
8003 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8004 base_flags |= tsflags << 12;
8007 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8008 /* HW/FW can not correctly checksum packets that have been
8009 * vlan encapsulated.
8011 if (skb->protocol == htons(ETH_P_8021Q) ||
8012 skb->protocol == htons(ETH_P_8021AD)) {
8013 if (skb_checksum_help(skb))
8016 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8020 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8021 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8022 base_flags |= TXD_FLAG_JMB_PKT;
8024 if (skb_vlan_tag_present(skb)) {
8025 base_flags |= TXD_FLAG_VLAN;
8026 vlan = skb_vlan_tag_get(skb);
8029 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8030 tg3_flag(tp, TX_TSTAMP_EN)) {
8031 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8032 base_flags |= TXD_FLAG_HWTSTAMP;
8035 len = skb_headlen(skb);
8037 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8039 if (dma_mapping_error(&tp->pdev->dev, mapping))
8043 tnapi->tx_buffers[entry].skb = skb;
8044 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8046 would_hit_hwbug = 0;
8048 if (tg3_flag(tp, 5701_DMA_BUG))
8049 would_hit_hwbug = 1;
8051 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8052 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8054 would_hit_hwbug = 1;
8055 } else if (skb_shinfo(skb)->nr_frags > 0) {
8058 if (!tg3_flag(tp, HW_TSO_1) &&
8059 !tg3_flag(tp, HW_TSO_2) &&
8060 !tg3_flag(tp, HW_TSO_3))
8063 /* Now loop through additional data
8064 * fragments, and queue them.
8066 last = skb_shinfo(skb)->nr_frags - 1;
8067 for (i = 0; i <= last; i++) {
8068 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8070 len = skb_frag_size(frag);
8071 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8072 len, DMA_TO_DEVICE);
8074 tnapi->tx_buffers[entry].skb = NULL;
8075 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8077 if (dma_mapping_error(&tp->pdev->dev, mapping))
8081 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8083 ((i == last) ? TXD_FLAG_END : 0),
8085 would_hit_hwbug = 1;
8091 if (would_hit_hwbug) {
8092 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8094 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8095 /* If it's a TSO packet, do GSO instead of
8096 * allocating and copying to a large linear SKB
8099 iph->check = ip_csum;
8100 iph->tot_len = ip_tot_len;
8102 tcph->check = tcp_csum;
8103 return tg3_tso_bug(tp, tnapi, txq, skb);
8106 /* If the workaround fails due to memory/mapping
8107 * failure, silently drop this packet.
8109 entry = tnapi->tx_prod;
8110 budget = tg3_tx_avail(tnapi);
8111 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8112 base_flags, mss, vlan))
8116 skb_tx_timestamp(skb);
8117 netdev_tx_sent_queue(txq, skb->len);
8119 /* Sync BD data before updating mailbox */
8122 tnapi->tx_prod = entry;
8123 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8124 netif_tx_stop_queue(txq);
8126 /* netif_tx_stop_queue() must be done before checking
8127 * tx index in tg3_tx_avail() below, because in
8128 * tg3_tx(), we update tx index before checking for
8129 * netif_tx_queue_stopped().
8132 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8133 netif_tx_wake_queue(txq);
8136 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8137 /* Packets are ready, update Tx producer idx on card. */
8138 tw32_tx_mbox(tnapi->prodmbox, entry);
8141 return NETDEV_TX_OK;
8144 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8145 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8147 dev_kfree_skb_any(skb);
8150 return NETDEV_TX_OK;
8153 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8156 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8157 MAC_MODE_PORT_MODE_MASK);
8159 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8161 if (!tg3_flag(tp, 5705_PLUS))
8162 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8164 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8165 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8167 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8169 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8171 if (tg3_flag(tp, 5705_PLUS) ||
8172 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8173 tg3_asic_rev(tp) == ASIC_REV_5700)
8174 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8177 tw32(MAC_MODE, tp->mac_mode);
8181 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8183 u32 val, bmcr, mac_mode, ptest = 0;
8185 tg3_phy_toggle_apd(tp, false);
8186 tg3_phy_toggle_automdix(tp, false);
8188 if (extlpbk && tg3_phy_set_extloopbk(tp))
8191 bmcr = BMCR_FULLDPLX;
8196 bmcr |= BMCR_SPEED100;
8200 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8202 bmcr |= BMCR_SPEED100;
8205 bmcr |= BMCR_SPEED1000;
8210 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8211 tg3_readphy(tp, MII_CTRL1000, &val);
8212 val |= CTL1000_AS_MASTER |
8213 CTL1000_ENABLE_MASTER;
8214 tg3_writephy(tp, MII_CTRL1000, val);
8216 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8217 MII_TG3_FET_PTEST_TRIM_2;
8218 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8221 bmcr |= BMCR_LOOPBACK;
8223 tg3_writephy(tp, MII_BMCR, bmcr);
8225 /* The write needs to be flushed for the FETs */
8226 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8227 tg3_readphy(tp, MII_BMCR, &bmcr);
8231 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8232 tg3_asic_rev(tp) == ASIC_REV_5785) {
8233 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8234 MII_TG3_FET_PTEST_FRC_TX_LINK |
8235 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8237 /* The write needs to be flushed for the AC131 */
8238 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8241 /* Reset to prevent losing 1st rx packet intermittently */
8242 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8243 tg3_flag(tp, 5780_CLASS)) {
8244 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8246 tw32_f(MAC_RX_MODE, tp->rx_mode);
8249 mac_mode = tp->mac_mode &
8250 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8251 if (speed == SPEED_1000)
8252 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8254 mac_mode |= MAC_MODE_PORT_MODE_MII;
8256 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8257 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8259 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8260 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8261 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8262 mac_mode |= MAC_MODE_LINK_POLARITY;
8264 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8265 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8268 tw32(MAC_MODE, mac_mode);
8274 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8276 struct tg3 *tp = netdev_priv(dev);
8278 if (features & NETIF_F_LOOPBACK) {
8279 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8282 spin_lock_bh(&tp->lock);
8283 tg3_mac_loopback(tp, true);
8284 netif_carrier_on(tp->dev);
8285 spin_unlock_bh(&tp->lock);
8286 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8288 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8291 spin_lock_bh(&tp->lock);
8292 tg3_mac_loopback(tp, false);
8293 /* Force link status check */
8294 tg3_setup_phy(tp, true);
8295 spin_unlock_bh(&tp->lock);
8296 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8300 static netdev_features_t tg3_fix_features(struct net_device *dev,
8301 netdev_features_t features)
8303 struct tg3 *tp = netdev_priv(dev);
8305 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8306 features &= ~NETIF_F_ALL_TSO;
8311 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8313 netdev_features_t changed = dev->features ^ features;
8315 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8316 tg3_set_loopback(dev, features);
8321 static void tg3_rx_prodring_free(struct tg3 *tp,
8322 struct tg3_rx_prodring_set *tpr)
8326 if (tpr != &tp->napi[0].prodring) {
8327 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8328 i = (i + 1) & tp->rx_std_ring_mask)
8329 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8332 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8333 for (i = tpr->rx_jmb_cons_idx;
8334 i != tpr->rx_jmb_prod_idx;
8335 i = (i + 1) & tp->rx_jmb_ring_mask) {
8336 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8344 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8345 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8348 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8349 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8350 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8355 /* Initialize rx rings for packet processing.
8357 * The chip has been shut down and the driver detached from
8358 * the networking stack, so no interrupts or new tx packets will
8359 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
8362 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8363 struct tg3_rx_prodring_set *tpr)
8365 u32 i, rx_pkt_dma_sz;
8367 tpr->rx_std_cons_idx = 0;
8368 tpr->rx_std_prod_idx = 0;
8369 tpr->rx_jmb_cons_idx = 0;
8370 tpr->rx_jmb_prod_idx = 0;
8372 if (tpr != &tp->napi[0].prodring) {
8373 memset(&tpr->rx_std_buffers[0], 0,
8374 TG3_RX_STD_BUFF_RING_SIZE(tp));
8375 if (tpr->rx_jmb_buffers)
8376 memset(&tpr->rx_jmb_buffers[0], 0,
8377 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8381 /* Zero out all descriptors. */
8382 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8384 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8385 if (tg3_flag(tp, 5780_CLASS) &&
8386 tp->dev->mtu > ETH_DATA_LEN)
8387 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8388 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8390 /* Initialize invariants of the rings; we only set this
8391 * stuff once. This works because the card does not
8392 * write into the rx buffer posting rings.
8394 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8395 struct tg3_rx_buffer_desc *rxd;
8397 rxd = &tpr->rx_std[i];
8398 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8399 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8400 rxd->opaque = (RXD_OPAQUE_RING_STD |
8401 (i << RXD_OPAQUE_INDEX_SHIFT));
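/* Illustrative note (not in the original source): the opaque field
 * packs the ring identity and the slot index together, so when the
 * descriptor returns on the rx return ring the driver can recover
 * which producer ring and which entry to recycle without any extra
 * lookup.
 */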
8404 /* Now allocate fresh SKBs for each rx ring. */
8405 for (i = 0; i < tp->rx_pending; i++) {
8406 unsigned int frag_size;
8408 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8410 netdev_warn(tp->dev,
8411 "Using a smaller RX standard ring. Only "
8412 "%d out of %d buffers were allocated "
8413 "successfully\n", i, tp->rx_pending);
8421 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8424 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8426 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8429 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8430 struct tg3_rx_buffer_desc *rxd;
8432 rxd = &tpr->rx_jmb[i].std;
8433 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8434 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8436 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8437 (i << RXD_OPAQUE_INDEX_SHIFT));
8440 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8441 unsigned int frag_size;
8443 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8445 netdev_warn(tp->dev,
8446 "Using a smaller RX jumbo ring. Only %d "
8447 "out of %d buffers were allocated "
8448 "successfully\n", i, tp->rx_jumbo_pending);
8451 tp->rx_jumbo_pending = i;
8460 tg3_rx_prodring_free(tp, tpr);
8464 static void tg3_rx_prodring_fini(struct tg3 *tp,
8465 struct tg3_rx_prodring_set *tpr)
8467 kfree(tpr->rx_std_buffers);
8468 tpr->rx_std_buffers = NULL;
8469 kfree(tpr->rx_jmb_buffers);
8470 tpr->rx_jmb_buffers = NULL;
8472 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8473 tpr->rx_std, tpr->rx_std_mapping);
8477 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8478 tpr->rx_jmb, tpr->rx_jmb_mapping);
8483 static int tg3_rx_prodring_init(struct tg3 *tp,
8484 struct tg3_rx_prodring_set *tpr)
8486 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8488 if (!tpr->rx_std_buffers)
8491 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8492 TG3_RX_STD_RING_BYTES(tp),
8493 &tpr->rx_std_mapping,
8498 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8499 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8501 if (!tpr->rx_jmb_buffers)
8504 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8505 TG3_RX_JMB_RING_BYTES(tp),
8506 &tpr->rx_jmb_mapping,
8515 tg3_rx_prodring_fini(tp, tpr);
8519 /* Free up pending packets in all rx/tx rings.
8521 * The chip has been shut down and the driver detached from
8522 * the networking stack, so no interrupts or new tx packets will
8523 * end up in the driver. tp->{tx,}lock is not held and we are not
8524 * in an interrupt context and thus may sleep.
8526 static void tg3_free_rings(struct tg3 *tp)
8530 for (j = 0; j < tp->irq_cnt; j++) {
8531 struct tg3_napi *tnapi = &tp->napi[j];
8533 tg3_rx_prodring_free(tp, &tnapi->prodring);
8535 if (!tnapi->tx_buffers)
8538 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8539 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8544 tg3_tx_skb_unmap(tnapi, i,
8545 skb_shinfo(skb)->nr_frags - 1);
8547 dev_consume_skb_any(skb);
8549 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8553 /* Initialize tx/rx rings for packet processing.
8555 * The chip has been shut down and the driver detached from
8556 * the networking stack, so no interrupts or new tx packets will
8557 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
8560 static int tg3_init_rings(struct tg3 *tp)
8564 /* Free up all the SKBs. */
8567 for (i = 0; i < tp->irq_cnt; i++) {
8568 struct tg3_napi *tnapi = &tp->napi[i];
8570 tnapi->last_tag = 0;
8571 tnapi->last_irq_tag = 0;
8572 tnapi->hw_status->status = 0;
8573 tnapi->hw_status->status_tag = 0;
8574 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8579 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8581 tnapi->rx_rcb_ptr = 0;
8583 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8585 if (tnapi->prodring.rx_std &&
8586 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8595 static void tg3_mem_tx_release(struct tg3 *tp)
8599 for (i = 0; i < tp->irq_max; i++) {
8600 struct tg3_napi *tnapi = &tp->napi[i];
8602 if (tnapi->tx_ring) {
8603 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8604 tnapi->tx_ring, tnapi->tx_desc_mapping);
8605 tnapi->tx_ring = NULL;
8608 kfree(tnapi->tx_buffers);
8609 tnapi->tx_buffers = NULL;
8613 static int tg3_mem_tx_acquire(struct tg3 *tp)
8616 struct tg3_napi *tnapi = &tp->napi[0];
8618 /* If multivector TSS is enabled, vector 0 does not handle
8619 * tx interrupts. Don't allocate any resources for it.
8621 if (tg3_flag(tp, ENABLE_TSS))
8624 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8625 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8626 sizeof(struct tg3_tx_ring_info),
8628 if (!tnapi->tx_buffers)
8631 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8633 &tnapi->tx_desc_mapping,
8635 if (!tnapi->tx_ring)
8642 tg3_mem_tx_release(tp);
8646 static void tg3_mem_rx_release(struct tg3 *tp)
8650 for (i = 0; i < tp->irq_max; i++) {
8651 struct tg3_napi *tnapi = &tp->napi[i];
8653 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8658 dma_free_coherent(&tp->pdev->dev,
8659 TG3_RX_RCB_RING_BYTES(tp),
8661 tnapi->rx_rcb_mapping);
8662 tnapi->rx_rcb = NULL;
8666 static int tg3_mem_rx_acquire(struct tg3 *tp)
8668 unsigned int i, limit;
8670 limit = tp->rxq_cnt;
8672 /* If RSS is enabled, we need a (dummy) producer ring
8673 * set on vector zero. This is the true hw prodring.
8675 if (tg3_flag(tp, ENABLE_RSS))
8678 for (i = 0; i < limit; i++) {
8679 struct tg3_napi *tnapi = &tp->napi[i];
8681 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8684 /* If multivector RSS is enabled, vector 0
8685 * does not handle rx or tx interrupts.
8686 * Don't allocate any resources for it.
8688 if (!i && tg3_flag(tp, ENABLE_RSS))
8691 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8692 TG3_RX_RCB_RING_BYTES(tp),
8693 &tnapi->rx_rcb_mapping,
8702 tg3_mem_rx_release(tp);
8707 * Must not be invoked with interrupt sources disabled and
8708 * the hardware shut down.
8710 static void tg3_free_consistent(struct tg3 *tp)
8714 for (i = 0; i < tp->irq_cnt; i++) {
8715 struct tg3_napi *tnapi = &tp->napi[i];
8717 if (tnapi->hw_status) {
8718 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8720 tnapi->status_mapping);
8721 tnapi->hw_status = NULL;
8725 tg3_mem_rx_release(tp);
8726 tg3_mem_tx_release(tp);
8728 /* tp->hw_stats can be referenced safely:
8729 * 1. under rtnl_lock
8730 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8733 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8734 tp->hw_stats, tp->stats_mapping);
8735 tp->hw_stats = NULL;
8740 * Must not be invoked with interrupt sources disabled and
8741 * the hardware shut down.  Can sleep.
8743 static int tg3_alloc_consistent(struct tg3 *tp)
8747 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8748 sizeof(struct tg3_hw_stats),
8749 &tp->stats_mapping, GFP_KERNEL);
8753 for (i = 0; i < tp->irq_cnt; i++) {
8754 struct tg3_napi *tnapi = &tp->napi[i];
8755 struct tg3_hw_status *sblk;
8757 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8759 &tnapi->status_mapping,
8761 if (!tnapi->hw_status)
8764 sblk = tnapi->hw_status;
8766 if (tg3_flag(tp, ENABLE_RSS)) {
8767 u16 *prodptr = NULL;
8770 * When RSS is enabled, the status block format changes
8771 * slightly. The "rx_jumbo_consumer", "reserved",
8772 * and "rx_mini_consumer" members get mapped to the
8773 * other three rx return ring producer indexes.
8777 prodptr = &sblk->idx[0].rx_producer;
8780 prodptr = &sblk->rx_jumbo_consumer;
8783 prodptr = &sblk->reserved;
8786 prodptr = &sblk->rx_mini_consumer;
8789 tnapi->rx_rcb_prod_idx = prodptr;
8791 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8795 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8801 tg3_free_consistent(tp);
8805 #define MAX_WAIT_CNT 1000
8807 /* To stop a block, clear the enable bit and poll till it
8808 * clears. tp->lock is held.
8810 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8815 if (tg3_flag(tp, 5705_PLUS)) {
8822 /* We can't enable/disable these bits of the
8823 * 5705/5750, so just say success.
8836 for (i = 0; i < MAX_WAIT_CNT; i++) {
8837 if (pci_channel_offline(tp->pdev)) {
8838 dev_err(&tp->pdev->dev,
8839 "tg3_stop_block device offline, "
8840 "ofs=%lx enable_bit=%x\n",
8847 if ((val & enable_bit) == 0)
8851 if (i == MAX_WAIT_CNT && !silent) {
8852 dev_err(&tp->pdev->dev,
8853 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8861 /* tp->lock is held. */
8862 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8866 tg3_disable_ints(tp);
8868 if (pci_channel_offline(tp->pdev)) {
8869 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8870 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8875 tp->rx_mode &= ~RX_MODE_ENABLE;
8876 tw32_f(MAC_RX_MODE, tp->rx_mode);
8879 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8880 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8881 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8882 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8883 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8884 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8886 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8887 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8889 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8890 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8891 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8892 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8894 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8895 tw32_f(MAC_MODE, tp->mac_mode);
8898 tp->tx_mode &= ~TX_MODE_ENABLE;
8899 tw32_f(MAC_TX_MODE, tp->tx_mode);
8901 for (i = 0; i < MAX_WAIT_CNT; i++) {
8903 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8906 if (i >= MAX_WAIT_CNT) {
8907 dev_err(&tp->pdev->dev,
8908 "%s timed out, TX_MODE_ENABLE will not clear "
8909 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8913 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8914 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8915 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8917 tw32(FTQ_RESET, 0xffffffff);
8918 tw32(FTQ_RESET, 0x00000000);
8920 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8921 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8924 for (i = 0; i < tp->irq_cnt; i++) {
8925 struct tg3_napi *tnapi = &tp->napi[i];
8926 if (tnapi->hw_status)
8927 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8933 /* Save PCI command register before chip reset */
8934 static void tg3_save_pci_state(struct tg3 *tp)
8936 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8939 /* Restore PCI state after chip reset */
8940 static void tg3_restore_pci_state(struct tg3 *tp)
8944 /* Re-enable indirect register accesses. */
8945 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8946 tp->misc_host_ctrl);
8948 /* Set MAX PCI retry to zero. */
8949 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8950 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8951 tg3_flag(tp, PCIX_MODE))
8952 val |= PCISTATE_RETRY_SAME_DMA;
8953 /* Allow reads and writes to the APE register and memory space. */
8954 if (tg3_flag(tp, ENABLE_APE))
8955 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8956 PCISTATE_ALLOW_APE_SHMEM_WR |
8957 PCISTATE_ALLOW_APE_PSPACE_WR;
8958 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8960 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8962 if (!tg3_flag(tp, PCI_EXPRESS)) {
8963 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8964 tp->pci_cacheline_sz);
8965 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8969 /* Make sure PCI-X relaxed ordering bit is clear. */
8970 if (tg3_flag(tp, PCIX_MODE)) {
8973 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8975 pcix_cmd &= ~PCI_X_CMD_ERO;
8976 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8980 if (tg3_flag(tp, 5780_CLASS)) {
8982 /* Chip reset on 5780 will reset the MSI enable bit,
8983 * so we need to restore it.
8985 if (tg3_flag(tp, USING_MSI)) {
8988 pci_read_config_word(tp->pdev,
8989 tp->msi_cap + PCI_MSI_FLAGS,
8991 pci_write_config_word(tp->pdev,
8992 tp->msi_cap + PCI_MSI_FLAGS,
8993 ctrl | PCI_MSI_FLAGS_ENABLE);
8994 val = tr32(MSGINT_MODE);
8995 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9000 static void tg3_override_clk(struct tg3 *tp)
9004 switch (tg3_asic_rev(tp)) {
9006 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9007 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9008 TG3_CPMU_MAC_ORIDE_ENABLE);
9013 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9021 static void tg3_restore_clk(struct tg3 *tp)
9025 switch (tg3_asic_rev(tp)) {
9027 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9028 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9029 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9034 val = tr32(TG3_CPMU_CLCK_ORIDE);
9035 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9043 /* tp->lock is held. */
9044 static int tg3_chip_reset(struct tg3 *tp)
9045 __releases(tp->lock)
9046 __acquires(tp->lock)
9049 void (*write_op)(struct tg3 *, u32, u32);
9052 if (!pci_device_is_present(tp->pdev))
9057 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9059 /* No matching tg3_nvram_unlock() after this because
9060 * chip reset below will undo the nvram lock.
9062 tp->nvram_lock_cnt = 0;
9064 /* GRC_MISC_CFG core clock reset will clear the memory
9065 * enable bit in PCI register 4 and the MSI enable bit
9066 * on some chips, so we save relevant registers here.
9068 tg3_save_pci_state(tp);
9070 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9071 tg3_flag(tp, 5755_PLUS))
9072 tw32(GRC_FASTBOOT_PC, 0);
9075 * We must avoid the readl() that normally takes place.
9076 * It locks machines, causes machine checks, and other
9077 * fun things. So, temporarily disable the 5701
9078 * hardware workaround, while we do the reset.
9080 write_op = tp->write32;
9081 if (write_op == tg3_write_flush_reg32)
9082 tp->write32 = tg3_write32;
9084 /* Prevent the irq handler from reading or writing PCI registers
9085 * during chip reset when the memory enable bit in the PCI command
9086 * register may be cleared. The chip does not generate interrupt
9087 * at this time, but the irq handler may still be called due to irq
9088 * sharing or irqpoll.
9090 tg3_flag_set(tp, CHIP_RESETTING);
9091 for (i = 0; i < tp->irq_cnt; i++) {
9092 struct tg3_napi *tnapi = &tp->napi[i];
9093 if (tnapi->hw_status) {
9094 tnapi->hw_status->status = 0;
9095 tnapi->hw_status->status_tag = 0;
9097 tnapi->last_tag = 0;
9098 tnapi->last_irq_tag = 0;
9102 tg3_full_unlock(tp);
9104 for (i = 0; i < tp->irq_cnt; i++)
9105 synchronize_irq(tp->napi[i].irq_vec);
9107 tg3_full_lock(tp, 0);
9109 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9110 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9111 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9115 val = GRC_MISC_CFG_CORECLK_RESET;
9117 if (tg3_flag(tp, PCI_EXPRESS)) {
9118 /* Force PCIe 1.0a mode */
9119 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9120 !tg3_flag(tp, 57765_PLUS) &&
9121 tr32(TG3_PCIE_PHY_TSTCTL) ==
9122 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9123 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9125 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9126 tw32(GRC_MISC_CFG, (1 << 29));
9131 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9132 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9133 tw32(GRC_VCPU_EXT_CTRL,
9134 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9137 /* Set the clock to the highest frequency to avoid timeouts. With link
9138 * aware mode, the clock speed could be slow and bootcode does not
9139 * complete within the expected time. Override the clock to allow the
9140 * bootcode to finish sooner and then restore it.
9142 tg3_override_clk(tp);
9144 /* Manage gphy power for all CPMU-absent PCIe devices. */
9145 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9146 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9148 tw32(GRC_MISC_CFG, val);
9150 /* restore 5701 hardware bug workaround write method */
9151 tp->write32 = write_op;
9153 /* Unfortunately, we have to delay before the PCI read back.
9154 * Some 575X chips will not even respond to a PCI cfg access
9155 * when the reset command is given to the chip.
9157 * How do these hardware designers expect things to work
9158 * properly if the PCI write is posted for a long period
9159 * of time?  It is always necessary to have some method by
9160 * which a register read-back can occur to push out the
9161 * write that performs the reset.
9163 * For most tg3 variants the trick below has worked.
9168 /* Flush PCI posted writes. The normal MMIO registers
9169 * are inaccessible at this time so this is the only
9170 * way to do this reliably (actually, this is no longer
9171 * the case, see above). I tried to use indirect
9172 * register read/write but this upset some 5701 variants.
9174 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9178 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9181 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9185 /* Wait for link training to complete. */
9186 for (j = 0; j < 5000; j++)
9189 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9190 pci_write_config_dword(tp->pdev, 0xc4,
9191 cfg_val | (1 << 15));
9194 /* Clear the "no snoop" and "relaxed ordering" bits. */
9195 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9197 * Older PCIe devices only support the 128-byte
9198 * MPS setting. Enforce the restriction.
9200 if (!tg3_flag(tp, CPMU_PRESENT))
9201 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9202 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9204 /* Clear error status */
9205 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9206 PCI_EXP_DEVSTA_CED |
9207 PCI_EXP_DEVSTA_NFED |
9208 PCI_EXP_DEVSTA_FED |
9209 PCI_EXP_DEVSTA_URD);
9212 tg3_restore_pci_state(tp);
9214 tg3_flag_clear(tp, CHIP_RESETTING);
9215 tg3_flag_clear(tp, ERROR_PROCESSED);
9218 if (tg3_flag(tp, 5780_CLASS))
9219 val = tr32(MEMARB_MODE);
9220 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9222 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9224 tw32(0x5000, 0x400);
9227 if (tg3_flag(tp, IS_SSB_CORE)) {
9229 * BCM4785: In order to avoid repercussions from using
9230 * potentially defective internal ROM, stop the Rx RISC CPU,
9231 * which is not required in this configuration.
9234 tg3_halt_cpu(tp, RX_CPU_BASE);
9237 err = tg3_poll_fw(tp);
9241 tw32(GRC_MODE, tp->grc_mode);
9243 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9246 tw32(0xc4, val | (1 << 15));
9249 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9250 tg3_asic_rev(tp) == ASIC_REV_5705) {
9251 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9252 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9253 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9254 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9257 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9258 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9260 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9261 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9266 tw32_f(MAC_MODE, val);
9269 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9273 if (tg3_flag(tp, PCI_EXPRESS) &&
9274 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9275 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9276 !tg3_flag(tp, 57765_PLUS)) {
9279 tw32(0x7c00, val | (1 << 25));
9282 tg3_restore_clk(tp);
9284 /* Increase the core clock speed to fix the tx timeout issue on 5762
9285 * with 100Mbps link speed.
9287 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9288 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9289 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9290 TG3_CPMU_MAC_ORIDE_ENABLE);
9293 /* Reprobe ASF enable state. */
9294 tg3_flag_clear(tp, ENABLE_ASF);
9295 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9296 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9298 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9299 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9300 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9303 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9304 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9305 tg3_flag_set(tp, ENABLE_ASF);
9306 tp->last_event_jiffies = jiffies;
9307 if (tg3_flag(tp, 5750_PLUS))
9308 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9310 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9311 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9312 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9313 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9314 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9321 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9322 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9323 static void __tg3_set_rx_mode(struct net_device *);
9325 /* tp->lock is held. */
9326 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9332 tg3_write_sig_pre_reset(tp, kind);
9334 tg3_abort_hw(tp, silent);
9335 err = tg3_chip_reset(tp);
9337 __tg3_set_mac_addr(tp, false);
9339 tg3_write_sig_legacy(tp, kind);
9340 tg3_write_sig_post_reset(tp, kind);
9343 /* Save the stats across chip resets... */
9344 tg3_get_nstats(tp, &tp->net_stats_prev);
9345 tg3_get_estats(tp, &tp->estats_prev);
9347 /* And make sure the next sample is new data */
9348 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9354 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9356 struct tg3 *tp = netdev_priv(dev);
9357 struct sockaddr *addr = p;
9359 bool skip_mac_1 = false;
9361 if (!is_valid_ether_addr(addr->sa_data))
9362 return -EADDRNOTAVAIL;
9364 eth_hw_addr_set(dev, addr->sa_data);
9366 if (!netif_running(dev))
9369 if (tg3_flag(tp, ENABLE_ASF)) {
9370 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9372 addr0_high = tr32(MAC_ADDR_0_HIGH);
9373 addr0_low = tr32(MAC_ADDR_0_LOW);
9374 addr1_high = tr32(MAC_ADDR_1_HIGH);
9375 addr1_low = tr32(MAC_ADDR_1_LOW);
9377 /* Skip MAC addr 1 if ASF is using it. */
9378 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9379 !(addr1_high == 0 && addr1_low == 0))
9382 spin_lock_bh(&tp->lock);
9383 __tg3_set_mac_addr(tp, skip_mac_1);
9384 __tg3_set_rx_mode(dev);
9385 spin_unlock_bh(&tp->lock);
9390 /* tp->lock is held. */
9391 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9392 dma_addr_t mapping, u32 maxlen_flags,
9396 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9397 ((u64) mapping >> 32));
9399 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9400 ((u64) mapping & 0xffffffff));
9402 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9405 if (!tg3_flag(tp, 5705_PLUS))
9407 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9412 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9416 if (!tg3_flag(tp, ENABLE_TSS)) {
9417 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9418 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9419 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9421 tw32(HOSTCC_TXCOL_TICKS, 0);
9422 tw32(HOSTCC_TXMAX_FRAMES, 0);
9423 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9425 for (; i < tp->txq_cnt; i++) {
9428 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9429 tw32(reg, ec->tx_coalesce_usecs);
9430 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9431 tw32(reg, ec->tx_max_coalesced_frames);
9432 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9433 tw32(reg, ec->tx_max_coalesced_frames_irq);
9437 for (; i < tp->irq_max - 1; i++) {
9438 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9439 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9440 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
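/* Illustrative note (not in the original source): the per-vector
 * host coalescing registers sit in 0x18-byte blocks, so vector i's
 * ticks/frames/frames-irq triple lives at the *_VEC1 address plus
 * i * 0x18; vectors beyond txq_cnt are zeroed so they never fire.
 */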
9444 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9447 u32 limit = tp->rxq_cnt;
9449 if (!tg3_flag(tp, ENABLE_RSS)) {
9450 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9451 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9452 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9455 tw32(HOSTCC_RXCOL_TICKS, 0);
9456 tw32(HOSTCC_RXMAX_FRAMES, 0);
9457 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9460 for (; i < limit; i++) {
9463 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9464 tw32(reg, ec->rx_coalesce_usecs);
9465 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9466 tw32(reg, ec->rx_max_coalesced_frames);
9467 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9468 tw32(reg, ec->rx_max_coalesced_frames_irq);
9471 for (; i < tp->irq_max - 1; i++) {
9472 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9473 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9474 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9478 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9480 tg3_coal_tx_init(tp, ec);
9481 tg3_coal_rx_init(tp, ec);
9483 if (!tg3_flag(tp, 5705_PLUS)) {
9484 u32 val = ec->stats_block_coalesce_usecs;
9486 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9487 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9492 tw32(HOSTCC_STAT_COAL_TICKS, val);
9496 /* tp->lock is held. */
9497 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9501 /* Disable all transmit rings but the first. */
9502 if (!tg3_flag(tp, 5705_PLUS))
9503 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9504 else if (tg3_flag(tp, 5717_PLUS))
9505 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9506 else if (tg3_flag(tp, 57765_CLASS) ||
9507 tg3_asic_rev(tp) == ASIC_REV_5762)
9508 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9510 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9512 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9513 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9514 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9515 BDINFO_FLAGS_DISABLED);
9518 /* tp->lock is held. */
9519 static void tg3_tx_rcbs_init(struct tg3 *tp)
9522 u32 txrcb = NIC_SRAM_SEND_RCB;
9524 if (tg3_flag(tp, ENABLE_TSS))
9527 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9528 struct tg3_napi *tnapi = &tp->napi[i];
9530 if (!tnapi->tx_ring)
9533 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9534 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9535 NIC_SRAM_TX_BUFFER_DESC);
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}

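/* Illustrative note (added commentary, not from the original driver): the
 * maxlen/flags word passed to tg3_set_bdinfo() above carries the ring length
 * in its upper half. With the stock TG3_TX_RING_SIZE of 512 and a
 * BDINFO_FLAGS_MAXLEN_SHIFT of 16, for example, the send ring's word would
 * be 512 << 16 == 0x02000000, leaving the low bits free for attribute flags
 * such as BDINFO_FLAGS_DISABLED.
 */
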
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}

static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

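/* Illustrative note (added commentary, not from the original driver): the
 * replenish thresholds above are simple arithmetic on the ring settings.
 * For example, with tp->rx_pending == 200 the host threshold becomes
 * max(200 / 8, 1) == 25, and the value actually programmed into
 * RCVBDI_STD_THRESH is the smaller of that and the NIC-side limit
 * min(bdcache_maxcnt / 2, tp->rx_std_max_post).
 */
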
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}

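/* Illustrative note (added commentary, not from the original driver): this
 * is the textbook bit-serial little-endian CRC-32 (reflected polynomial
 * CRC32_POLY_LE == 0xedb88320) with both the initial value and the result
 * inverted; it should be equivalent to ~crc32_le(~0, buf, len). It is only
 * used below to hash multicast addresses, so speed is irrelevant.
 */
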
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into the MAC address filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

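/* Illustrative note (added commentary, not from the original driver): the
 * multicast filter above uses bits 6:0 of the inverted CRC as a bucket
 * index into a 128-bit hash. If ~crc & 0x7f were 0x47, for instance,
 * regidx = (0x47 & 0x60) >> 5 == 2 and bit = 0x47 & 0x1f == 7, so that
 * address would set bit 7 of MAC_HASH_REG_2.
 */
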
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

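/* Illustrative note (added commentary, not from the original driver): each
 * 32-bit register starting at MAC_RSS_INDIR_TBL_0 holds eight 4-bit queue
 * indices, so the loop above emits TG3_RSS_INDIR_TBL_SIZE / 8 register
 * writes. With two RX queues, the default table alternates 0,1,0,1,...,
 * which packs into 0x01010101 for every register written.
 */
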
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}

/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon. This bit has no effect on any
	 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive. For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platforms, MRRS is restricted to 4000 because of
	 * a south bridge limitation. As a workaround, the driver sets MRRS
	 * to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Setup the timer prescalar register. Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

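	/* Illustrative note (added commentary, not from the original
	 * driver): a prescaler value of 65 divides the fixed 66 MHz core
	 * clock by 65 + 1, so the general-purpose timer ticks at roughly
	 * 1 MHz, i.e. about one count per microsecond, which is what the
	 * host-coalescing "usecs" parameters are measured against.
	 */
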
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		/* Round the firmware length up to a 128-byte boundary and
		 * carve the mbuf pool out of the SRAM that remains.
		 */
		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

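	/* Illustrative note (added commentary, not from the original
	 * driver): for the default MTU of 1500 this programs
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
	 * bytes as the largest frame the MAC will accept.
	 */
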
	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE)) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address. See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10 ; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
		fallthrough;
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
		fallthrough;
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
		fallthrough;
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
		fallthrough;
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
		fallthrough;
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
		fallthrough;
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
		fallthrough;
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
		fallthrough;
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
		fallthrough;
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
		fallthrough;
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
		fallthrough;
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
		fallthrough;
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing. Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}

#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	u32 off, len = TG3_OCIR_LEN;
	int i;

	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, len);
	}
}

/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */

#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)

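/* Illustrative note (added commentary, not from the original driver): the
 * hardware counters are only 32 bits wide, so the macro folds each reading
 * into a 64-bit (high, low) pair and detects wrap-around via the unsigned
 * addition overflowing: if low were 0xfffffff0 and the register contributed
 * 0x20, low would become 0x10, which is smaller than the value just added,
 * so high is incremented to carry the overflow.
 */
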
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}

static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive. In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time. Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset. This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds. */
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	timer_setup(&tp->timer, tg3_timer, 0);
}

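/* Illustrative note (added commentary, not from the original driver): with
 * tagged status the timer fires once per second (timer_offset == HZ, so
 * timer_multiplier == 1); otherwise it fires ten times per second and the
 * once-per-second work in tg3_timer() runs on every tenth expiry. The ASF
 * heartbeat counter is scaled the same way via TG3_FW_UPDATE_FREQ_SEC.
 */
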
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	if (tp->pcierr_recovery || !netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		/* Clear this flag so that tg3_reset_task_cancel() will not
		 * call cancel_work_sync() and wait forever.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		dev_close(tp->dev);
		goto out;
	}

	tg3_netif_start(tp);
	tg3_full_unlock(tp);
	tg3_phy_start(tp);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
	rtnl_unlock();
}

static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}

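/* Illustrative note (added commentary, not from the original driver): with
 * multiple MSI-X vectors on a device named "eth0", the labels built above
 * come out as, e.g., "eth0-txrx-1" for a vector serving both ring types and
 * "eth0-rx-2" for an RX-only vector, which is the naming one would expect
 * to see in /proc/interrupts.
 */
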
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode. Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}

/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}

static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}

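/* Illustrative note (added commentary, not from the original driver): with
 * four RX queues and the default single TX queue, irq_cnt starts at
 * max(4, 1) == 4 and becomes min(4 + 1, tp->irq_max) == 5, the extra
 * vector being the one reserved for link and other non-ring interrupts.
 */
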
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default. Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}

static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status. Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}

static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}

static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}

11700 static int tg3_open(struct net_device *dev)
11702 struct tg3 *tp = netdev_priv(dev);
11705 if (tp->pcierr_recovery) {
11706 netdev_err(dev, "Failed to open device. PCI error recovery "
11707 "in progress\n");
11708 return -EAGAIN;
11711 if (tp->fw_needed) {
11712 err = tg3_request_firmware(tp);
11713 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11714 if (err) {
11715 netdev_warn(tp->dev, "EEE capability disabled\n");
11716 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11717 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11718 netdev_warn(tp->dev, "EEE capability restored\n");
11719 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11721 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11722 if (err)
11723 return err;
11724 } else if (err) {
11725 netdev_warn(tp->dev, "TSO capability disabled\n");
11726 tg3_flag_clear(tp, TSO_CAPABLE);
11727 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11728 netdev_notice(tp->dev, "TSO capability restored\n");
11729 tg3_flag_set(tp, TSO_CAPABLE);
11733 tg3_carrier_off(tp);
11735 err = tg3_power_up(tp);
11739 tg3_full_lock(tp, 0);
11741 tg3_disable_ints(tp);
11742 tg3_flag_clear(tp, INIT_COMPLETE);
11744 tg3_full_unlock(tp);
11746 err = tg3_start(tp,
11747 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11748 true, true);
11749 if (err) {
11750 tg3_frob_aux_power(tp, false);
11751 pci_set_power_state(tp->pdev, PCI_D3hot);
11757 static int tg3_close(struct net_device *dev)
11759 struct tg3 *tp = netdev_priv(dev);
11761 if (tp->pcierr_recovery) {
11762 netdev_err(dev, "Failed to close device. PCI error recovery "
11763 "in progress\n");
11764 return -EAGAIN;
11769 if (pci_device_is_present(tp->pdev)) {
11770 tg3_power_down_prepare(tp);
11772 tg3_carrier_off(tp);
11777 static inline u64 get_stat64(tg3_stat64_t *val)
11779 return ((u64)val->high << 32) | ((u64)val->low);
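/* Worked example: with val->high = 0x1 and val->low = 0x2, get_stat64()
 * returns (0x1ULL << 32) | 0x2 = 0x100000002. The two 32-bit halves come
 * straight from the hardware statistics block.
 */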
11782 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11784 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11786 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11787 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11788 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11789 u32 val;
11791 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11792 tg3_writephy(tp, MII_TG3_TEST1,
11793 val | MII_TG3_TEST1_CRC_EN);
11794 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11795 } else
11796 val = 0;
11798 tp->phy_crc_errors += val;
11800 return tp->phy_crc_errors;
11803 return get_stat64(&hw_stats->rx_fcs_errors);
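/* Note on the 5700/5701 path above: the PHY's receive error counter appears
 * to be clear-on-read, which is why each sample is accumulated into
 * tp->phy_crc_errors (two successive reads of 3 and 2 leave a running total
 * of 5). All other chips report the MAC's rx_fcs_errors statistic directly.
 */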
11806 #define ESTAT_ADD(member) \
11807 estats->member = old_estats->member + \
11808 get_stat64(&hw_stats->member)
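/* For reference, ESTAT_ADD(rx_octets) expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each reported counter is the total saved before the last reset plus
 * the live hardware counter, so the ethtool statistics stay monotonic
 * across chip resets.
 */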
11810 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11812 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11813 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11815 ESTAT_ADD(rx_octets);
11816 ESTAT_ADD(rx_fragments);
11817 ESTAT_ADD(rx_ucast_packets);
11818 ESTAT_ADD(rx_mcast_packets);
11819 ESTAT_ADD(rx_bcast_packets);
11820 ESTAT_ADD(rx_fcs_errors);
11821 ESTAT_ADD(rx_align_errors);
11822 ESTAT_ADD(rx_xon_pause_rcvd);
11823 ESTAT_ADD(rx_xoff_pause_rcvd);
11824 ESTAT_ADD(rx_mac_ctrl_rcvd);
11825 ESTAT_ADD(rx_xoff_entered);
11826 ESTAT_ADD(rx_frame_too_long_errors);
11827 ESTAT_ADD(rx_jabbers);
11828 ESTAT_ADD(rx_undersize_packets);
11829 ESTAT_ADD(rx_in_length_errors);
11830 ESTAT_ADD(rx_out_length_errors);
11831 ESTAT_ADD(rx_64_or_less_octet_packets);
11832 ESTAT_ADD(rx_65_to_127_octet_packets);
11833 ESTAT_ADD(rx_128_to_255_octet_packets);
11834 ESTAT_ADD(rx_256_to_511_octet_packets);
11835 ESTAT_ADD(rx_512_to_1023_octet_packets);
11836 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11837 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11838 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11839 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11840 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11842 ESTAT_ADD(tx_octets);
11843 ESTAT_ADD(tx_collisions);
11844 ESTAT_ADD(tx_xon_sent);
11845 ESTAT_ADD(tx_xoff_sent);
11846 ESTAT_ADD(tx_flow_control);
11847 ESTAT_ADD(tx_mac_errors);
11848 ESTAT_ADD(tx_single_collisions);
11849 ESTAT_ADD(tx_mult_collisions);
11850 ESTAT_ADD(tx_deferred);
11851 ESTAT_ADD(tx_excessive_collisions);
11852 ESTAT_ADD(tx_late_collisions);
11853 ESTAT_ADD(tx_collide_2times);
11854 ESTAT_ADD(tx_collide_3times);
11855 ESTAT_ADD(tx_collide_4times);
11856 ESTAT_ADD(tx_collide_5times);
11857 ESTAT_ADD(tx_collide_6times);
11858 ESTAT_ADD(tx_collide_7times);
11859 ESTAT_ADD(tx_collide_8times);
11860 ESTAT_ADD(tx_collide_9times);
11861 ESTAT_ADD(tx_collide_10times);
11862 ESTAT_ADD(tx_collide_11times);
11863 ESTAT_ADD(tx_collide_12times);
11864 ESTAT_ADD(tx_collide_13times);
11865 ESTAT_ADD(tx_collide_14times);
11866 ESTAT_ADD(tx_collide_15times);
11867 ESTAT_ADD(tx_ucast_packets);
11868 ESTAT_ADD(tx_mcast_packets);
11869 ESTAT_ADD(tx_bcast_packets);
11870 ESTAT_ADD(tx_carrier_sense_errors);
11871 ESTAT_ADD(tx_discards);
11872 ESTAT_ADD(tx_errors);
11874 ESTAT_ADD(dma_writeq_full);
11875 ESTAT_ADD(dma_write_prioq_full);
11876 ESTAT_ADD(rxbds_empty);
11877 ESTAT_ADD(rx_discards);
11878 ESTAT_ADD(rx_errors);
11879 ESTAT_ADD(rx_threshold_hit);
11881 ESTAT_ADD(dma_readq_full);
11882 ESTAT_ADD(dma_read_prioq_full);
11883 ESTAT_ADD(tx_comp_queue_full);
11885 ESTAT_ADD(ring_set_send_prod_index);
11886 ESTAT_ADD(ring_status_update);
11887 ESTAT_ADD(nic_irqs);
11888 ESTAT_ADD(nic_avoided_irqs);
11889 ESTAT_ADD(nic_tx_threshold_hit);
11891 ESTAT_ADD(mbuf_lwm_thresh_hit);
11894 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11896 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11897 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11899 stats->rx_packets = old_stats->rx_packets +
11900 get_stat64(&hw_stats->rx_ucast_packets) +
11901 get_stat64(&hw_stats->rx_mcast_packets) +
11902 get_stat64(&hw_stats->rx_bcast_packets);
11904 stats->tx_packets = old_stats->tx_packets +
11905 get_stat64(&hw_stats->tx_ucast_packets) +
11906 get_stat64(&hw_stats->tx_mcast_packets) +
11907 get_stat64(&hw_stats->tx_bcast_packets);
11909 stats->rx_bytes = old_stats->rx_bytes +
11910 get_stat64(&hw_stats->rx_octets);
11911 stats->tx_bytes = old_stats->tx_bytes +
11912 get_stat64(&hw_stats->tx_octets);
11914 stats->rx_errors = old_stats->rx_errors +
11915 get_stat64(&hw_stats->rx_errors);
11916 stats->tx_errors = old_stats->tx_errors +
11917 get_stat64(&hw_stats->tx_errors) +
11918 get_stat64(&hw_stats->tx_mac_errors) +
11919 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11920 get_stat64(&hw_stats->tx_discards);
11922 stats->multicast = old_stats->multicast +
11923 get_stat64(&hw_stats->rx_mcast_packets);
11924 stats->collisions = old_stats->collisions +
11925 get_stat64(&hw_stats->tx_collisions);
11927 stats->rx_length_errors = old_stats->rx_length_errors +
11928 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11929 get_stat64(&hw_stats->rx_undersize_packets);
11931 stats->rx_frame_errors = old_stats->rx_frame_errors +
11932 get_stat64(&hw_stats->rx_align_errors);
11933 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11934 get_stat64(&hw_stats->tx_discards);
11935 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11936 get_stat64(&hw_stats->tx_carrier_sense_errors);
11938 stats->rx_crc_errors = old_stats->rx_crc_errors +
11939 tg3_calc_crc_errors(tp);
11941 stats->rx_missed_errors = old_stats->rx_missed_errors +
11942 get_stat64(&hw_stats->rx_discards);
11944 stats->rx_dropped = tp->rx_dropped;
11945 stats->tx_dropped = tp->tx_dropped;
11948 static int tg3_get_regs_len(struct net_device *dev)
11950 return TG3_REG_BLK_SIZE;
11953 static void tg3_get_regs(struct net_device *dev,
11954 struct ethtool_regs *regs, void *_p)
11956 struct tg3 *tp = netdev_priv(dev);
11960 memset(_p, 0, TG3_REG_BLK_SIZE);
11962 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11963 return;
11965 tg3_full_lock(tp, 0);
11967 tg3_dump_legacy_regs(tp, (u32 *)_p);
11969 tg3_full_unlock(tp);
11972 static int tg3_get_eeprom_len(struct net_device *dev)
11974 struct tg3 *tp = netdev_priv(dev);
11976 return tp->nvram_size;
11979 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11981 struct tg3 *tp = netdev_priv(dev);
11982 int ret, cpmu_restore = 0;
11983 u8 *pd;
11984 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11985 __be32 val;
11987 if (tg3_flag(tp, NO_NVRAM))
11988 return -EINVAL;
11990 offset = eeprom->offset;
11991 len = eeprom->len;
11992 eeprom->len = 0;
11994 eeprom->magic = TG3_EEPROM_MAGIC;
11996 /* Override clock, link aware and link idle modes */
11997 if (tg3_flag(tp, CPMU_PRESENT)) {
11998 cpmu_val = tr32(TG3_CPMU_CTRL);
11999 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12000 CPMU_CTRL_LINK_IDLE_MODE)) {
12001 tw32(TG3_CPMU_CTRL, cpmu_val &
12002 ~(CPMU_CTRL_LINK_AWARE_MODE |
12003 CPMU_CTRL_LINK_IDLE_MODE));
12007 tg3_override_clk(tp);
12010 /* adjustments to start on required 4 byte boundary */
12011 b_offset = offset & 3;
12012 b_count = 4 - b_offset;
12013 if (b_count > len) {
12014 /* i.e. offset=1 len=2 */
12015 b_count = len;
12016 }
12017 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12020 memcpy(data, ((char *)&val) + b_offset, b_count);
12021 len -= b_count;
12022 offset += b_count;
12023 eeprom->len += b_count;
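/* Worked example of the alignment fix-up above, assuming a caller passes
 * offset = 1 and len = 10: b_offset = 1 & 3 = 1 and b_count = 3, so one
 * aligned word is read at NVRAM offset 0 and its upper three bytes are
 * copied out; afterwards offset = 4 and len = 7, leaving the aligned middle
 * and the trailing fragment to the loops below.
 */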
12026 /* read bytes up to the last 4 byte boundary */
12027 pd = &data[eeprom->len];
12028 for (i = 0; i < (len - (len & 3)); i += 4) {
12029 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12036 memcpy(pd + i, &val, 4);
12037 if (need_resched()) {
12038 if (signal_pending(current)) {
12049 /* read last bytes not ending on 4 byte boundary */
12050 pd = &data[eeprom->len];
12052 b_offset = offset + len - b_count;
12053 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12056 memcpy(pd, &val, b_count);
12057 eeprom->len += b_count;
12062 /* Restore clock, link aware and link idle modes */
12063 tg3_restore_clk(tp);
12065 tw32(TG3_CPMU_CTRL, cpmu_val);
12070 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12072 struct tg3 *tp = netdev_priv(dev);
12074 u32 offset, len, b_offset, odd_len;
12076 __be32 start = 0, end;
12078 if (tg3_flag(tp, NO_NVRAM) ||
12079 eeprom->magic != TG3_EEPROM_MAGIC)
12082 offset = eeprom->offset;
12085 if ((b_offset = (offset & 3))) {
12086 /* adjustments to start on required 4 byte boundary */
12087 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12098 /* adjustments to end on required 4 byte boundary */
12100 len = (len + 3) & ~3;
12101 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12107 if (b_offset || odd_len) {
12108 buf = kmalloc(len, GFP_KERNEL);
12112 memcpy(buf, &start, 4);
12114 memcpy(buf+len-4, &end, 4);
12115 memcpy(buf + b_offset, data, eeprom->len);
12118 ret = tg3_nvram_write_block(tp, offset, len, buf);
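/* Sketch of the read-modify-write framing above, assuming offset = 2 and
 * eeprom->len = 3 (NVRAM bytes 2..4): b_offset = 2, so the word at offset 0
 * is read into start; the span is then rounded up to 8 bytes, the word at
 * offset 4 is read into end, and the bounce buffer is assembled as
 * [start | user data | end] before the single aligned
 * tg3_nvram_write_block() call.
 */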
12126 static int tg3_get_link_ksettings(struct net_device *dev,
12127 struct ethtool_link_ksettings *cmd)
12129 struct tg3 *tp = netdev_priv(dev);
12130 u32 supported, advertising;
12132 if (tg3_flag(tp, USE_PHYLIB)) {
12133 struct phy_device *phydev;
12134 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12137 phy_ethtool_ksettings_get(phydev, cmd);
12142 supported = (SUPPORTED_Autoneg);
12144 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12145 supported |= (SUPPORTED_1000baseT_Half |
12146 SUPPORTED_1000baseT_Full);
12148 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12149 supported |= (SUPPORTED_100baseT_Half |
12150 SUPPORTED_100baseT_Full |
12151 SUPPORTED_10baseT_Half |
12152 SUPPORTED_10baseT_Full |
12153 SUPPORTED_TP);
12154 cmd->base.port = PORT_TP;
12156 supported |= SUPPORTED_FIBRE;
12157 cmd->base.port = PORT_FIBRE;
12159 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12162 advertising = tp->link_config.advertising;
12163 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12164 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12165 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12166 advertising |= ADVERTISED_Pause;
12168 advertising |= ADVERTISED_Pause |
12169 ADVERTISED_Asym_Pause;
12171 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12172 advertising |= ADVERTISED_Asym_Pause;
12175 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12178 if (netif_running(dev) && tp->link_up) {
12179 cmd->base.speed = tp->link_config.active_speed;
12180 cmd->base.duplex = tp->link_config.active_duplex;
12181 ethtool_convert_legacy_u32_to_link_mode(
12182 cmd->link_modes.lp_advertising,
12183 tp->link_config.rmt_adv);
12185 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12186 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12187 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12189 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12192 cmd->base.speed = SPEED_UNKNOWN;
12193 cmd->base.duplex = DUPLEX_UNKNOWN;
12194 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12196 cmd->base.phy_address = tp->phy_addr;
12197 cmd->base.autoneg = tp->link_config.autoneg;
12201 static int tg3_set_link_ksettings(struct net_device *dev,
12202 const struct ethtool_link_ksettings *cmd)
12204 struct tg3 *tp = netdev_priv(dev);
12205 u32 speed = cmd->base.speed;
12208 if (tg3_flag(tp, USE_PHYLIB)) {
12209 struct phy_device *phydev;
12210 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12212 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12213 return phy_ethtool_ksettings_set(phydev, cmd);
12216 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12217 cmd->base.autoneg != AUTONEG_DISABLE)
12220 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12221 cmd->base.duplex != DUPLEX_FULL &&
12222 cmd->base.duplex != DUPLEX_HALF)
12225 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12226 cmd->link_modes.advertising);
12228 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12229 u32 mask = ADVERTISED_Autoneg |
12230 ADVERTISED_Pause |
12231 ADVERTISED_Asym_Pause;
12233 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12234 mask |= ADVERTISED_1000baseT_Half |
12235 ADVERTISED_1000baseT_Full;
12237 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12238 mask |= ADVERTISED_100baseT_Half |
12239 ADVERTISED_100baseT_Full |
12240 ADVERTISED_10baseT_Half |
12241 ADVERTISED_10baseT_Full |
12242 ADVERTISED_TP;
12243 else
12244 mask |= ADVERTISED_FIBRE;
12246 if (advertising & ~mask)
12249 mask &= (ADVERTISED_1000baseT_Half |
12250 ADVERTISED_1000baseT_Full |
12251 ADVERTISED_100baseT_Half |
12252 ADVERTISED_100baseT_Full |
12253 ADVERTISED_10baseT_Half |
12254 ADVERTISED_10baseT_Full);
12256 advertising &= mask;
12258 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12259 if (speed != SPEED_1000)
12260 return -EINVAL;
12262 if (cmd->base.duplex != DUPLEX_FULL)
12263 return -EINVAL;
12265 if (speed != SPEED_100 && speed != SPEED_10)
12266 return -EINVAL;
12271 tg3_full_lock(tp, 0);
12273 tp->link_config.autoneg = cmd->base.autoneg;
12274 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12275 tp->link_config.advertising = (advertising |
12276 ADVERTISED_Autoneg);
12277 tp->link_config.speed = SPEED_UNKNOWN;
12278 tp->link_config.duplex = DUPLEX_UNKNOWN;
12280 tp->link_config.advertising = 0;
12281 tp->link_config.speed = speed;
12282 tp->link_config.duplex = cmd->base.duplex;
12285 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12287 tg3_warn_mgmt_link_flap(tp);
12289 if (netif_running(dev))
12290 tg3_setup_phy(tp, true);
12292 tg3_full_unlock(tp);
12297 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12299 struct tg3 *tp = netdev_priv(dev);
12301 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12302 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12303 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12306 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12308 struct tg3 *tp = netdev_priv(dev);
12310 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12311 wol->supported = WAKE_MAGIC;
12313 wol->supported = 0;
12315 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12316 wol->wolopts = WAKE_MAGIC;
12317 memset(&wol->sopass, 0, sizeof(wol->sopass));
12320 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12322 struct tg3 *tp = netdev_priv(dev);
12323 struct device *dp = &tp->pdev->dev;
12325 if (wol->wolopts & ~WAKE_MAGIC)
12327 if ((wol->wolopts & WAKE_MAGIC) &&
12328 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12331 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12333 if (device_may_wakeup(dp))
12334 tg3_flag_set(tp, WOL_ENABLE);
12336 tg3_flag_clear(tp, WOL_ENABLE);
12341 static u32 tg3_get_msglevel(struct net_device *dev)
12343 struct tg3 *tp = netdev_priv(dev);
12344 return tp->msg_enable;
12347 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12349 struct tg3 *tp = netdev_priv(dev);
12350 tp->msg_enable = value;
12353 static int tg3_nway_reset(struct net_device *dev)
12355 struct tg3 *tp = netdev_priv(dev);
12358 if (!netif_running(dev))
12361 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12364 tg3_warn_mgmt_link_flap(tp);
12366 if (tg3_flag(tp, USE_PHYLIB)) {
12367 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12369 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12373 spin_lock_bh(&tp->lock);
12375 tg3_readphy(tp, MII_BMCR, &bmcr);
12376 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12377 ((bmcr & BMCR_ANENABLE) ||
12378 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12379 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12380 BMCR_ANENABLE);
12381 r = 0;
12383 spin_unlock_bh(&tp->lock);
12389 static void tg3_get_ringparam(struct net_device *dev,
12390 struct ethtool_ringparam *ering,
12391 struct kernel_ethtool_ringparam *kernel_ering,
12392 struct netlink_ext_ack *extack)
12394 struct tg3 *tp = netdev_priv(dev);
12396 ering->rx_max_pending = tp->rx_std_ring_mask;
12397 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12398 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12400 ering->rx_jumbo_max_pending = 0;
12402 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12404 ering->rx_pending = tp->rx_pending;
12405 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12406 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12408 ering->rx_jumbo_pending = 0;
12410 ering->tx_pending = tp->napi[0].tx_pending;
12413 static int tg3_set_ringparam(struct net_device *dev,
12414 struct ethtool_ringparam *ering,
12415 struct kernel_ethtool_ringparam *kernel_ering,
12416 struct netlink_ext_ack *extack)
12418 struct tg3 *tp = netdev_priv(dev);
12419 int i, irq_sync = 0, err = 0;
12420 bool reset_phy = false;
12422 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12423 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12424 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12425 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12426 (tg3_flag(tp, TSO_BUG) &&
12427 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12430 if (netif_running(dev)) {
12432 tg3_netif_stop(tp);
12436 tg3_full_lock(tp, irq_sync);
12438 tp->rx_pending = ering->rx_pending;
12440 if (tg3_flag(tp, MAX_RXPEND_64) &&
12441 tp->rx_pending > 63)
12442 tp->rx_pending = 63;
12444 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12445 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12447 for (i = 0; i < tp->irq_max; i++)
12448 tp->napi[i].tx_pending = ering->tx_pending;
12450 if (netif_running(dev)) {
12451 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12452 /* Reset PHY to avoid PHY lock up */
12453 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12454 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12455 tg3_asic_rev(tp) == ASIC_REV_5720)
12456 reset_phy = true;
12458 err = tg3_restart_hw(tp, reset_phy);
12459 if (!err)
12460 tg3_netif_start(tp);
12463 tg3_full_unlock(tp);
12465 if (irq_sync && !err)
12471 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12473 struct tg3 *tp = netdev_priv(dev);
12475 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12477 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12478 epause->rx_pause = 1;
12480 epause->rx_pause = 0;
12482 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12483 epause->tx_pause = 1;
12485 epause->tx_pause = 0;
12488 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12490 struct tg3 *tp = netdev_priv(dev);
12492 bool reset_phy = false;
12494 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12495 tg3_warn_mgmt_link_flap(tp);
12497 if (tg3_flag(tp, USE_PHYLIB)) {
12498 struct phy_device *phydev;
12500 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12502 if (!phy_validate_pause(phydev, epause))
12505 tp->link_config.flowctrl = 0;
12506 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12507 if (epause->rx_pause) {
12508 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12510 if (epause->tx_pause) {
12511 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12513 } else if (epause->tx_pause) {
12514 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12517 if (epause->autoneg)
12518 tg3_flag_set(tp, PAUSE_AUTONEG);
12520 tg3_flag_clear(tp, PAUSE_AUTONEG);
12522 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12523 if (phydev->autoneg) {
12524 /* phy_set_asym_pause() will
12525 * renegotiate the link to inform our
12526 * link partner of our flow control
12527 * settings, even if the flow control
12528 * is forced. Let tg3_adjust_link()
12529 * do the final flow control setup.
12534 if (!epause->autoneg)
12535 tg3_setup_flow_control(tp, 0, 0);
12540 if (netif_running(dev)) {
12541 tg3_netif_stop(tp);
12545 tg3_full_lock(tp, irq_sync);
12547 if (epause->autoneg)
12548 tg3_flag_set(tp, PAUSE_AUTONEG);
12550 tg3_flag_clear(tp, PAUSE_AUTONEG);
12551 if (epause->rx_pause)
12552 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12554 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12555 if (epause->tx_pause)
12556 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12558 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12560 if (netif_running(dev)) {
12561 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12562 /* Reset PHY to avoid PHY lock up */
12563 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12564 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12565 tg3_asic_rev(tp) == ASIC_REV_5720)
12566 reset_phy = true;
12568 err = tg3_restart_hw(tp, reset_phy);
12569 if (!err)
12570 tg3_netif_start(tp);
12573 tg3_full_unlock(tp);
12576 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12581 static int tg3_get_sset_count(struct net_device *dev, int sset)
12585 return TG3_NUM_TEST;
12587 return TG3_NUM_STATS;
12589 return -EOPNOTSUPP;
12593 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12594 u32 *rules __always_unused)
12596 struct tg3 *tp = netdev_priv(dev);
12598 if (!tg3_flag(tp, SUPPORT_MSIX))
12599 return -EOPNOTSUPP;
12601 switch (info->cmd) {
12602 case ETHTOOL_GRXRINGS:
12603 if (netif_running(tp->dev))
12604 info->data = tp->rxq_cnt;
12606 info->data = num_online_cpus();
12607 if (info->data > TG3_RSS_MAX_NUM_QS)
12608 info->data = TG3_RSS_MAX_NUM_QS;
12614 return -EOPNOTSUPP;
12618 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12621 struct tg3 *tp = netdev_priv(dev);
12623 if (tg3_flag(tp, SUPPORT_MSIX))
12624 size = TG3_RSS_INDIR_TBL_SIZE;
12629 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12631 struct tg3 *tp = netdev_priv(dev);
12635 *hfunc = ETH_RSS_HASH_TOP;
12639 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12640 indir[i] = tp->rss_ind_tbl[i];
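/* Illustration of the table exported above: for each received packet the
 * hardware computes an RSS hash h and delivers it to rx queue
 * rss_ind_tbl[h % TG3_RSS_INDIR_TBL_SIZE], so a table of all zeros (as
 * programmed by the loopback self test later in this file) steers every
 * flow to the first queue.
 */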
12645 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12648 struct tg3 *tp = netdev_priv(dev);
12651 /* We require at least one supported parameter to be changed and no
12652 * change in any of the unsupported parameters
12653 */
12654 if (key ||
12655 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12656 return -EOPNOTSUPP;
12661 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12662 tp->rss_ind_tbl[i] = indir[i];
12664 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12667 /* It is legal to write the indirection
12668 * table while the device is running.
12670 tg3_full_lock(tp, 0);
12671 tg3_rss_write_indir_tbl(tp);
12672 tg3_full_unlock(tp);
12677 static void tg3_get_channels(struct net_device *dev,
12678 struct ethtool_channels *channel)
12680 struct tg3 *tp = netdev_priv(dev);
12681 u32 deflt_qs = netif_get_num_default_rss_queues();
12683 channel->max_rx = tp->rxq_max;
12684 channel->max_tx = tp->txq_max;
12686 if (netif_running(dev)) {
12687 channel->rx_count = tp->rxq_cnt;
12688 channel->tx_count = tp->txq_cnt;
12691 channel->rx_count = tp->rxq_req;
12693 channel->rx_count = min(deflt_qs, tp->rxq_max);
12696 channel->tx_count = tp->txq_req;
12698 channel->tx_count = min(deflt_qs, tp->txq_max);
12702 static int tg3_set_channels(struct net_device *dev,
12703 struct ethtool_channels *channel)
12705 struct tg3 *tp = netdev_priv(dev);
12707 if (!tg3_flag(tp, SUPPORT_MSIX))
12708 return -EOPNOTSUPP;
12710 if (channel->rx_count > tp->rxq_max ||
12711 channel->tx_count > tp->txq_max)
12714 tp->rxq_req = channel->rx_count;
12715 tp->txq_req = channel->tx_count;
12717 if (!netif_running(dev))
12722 tg3_carrier_off(tp);
12724 tg3_start(tp, true, false, false);
12729 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12731 switch (stringset) {
12733 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12736 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12739 WARN_ON(1); /* we need a WARN() */
12744 static int tg3_set_phys_id(struct net_device *dev,
12745 enum ethtool_phys_id_state state)
12747 struct tg3 *tp = netdev_priv(dev);
12750 case ETHTOOL_ID_ACTIVE:
12751 return 1; /* cycle on/off once per second */
12753 case ETHTOOL_ID_ON:
12754 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12755 LED_CTRL_1000MBPS_ON |
12756 LED_CTRL_100MBPS_ON |
12757 LED_CTRL_10MBPS_ON |
12758 LED_CTRL_TRAFFIC_OVERRIDE |
12759 LED_CTRL_TRAFFIC_BLINK |
12760 LED_CTRL_TRAFFIC_LED);
12763 case ETHTOOL_ID_OFF:
12764 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12765 LED_CTRL_TRAFFIC_OVERRIDE);
12768 case ETHTOOL_ID_INACTIVE:
12769 tw32(MAC_LED_CTRL, tp->led_ctrl);
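/* Usage note: returning 1 from the ETHTOOL_ID_ACTIVE case above asks the
 * ethtool core to drive the blinking itself, calling back with
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF roughly once per second (e.g. for
 * "ethtool -p ethX 5"), and finally with ETHTOOL_ID_INACTIVE so that
 * tp->led_ctrl is restored.
 */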
12776 static void tg3_get_ethtool_stats(struct net_device *dev,
12777 struct ethtool_stats *estats, u64 *tmp_stats)
12779 struct tg3 *tp = netdev_priv(dev);
12782 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12784 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12787 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12791 u32 offset = 0, len = 0;
12794 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12797 if (magic == TG3_EEPROM_MAGIC) {
12798 for (offset = TG3_NVM_DIR_START;
12799 offset < TG3_NVM_DIR_END;
12800 offset += TG3_NVM_DIRENT_SIZE) {
12801 if (tg3_nvram_read(tp, offset, &val))
12804 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12805 TG3_NVM_DIRTYPE_EXTVPD)
12809 if (offset != TG3_NVM_DIR_END) {
12810 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12811 if (tg3_nvram_read(tp, offset + 4, &offset))
12814 offset = tg3_nvram_logical_addr(tp, offset);
12817 if (!offset || !len) {
12818 offset = TG3_NVM_VPD_OFF;
12819 len = TG3_NVM_VPD_LEN;
12822 buf = kmalloc(len, GFP_KERNEL);
12826 for (i = 0; i < len; i += 4) {
12827 /* The data is in little-endian format in NVRAM.
12828 * Use the big-endian read routines to preserve
12829 * the byte order as it exists in NVRAM.
12831 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12836 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12848 #define NVRAM_TEST_SIZE 0x100
12849 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12850 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12851 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12852 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12853 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12854 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12855 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12856 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12858 static int tg3_test_nvram(struct tg3 *tp)
12862 int i, j, k, err = 0, size;
12865 if (tg3_flag(tp, NO_NVRAM))
12868 if (tg3_nvram_read(tp, 0, &magic) != 0)
12871 if (magic == TG3_EEPROM_MAGIC)
12872 size = NVRAM_TEST_SIZE;
12873 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12874 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12875 TG3_EEPROM_SB_FORMAT_1) {
12876 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12877 case TG3_EEPROM_SB_REVISION_0:
12878 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12880 case TG3_EEPROM_SB_REVISION_2:
12881 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12883 case TG3_EEPROM_SB_REVISION_3:
12884 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12886 case TG3_EEPROM_SB_REVISION_4:
12887 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12889 case TG3_EEPROM_SB_REVISION_5:
12890 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12892 case TG3_EEPROM_SB_REVISION_6:
12893 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12900 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12901 size = NVRAM_SELFBOOT_HW_SIZE;
12905 buf = kmalloc(size, GFP_KERNEL);
12910 for (i = 0, j = 0; i < size; i += 4, j++) {
12911 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12918 /* Selfboot format */
12919 magic = be32_to_cpu(buf[0]);
12920 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12921 TG3_EEPROM_MAGIC_FW) {
12922 u8 *buf8 = (u8 *) buf, csum8 = 0;
12924 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12925 TG3_EEPROM_SB_REVISION_2) {
12926 /* For rev 2, the csum doesn't include the MBA. */
12927 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12929 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12932 for (i = 0; i < size; i++)
12945 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12946 TG3_EEPROM_MAGIC_HW) {
12947 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12948 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12949 u8 *buf8 = (u8 *) buf;
12951 /* Separate the parity bits and the data bytes. */
12952 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12953 if ((i == 0) || (i == 8)) {
12957 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12958 parity[k++] = buf8[i] & msk;
12960 } else if (i == 16) {
12964 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12965 parity[k++] = buf8[i] & msk;
12968 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12969 parity[k++] = buf8[i] & msk;
12972 data[j++] = buf8[i];
12976 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12977 u8 hw8 = hweight8(data[i]);
12979 if ((hw8 & 0x1) && parity[i])
12981 else if (!(hw8 & 0x1) && !parity[i])
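/* The loop above enforces odd parity: the popcount of each data byte plus
 * its stored parity bit must be odd. Worked example: data[i] = 0xb0 has
 * popcount 3 (already odd), so its parity bit must be clear; data[i] = 0x33
 * has popcount 4, so its parity bit must be set. Either mismatch fails the
 * test.
 */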
12990 /* Bootstrap checksum at offset 0x10 */
12991 csum = calc_crc((unsigned char *) buf, 0x10);
12992 if (csum != le32_to_cpu(buf[0x10/4]))
12995 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12996 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12997 if (csum != le32_to_cpu(buf[0xfc/4]))
13002 buf = tg3_vpd_readblock(tp, &len);
13006 err = pci_vpd_check_csum(buf, len);
13007 /* go on if no checksum found */
13015 #define TG3_SERDES_TIMEOUT_SEC 2
13016 #define TG3_COPPER_TIMEOUT_SEC 6
13018 static int tg3_test_link(struct tg3 *tp)
13022 if (!netif_running(tp->dev))
13025 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13026 max = TG3_SERDES_TIMEOUT_SEC;
13028 max = TG3_COPPER_TIMEOUT_SEC;
13030 for (i = 0; i < max; i++) {
13031 if (tp->link_up)
13032 return 0;
13034 if (msleep_interruptible(1000))
13035 break;
13041 /* Only test the commonly used registers */
13042 static int tg3_test_registers(struct tg3 *tp)
13044 int i, is_5705, is_5750;
13045 u32 offset, read_mask, write_mask, val, save_val, read_val;
13049 #define TG3_FL_5705 0x1
13050 #define TG3_FL_NOT_5705 0x2
13051 #define TG3_FL_NOT_5788 0x4
13052 #define TG3_FL_NOT_5750 0x8
13056 /* MAC Control Registers */
13057 { MAC_MODE, TG3_FL_NOT_5705,
13058 0x00000000, 0x00ef6f8c },
13059 { MAC_MODE, TG3_FL_5705,
13060 0x00000000, 0x01ef6b8c },
13061 { MAC_STATUS, TG3_FL_NOT_5705,
13062 0x03800107, 0x00000000 },
13063 { MAC_STATUS, TG3_FL_5705,
13064 0x03800100, 0x00000000 },
13065 { MAC_ADDR_0_HIGH, 0x0000,
13066 0x00000000, 0x0000ffff },
13067 { MAC_ADDR_0_LOW, 0x0000,
13068 0x00000000, 0xffffffff },
13069 { MAC_RX_MTU_SIZE, 0x0000,
13070 0x00000000, 0x0000ffff },
13071 { MAC_TX_MODE, 0x0000,
13072 0x00000000, 0x00000070 },
13073 { MAC_TX_LENGTHS, 0x0000,
13074 0x00000000, 0x00003fff },
13075 { MAC_RX_MODE, TG3_FL_NOT_5705,
13076 0x00000000, 0x000007fc },
13077 { MAC_RX_MODE, TG3_FL_5705,
13078 0x00000000, 0x000007dc },
13079 { MAC_HASH_REG_0, 0x0000,
13080 0x00000000, 0xffffffff },
13081 { MAC_HASH_REG_1, 0x0000,
13082 0x00000000, 0xffffffff },
13083 { MAC_HASH_REG_2, 0x0000,
13084 0x00000000, 0xffffffff },
13085 { MAC_HASH_REG_3, 0x0000,
13086 0x00000000, 0xffffffff },
13088 /* Receive Data and Receive BD Initiator Control Registers. */
13089 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13090 0x00000000, 0xffffffff },
13091 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13092 0x00000000, 0xffffffff },
13093 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13094 0x00000000, 0x00000003 },
13095 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13096 0x00000000, 0xffffffff },
13097 { RCVDBDI_STD_BD+0, 0x0000,
13098 0x00000000, 0xffffffff },
13099 { RCVDBDI_STD_BD+4, 0x0000,
13100 0x00000000, 0xffffffff },
13101 { RCVDBDI_STD_BD+8, 0x0000,
13102 0x00000000, 0xffff0002 },
13103 { RCVDBDI_STD_BD+0xc, 0x0000,
13104 0x00000000, 0xffffffff },
13106 /* Receive BD Initiator Control Registers. */
13107 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13108 0x00000000, 0xffffffff },
13109 { RCVBDI_STD_THRESH, TG3_FL_5705,
13110 0x00000000, 0x000003ff },
13111 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13112 0x00000000, 0xffffffff },
13114 /* Host Coalescing Control Registers. */
13115 { HOSTCC_MODE, TG3_FL_NOT_5705,
13116 0x00000000, 0x00000004 },
13117 { HOSTCC_MODE, TG3_FL_5705,
13118 0x00000000, 0x000000f6 },
13119 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13120 0x00000000, 0xffffffff },
13121 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13122 0x00000000, 0x000003ff },
13123 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13124 0x00000000, 0xffffffff },
13125 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13126 0x00000000, 0x000003ff },
13127 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13128 0x00000000, 0xffffffff },
13129 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13130 0x00000000, 0x000000ff },
13131 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13132 0x00000000, 0xffffffff },
13133 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13134 0x00000000, 0x000000ff },
13135 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13136 0x00000000, 0xffffffff },
13137 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13138 0x00000000, 0xffffffff },
13139 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13140 0x00000000, 0xffffffff },
13141 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13142 0x00000000, 0x000000ff },
13143 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13144 0x00000000, 0xffffffff },
13145 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13146 0x00000000, 0x000000ff },
13147 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13148 0x00000000, 0xffffffff },
13149 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13150 0x00000000, 0xffffffff },
13151 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13152 0x00000000, 0xffffffff },
13153 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13154 0x00000000, 0xffffffff },
13155 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13156 0x00000000, 0xffffffff },
13157 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13158 0xffffffff, 0x00000000 },
13159 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13160 0xffffffff, 0x00000000 },
13162 /* Buffer Manager Control Registers. */
13163 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13164 0x00000000, 0x007fff80 },
13165 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13166 0x00000000, 0x007fffff },
13167 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13168 0x00000000, 0x0000003f },
13169 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13170 0x00000000, 0x000001ff },
13171 { BUFMGR_MB_HIGH_WATER, 0x0000,
13172 0x00000000, 0x000001ff },
13173 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13174 0xffffffff, 0x00000000 },
13175 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13176 0xffffffff, 0x00000000 },
13178 /* Mailbox Registers */
13179 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13180 0x00000000, 0x000001ff },
13181 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13182 0x00000000, 0x000001ff },
13183 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13184 0x00000000, 0x000007ff },
13185 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13186 0x00000000, 0x000001ff },
13188 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13191 is_5705 = is_5750 = 0;
13192 if (tg3_flag(tp, 5705_PLUS)) {
13194 if (tg3_flag(tp, 5750_PLUS))
13198 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13199 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13202 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13205 if (tg3_flag(tp, IS_5788) &&
13206 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13209 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13212 offset = (u32) reg_tbl[i].offset;
13213 read_mask = reg_tbl[i].read_mask;
13214 write_mask = reg_tbl[i].write_mask;
13216 /* Save the original register content */
13217 save_val = tr32(offset);
13219 /* Determine the read-only value. */
13220 read_val = save_val & read_mask;
13222 /* Write zero to the register, then make sure the read-only bits
13223 * are not changed and the read/write bits are all zeros.
13227 val = tr32(offset);
13229 /* Test the read-only and read/write bits. */
13230 if (((val & read_mask) != read_val) || (val & write_mask))
13233 /* Write ones to all the bits defined by RdMask and WrMask, then
13234 * make sure the read-only bits are not changed and the
13235 * read/write bits are all ones.
13237 tw32(offset, read_mask | write_mask);
13239 val = tr32(offset);
13241 /* Test the read-only bits. */
13242 if ((val & read_mask) != read_val)
13245 /* Test the read/write bits. */
13246 if ((val & write_mask) != write_mask)
13249 tw32(offset, save_val);
13251 return 0;
13253 out:
13255 if (netif_msg_hw(tp))
13256 netdev_err(tp->dev,
13257 "Register test failed at offset %x\n", offset);
13258 tw32(offset, save_val);
13259 return -EIO;
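/* Worked example of the mask scheme above, using the
 * { MAC_MODE, TG3_FL_5705, 0x00000000, 0x01ef6b8c } table entry:
 * read_mask is 0, so no bits are expected to survive writes; after
 * tw32(MAC_MODE, 0) every write_mask bit must read back as 0, and after
 * tw32(MAC_MODE, 0x01ef6b8c) every write_mask bit must read back as 1,
 * otherwise the test bails out through the failure path above.
 */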
13262 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13264 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13268 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13269 for (j = 0; j < len; j += 4) {
13272 tg3_write_mem(tp, offset + j, test_pattern[i]);
13273 tg3_read_mem(tp, offset + j, &val);
13274 if (val != test_pattern[i])
13275 return -EIO;
13281 static int tg3_test_memory(struct tg3 *tp)
13283 static struct mem_entry {
13286 } mem_tbl_570x[] = {
13287 { 0x00000000, 0x00b50},
13288 { 0x00002000, 0x1c000},
13289 { 0xffffffff, 0x00000}
13290 }, mem_tbl_5705[] = {
13291 { 0x00000100, 0x0000c},
13292 { 0x00000200, 0x00008},
13293 { 0x00004000, 0x00800},
13294 { 0x00006000, 0x01000},
13295 { 0x00008000, 0x02000},
13296 { 0x00010000, 0x0e000},
13297 { 0xffffffff, 0x00000}
13298 }, mem_tbl_5755[] = {
13299 { 0x00000200, 0x00008},
13300 { 0x00004000, 0x00800},
13301 { 0x00006000, 0x00800},
13302 { 0x00008000, 0x02000},
13303 { 0x00010000, 0x0c000},
13304 { 0xffffffff, 0x00000}
13305 }, mem_tbl_5906[] = {
13306 { 0x00000200, 0x00008},
13307 { 0x00004000, 0x00400},
13308 { 0x00006000, 0x00400},
13309 { 0x00008000, 0x01000},
13310 { 0x00010000, 0x01000},
13311 { 0xffffffff, 0x00000}
13312 }, mem_tbl_5717[] = {
13313 { 0x00000200, 0x00008},
13314 { 0x00010000, 0x0a000},
13315 { 0x00020000, 0x13c00},
13316 { 0xffffffff, 0x00000}
13317 }, mem_tbl_57765[] = {
13318 { 0x00000200, 0x00008},
13319 { 0x00004000, 0x00800},
13320 { 0x00006000, 0x09800},
13321 { 0x00010000, 0x0a000},
13322 { 0xffffffff, 0x00000}
13324 struct mem_entry *mem_tbl;
13328 if (tg3_flag(tp, 5717_PLUS))
13329 mem_tbl = mem_tbl_5717;
13330 else if (tg3_flag(tp, 57765_CLASS) ||
13331 tg3_asic_rev(tp) == ASIC_REV_5762)
13332 mem_tbl = mem_tbl_57765;
13333 else if (tg3_flag(tp, 5755_PLUS))
13334 mem_tbl = mem_tbl_5755;
13335 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13336 mem_tbl = mem_tbl_5906;
13337 else if (tg3_flag(tp, 5705_PLUS))
13338 mem_tbl = mem_tbl_5705;
13340 mem_tbl = mem_tbl_570x;
13342 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13343 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13351 #define TG3_TSO_MSS 500
13353 #define TG3_TSO_IP_HDR_LEN 20
13354 #define TG3_TSO_TCP_HDR_LEN 20
13355 #define TG3_TSO_TCP_OPT_LEN 12
13357 static const u8 tg3_tso_header[] = {
13359 0x45, 0x00, 0x00, 0x00,
13360 0x00, 0x00, 0x40, 0x00,
13361 0x40, 0x06, 0x00, 0x00,
13362 0x0a, 0x00, 0x00, 0x01,
13363 0x0a, 0x00, 0x00, 0x02,
13364 0x0d, 0x00, 0xe0, 0x00,
13365 0x00, 0x00, 0x01, 0x00,
13366 0x00, 0x00, 0x02, 0x00,
13367 0x80, 0x10, 0x10, 0x00,
13368 0x14, 0x09, 0x00, 0x00,
13369 0x01, 0x01, 0x08, 0x0a,
13370 0x11, 0x11, 0x11, 0x11,
13371 0x11, 0x11, 0x11, 0x11,
13372 };
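/* How the template above decodes, assuming standard IPv4/TCP layouts:
 * bytes 0..19 form an IPv4 header (0x45 = version 4 with a 5-word IHL,
 * protocol 0x06 = TCP, saddr 10.0.0.1, daddr 10.0.0.2, tot_len left 0 to
 * be patched in tg3_run_loopback()); the remainder is a TCP header with a
 * data offset of 8 words (0x80), i.e. 20 fixed bytes plus 12 bytes of
 * timestamp option, matching the TG3_TSO_*_LEN constants above.
 */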
13374 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13376 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13377 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13379 struct sk_buff *skb;
13380 u8 *tx_data, *rx_data;
13382 int num_pkts, tx_len, rx_len, i, err;
13383 struct tg3_rx_buffer_desc *desc;
13384 struct tg3_napi *tnapi, *rnapi;
13385 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13387 tnapi = &tp->napi[0];
13388 rnapi = &tp->napi[0];
13389 if (tp->irq_cnt > 1) {
13390 if (tg3_flag(tp, ENABLE_RSS))
13391 rnapi = &tp->napi[1];
13392 if (tg3_flag(tp, ENABLE_TSS))
13393 tnapi = &tp->napi[1];
13395 coal_now = tnapi->coal_now | rnapi->coal_now;
13400 skb = netdev_alloc_skb(tp->dev, tx_len);
13404 tx_data = skb_put(skb, tx_len);
13405 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13406 memset(tx_data + ETH_ALEN, 0x0, 8);
13408 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13410 if (tso_loopback) {
13411 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13413 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13414 TG3_TSO_TCP_OPT_LEN;
13416 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13417 sizeof(tg3_tso_header));
13420 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13421 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13423 /* Set the total length field in the IP header */
13424 iph->tot_len = htons((u16)(mss + hdr_len));
13426 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13427 TXD_FLAG_CPU_POST_DMA);
13429 if (tg3_flag(tp, HW_TSO_1) ||
13430 tg3_flag(tp, HW_TSO_2) ||
13431 tg3_flag(tp, HW_TSO_3)) {
13433 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13434 th = (struct tcphdr *)&tx_data[val];
13437 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13439 if (tg3_flag(tp, HW_TSO_3)) {
13440 mss |= (hdr_len & 0xc) << 12;
13441 if (hdr_len & 0x10)
13442 base_flags |= 0x00000010;
13443 base_flags |= (hdr_len & 0x3e0) << 5;
13444 } else if (tg3_flag(tp, HW_TSO_2))
13445 mss |= hdr_len << 9;
13446 else if (tg3_flag(tp, HW_TSO_1) ||
13447 tg3_asic_rev(tp) == ASIC_REV_5705) {
13448 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13450 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13453 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13456 data_off = ETH_HLEN;
13458 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13459 tx_len > VLAN_ETH_FRAME_LEN)
13460 base_flags |= TXD_FLAG_JMB_PKT;
13463 for (i = data_off; i < tx_len; i++)
13464 tx_data[i] = (u8) (i & 0xff);
13466 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13467 if (dma_mapping_error(&tp->pdev->dev, map)) {
13468 dev_kfree_skb(skb);
13472 val = tnapi->tx_prod;
13473 tnapi->tx_buffers[val].skb = skb;
13474 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13476 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13481 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13483 budget = tg3_tx_avail(tnapi);
13484 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13485 base_flags | TXD_FLAG_END, mss, 0)) {
13486 tnapi->tx_buffers[val].skb = NULL;
13487 dev_kfree_skb(skb);
13493 /* Sync BD data before updating mailbox */
13496 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13497 tr32_mailbox(tnapi->prodmbox);
13501 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13502 for (i = 0; i < 35; i++) {
13503 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13508 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13509 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13510 if ((tx_idx == tnapi->tx_prod) &&
13511 (rx_idx == (rx_start_idx + num_pkts)))
13515 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13516 dev_kfree_skb(skb);
13518 if (tx_idx != tnapi->tx_prod)
13521 if (rx_idx != rx_start_idx + num_pkts)
13525 while (rx_idx != rx_start_idx) {
13526 desc = &rnapi->rx_rcb[rx_start_idx++];
13527 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13528 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13530 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13531 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13534 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13537 if (!tso_loopback) {
13538 if (rx_len != tx_len)
13541 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13542 if (opaque_key != RXD_OPAQUE_RING_STD)
13545 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13548 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13549 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13550 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13554 if (opaque_key == RXD_OPAQUE_RING_STD) {
13555 rx_data = tpr->rx_std_buffers[desc_idx].data;
13556 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13558 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13559 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13560 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13565 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13568 rx_data += TG3_RX_OFFSET(tp);
13569 for (i = data_off; i < rx_len; i++, val++) {
13570 if (*(rx_data + i) != (u8) (val & 0xff))
13577 /* tg3_free_rings will unmap and free the rx_data */
13582 #define TG3_STD_LOOPBACK_FAILED 1
13583 #define TG3_JMB_LOOPBACK_FAILED 2
13584 #define TG3_TSO_LOOPBACK_FAILED 4
13585 #define TG3_LOOPBACK_FAILED \
13586 (TG3_STD_LOOPBACK_FAILED | \
13587 TG3_JMB_LOOPBACK_FAILED | \
13588 TG3_TSO_LOOPBACK_FAILED)
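/* Each u64 loopback result below is a combination of the failure bits
 * above; e.g. a value of 5 (TG3_STD_LOOPBACK_FAILED |
 * TG3_TSO_LOOPBACK_FAILED) in data[TG3_PHY_LOOPB_TEST] means the
 * standard-MTU and TSO runs failed while the jumbo run passed.
 */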
13590 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13594 u32 jmb_pkt_sz = 9000;
13597 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13599 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13600 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13602 if (!netif_running(tp->dev)) {
13603 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13604 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13606 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13610 err = tg3_reset_hw(tp, true);
13612 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13613 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13615 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13619 if (tg3_flag(tp, ENABLE_RSS)) {
13622 /* Reroute all rx packets to the 1st queue */
13623 for (i = MAC_RSS_INDIR_TBL_0;
13624 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13628 /* HW errata - mac loopback fails in some cases on 5780.
13629 * Normal traffic and PHY loopback are not affected by
13630 * errata. Also, the MAC loopback test is deprecated for
13631 * all newer ASIC revisions.
13633 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13634 !tg3_flag(tp, CPMU_PRESENT)) {
13635 tg3_mac_loopback(tp, true);
13637 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13638 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13640 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13641 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13642 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13644 tg3_mac_loopback(tp, false);
13647 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13648 !tg3_flag(tp, USE_PHYLIB)) {
13651 tg3_phy_lpbk_set(tp, 0, false);
13653 /* Wait for link */
13654 for (i = 0; i < 100; i++) {
13655 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13660 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13661 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13662 if (tg3_flag(tp, TSO_CAPABLE) &&
13663 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13664 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13665 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13666 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13667 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13670 tg3_phy_lpbk_set(tp, 0, true);
13672 /* All link indications report up, but the hardware
13673 * isn't really ready for about 20 msec. Double it
13674 * to be sure.
13675 */
13678 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13679 data[TG3_EXT_LOOPB_TEST] |=
13680 TG3_STD_LOOPBACK_FAILED;
13681 if (tg3_flag(tp, TSO_CAPABLE) &&
13682 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13683 data[TG3_EXT_LOOPB_TEST] |=
13684 TG3_TSO_LOOPBACK_FAILED;
13685 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13686 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13687 data[TG3_EXT_LOOPB_TEST] |=
13688 TG3_JMB_LOOPBACK_FAILED;
13691 /* Re-enable gphy autopowerdown. */
13692 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13693 tg3_phy_toggle_apd(tp, true);
13696 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13697 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13700 tp->phy_flags |= eee_cap;
13705 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13708 struct tg3 *tp = netdev_priv(dev);
13709 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13711 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13712 if (tg3_power_up(tp)) {
13713 etest->flags |= ETH_TEST_FL_FAILED;
13714 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13717 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13720 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13722 if (tg3_test_nvram(tp) != 0) {
13723 etest->flags |= ETH_TEST_FL_FAILED;
13724 data[TG3_NVRAM_TEST] = 1;
13726 if (!doextlpbk && tg3_test_link(tp)) {
13727 etest->flags |= ETH_TEST_FL_FAILED;
13728 data[TG3_LINK_TEST] = 1;
13730 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13731 int err, err2 = 0, irq_sync = 0;
13733 if (netif_running(dev)) {
13735 tg3_netif_stop(tp);
13739 tg3_full_lock(tp, irq_sync);
13740 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13741 err = tg3_nvram_lock(tp);
13742 tg3_halt_cpu(tp, RX_CPU_BASE);
13743 if (!tg3_flag(tp, 5705_PLUS))
13744 tg3_halt_cpu(tp, TX_CPU_BASE);
13746 tg3_nvram_unlock(tp);
13748 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13751 if (tg3_test_registers(tp) != 0) {
13752 etest->flags |= ETH_TEST_FL_FAILED;
13753 data[TG3_REGISTER_TEST] = 1;
13756 if (tg3_test_memory(tp) != 0) {
13757 etest->flags |= ETH_TEST_FL_FAILED;
13758 data[TG3_MEMORY_TEST] = 1;
13762 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13764 if (tg3_test_loopback(tp, data, doextlpbk))
13765 etest->flags |= ETH_TEST_FL_FAILED;
13767 tg3_full_unlock(tp);
13769 if (tg3_test_interrupt(tp) != 0) {
13770 etest->flags |= ETH_TEST_FL_FAILED;
13771 data[TG3_INTERRUPT_TEST] = 1;
13774 tg3_full_lock(tp, 0);
13776 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13777 if (netif_running(dev)) {
13778 tg3_flag_set(tp, INIT_COMPLETE);
13779 err2 = tg3_restart_hw(tp, true);
13780 if (!err2)
13781 tg3_netif_start(tp);
13784 tg3_full_unlock(tp);
13786 if (irq_sync && !err2)
13789 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13790 tg3_power_down_prepare(tp);
13794 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13796 struct tg3 *tp = netdev_priv(dev);
13797 struct hwtstamp_config stmpconf;
13799 if (!tg3_flag(tp, PTP_CAPABLE))
13800 return -EOPNOTSUPP;
13802 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13805 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13806 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13809 switch (stmpconf.rx_filter) {
13810 case HWTSTAMP_FILTER_NONE:
13813 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13814 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13815 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13817 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13818 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13819 TG3_RX_PTP_CTL_SYNC_EVNT;
13821 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13822 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13823 TG3_RX_PTP_CTL_DELAY_REQ;
13825 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13826 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13827 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13829 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13830 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13831 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13833 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13834 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13835 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13837 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13838 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13839 TG3_RX_PTP_CTL_SYNC_EVNT;
13841 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13842 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13843 TG3_RX_PTP_CTL_SYNC_EVNT;
13845 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13846 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13847 TG3_RX_PTP_CTL_SYNC_EVNT;
13849 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13850 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13851 TG3_RX_PTP_CTL_DELAY_REQ;
13853 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13854 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13855 TG3_RX_PTP_CTL_DELAY_REQ;
13857 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13858 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13859 TG3_RX_PTP_CTL_DELAY_REQ;
13865 if (netif_running(dev) && tp->rxptpctl)
13866 tw32(TG3_RX_PTP_CTL,
13867 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13869 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13870 tg3_flag_set(tp, TX_TSTAMP_EN);
13872 tg3_flag_clear(tp, TX_TSTAMP_EN);
13874 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13878 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13880 struct tg3 *tp = netdev_priv(dev);
13881 struct hwtstamp_config stmpconf;
13883 if (!tg3_flag(tp, PTP_CAPABLE))
13884 return -EOPNOTSUPP;
13886 stmpconf.flags = 0;
13887 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13888 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13890 switch (tp->rxptpctl) {
13892 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13894 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13895 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13897 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13898 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13900 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13901 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13903 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13904 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13906 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13907 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13909 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13910 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13912 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13913 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13915 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13916 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13918 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13919 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13921 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13922 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13924 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13925 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13927 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13928 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13935 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13939 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13941 struct mii_ioctl_data *data = if_mii(ifr);
13942 struct tg3 *tp = netdev_priv(dev);
13945 if (tg3_flag(tp, USE_PHYLIB)) {
13946 struct phy_device *phydev;
13947 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13949 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13950 return phy_mii_ioctl(phydev, ifr, cmd);
13955 data->phy_id = tp->phy_addr;
13958 case SIOCGMIIREG: {
13961 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13962 break; /* We have no PHY */
13964 if (!netif_running(dev))
13967 spin_lock_bh(&tp->lock);
13968 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13969 data->reg_num & 0x1f, &mii_regval);
13970 spin_unlock_bh(&tp->lock);
13972 data->val_out = mii_regval;
13978 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13979 break; /* We have no PHY */
13981 if (!netif_running(dev))
13984 spin_lock_bh(&tp->lock);
13985 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13986 data->reg_num & 0x1f, data->val_in);
13987 spin_unlock_bh(&tp->lock);
13991 case SIOCSHWTSTAMP:
13992 return tg3_hwtstamp_set(dev, ifr);
13994 case SIOCGHWTSTAMP:
13995 return tg3_hwtstamp_get(dev, ifr);
14001 return -EOPNOTSUPP;
14004 static int tg3_get_coalesce(struct net_device *dev,
14005 struct ethtool_coalesce *ec,
14006 struct kernel_ethtool_coalesce *kernel_coal,
14007 struct netlink_ext_ack *extack)
14009 struct tg3 *tp = netdev_priv(dev);
14011 memcpy(ec, &tp->coal, sizeof(*ec));
14015 static int tg3_set_coalesce(struct net_device *dev,
14016 struct ethtool_coalesce *ec,
14017 struct kernel_ethtool_coalesce *kernel_coal,
14018 struct netlink_ext_ack *extack)
14020 struct tg3 *tp = netdev_priv(dev);
14021 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14022 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14024 if (!tg3_flag(tp, 5705_PLUS)) {
14025 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14026 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14027 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14028 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14031 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14032 (!ec->rx_coalesce_usecs) ||
14033 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14034 (!ec->tx_coalesce_usecs) ||
14035 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14036 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14037 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14038 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14039 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14040 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14041 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14042 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14045 /* Only copy relevant parameters, ignore all others. */
14046 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14047 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14048 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14049 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14050 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14051 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14052 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14053 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14054 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14056 if (netif_running(dev)) {
14057 tg3_full_lock(tp, 0);
14058 __tg3_set_coalesce(tp, &tp->coal);
14059 tg3_full_unlock(tp);
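/* Editorial note: the bounds checked at the top of this function are what
 * user space runs into when tuning coalescing via the standard ethtool
 * interface, e.g.
 *
 *	ethtool -C eth0 rx-usecs 50 rx-frames 16
 *
 * ("eth0" and the values are illustrative; out-of-range requests are
 * rejected before any of the tp->coal fields are touched).
 */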
14064 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14066 struct tg3 *tp = netdev_priv(dev);
14068 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14069 netdev_warn(tp->dev, "Board does not support EEE!\n");
14070 return -EOPNOTSUPP;
14073 if (edata->advertised != tp->eee.advertised) {
14074 netdev_warn(tp->dev,
14075 "Direct manipulation of EEE advertisement is not supported\n");
14079 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14080 netdev_warn(tp->dev,
14081 "Maximal Tx Lpi timer supported is %#x(u)\n",
14082 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14088 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14089 tg3_warn_mgmt_link_flap(tp);
14091 if (netif_running(tp->dev)) {
14092 tg3_full_lock(tp, 0);
14095 tg3_full_unlock(tp);
14101 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14103 struct tg3 *tp = netdev_priv(dev);
14105 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14106 netdev_warn(tp->dev,
14107 "Board does not support EEE!\n");
14108 return -EOPNOTSUPP;
14115 static const struct ethtool_ops tg3_ethtool_ops = {
14116 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14117 ETHTOOL_COALESCE_MAX_FRAMES |
14118 ETHTOOL_COALESCE_USECS_IRQ |
14119 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14120 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14121 .get_drvinfo = tg3_get_drvinfo,
14122 .get_regs_len = tg3_get_regs_len,
14123 .get_regs = tg3_get_regs,
14124 .get_wol = tg3_get_wol,
14125 .set_wol = tg3_set_wol,
14126 .get_msglevel = tg3_get_msglevel,
14127 .set_msglevel = tg3_set_msglevel,
14128 .nway_reset = tg3_nway_reset,
14129 .get_link = ethtool_op_get_link,
14130 .get_eeprom_len = tg3_get_eeprom_len,
14131 .get_eeprom = tg3_get_eeprom,
14132 .set_eeprom = tg3_set_eeprom,
14133 .get_ringparam = tg3_get_ringparam,
14134 .set_ringparam = tg3_set_ringparam,
14135 .get_pauseparam = tg3_get_pauseparam,
14136 .set_pauseparam = tg3_set_pauseparam,
14137 .self_test = tg3_self_test,
14138 .get_strings = tg3_get_strings,
14139 .set_phys_id = tg3_set_phys_id,
14140 .get_ethtool_stats = tg3_get_ethtool_stats,
14141 .get_coalesce = tg3_get_coalesce,
14142 .set_coalesce = tg3_set_coalesce,
14143 .get_sset_count = tg3_get_sset_count,
14144 .get_rxnfc = tg3_get_rxnfc,
14145 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14146 .get_rxfh = tg3_get_rxfh,
14147 .set_rxfh = tg3_set_rxfh,
14148 .get_channels = tg3_get_channels,
14149 .set_channels = tg3_set_channels,
14150 .get_ts_info = tg3_get_ts_info,
14151 .get_eee = tg3_get_eee,
14152 .set_eee = tg3_set_eee,
14153 .get_link_ksettings = tg3_get_link_ksettings,
14154 .set_link_ksettings = tg3_set_link_ksettings,
14157 static void tg3_get_stats64(struct net_device *dev,
14158 struct rtnl_link_stats64 *stats)
14160 struct tg3 *tp = netdev_priv(dev);
14162 spin_lock_bh(&tp->lock);
14163 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14164 *stats = tp->net_stats_prev;
14165 spin_unlock_bh(&tp->lock);
14169 tg3_get_nstats(tp, stats);
14170 spin_unlock_bh(&tp->lock);
14173 static void tg3_set_rx_mode(struct net_device *dev)
14175 struct tg3 *tp = netdev_priv(dev);
14177 if (!netif_running(dev))
14180 tg3_full_lock(tp, 0);
14181 __tg3_set_rx_mode(dev);
14182 tg3_full_unlock(tp);
14185 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14188 dev->mtu = new_mtu;
14190 if (new_mtu > ETH_DATA_LEN) {
14191 if (tg3_flag(tp, 5780_CLASS)) {
14192 netdev_update_features(dev);
14193 tg3_flag_clear(tp, TSO_CAPABLE);
14194 } else {
14195 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14196 }
14197 } else {
14198 if (tg3_flag(tp, 5780_CLASS)) {
14199 tg3_flag_set(tp, TSO_CAPABLE);
14200 netdev_update_features(dev);
14201 }
14202 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14206 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14208 struct tg3 *tp = netdev_priv(dev);
14210 bool reset_phy = false;
14212 if (!netif_running(dev)) {
14213 /* We'll just catch it later when the device is brought up. */
14216 tg3_set_mtu(dev, tp, new_mtu);
14222 tg3_netif_stop(tp);
14224 tg3_set_mtu(dev, tp, new_mtu);
14226 tg3_full_lock(tp, 1);
14228 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14230 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14231 * breaks all requests down to 256 bytes.
14233 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14234 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14235 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14236 tg3_asic_rev(tp) == ASIC_REV_5720)
14237 reset_phy = true;
14239 err = tg3_restart_hw(tp, reset_phy);
14242 tg3_netif_start(tp);
14244 tg3_full_unlock(tp);
14252 static const struct net_device_ops tg3_netdev_ops = {
14253 .ndo_open = tg3_open,
14254 .ndo_stop = tg3_close,
14255 .ndo_start_xmit = tg3_start_xmit,
14256 .ndo_get_stats64 = tg3_get_stats64,
14257 .ndo_validate_addr = eth_validate_addr,
14258 .ndo_set_rx_mode = tg3_set_rx_mode,
14259 .ndo_set_mac_address = tg3_set_mac_addr,
14260 .ndo_eth_ioctl = tg3_ioctl,
14261 .ndo_tx_timeout = tg3_tx_timeout,
14262 .ndo_change_mtu = tg3_change_mtu,
14263 .ndo_fix_features = tg3_fix_features,
14264 .ndo_set_features = tg3_set_features,
14265 #ifdef CONFIG_NET_POLL_CONTROLLER
14266 .ndo_poll_controller = tg3_poll_controller,
14270 static void tg3_get_eeprom_size(struct tg3 *tp)
14272 u32 cursize, val, magic;
14274 tp->nvram_size = EEPROM_CHIP_SIZE;
14276 if (tg3_nvram_read(tp, 0, &magic) != 0)
14279 if ((magic != TG3_EEPROM_MAGIC) &&
14280 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14281 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14285 * Size the chip by reading offsets at increasing powers of two.
14286 * When we encounter our validation signature, we know the addressing
14287 * has wrapped around, and thus have our chip size.
14291 while (cursize < tp->nvram_size) {
14292 if (tg3_nvram_read(tp, cursize, &val) != 0)
14293 return;
14295 if (val == magic)
14296 break;
14298 cursize <<= 1;
14299 }
14301 tp->nvram_size = cursize;
14304 static void tg3_get_nvram_size(struct tg3 *tp)
14308 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14311 /* Selfboot format */
14312 if (val != TG3_EEPROM_MAGIC) {
14313 tg3_get_eeprom_size(tp);
14317 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14319 /* This is confusing. We want to operate on the
14320 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14321 * call will read from NVRAM and byteswap the data
14322 * according to the byteswapping settings for all
14323 * other register accesses. This ensures the data we
14324 * want will always reside in the lower 16-bits.
14325 * However, the data in NVRAM is in LE format, which
14326 * means the data from the NVRAM read will always be
14327 * opposite the endianness of the CPU. The 16-bit
14328 * byteswap then brings the data to CPU endianness.
14330 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
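/* Editorial worked example for the swab16() above, tracing the
 * little-endian-CPU case with an illustrative value: if the 16-bit size
 * field at offset 0xf2 holds 512 (0x0200, stored in NVRAM as bytes
 * 00 02), the read leaves the low 16 bits of 'val' byte-reversed with
 * respect to the CPU, i.e. 0x0002; swab16(0x0002) == 0x0200 == 512, and
 * 512 * 1024 selects a 512 KB NVRAM.
 */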
14334 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14337 static void tg3_get_nvram_info(struct tg3 *tp)
14341 nvcfg1 = tr32(NVRAM_CFG1);
14342 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14343 tg3_flag_set(tp, FLASH);
14345 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14346 tw32(NVRAM_CFG1, nvcfg1);
14349 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14350 tg3_flag(tp, 5780_CLASS)) {
14351 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14352 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14353 tp->nvram_jedecnum = JEDEC_ATMEL;
14354 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14355 tg3_flag_set(tp, NVRAM_BUFFERED);
14357 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14358 tp->nvram_jedecnum = JEDEC_ATMEL;
14359 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14361 case FLASH_VENDOR_ATMEL_EEPROM:
14362 tp->nvram_jedecnum = JEDEC_ATMEL;
14363 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14364 tg3_flag_set(tp, NVRAM_BUFFERED);
14366 case FLASH_VENDOR_ST:
14367 tp->nvram_jedecnum = JEDEC_ST;
14368 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14369 tg3_flag_set(tp, NVRAM_BUFFERED);
14371 case FLASH_VENDOR_SAIFUN:
14372 tp->nvram_jedecnum = JEDEC_SAIFUN;
14373 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14375 case FLASH_VENDOR_SST_SMALL:
14376 case FLASH_VENDOR_SST_LARGE:
14377 tp->nvram_jedecnum = JEDEC_SST;
14378 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14382 tp->nvram_jedecnum = JEDEC_ATMEL;
14383 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14384 tg3_flag_set(tp, NVRAM_BUFFERED);
14388 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14390 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14391 case FLASH_5752PAGE_SIZE_256:
14392 tp->nvram_pagesize = 256;
14394 case FLASH_5752PAGE_SIZE_512:
14395 tp->nvram_pagesize = 512;
14397 case FLASH_5752PAGE_SIZE_1K:
14398 tp->nvram_pagesize = 1024;
14400 case FLASH_5752PAGE_SIZE_2K:
14401 tp->nvram_pagesize = 2048;
14403 case FLASH_5752PAGE_SIZE_4K:
14404 tp->nvram_pagesize = 4096;
14406 case FLASH_5752PAGE_SIZE_264:
14407 tp->nvram_pagesize = 264;
14409 case FLASH_5752PAGE_SIZE_528:
14410 tp->nvram_pagesize = 528;
14415 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14419 nvcfg1 = tr32(NVRAM_CFG1);
14421 /* NVRAM protection for TPM */
14422 if (nvcfg1 & (1 << 27))
14423 tg3_flag_set(tp, PROTECTED_NVRAM);
14425 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14426 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14427 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14428 tp->nvram_jedecnum = JEDEC_ATMEL;
14429 tg3_flag_set(tp, NVRAM_BUFFERED);
14431 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14432 tp->nvram_jedecnum = JEDEC_ATMEL;
14433 tg3_flag_set(tp, NVRAM_BUFFERED);
14434 tg3_flag_set(tp, FLASH);
14436 case FLASH_5752VENDOR_ST_M45PE10:
14437 case FLASH_5752VENDOR_ST_M45PE20:
14438 case FLASH_5752VENDOR_ST_M45PE40:
14439 tp->nvram_jedecnum = JEDEC_ST;
14440 tg3_flag_set(tp, NVRAM_BUFFERED);
14441 tg3_flag_set(tp, FLASH);
14445 if (tg3_flag(tp, FLASH)) {
14446 tg3_nvram_get_pagesize(tp, nvcfg1);
14448 /* For eeprom, set pagesize to maximum eeprom size */
14449 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14451 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14452 tw32(NVRAM_CFG1, nvcfg1);
14456 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14458 u32 nvcfg1, protect = 0;
14460 nvcfg1 = tr32(NVRAM_CFG1);
14462 /* NVRAM protection for TPM */
14463 if (nvcfg1 & (1 << 27)) {
14464 tg3_flag_set(tp, PROTECTED_NVRAM);
14468 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14470 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14471 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14472 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14473 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14474 tp->nvram_jedecnum = JEDEC_ATMEL;
14475 tg3_flag_set(tp, NVRAM_BUFFERED);
14476 tg3_flag_set(tp, FLASH);
14477 tp->nvram_pagesize = 264;
14478 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14479 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14480 tp->nvram_size = (protect ? 0x3e200 :
14481 TG3_NVRAM_SIZE_512KB);
14482 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14483 tp->nvram_size = (protect ? 0x1f200 :
14484 TG3_NVRAM_SIZE_256KB);
14486 tp->nvram_size = (protect ? 0x1f200 :
14487 TG3_NVRAM_SIZE_128KB);
14489 case FLASH_5752VENDOR_ST_M45PE10:
14490 case FLASH_5752VENDOR_ST_M45PE20:
14491 case FLASH_5752VENDOR_ST_M45PE40:
14492 tp->nvram_jedecnum = JEDEC_ST;
14493 tg3_flag_set(tp, NVRAM_BUFFERED);
14494 tg3_flag_set(tp, FLASH);
14495 tp->nvram_pagesize = 256;
14496 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14497 tp->nvram_size = (protect ?
14498 TG3_NVRAM_SIZE_64KB :
14499 TG3_NVRAM_SIZE_128KB);
14500 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14501 tp->nvram_size = (protect ?
14502 TG3_NVRAM_SIZE_64KB :
14503 TG3_NVRAM_SIZE_256KB);
14505 tp->nvram_size = (protect ?
14506 TG3_NVRAM_SIZE_128KB :
14507 TG3_NVRAM_SIZE_512KB);
14512 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14516 nvcfg1 = tr32(NVRAM_CFG1);
14518 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14519 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14520 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14521 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14522 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14523 tp->nvram_jedecnum = JEDEC_ATMEL;
14524 tg3_flag_set(tp, NVRAM_BUFFERED);
14525 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14527 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14528 tw32(NVRAM_CFG1, nvcfg1);
14530 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14531 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14532 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14533 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14534 tp->nvram_jedecnum = JEDEC_ATMEL;
14535 tg3_flag_set(tp, NVRAM_BUFFERED);
14536 tg3_flag_set(tp, FLASH);
14537 tp->nvram_pagesize = 264;
14539 case FLASH_5752VENDOR_ST_M45PE10:
14540 case FLASH_5752VENDOR_ST_M45PE20:
14541 case FLASH_5752VENDOR_ST_M45PE40:
14542 tp->nvram_jedecnum = JEDEC_ST;
14543 tg3_flag_set(tp, NVRAM_BUFFERED);
14544 tg3_flag_set(tp, FLASH);
14545 tp->nvram_pagesize = 256;
14550 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14552 u32 nvcfg1, protect = 0;
14554 nvcfg1 = tr32(NVRAM_CFG1);
14556 /* NVRAM protection for TPM */
14557 if (nvcfg1 & (1 << 27)) {
14558 tg3_flag_set(tp, PROTECTED_NVRAM);
14562 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14564 case FLASH_5761VENDOR_ATMEL_ADB021D:
14565 case FLASH_5761VENDOR_ATMEL_ADB041D:
14566 case FLASH_5761VENDOR_ATMEL_ADB081D:
14567 case FLASH_5761VENDOR_ATMEL_ADB161D:
14568 case FLASH_5761VENDOR_ATMEL_MDB021D:
14569 case FLASH_5761VENDOR_ATMEL_MDB041D:
14570 case FLASH_5761VENDOR_ATMEL_MDB081D:
14571 case FLASH_5761VENDOR_ATMEL_MDB161D:
14572 tp->nvram_jedecnum = JEDEC_ATMEL;
14573 tg3_flag_set(tp, NVRAM_BUFFERED);
14574 tg3_flag_set(tp, FLASH);
14575 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14576 tp->nvram_pagesize = 256;
14578 case FLASH_5761VENDOR_ST_A_M45PE20:
14579 case FLASH_5761VENDOR_ST_A_M45PE40:
14580 case FLASH_5761VENDOR_ST_A_M45PE80:
14581 case FLASH_5761VENDOR_ST_A_M45PE16:
14582 case FLASH_5761VENDOR_ST_M_M45PE20:
14583 case FLASH_5761VENDOR_ST_M_M45PE40:
14584 case FLASH_5761VENDOR_ST_M_M45PE80:
14585 case FLASH_5761VENDOR_ST_M_M45PE16:
14586 tp->nvram_jedecnum = JEDEC_ST;
14587 tg3_flag_set(tp, NVRAM_BUFFERED);
14588 tg3_flag_set(tp, FLASH);
14589 tp->nvram_pagesize = 256;
14594 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14597 case FLASH_5761VENDOR_ATMEL_ADB161D:
14598 case FLASH_5761VENDOR_ATMEL_MDB161D:
14599 case FLASH_5761VENDOR_ST_A_M45PE16:
14600 case FLASH_5761VENDOR_ST_M_M45PE16:
14601 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14603 case FLASH_5761VENDOR_ATMEL_ADB081D:
14604 case FLASH_5761VENDOR_ATMEL_MDB081D:
14605 case FLASH_5761VENDOR_ST_A_M45PE80:
14606 case FLASH_5761VENDOR_ST_M_M45PE80:
14607 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14609 case FLASH_5761VENDOR_ATMEL_ADB041D:
14610 case FLASH_5761VENDOR_ATMEL_MDB041D:
14611 case FLASH_5761VENDOR_ST_A_M45PE40:
14612 case FLASH_5761VENDOR_ST_M_M45PE40:
14613 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14615 case FLASH_5761VENDOR_ATMEL_ADB021D:
14616 case FLASH_5761VENDOR_ATMEL_MDB021D:
14617 case FLASH_5761VENDOR_ST_A_M45PE20:
14618 case FLASH_5761VENDOR_ST_M_M45PE20:
14619 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14625 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14627 tp->nvram_jedecnum = JEDEC_ATMEL;
14628 tg3_flag_set(tp, NVRAM_BUFFERED);
14629 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14632 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14636 nvcfg1 = tr32(NVRAM_CFG1);
14638 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14639 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14640 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14641 tp->nvram_jedecnum = JEDEC_ATMEL;
14642 tg3_flag_set(tp, NVRAM_BUFFERED);
14643 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14645 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14646 tw32(NVRAM_CFG1, nvcfg1);
14648 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14649 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14650 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14651 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14652 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14653 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14654 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14655 tp->nvram_jedecnum = JEDEC_ATMEL;
14656 tg3_flag_set(tp, NVRAM_BUFFERED);
14657 tg3_flag_set(tp, FLASH);
14659 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14660 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14661 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14662 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14663 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14665 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14666 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14667 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14669 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14670 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14671 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14675 case FLASH_5752VENDOR_ST_M45PE10:
14676 case FLASH_5752VENDOR_ST_M45PE20:
14677 case FLASH_5752VENDOR_ST_M45PE40:
14678 tp->nvram_jedecnum = JEDEC_ST;
14679 tg3_flag_set(tp, NVRAM_BUFFERED);
14680 tg3_flag_set(tp, FLASH);
14682 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14683 case FLASH_5752VENDOR_ST_M45PE10:
14684 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14686 case FLASH_5752VENDOR_ST_M45PE20:
14687 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14689 case FLASH_5752VENDOR_ST_M45PE40:
14690 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14695 tg3_flag_set(tp, NO_NVRAM);
14699 tg3_nvram_get_pagesize(tp, nvcfg1);
14700 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14701 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
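/* Editorial note: 264- and 528-byte pages are the native page sizes of
 * Atmel AT45DB-style DataFlash, which is addressed page-by-page; all the
 * other supported parts use plain linear addressing, hence
 * NO_NVRAM_ADDR_TRANS. The same test recurs in the 5717 and 5720 paths
 * below.
 */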
14705 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14709 nvcfg1 = tr32(NVRAM_CFG1);
14711 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14712 case FLASH_5717VENDOR_ATMEL_EEPROM:
14713 case FLASH_5717VENDOR_MICRO_EEPROM:
14714 tp->nvram_jedecnum = JEDEC_ATMEL;
14715 tg3_flag_set(tp, NVRAM_BUFFERED);
14716 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14718 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14719 tw32(NVRAM_CFG1, nvcfg1);
14721 case FLASH_5717VENDOR_ATMEL_MDB011D:
14722 case FLASH_5717VENDOR_ATMEL_ADB011B:
14723 case FLASH_5717VENDOR_ATMEL_ADB011D:
14724 case FLASH_5717VENDOR_ATMEL_MDB021D:
14725 case FLASH_5717VENDOR_ATMEL_ADB021B:
14726 case FLASH_5717VENDOR_ATMEL_ADB021D:
14727 case FLASH_5717VENDOR_ATMEL_45USPT:
14728 tp->nvram_jedecnum = JEDEC_ATMEL;
14729 tg3_flag_set(tp, NVRAM_BUFFERED);
14730 tg3_flag_set(tp, FLASH);
14732 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14733 case FLASH_5717VENDOR_ATMEL_MDB021D:
14734 /* Detect size with tg3_nvram_get_size() */
14736 case FLASH_5717VENDOR_ATMEL_ADB021B:
14737 case FLASH_5717VENDOR_ATMEL_ADB021D:
14738 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14741 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14745 case FLASH_5717VENDOR_ST_M_M25PE10:
14746 case FLASH_5717VENDOR_ST_A_M25PE10:
14747 case FLASH_5717VENDOR_ST_M_M45PE10:
14748 case FLASH_5717VENDOR_ST_A_M45PE10:
14749 case FLASH_5717VENDOR_ST_M_M25PE20:
14750 case FLASH_5717VENDOR_ST_A_M25PE20:
14751 case FLASH_5717VENDOR_ST_M_M45PE20:
14752 case FLASH_5717VENDOR_ST_A_M45PE20:
14753 case FLASH_5717VENDOR_ST_25USPT:
14754 case FLASH_5717VENDOR_ST_45USPT:
14755 tp->nvram_jedecnum = JEDEC_ST;
14756 tg3_flag_set(tp, NVRAM_BUFFERED);
14757 tg3_flag_set(tp, FLASH);
14759 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14760 case FLASH_5717VENDOR_ST_M_M25PE20:
14761 case FLASH_5717VENDOR_ST_M_M45PE20:
14762 /* Detect size with tg3_nvram_get_size() */
14764 case FLASH_5717VENDOR_ST_A_M25PE20:
14765 case FLASH_5717VENDOR_ST_A_M45PE20:
14766 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14769 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14774 tg3_flag_set(tp, NO_NVRAM);
14778 tg3_nvram_get_pagesize(tp, nvcfg1);
14779 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14780 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14783 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14785 u32 nvcfg1, nvmpinstrp, nv_status;
14787 nvcfg1 = tr32(NVRAM_CFG1);
14788 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14790 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14791 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14792 tg3_flag_set(tp, NO_NVRAM);
14796 switch (nvmpinstrp) {
14797 case FLASH_5762_MX25L_100:
14798 case FLASH_5762_MX25L_200:
14799 case FLASH_5762_MX25L_400:
14800 case FLASH_5762_MX25L_800:
14801 case FLASH_5762_MX25L_160_320:
14802 tp->nvram_pagesize = 4096;
14803 tp->nvram_jedecnum = JEDEC_MACRONIX;
14804 tg3_flag_set(tp, NVRAM_BUFFERED);
14805 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14806 tg3_flag_set(tp, FLASH);
14807 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14808 tp->nvram_size =
14809 (1 << (nv_status >> AUTOSENSE_DEVID &
14810 AUTOSENSE_DEVID_MASK)
14811 << AUTOSENSE_SIZE_IN_MB);
14814 case FLASH_5762_EEPROM_HD:
14815 nvmpinstrp = FLASH_5720_EEPROM_HD;
14817 case FLASH_5762_EEPROM_LD:
14818 nvmpinstrp = FLASH_5720_EEPROM_LD;
14820 case FLASH_5720VENDOR_M_ST_M45PE20:
14821 /* This pinstrap supports multiple sizes, so force it
14822 * to read the actual size from location 0xf0.
14824 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14829 switch (nvmpinstrp) {
14830 case FLASH_5720_EEPROM_HD:
14831 case FLASH_5720_EEPROM_LD:
14832 tp->nvram_jedecnum = JEDEC_ATMEL;
14833 tg3_flag_set(tp, NVRAM_BUFFERED);
14835 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14836 tw32(NVRAM_CFG1, nvcfg1);
14837 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14838 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14840 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14842 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14843 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14844 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14845 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14846 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14847 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14848 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14849 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14850 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14851 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14852 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14853 case FLASH_5720VENDOR_ATMEL_45USPT:
14854 tp->nvram_jedecnum = JEDEC_ATMEL;
14855 tg3_flag_set(tp, NVRAM_BUFFERED);
14856 tg3_flag_set(tp, FLASH);
14858 switch (nvmpinstrp) {
14859 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14860 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14861 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14862 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14864 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14865 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14866 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14867 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14869 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14870 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14871 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14874 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14875 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14879 case FLASH_5720VENDOR_M_ST_M25PE10:
14880 case FLASH_5720VENDOR_M_ST_M45PE10:
14881 case FLASH_5720VENDOR_A_ST_M25PE10:
14882 case FLASH_5720VENDOR_A_ST_M45PE10:
14883 case FLASH_5720VENDOR_M_ST_M25PE20:
14884 case FLASH_5720VENDOR_M_ST_M45PE20:
14885 case FLASH_5720VENDOR_A_ST_M25PE20:
14886 case FLASH_5720VENDOR_A_ST_M45PE20:
14887 case FLASH_5720VENDOR_M_ST_M25PE40:
14888 case FLASH_5720VENDOR_M_ST_M45PE40:
14889 case FLASH_5720VENDOR_A_ST_M25PE40:
14890 case FLASH_5720VENDOR_A_ST_M45PE40:
14891 case FLASH_5720VENDOR_M_ST_M25PE80:
14892 case FLASH_5720VENDOR_M_ST_M45PE80:
14893 case FLASH_5720VENDOR_A_ST_M25PE80:
14894 case FLASH_5720VENDOR_A_ST_M45PE80:
14895 case FLASH_5720VENDOR_ST_25USPT:
14896 case FLASH_5720VENDOR_ST_45USPT:
14897 tp->nvram_jedecnum = JEDEC_ST;
14898 tg3_flag_set(tp, NVRAM_BUFFERED);
14899 tg3_flag_set(tp, FLASH);
14901 switch (nvmpinstrp) {
14902 case FLASH_5720VENDOR_M_ST_M25PE20:
14903 case FLASH_5720VENDOR_M_ST_M45PE20:
14904 case FLASH_5720VENDOR_A_ST_M25PE20:
14905 case FLASH_5720VENDOR_A_ST_M45PE20:
14906 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14908 case FLASH_5720VENDOR_M_ST_M25PE40:
14909 case FLASH_5720VENDOR_M_ST_M45PE40:
14910 case FLASH_5720VENDOR_A_ST_M25PE40:
14911 case FLASH_5720VENDOR_A_ST_M45PE40:
14912 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14914 case FLASH_5720VENDOR_M_ST_M25PE80:
14915 case FLASH_5720VENDOR_M_ST_M45PE80:
14916 case FLASH_5720VENDOR_A_ST_M25PE80:
14917 case FLASH_5720VENDOR_A_ST_M45PE80:
14918 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14921 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14922 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14927 tg3_flag_set(tp, NO_NVRAM);
14931 tg3_nvram_get_pagesize(tp, nvcfg1);
14932 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14933 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14935 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14938 if (tg3_nvram_read(tp, 0, &val))
14941 if (val != TG3_EEPROM_MAGIC &&
14942 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14943 tg3_flag_set(tp, NO_NVRAM);
14947 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14948 static void tg3_nvram_init(struct tg3 *tp)
14950 if (tg3_flag(tp, IS_SSB_CORE)) {
14951 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14952 tg3_flag_clear(tp, NVRAM);
14953 tg3_flag_clear(tp, NVRAM_BUFFERED);
14954 tg3_flag_set(tp, NO_NVRAM);
14958 tw32_f(GRC_EEPROM_ADDR,
14959 (EEPROM_ADDR_FSM_RESET |
14960 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14961 EEPROM_ADDR_CLKPERD_SHIFT)));
14965 /* Enable SEEPROM accesses. */
14966 tw32_f(GRC_LOCAL_CTRL,
14967 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14970 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14971 tg3_asic_rev(tp) != ASIC_REV_5701) {
14972 tg3_flag_set(tp, NVRAM);
14974 if (tg3_nvram_lock(tp)) {
14975 netdev_warn(tp->dev,
14976 "Cannot get nvram lock, %s failed\n",
14980 tg3_enable_nvram_access(tp);
14982 tp->nvram_size = 0;
14984 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14985 tg3_get_5752_nvram_info(tp);
14986 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14987 tg3_get_5755_nvram_info(tp);
14988 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14989 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14990 tg3_asic_rev(tp) == ASIC_REV_5785)
14991 tg3_get_5787_nvram_info(tp);
14992 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14993 tg3_get_5761_nvram_info(tp);
14994 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14995 tg3_get_5906_nvram_info(tp);
14996 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14997 tg3_flag(tp, 57765_CLASS))
14998 tg3_get_57780_nvram_info(tp);
14999 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15000 tg3_asic_rev(tp) == ASIC_REV_5719)
15001 tg3_get_5717_nvram_info(tp);
15002 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15003 tg3_asic_rev(tp) == ASIC_REV_5762)
15004 tg3_get_5720_nvram_info(tp);
15006 tg3_get_nvram_info(tp);
15008 if (tp->nvram_size == 0)
15009 tg3_get_nvram_size(tp);
15011 tg3_disable_nvram_access(tp);
15012 tg3_nvram_unlock(tp);
15015 tg3_flag_clear(tp, NVRAM);
15016 tg3_flag_clear(tp, NVRAM_BUFFERED);
15018 tg3_get_eeprom_size(tp);
15022 struct subsys_tbl_ent {
15023 u16 subsys_vendor, subsys_devid;
15027 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15028 /* Broadcom boards. */
15029 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15030 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15031 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15032 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15033 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15034 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15035 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15036 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15037 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15038 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15039 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15040 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15041 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15042 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15043 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15044 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15045 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15046 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15047 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15048 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15049 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15050 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15053 { TG3PCI_SUBVENDOR_ID_3COM,
15054 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15055 { TG3PCI_SUBVENDOR_ID_3COM,
15056 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15057 { TG3PCI_SUBVENDOR_ID_3COM,
15058 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15059 { TG3PCI_SUBVENDOR_ID_3COM,
15060 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15061 { TG3PCI_SUBVENDOR_ID_3COM,
15062 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15065 { TG3PCI_SUBVENDOR_ID_DELL,
15066 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15067 { TG3PCI_SUBVENDOR_ID_DELL,
15068 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15069 { TG3PCI_SUBVENDOR_ID_DELL,
15070 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15071 { TG3PCI_SUBVENDOR_ID_DELL,
15072 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15074 /* Compaq boards. */
15075 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15076 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15077 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15078 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15079 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15080 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15081 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15082 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15083 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15084 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15087 { TG3PCI_SUBVENDOR_ID_IBM,
15088 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15091 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15095 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15096 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15097 tp->pdev->subsystem_vendor) &&
15098 (subsys_id_to_phy_id[i].subsys_devid ==
15099 tp->pdev->subsystem_device))
15100 return &subsys_id_to_phy_id[i];
15105 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15109 tp->phy_id = TG3_PHY_ID_INVALID;
15110 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15112 /* Assume an onboard device and WOL capable by default. */
15113 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15114 tg3_flag_set(tp, WOL_CAP);
15116 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15117 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15118 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15119 tg3_flag_set(tp, IS_NIC);
15121 val = tr32(VCPU_CFGSHDW);
15122 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15123 tg3_flag_set(tp, ASPM_WORKAROUND);
15124 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15125 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15126 tg3_flag_set(tp, WOL_ENABLE);
15127 device_set_wakeup_enable(&tp->pdev->dev, true);
15132 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15133 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15134 u32 nic_cfg, led_cfg;
15135 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15136 u32 nic_phy_id, ver, eeprom_phy_id;
15137 int eeprom_phy_serdes = 0;
15139 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15140 tp->nic_sram_data_cfg = nic_cfg;
15142 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15143 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15144 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15145 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15146 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15147 (ver > 0) && (ver < 0x100))
15148 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15150 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15151 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15153 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15154 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15155 tg3_asic_rev(tp) == ASIC_REV_5720)
15156 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15158 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15159 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15160 eeprom_phy_serdes = 1;
15162 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15163 if (nic_phy_id != 0) {
15164 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15165 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15167 eeprom_phy_id = (id1 >> 16) << 10;
15168 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15169 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15173 tp->phy_id = eeprom_phy_id;
15174 if (eeprom_phy_serdes) {
15175 if (!tg3_flag(tp, 5705_PLUS))
15176 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15178 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15181 if (tg3_flag(tp, 5750_PLUS))
15182 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15183 SHASTA_EXT_LED_MODE_MASK);
15185 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15189 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15190 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15193 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15194 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15197 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15198 tp->led_ctrl = LED_CTRL_MODE_MAC;
15200 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15201 * read, as happens with some older 5700/5701 bootcode.
15203 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15204 tg3_asic_rev(tp) == ASIC_REV_5701)
15205 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15209 case SHASTA_EXT_LED_SHARED:
15210 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15211 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15212 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15213 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15214 LED_CTRL_MODE_PHY_2);
15216 if (tg3_flag(tp, 5717_PLUS) ||
15217 tg3_asic_rev(tp) == ASIC_REV_5762)
15218 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15219 LED_CTRL_BLINK_RATE_MASK;
15223 case SHASTA_EXT_LED_MAC:
15224 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15227 case SHASTA_EXT_LED_COMBO:
15228 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15229 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15230 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15231 LED_CTRL_MODE_PHY_2);
15236 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15237 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15238 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15239 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15241 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15242 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15244 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15245 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15246 if ((tp->pdev->subsystem_vendor ==
15247 PCI_VENDOR_ID_ARIMA) &&
15248 (tp->pdev->subsystem_device == 0x205a ||
15249 tp->pdev->subsystem_device == 0x2063))
15250 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15252 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15253 tg3_flag_set(tp, IS_NIC);
15256 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15257 tg3_flag_set(tp, ENABLE_ASF);
15258 if (tg3_flag(tp, 5750_PLUS))
15259 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15262 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15263 tg3_flag(tp, 5750_PLUS))
15264 tg3_flag_set(tp, ENABLE_APE);
15266 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15267 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15268 tg3_flag_clear(tp, WOL_CAP);
15270 if (tg3_flag(tp, WOL_CAP) &&
15271 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15272 tg3_flag_set(tp, WOL_ENABLE);
15273 device_set_wakeup_enable(&tp->pdev->dev, true);
15276 if (cfg2 & (1 << 17))
15277 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15279 /* serdes signal pre-emphasis in register 0x590 set by
15280 * bootcode if bit 18 is set */
15281 if (cfg2 & (1 << 18))
15282 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15284 if ((tg3_flag(tp, 57765_PLUS) ||
15285 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15286 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15287 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15288 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15290 if (tg3_flag(tp, PCI_EXPRESS)) {
15293 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15294 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15295 !tg3_flag(tp, 57765_PLUS) &&
15296 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15297 tg3_flag_set(tp, ASPM_WORKAROUND);
15298 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15299 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15300 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15301 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15304 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15305 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15306 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15307 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15308 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15309 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15311 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15312 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15315 if (tg3_flag(tp, WOL_CAP))
15316 device_set_wakeup_enable(&tp->pdev->dev,
15317 tg3_flag(tp, WOL_ENABLE));
15319 device_set_wakeup_capable(&tp->pdev->dev, false);
15322 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15325 u32 val2, off = offset * 8;
15327 err = tg3_nvram_lock(tp);
15331 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15332 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15333 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15334 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15337 for (i = 0; i < 100; i++) {
15338 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15339 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15340 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15346 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15348 tg3_nvram_unlock(tp);
15349 if (val2 & APE_OTP_STATUS_CMD_DONE)
15355 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15360 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15361 tw32(OTP_CTRL, cmd);
15363 /* Wait for up to 1 ms for command to execute. */
15364 for (i = 0; i < 100; i++) {
15365 val = tr32(OTP_STATUS);
15366 if (val & OTP_STATUS_CMD_DONE)
15367 break;
15368 udelay(10);
15369 }
15371 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15374 /* Read the gphy configuration from the OTP region of the chip. The gphy
15375 * configuration is a 32-bit value that straddles the alignment boundary.
15376 * We do two 32-bit reads and then shift and merge the results.
15378 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15380 u32 bhalf_otp, thalf_otp;
15382 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15384 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15387 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15389 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15392 thalf_otp = tr32(OTP_READ_DATA);
15394 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15396 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15399 bhalf_otp = tr32(OTP_READ_DATA);
15401 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
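/* Editorial worked example for the merge above, with illustrative values:
 * thalf_otp == 0xaaaa5555 and bhalf_otp == 0x3333cccc yield
 * (0x5555 << 16) | 0x3333 == 0x55553333, the low half of the first word
 * joined to the high half of the second.
 */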
15404 static void tg3_phy_init_link_config(struct tg3 *tp)
15406 u32 adv = ADVERTISED_Autoneg;
15408 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15409 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15410 adv |= ADVERTISED_1000baseT_Half;
15411 adv |= ADVERTISED_1000baseT_Full;
15414 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15415 adv |= ADVERTISED_100baseT_Half |
15416 ADVERTISED_100baseT_Full |
15417 ADVERTISED_10baseT_Half |
15418 ADVERTISED_10baseT_Full |
15421 adv |= ADVERTISED_FIBRE;
15423 tp->link_config.advertising = adv;
15424 tp->link_config.speed = SPEED_UNKNOWN;
15425 tp->link_config.duplex = DUPLEX_UNKNOWN;
15426 tp->link_config.autoneg = AUTONEG_ENABLE;
15427 tp->link_config.active_speed = SPEED_UNKNOWN;
15428 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15433 static int tg3_phy_probe(struct tg3 *tp)
15435 u32 hw_phy_id_1, hw_phy_id_2;
15436 u32 hw_phy_id, hw_phy_id_masked;
15439 /* flow control autonegotiation is default behavior */
15440 tg3_flag_set(tp, PAUSE_AUTONEG);
15441 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15443 if (tg3_flag(tp, ENABLE_APE)) {
15444 switch (tp->pci_fn) {
15446 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15449 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15452 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15455 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15460 if (!tg3_flag(tp, ENABLE_ASF) &&
15461 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15462 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15463 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15464 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15466 if (tg3_flag(tp, USE_PHYLIB))
15467 return tg3_phy_init(tp);
15469 /* Reading the PHY ID register can conflict with ASF
15470 * firmware access to the PHY hardware.
15473 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15474 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15476 /* Now read the physical PHY_ID from the chip and verify
15477 * that it is sane. If it doesn't look good, we fall back
15478 * first to the hard-coded PHY_ID table, and failing
15479 * that, to the value found in the eeprom area.
15481 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15482 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
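	/* MII_PHYSID1 holds OUI bits 3..18; MII_PHYSID2 holds OUI bits
	 * 19..24 plus the 6-bit model and 4-bit revision. The shifts below
	 * repack them into the driver's own layout (model and revision in
	 * bits 0..9, OUI above) so the masked result can be compared
	 * against the TG3_PHY_ID_* constants.
	 */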
15484 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15485 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15486 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15488 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15491 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15492 tp->phy_id = hw_phy_id;
15493 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15494 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15496 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15498 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15499 /* Do nothing, phy ID already set up in
15500 * tg3_get_eeprom_hw_cfg().
15503 struct subsys_tbl_ent *p;
15505 /* No eeprom signature? Try the hardcoded
15506 * subsys device table.
15508 p = tg3_lookup_by_subsys(tp);
15510 tp->phy_id = p->phy_id;
15511 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15512 /* For now we saw the IDs 0xbc050cd0,
15513 * 0xbc050f80 and 0xbc050c30 on devices
15514 * connected to a BCM4785, and there are
15515 * probably more. Just assume that the phy is
15516 * supported when it is connected to an SSB core
15517 * for now.
15518 */
15519 return -ENODEV;
15520 }
15522 if (!tp->phy_id ||
15523 tp->phy_id == TG3_PHY_ID_BCM8002)
15524 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15528 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15529 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15530 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15531 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15532 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15533 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15534 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15535 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15536 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15537 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15539 tp->eee.supported = SUPPORTED_100baseT_Full |
15540 SUPPORTED_1000baseT_Full;
15541 tp->eee.advertised = ADVERTISED_100baseT_Full |
15542 ADVERTISED_1000baseT_Full;
15543 tp->eee.eee_enabled = 1;
15544 tp->eee.tx_lpi_enabled = 1;
15545 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15548 tg3_phy_init_link_config(tp);
15550 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15551 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15552 !tg3_flag(tp, ENABLE_APE) &&
15553 !tg3_flag(tp, ENABLE_ASF)) {
15556 tg3_readphy(tp, MII_BMSR, &bmsr);
15557 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15558 (bmsr & BMSR_LSTATUS))
15559 goto skip_phy_reset;
15561 err = tg3_phy_reset(tp);
15565 tg3_phy_set_wirespeed(tp);
15567 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15568 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15569 tp->link_config.flowctrl);
15571 tg3_writephy(tp, MII_BMCR,
15572 BMCR_ANENABLE | BMCR_ANRESTART);
15577 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15578 err = tg3_init_5401phy_dsp(tp);
15582 err = tg3_init_5401phy_dsp(tp);
15588 static void tg3_read_vpd(struct tg3 *tp)
15591 unsigned int len, vpdlen;
15594 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15598 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15599 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15603 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15606 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15607 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15611 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15612 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15615 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15616 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15618 goto out_not_found;
15620 if (len > TG3_BPN_SIZE)
15621 goto out_not_found;
15623 memcpy(tp->board_part_number, &vpd_data[i], len);
15627 if (tp->board_part_number[0])
15631 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15632 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15633 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15634 strcpy(tp->board_part_number, "BCM5717");
15635 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15636 strcpy(tp->board_part_number, "BCM5718");
15639 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15640 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15641 strcpy(tp->board_part_number, "BCM57780");
15642 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15643 strcpy(tp->board_part_number, "BCM57760");
15644 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15645 strcpy(tp->board_part_number, "BCM57790");
15646 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15647 strcpy(tp->board_part_number, "BCM57788");
15650 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15651 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15652 strcpy(tp->board_part_number, "BCM57761");
15653 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15654 strcpy(tp->board_part_number, "BCM57765");
15655 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15656 strcpy(tp->board_part_number, "BCM57781");
15657 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15658 strcpy(tp->board_part_number, "BCM57785");
15659 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15660 strcpy(tp->board_part_number, "BCM57791");
15661 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15662 strcpy(tp->board_part_number, "BCM57795");
15665 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15666 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15667 strcpy(tp->board_part_number, "BCM57762");
15668 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15669 strcpy(tp->board_part_number, "BCM57766");
15670 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15671 strcpy(tp->board_part_number, "BCM57782");
15672 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15673 strcpy(tp->board_part_number, "BCM57786");
15676 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15677 strcpy(tp->board_part_number, "BCM95906");
15680 strcpy(tp->board_part_number, "none");
15684 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15688 if (tg3_nvram_read(tp, offset, &val) ||
15689 (val & 0xfc000000) != 0x0c000000 ||
15690 tg3_nvram_read(tp, offset + 4, &val) ||
15697 static void tg3_read_bc_ver(struct tg3 *tp)
15699 u32 val, offset, start, ver_offset;
15701 bool newver = false;
15703 if (tg3_nvram_read(tp, 0xc, &offset) ||
15704 tg3_nvram_read(tp, 0x4, &start))
15707 offset = tg3_nvram_logical_addr(tp, offset);
15709 if (tg3_nvram_read(tp, offset, &val))
15712 if ((val & 0xfc000000) == 0x0c000000) {
15713 if (tg3_nvram_read(tp, offset + 4, &val))
15720 dst_off = strlen(tp->fw_ver);
15723 if (TG3_VER_SIZE - dst_off < 16 ||
15724 tg3_nvram_read(tp, offset + 8, &ver_offset))
15727 offset = offset + ver_offset - start;
15728 for (i = 0; i < 16; i += 4) {
15730 if (tg3_nvram_read_be32(tp, offset + i, &v))
15733 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15738 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15741 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15742 TG3_NVM_BCVER_MAJSFT;
15743 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15744 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15745 "v%d.%02d", major, minor);
15749 static void tg3_read_hwsb_ver(struct tg3 *tp)
15751 u32 val, major, minor;
15753 /* Use native endian representation */
15754 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15757 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15758 TG3_NVM_HWSB_CFG1_MAJSFT;
15759 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15760 TG3_NVM_HWSB_CFG1_MINSFT;
15762 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15765 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15767 u32 offset, major, minor, build;
15769 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15771 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15774 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15775 case TG3_EEPROM_SB_REVISION_0:
15776 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15778 case TG3_EEPROM_SB_REVISION_2:
15779 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15781 case TG3_EEPROM_SB_REVISION_3:
15782 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15784 case TG3_EEPROM_SB_REVISION_4:
15785 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15787 case TG3_EEPROM_SB_REVISION_5:
15788 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15790 case TG3_EEPROM_SB_REVISION_6:
15791 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15797 if (tg3_nvram_read(tp, offset, &val))
15800 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15801 TG3_EEPROM_SB_EDH_BLD_SHFT;
15802 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15803 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15804 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15806 if (minor > 99 || build > 26)
15809 offset = strlen(tp->fw_ver);
15810 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15811 " v%d.%02d", major, minor);
15814 offset = strlen(tp->fw_ver);
15815 if (offset < TG3_VER_SIZE - 1)
15816 tp->fw_ver[offset] = 'a' + build - 1;
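/* Editorial note: thanks to the (build > 26) guard above, builds 1..26
 * map onto the suffix letters 'a'..'z' (build 0 adds no suffix), so e.g.
 * build 2 on an illustrative v3.05 image renders as "sb v3.05b".
 */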
15820 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15822 u32 val, offset, start;
15825 for (offset = TG3_NVM_DIR_START;
15826 offset < TG3_NVM_DIR_END;
15827 offset += TG3_NVM_DIRENT_SIZE) {
15828 if (tg3_nvram_read(tp, offset, &val))
15831 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15835 if (offset == TG3_NVM_DIR_END)
15838 if (!tg3_flag(tp, 5705_PLUS))
15839 start = 0x08000000;
15840 else if (tg3_nvram_read(tp, offset - 4, &start))
15843 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15844 !tg3_fw_img_is_valid(tp, offset) ||
15845 tg3_nvram_read(tp, offset + 8, &val))
15848 offset += val - start;
15850 vlen = strlen(tp->fw_ver);
15852 tp->fw_ver[vlen++] = ',';
15853 tp->fw_ver[vlen++] = ' ';
15855 for (i = 0; i < 4; i++) {
15857 if (tg3_nvram_read_be32(tp, offset, &v))
15860 offset += sizeof(v);
15862 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15863 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15867 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15872 static void tg3_probe_ncsi(struct tg3 *tp)
15876 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15877 if (apedata != APE_SEG_SIG_MAGIC)
15880 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15881 if (!(apedata & APE_FW_STATUS_READY))
15884 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15885 tg3_flag_set(tp, APE_HAS_NCSI);
15888 static void tg3_read_dash_ver(struct tg3 *tp)
15894 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15896 if (tg3_flag(tp, APE_HAS_NCSI))
15898 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15903 vlen = strlen(tp->fw_ver);
15905 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15907 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15908 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15909 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15910 (apedata & APE_FW_VERSION_BLDMSK));
15913 static void tg3_read_otp_ver(struct tg3 *tp)
15917 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15920 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15921 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15922 TG3_OTP_MAGIC0_VALID(val)) {
15923 u64 val64 = (u64) val << 32 | val2;
15927 for (i = 0; i < 7; i++) {
15928 if ((val64 & 0xff) == 0)
15929 break;
15930 ver = val64 & 0xff;
15931 val64 >>= 8;
15932 }
15933 vlen = strlen(tp->fw_ver);
15934 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15938 static void tg3_read_fw_ver(struct tg3 *tp)
15941 bool vpd_vers = false;
15943 if (tp->fw_ver[0] != 0)
15946 if (tg3_flag(tp, NO_NVRAM)) {
15947 strcat(tp->fw_ver, "sb");
15948 tg3_read_otp_ver(tp);
15952 if (tg3_nvram_read(tp, 0, &val))
15955 if (val == TG3_EEPROM_MAGIC)
15956 tg3_read_bc_ver(tp);
15957 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15958 tg3_read_sb_ver(tp, val);
15959 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15960 tg3_read_hwsb_ver(tp);
15962 if (tg3_flag(tp, ENABLE_ASF)) {
15963 if (tg3_flag(tp, ENABLE_APE)) {
15964 tg3_probe_ncsi(tp);
15966 tg3_read_dash_ver(tp);
15967 } else if (!vpd_vers) {
15968 tg3_read_mgmtfw_ver(tp);
15972 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15975 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15977 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15978 return TG3_RX_RET_MAX_SIZE_5717;
15979 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15980 return TG3_RX_RET_MAX_SIZE_5700;
15982 return TG3_RX_RET_MAX_SIZE_5705;
15985 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15986 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15987 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15988 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15992 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15994 struct pci_dev *peer;
15995 unsigned int func, devnr = tp->pdev->devfn & ~7;
15997 for (func = 0; func < 8; func++) {
15998 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15999 if (peer && peer != tp->pdev)
16003 /* 5704 can be configured in single-port mode, set peer to
16004 * tp->pdev in that case.
16012 * We don't need to keep the refcount elevated; there's no way
16013 * to remove one half of this device without removing the other
16020 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16022 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16023 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16026 /* All devices that use the alternate
16027 * ASIC REV location have a CPMU.
16029 tg3_flag_set(tp, CPMU_PRESENT);
16031 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16032 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16034 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16042 reg = TG3PCI_GEN2_PRODID_ASICREV;
16043 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16044 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16045 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16046 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16047 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16048 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16049 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16050 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16051 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16053 reg = TG3PCI_GEN15_PRODID_ASICREV;
16055 reg = TG3PCI_PRODID_ASICREV;
16057 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16060 /* Wrong chip ID in 5752 A0. This code can be removed later
16061 * as A0 is not in production.
16063 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16064 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16066 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16067 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16069 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16070 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16071 tg3_asic_rev(tp) == ASIC_REV_5720)
16072 tg3_flag_set(tp, 5717_PLUS);
16074 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16075 tg3_asic_rev(tp) == ASIC_REV_57766)
16076 tg3_flag_set(tp, 57765_CLASS);
16078 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16079 tg3_asic_rev(tp) == ASIC_REV_5762)
16080 tg3_flag_set(tp, 57765_PLUS);
16082 /* Intentionally exclude ASIC_REV_5906 */
16083 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16084 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16085 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16086 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16087 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16088 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16089 tg3_flag(tp, 57765_PLUS))
16090 tg3_flag_set(tp, 5755_PLUS);
16092 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16093 tg3_asic_rev(tp) == ASIC_REV_5714)
16094 tg3_flag_set(tp, 5780_CLASS);
16096 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16097 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16098 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16099 tg3_flag(tp, 5755_PLUS) ||
16100 tg3_flag(tp, 5780_CLASS))
16101 tg3_flag_set(tp, 5750_PLUS);
16103 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16104 tg3_flag(tp, 5750_PLUS))
16105 tg3_flag_set(tp, 5705_PLUS);
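/* Editorial note: the flag assignments above form a strict superset
 * chain -- 5717_PLUS implies 57765_PLUS implies 5755_PLUS implies
 * 5750_PLUS implies 5705_PLUS -- letting later code test the broadest
 * applicable flag instead of enumerating ASIC revisions.
 */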
16108 static bool tg3_10_100_only_device(struct tg3 *tp,
16109 const struct pci_device_id *ent)
16111 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16113 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16114 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16115 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16118 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16119 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16120 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16130 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16133 u32 pci_state_reg, grc_misc_cfg;
16138 /* Force memory write invalidate off. If we leave it on,
16139 * then on 5700_BX chips we have to enable a workaround.
16140 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16141 * to match the cacheline size. The Broadcom driver has this
16142 * workaround but turns MWI off all the time, so it never uses
16143 * it. This seems to suggest that the workaround is insufficient.
16145 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16146 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16147 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16149 /* Important! -- Make sure register accesses are byteswapped
16150 * correctly. Also, for those chips that require it, make
16151 * sure that indirect register accesses are enabled before
16152 * the first operation.
16154 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16156 tp->misc_host_ctrl |= (misc_ctrl_reg &
16157 MISC_HOST_CTRL_CHIPREV);
16158 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16159 tp->misc_host_ctrl);
16161 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16163 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16164 * we need to disable memory and use config. cycles
16165 * only to access all registers. The 5702/03 chips
16166 * can mistakenly decode the special cycles from the
16167 * ICH chipsets as memory write cycles, causing corruption
16168 * of register and memory space. Only certain ICH bridges
16169 * will drive special cycles with non-zero data during the
16170 * address phase which can fall within the 5703's address
16171 * range. This is not an ICH bug as the PCI spec allows
16172 * non-zero address during special cycles. However, only
16173 * these ICH bridges are known to drive non-zero addresses
16174 * during special cycles.
16176 * Since special cycles do not cross PCI bridges, we only
16177 * enable this workaround if the 5703 is on the secondary
16178 * bus of these ICH bridges.
16180 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16181 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16182 static struct tg3_dev_id {
16186 } ich_chipsets[] = {
16187 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16189 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16191 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16193 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16197 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16198 struct pci_dev *bridge = NULL;
16200 while (pci_id->vendor != 0) {
16201 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16207 if (pci_id->rev != PCI_ANY_ID) {
16208 if (bridge->revision > pci_id->rev)
16211 if (bridge->subordinate &&
16212 (bridge->subordinate->number ==
16213 tp->pdev->bus->number)) {
16214 tg3_flag_set(tp, ICH_WORKAROUND);
16215 pci_dev_put(bridge);
16221 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16222 static struct tg3_dev_id {
16225 } bridge_chipsets[] = {
16226 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16227 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16230 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16231 struct pci_dev *bridge = NULL;
16233 while (pci_id->vendor != 0) {
16234 bridge = pci_get_device(pci_id->vendor,
16241 if (bridge->subordinate &&
16242 (bridge->subordinate->number <=
16243 tp->pdev->bus->number) &&
16244 (bridge->subordinate->busn_res.end >=
16245 tp->pdev->bus->number)) {
16246 tg3_flag_set(tp, 5701_DMA_BUG);
16247 pci_dev_put(bridge);
16253 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16254 * DMA addresses > 40-bit. This bridge may have other additional
16255 * 57xx devices behind it in some 4-port NIC designs for example.
16256 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround.
16259 if (tg3_flag(tp, 5780_CLASS)) {
16260 tg3_flag_set(tp, 40BIT_DMA_BUG);
16261 tp->msi_cap = tp->pdev->msi_cap;
16263 struct pci_dev *bridge = NULL;
16266 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16267 PCI_DEVICE_ID_SERVERWORKS_EPB,
16269 if (bridge && bridge->subordinate &&
16270 (bridge->subordinate->number <=
16271 tp->pdev->bus->number) &&
16272 (bridge->subordinate->busn_res.end >=
16273 tp->pdev->bus->number)) {
16274 tg3_flag_set(tp, 40BIT_DMA_BUG);
16275 pci_dev_put(bridge);
16281 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16282 tg3_asic_rev(tp) == ASIC_REV_5714)
16283 tp->pdev_peer = tg3_find_peer(tp);
16285 /* Determine TSO capabilities */
16286 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16287 ; /* Do nothing. HW bug. */
16288 else if (tg3_flag(tp, 57765_PLUS))
16289 tg3_flag_set(tp, HW_TSO_3);
16290 else if (tg3_flag(tp, 5755_PLUS) ||
16291 tg3_asic_rev(tp) == ASIC_REV_5906)
16292 tg3_flag_set(tp, HW_TSO_2);
16293 else if (tg3_flag(tp, 5750_PLUS)) {
16294 tg3_flag_set(tp, HW_TSO_1);
16295 tg3_flag_set(tp, TSO_BUG);
16296 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16297 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16298 tg3_flag_clear(tp, TSO_BUG);
16299 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16300 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16301 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16302 tg3_flag_set(tp, FW_TSO);
16303 tg3_flag_set(tp, TSO_BUG);
16304 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16305 tp->fw_needed = FIRMWARE_TG3TSO5;
16307 tp->fw_needed = FIRMWARE_TG3TSO;
16310 /* Selectively allow TSO based on operating conditions */
16311 if (tg3_flag(tp, HW_TSO_1) ||
16312 tg3_flag(tp, HW_TSO_2) ||
16313 tg3_flag(tp, HW_TSO_3) ||
16314 tg3_flag(tp, FW_TSO)) {
16315 /* For firmware TSO, assume ASF is disabled.
16316 * We'll disable TSO later if we discover ASF
16317 * is enabled in tg3_get_eeprom_hw_cfg().
16319 tg3_flag_set(tp, TSO_CAPABLE);
16321 tg3_flag_clear(tp, TSO_CAPABLE);
16322 tg3_flag_clear(tp, TSO_BUG);
16323 tp->fw_needed = NULL;
16326 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16327 tp->fw_needed = FIRMWARE_TG3;
16329 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16330 tp->fw_needed = FIRMWARE_TG357766;
16334 if (tg3_flag(tp, 5750_PLUS)) {
16335 tg3_flag_set(tp, SUPPORT_MSI);
16336 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16337 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16338 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16339 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16340 tp->pdev_peer == tp->pdev))
16341 tg3_flag_clear(tp, SUPPORT_MSI);
16343 if (tg3_flag(tp, 5755_PLUS) ||
16344 tg3_asic_rev(tp) == ASIC_REV_5906) {
16345 tg3_flag_set(tp, 1SHOT_MSI);
16348 if (tg3_flag(tp, 57765_PLUS)) {
16349 tg3_flag_set(tp, SUPPORT_MSIX);
16350 tp->irq_max = TG3_IRQ_MAX_VECS;
16356 if (tp->irq_max > 1) {
16357 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16358 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
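		/* Added note (assumption, not original commentary): the
		 * default indirection table built here typically spreads
		 * flows round-robin across the enabled RSS queues, i.e.
		 * entry i maps to queue i % nqs.
		 */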
16360 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16361 tg3_asic_rev(tp) == ASIC_REV_5720)
16362 tp->txq_max = tp->irq_max - 1;
16365 if (tg3_flag(tp, 5755_PLUS) ||
16366 tg3_asic_rev(tp) == ASIC_REV_5906)
16367 tg3_flag_set(tp, SHORT_DMA_BUG);
16369 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16370 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16372 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16373 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16374 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16375 tg3_asic_rev(tp) == ASIC_REV_5762)
16376 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16378 if (tg3_flag(tp, 57765_PLUS) &&
16379 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16380 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16382 if (!tg3_flag(tp, 5705_PLUS) ||
16383 tg3_flag(tp, 5780_CLASS) ||
16384 tg3_flag(tp, USE_JUMBO_BDFLAG))
16385 tg3_flag_set(tp, JUMBO_CAPABLE);
16387 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16390 if (pci_is_pcie(tp->pdev)) {
16393 tg3_flag_set(tp, PCI_EXPRESS);
16395 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16396 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16397 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16398 tg3_flag_clear(tp, HW_TSO_2);
16399 tg3_flag_clear(tp, TSO_CAPABLE);
16401 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16402 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16403 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16404 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16405 tg3_flag_set(tp, CLKREQ_BUG);
16406 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16407 tg3_flag_set(tp, L1PLLPD_EN);
16409 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16410 /* BCM5785 devices are effectively PCIe devices, and should
16411 * follow PCIe codepaths, but do not have a PCIe capabilities section.
16414 tg3_flag_set(tp, PCI_EXPRESS);
16415 } else if (!tg3_flag(tp, 5705_PLUS) ||
16416 tg3_flag(tp, 5780_CLASS)) {
16417 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16418 if (!tp->pcix_cap) {
16419 dev_err(&tp->pdev->dev,
16420 "Cannot find PCI-X capability, aborting\n");
16424 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16425 tg3_flag_set(tp, PCIX_MODE);
16428 /* If we have an AMD 762 or VIA K8T800 chipset, write
16429 * reordering to the mailbox registers done by the host
16430 * controller can cause major troubles. We read back from
16431 * every mailbox register write to force the writes to be
16432 * posted to the chip in order.
16434 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16435 !tg3_flag(tp, PCI_EXPRESS))
16436 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16438 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16439 &tp->pci_cacheline_sz);
16440 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16441 &tp->pci_lat_timer);
16442 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16443 tp->pci_lat_timer < 64) {
16444 tp->pci_lat_timer = 64;
16445 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16446 tp->pci_lat_timer);
16449 /* Important! -- It is critical that the PCI-X hw workaround
16450 * situation is decided before the first MMIO register access.
16452 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16453 /* 5700 BX chips need to have their TX producer index
16454 * mailboxes written twice to workaround a bug.
16456 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16458 /* If we are in PCI-X mode, enable register write workaround.
16460 * The workaround is to use indirect register accesses
16461 * for all chip writes not to mailbox registers.
16463 if (tg3_flag(tp, PCIX_MODE)) {
16466 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16468 /* The chip can have its power management PCI config
16469 * space registers clobbered due to this bug.
16470 * So explicitly force the chip into D0 here.
16472 pci_read_config_dword(tp->pdev,
16473 tp->pdev->pm_cap + PCI_PM_CTRL,
16475 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16476 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16477 pci_write_config_dword(tp->pdev,
16478 tp->pdev->pm_cap + PCI_PM_CTRL,
16481 /* Also, force SERR#/PERR# in PCI command. */
16482 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16483 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16484 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16488 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16489 tg3_flag_set(tp, PCI_HIGH_SPEED);
16490 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16491 tg3_flag_set(tp, PCI_32BIT);
16493 /* Chip-specific fixup from Broadcom driver */
16494 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16495 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16496 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16497 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16500 /* Default fast path register access methods */
16501 tp->read32 = tg3_read32;
16502 tp->write32 = tg3_write32;
16503 tp->read32_mbox = tg3_read32;
16504 tp->write32_mbox = tg3_write32;
16505 tp->write32_tx_mbox = tg3_write32;
16506 tp->write32_rx_mbox = tg3_write32;
16508 /* Various workaround register access methods */
16509 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16510 tp->write32 = tg3_write_indirect_reg32;
16511 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16512 (tg3_flag(tp, PCI_EXPRESS) &&
16513 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16515 * Back-to-back register writes can cause problems on these
16516 * chips; the workaround is to read back all reg writes
16517 * except those to mailbox regs.
16519 * See tg3_write_indirect_reg32().
16521 tp->write32 = tg3_write_flush_reg32;
16524 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16525 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16526 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16527 tp->write32_rx_mbox = tg3_write_flush_reg32;
16530 if (tg3_flag(tp, ICH_WORKAROUND)) {
16531 tp->read32 = tg3_read_indirect_reg32;
16532 tp->write32 = tg3_write_indirect_reg32;
16533 tp->read32_mbox = tg3_read_indirect_mbox;
16534 tp->write32_mbox = tg3_write_indirect_mbox;
16535 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16536 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16541 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16542 pci_cmd &= ~PCI_COMMAND_MEMORY;
16543 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16545 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16546 tp->read32_mbox = tg3_read32_mbox_5906;
16547 tp->write32_mbox = tg3_write32_mbox_5906;
16548 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16549 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16552 if (tp->write32 == tg3_write_indirect_reg32 ||
16553 (tg3_flag(tp, PCIX_MODE) &&
16554 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16555 tg3_asic_rev(tp) == ASIC_REV_5701)))
16556 tg3_flag_set(tp, SRAM_USE_CONFIG);
16558 /* The memory arbiter has to be enabled in order for SRAM accesses
16559 * to succeed. Normally on powerup the tg3 chip firmware will make
16560 * sure it is enabled, but other entities such as system netboot
16561 * code might disable it.
16563 val = tr32(MEMARB_MODE);
16564 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16566 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16567 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16568 tg3_flag(tp, 5780_CLASS)) {
16569 if (tg3_flag(tp, PCIX_MODE)) {
16570 pci_read_config_dword(tp->pdev,
16571 tp->pcix_cap + PCI_X_STATUS,
16573 tp->pci_fn = val & 0x7;
16575 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16576 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16577 tg3_asic_rev(tp) == ASIC_REV_5720) {
16578 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16579 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16580 val = tr32(TG3_CPMU_STATUS);
16582 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16583 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16585 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16586 TG3_CPMU_STATUS_FSHFT_5719;
16589 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16590 tp->write32_tx_mbox = tg3_write_flush_reg32;
16591 tp->write32_rx_mbox = tg3_write_flush_reg32;
16594 /* Get eeprom hw config before calling tg3_set_power_state().
16595 * In particular, the TG3_FLAG_IS_NIC flag must be
16596 * determined before calling tg3_set_power_state() so that
16597 * we know whether or not to switch out of Vaux power.
16598 * When the flag is set, it means that GPIO1 is used for eeprom
16599 * write protect and also implies that it is a LOM where GPIOs
16600 * are not used to switch power.
16602 tg3_get_eeprom_hw_cfg(tp);
16604 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16605 tg3_flag_clear(tp, TSO_CAPABLE);
16606 tg3_flag_clear(tp, TSO_BUG);
16607 tp->fw_needed = NULL;
16610 if (tg3_flag(tp, ENABLE_APE)) {
16611 /* Allow reads and writes to the
16612 * APE register and memory space.
16614 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16615 PCISTATE_ALLOW_APE_SHMEM_WR |
16616 PCISTATE_ALLOW_APE_PSPACE_WR;
16617 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16620 tg3_ape_lock_init(tp);
16621 tp->ape_hb_interval =
16622 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16625 /* Set up tp->grc_local_ctrl before calling
16626 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16627 * will bring 5700's external PHY out of reset.
16628 * It is also used as eeprom write protect on LOMs.
16630 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16631 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16632 tg3_flag(tp, EEPROM_WRITE_PROT))
16633 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16634 GRC_LCLCTRL_GPIO_OUTPUT1);
16635 /* Unused GPIO3 must be driven as output on 5752 because there
16636 * are no pull-up resistors on unused GPIO pins.
16638 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16639 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16641 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16642 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16643 tg3_flag(tp, 57765_CLASS))
16644 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16646 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16648 /* Turn off the debug UART. */
16649 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16650 if (tg3_flag(tp, IS_NIC))
16651 /* Keep VMain power. */
16652 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16653 GRC_LCLCTRL_GPIO_OUTPUT0;
16656 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16657 tp->grc_local_ctrl |=
16658 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16660 /* Switch out of Vaux if it is a NIC */
16661 tg3_pwrsrc_switch_to_vmain(tp);
16663 /* Derive initial jumbo mode from MTU assigned in
16664 * ether_setup() via the alloc_etherdev() call
16666 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16667 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16669 /* Determine WakeOnLan speed to use. */
16670 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16672 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16673 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16674 tg3_flag_clear(tp, WOL_SPEED_100MB);
16676 tg3_flag_set(tp, WOL_SPEED_100MB);
16679 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16680 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16682 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16683 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16684 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16685 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16686 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16687 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16688 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16689 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16691 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16692 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16693 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16694 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16695 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16697 if (tg3_flag(tp, 5705_PLUS) &&
16698 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16699 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16700 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16701 !tg3_flag(tp, 57765_PLUS)) {
16702 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16703 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16704 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16705 tg3_asic_rev(tp) == ASIC_REV_5761) {
16706 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16707 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16708 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16709 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16710 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16712 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16715 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16716 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16717 tp->phy_otp = tg3_read_otp_phycfg(tp);
16718 if (tp->phy_otp == 0)
16719 tp->phy_otp = TG3_OTP_DEFAULT;
16722 if (tg3_flag(tp, CPMU_PRESENT))
16723 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16725 tp->mi_mode = MAC_MI_MODE_BASE;
16727 tp->coalesce_mode = 0;
16728 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16729 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16730 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16732 /* Set these bits to enable statistics workaround. */
16733 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16734 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16735 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16736 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16737 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16738 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16741 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16742 tg3_asic_rev(tp) == ASIC_REV_57780)
16743 tg3_flag_set(tp, USE_PHYLIB);
16745 err = tg3_mdio_init(tp);
16749 /* Initialize data/descriptor byte/word swapping. */
16750 val = tr32(GRC_MODE);
16751 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16752 tg3_asic_rev(tp) == ASIC_REV_5762)
16753 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16754 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16755 GRC_MODE_B2HRX_ENABLE |
16756 GRC_MODE_HTX2B_ENABLE |
16757 GRC_MODE_HOST_STACKUP);
16759 val &= GRC_MODE_HOST_STACKUP;
16761 tw32(GRC_MODE, val | tp->grc_mode);
16763 tg3_switch_clocks(tp);
16765 /* Clear this out for sanity. */
16766 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16768 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16769 tw32(TG3PCI_REG_BASE_ADDR, 0);
16771 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16773 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16774 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16775 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16776 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16777 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16778 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16779 void __iomem *sram_base;
16781 /* Write some dummy words into the SRAM status block
16782 * area, see if it reads back correctly. If the return
16783 * value is bad, force enable the PCIX workaround.
16785 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16787 writel(0x00000000, sram_base);
16788 writel(0x00000000, sram_base + 4);
16789 writel(0xffffffff, sram_base + 4);
16790 if (readl(sram_base) != 0x00000000)
16791 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
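			/* Added note: writing 0xffffffff to offset 4 must
			 * leave offset 0 untouched; if the readback of
			 * offset 0 is nonzero, a posted write was smeared
			 * across words and the PCI-X target workaround is
			 * forced on.
			 */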
16796 tg3_nvram_init(tp);
16798 /* If the device has an NVRAM, no need to load patch firmware */
16799 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16800 !tg3_flag(tp, NO_NVRAM))
16801 tp->fw_needed = NULL;
16803 grc_misc_cfg = tr32(GRC_MISC_CFG);
16804 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16806 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16807 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16808 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16809 tg3_flag_set(tp, IS_5788);
16811 if (!tg3_flag(tp, IS_5788) &&
16812 tg3_asic_rev(tp) != ASIC_REV_5700)
16813 tg3_flag_set(tp, TAGGED_STATUS);
16814 if (tg3_flag(tp, TAGGED_STATUS)) {
16815 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16816 HOSTCC_MODE_CLRTICK_TXBD);
16818 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16819 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16820 tp->misc_host_ctrl);
16823 /* Preserve the APE MAC_MODE bits */
16824 if (tg3_flag(tp, ENABLE_APE))
16825 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16829 if (tg3_10_100_only_device(tp, ent))
16830 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16832 err = tg3_phy_probe(tp);
16834 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16835 /* ... but do not return immediately ... */
16840 tg3_read_fw_ver(tp);
16842 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16843 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16845 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16846 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16848 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16851 /* 5700 {AX,BX} chips have a broken status block link
16852 * change bit implementation, so we must use the
16853 * status register in those cases.
16855 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16856 tg3_flag_set(tp, USE_LINKCHG_REG);
16858 tg3_flag_clear(tp, USE_LINKCHG_REG);
16860 /* The led_ctrl is set during tg3_phy_probe; here we might
16861 * have to force the link status polling mechanism based
16862 * upon subsystem IDs.
16864 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16865 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16866 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16867 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16868 tg3_flag_set(tp, USE_LINKCHG_REG);
16871 /* For all SERDES we poll the MAC status register. */
16872 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16873 tg3_flag_set(tp, POLL_SERDES);
16875 tg3_flag_clear(tp, POLL_SERDES);
16877 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16878 tg3_flag_set(tp, POLL_CPMU_LINK);
16880 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16881 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16882 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16883 tg3_flag(tp, PCIX_MODE)) {
16884 tp->rx_offset = NET_SKB_PAD;
16885 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16886 tp->rx_copy_thresh = ~(u16)0;
16890 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16891 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16892 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16894 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16896 /* Increment the rx prod index on the rx std ring by at most
16897 * 8 for these chips to work around hw errata.
16899 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16900 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16901 tg3_asic_rev(tp) == ASIC_REV_5755)
16902 tp->rx_std_max_post = 8;
16904 if (tg3_flag(tp, ASPM_WORKAROUND))
16905 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16906 PCIE_PWR_MGMT_L1_THRESH_MSK;
16911 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16913 u32 hi, lo, mac_offset;
16917 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16920 if (tg3_flag(tp, IS_SSB_CORE)) {
16921 err = ssb_gige_get_macaddr(tp->pdev, addr);
16922 if (!err && is_valid_ether_addr(addr))
16927 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16928 tg3_flag(tp, 5780_CLASS)) {
16929 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16931 if (tg3_nvram_lock(tp))
16932 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16934 tg3_nvram_unlock(tp);
16935 } else if (tg3_flag(tp, 5717_PLUS)) {
16936 if (tp->pci_fn & 1)
16938 if (tp->pci_fn > 1)
16939 mac_offset += 0x18c;
16940 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16943 /* First try to get it from MAC address mailbox. */
16944 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16945 if ((hi >> 16) == 0x484b) {
16946 addr[0] = (hi >> 8) & 0xff;
16947 addr[1] = (hi >> 0) & 0xff;
16949 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16950 addr[2] = (lo >> 24) & 0xff;
16951 addr[3] = (lo >> 16) & 0xff;
16952 addr[4] = (lo >> 8) & 0xff;
16953 addr[5] = (lo >> 0) & 0xff;
16955 /* Some old bootcode may report a 0 MAC address in SRAM */
16956 addr_ok = is_valid_ether_addr(addr);
16959 /* Next, try NVRAM. */
16960 if (!tg3_flag(tp, NO_NVRAM) &&
16961 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16962 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16963 memcpy(&addr[0], ((char *)&hi) + 2, 2);
16964 memcpy(&addr[2], (char *)&lo, sizeof(lo));
16966 /* Finally just fetch it out of the MAC control regs. */
16968 hi = tr32(MAC_ADDR_0_HIGH);
16969 lo = tr32(MAC_ADDR_0_LOW);
16971 addr[5] = lo & 0xff;
16972 addr[4] = (lo >> 8) & 0xff;
16973 addr[3] = (lo >> 16) & 0xff;
16974 addr[2] = (lo >> 24) & 0xff;
16975 addr[1] = hi & 0xff;
16976 addr[0] = (hi >> 8) & 0xff;
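	/* Sketch of the unpacking above as a standalone helper (assumption,
	 * not in the driver); the registers hold the address big-endian,
	 * with bytes 0-1 in the low half of MAC_ADDR_0_HIGH and bytes 2-5
	 * in MAC_ADDR_0_LOW:
	 *
	 *	static void tg3_unpack_mac(u32 hi, u32 lo, u8 *addr)
	 *	{
	 *		addr[0] = (hi >>  8) & 0xff;
	 *		addr[1] = (hi >>  0) & 0xff;
	 *		addr[2] = (lo >> 24) & 0xff;
	 *		addr[3] = (lo >> 16) & 0xff;
	 *		addr[4] = (lo >>  8) & 0xff;
	 *		addr[5] = (lo >>  0) & 0xff;
	 *	}
	 */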
16980 if (!is_valid_ether_addr(addr))
16985 #define BOUNDARY_SINGLE_CACHELINE 1
16986 #define BOUNDARY_MULTI_CACHELINE 2
16988 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16990 int cacheline_size;
16994 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16996 cacheline_size = 1024;
16998 cacheline_size = (int) byte * 4;
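	/* Added note: PCI_CACHE_LINE_SIZE is programmed in units of 32-bit
	 * dwords, hence the multiply by 4 to get bytes; a value of zero
	 * means firmware never set it, so the conservative 1024-byte
	 * fallback above is used.
	 */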
17000 /* On 5703 and later chips, the boundary bits have no effect. */
17003 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17004 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17005 !tg3_flag(tp, PCI_EXPRESS))
17008 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17009 goal = BOUNDARY_MULTI_CACHELINE;
17011 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17012 goal = BOUNDARY_SINGLE_CACHELINE;
17018 if (tg3_flag(tp, 57765_PLUS)) {
17019 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17026 /* PCI controllers on most RISC systems tend to disconnect
17027 * when a device tries to burst across a cache-line boundary.
17028 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17030 * Unfortunately, for PCI-E there are only limited
17031 * write-side controls for this, and thus for reads
17032 * we will still get the disconnects. We'll also waste
17033 * these PCI cycles for both read and write for chips
17034 * other than 5700 and 5701 which do not implement the boundary bits. */
17037 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17038 switch (cacheline_size) {
17043 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17044 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17045 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17047 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17048 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17053 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17054 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17058 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17059 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17062 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17063 switch (cacheline_size) {
17067 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17068 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17069 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17075 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17076 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17080 switch (cacheline_size) {
17082 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17083 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17084 DMA_RWCTRL_WRITE_BNDRY_16);
17089 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17090 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17091 DMA_RWCTRL_WRITE_BNDRY_32);
17096 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17097 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17098 DMA_RWCTRL_WRITE_BNDRY_64);
17103 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17104 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17105 DMA_RWCTRL_WRITE_BNDRY_128);
17110 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17111 DMA_RWCTRL_WRITE_BNDRY_256);
17114 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17115 DMA_RWCTRL_WRITE_BNDRY_512);
17119 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17120 DMA_RWCTRL_WRITE_BNDRY_1024);
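/* Usage sketch (hypothetical helper; the same two steps appear verbatim
 * in tg3_test_dma() below): feed the current value through
 * tg3_calc_dma_bndry() and program the result back into the chip.
 */
static void tg3_setup_dma_bndry(struct tg3 *tp)
{
	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
}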
17129 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17130 int size, bool to_device)
17132 struct tg3_internal_buffer_desc test_desc;
17133 u32 sram_dma_descs;
17136 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17138 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17139 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17140 tw32(RDMAC_STATUS, 0);
17141 tw32(WDMAC_STATUS, 0);
17143 tw32(BUFMGR_MODE, 0);
17144 tw32(FTQ_RESET, 0);
17146 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17147 test_desc.addr_lo = buf_dma & 0xffffffff;
17148 test_desc.nic_mbuf = 0x00002100;
17149 test_desc.len = size;
17152 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17153 * the *second* time the tg3 driver was getting loaded after an initial scan.
17156 * Broadcom tells me:
17157 * ...the DMA engine is connected to the GRC block and a DMA
17158 * reset may affect the GRC block in some unpredictable way...
17159 * The behavior of resets to individual blocks has not been tested.
17161 * Broadcom noted the GRC reset will also reset all sub-components.
17164 test_desc.cqid_sqid = (13 << 8) | 2;
17166 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17169 test_desc.cqid_sqid = (16 << 8) | 7;
17171 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17174 test_desc.flags = 0x00000005;
17176 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17179 val = *(((u32 *)&test_desc) + i);
17180 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17181 sram_dma_descs + (i * sizeof(u32)));
17182 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17184 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17187 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17189 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17192 for (i = 0; i < 40; i++) {
17196 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17198 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17199 if ((val & 0xffff) == sram_dma_descs) {
17210 #define TEST_BUFFER_SIZE 0x2000
17212 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17213 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17217 static int tg3_test_dma(struct tg3 *tp)
17219 dma_addr_t buf_dma;
17220 u32 *buf, saved_dma_rwctrl;
17223 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17224 &buf_dma, GFP_KERNEL);
17230 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17231 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17233 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17235 if (tg3_flag(tp, 57765_PLUS))
17238 if (tg3_flag(tp, PCI_EXPRESS)) {
17239 /* DMA read watermark not used on PCIE */
17240 tp->dma_rwctrl |= 0x00180000;
17241 } else if (!tg3_flag(tp, PCIX_MODE)) {
17242 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17243 tg3_asic_rev(tp) == ASIC_REV_5750)
17244 tp->dma_rwctrl |= 0x003f0000;
17246 tp->dma_rwctrl |= 0x003f000f;
17248 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17249 tg3_asic_rev(tp) == ASIC_REV_5704) {
17250 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17251 u32 read_water = 0x7;
17253 /* If the 5704 is behind the EPB bridge, we can
17254 * do the less restrictive ONE_DMA workaround for
17255 * better performance.
17257 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17258 tg3_asic_rev(tp) == ASIC_REV_5704)
17259 tp->dma_rwctrl |= 0x8000;
17260 else if (ccval == 0x6 || ccval == 0x7)
17261 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17263 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17265 /* Set bit 23 to enable PCIX hw bug fix */
17267 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17268 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17270 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17271 /* 5780 always in PCIX mode */
17272 tp->dma_rwctrl |= 0x00144000;
17273 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17274 /* 5714 always in PCIX mode */
17275 tp->dma_rwctrl |= 0x00148000;
17277 tp->dma_rwctrl |= 0x001b000f;
17280 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17281 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17283 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17284 tg3_asic_rev(tp) == ASIC_REV_5704)
17285 tp->dma_rwctrl &= 0xfffffff0;
17287 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17288 tg3_asic_rev(tp) == ASIC_REV_5701) {
17289 /* Remove this if it causes problems for some boards. */
17290 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17292 /* On 5700/5701 chips, we need to set this bit.
17293 * Otherwise the chip will issue cacheline transactions
17294 * to streamable DMA memory without all of the byte
17295 * enables turned on. This is an error on several
17296 * RISC PCI controllers, in particular sparc64.
17298 * On 5703/5704 chips, this bit has been reassigned
17299 * a different meaning. In particular, it is used
17300 * on those chips to enable a PCI-X workaround.
17302 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17305 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17308 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17309 tg3_asic_rev(tp) != ASIC_REV_5701)
17312 /* It is best to perform the DMA test with the maximum write burst
17313 * size to expose the 5700/5701 write DMA bug.
17315 saved_dma_rwctrl = tp->dma_rwctrl;
17316 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17317 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17322 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17325 /* Send the buffer to the chip. */
17326 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17328 dev_err(&tp->pdev->dev,
17329 "%s: Buffer write failed. err = %d\n",
17334 /* Now read it back. */
17335 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17337 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17338 "err = %d\n", __func__, ret);
17343 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17347 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17348 DMA_RWCTRL_WRITE_BNDRY_16) {
17349 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17350 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17351 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17354 dev_err(&tp->pdev->dev,
17355 "%s: Buffer corrupted on read back! "
17356 "(%d != %d)\n", __func__, p[i], i);
17362 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17368 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17369 DMA_RWCTRL_WRITE_BNDRY_16) {
17370 /* DMA test passed without adjusting the DMA boundary;
17371 * now look for chipsets that are known to expose the
17372 * DMA bug without failing the test.
17374 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17375 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17376 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17378 /* Safe to use the calculated DMA boundary. */
17379 tp->dma_rwctrl = saved_dma_rwctrl;
17382 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17386 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
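/* Illustrative sketch (hypothetical helper, not in the driver): the
 * read-back check above amounts to filling the test buffer with a ramp
 * pattern, DMA-ing it out to the chip and back, and comparing word by
 * word.
 */
static int tg3_verify_dma_ramp(const u32 *buf, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		if (buf[i] != i)
			return -EIO;	/* corruption on read back */
	return 0;
}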
17391 static void tg3_init_bufmgr_config(struct tg3 *tp)
17393 if (tg3_flag(tp, 57765_PLUS)) {
17394 tp->bufmgr_config.mbuf_read_dma_low_water =
17395 DEFAULT_MB_RDMA_LOW_WATER_5705;
17396 tp->bufmgr_config.mbuf_mac_rx_low_water =
17397 DEFAULT_MB_MACRX_LOW_WATER_57765;
17398 tp->bufmgr_config.mbuf_high_water =
17399 DEFAULT_MB_HIGH_WATER_57765;
17401 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17402 DEFAULT_MB_RDMA_LOW_WATER_5705;
17403 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17404 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17405 tp->bufmgr_config.mbuf_high_water_jumbo =
17406 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17407 } else if (tg3_flag(tp, 5705_PLUS)) {
17408 tp->bufmgr_config.mbuf_read_dma_low_water =
17409 DEFAULT_MB_RDMA_LOW_WATER_5705;
17410 tp->bufmgr_config.mbuf_mac_rx_low_water =
17411 DEFAULT_MB_MACRX_LOW_WATER_5705;
17412 tp->bufmgr_config.mbuf_high_water =
17413 DEFAULT_MB_HIGH_WATER_5705;
17414 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17415 tp->bufmgr_config.mbuf_mac_rx_low_water =
17416 DEFAULT_MB_MACRX_LOW_WATER_5906;
17417 tp->bufmgr_config.mbuf_high_water =
17418 DEFAULT_MB_HIGH_WATER_5906;
17421 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17422 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17423 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17424 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17425 tp->bufmgr_config.mbuf_high_water_jumbo =
17426 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17428 tp->bufmgr_config.mbuf_read_dma_low_water =
17429 DEFAULT_MB_RDMA_LOW_WATER;
17430 tp->bufmgr_config.mbuf_mac_rx_low_water =
17431 DEFAULT_MB_MACRX_LOW_WATER;
17432 tp->bufmgr_config.mbuf_high_water =
17433 DEFAULT_MB_HIGH_WATER;
17435 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17436 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17437 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17438 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17439 tp->bufmgr_config.mbuf_high_water_jumbo =
17440 DEFAULT_MB_HIGH_WATER_JUMBO;
17443 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17444 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17447 static char *tg3_phy_string(struct tg3 *tp)
17449 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17450 case TG3_PHY_ID_BCM5400: return "5400";
17451 case TG3_PHY_ID_BCM5401: return "5401";
17452 case TG3_PHY_ID_BCM5411: return "5411";
17453 case TG3_PHY_ID_BCM5701: return "5701";
17454 case TG3_PHY_ID_BCM5703: return "5703";
17455 case TG3_PHY_ID_BCM5704: return "5704";
17456 case TG3_PHY_ID_BCM5705: return "5705";
17457 case TG3_PHY_ID_BCM5750: return "5750";
17458 case TG3_PHY_ID_BCM5752: return "5752";
17459 case TG3_PHY_ID_BCM5714: return "5714";
17460 case TG3_PHY_ID_BCM5780: return "5780";
17461 case TG3_PHY_ID_BCM5755: return "5755";
17462 case TG3_PHY_ID_BCM5787: return "5787";
17463 case TG3_PHY_ID_BCM5784: return "5784";
17464 case TG3_PHY_ID_BCM5756: return "5722/5756";
17465 case TG3_PHY_ID_BCM5906: return "5906";
17466 case TG3_PHY_ID_BCM5761: return "5761";
17467 case TG3_PHY_ID_BCM5718C: return "5718C";
17468 case TG3_PHY_ID_BCM5718S: return "5718S";
17469 case TG3_PHY_ID_BCM57765: return "57765";
17470 case TG3_PHY_ID_BCM5719C: return "5719C";
17471 case TG3_PHY_ID_BCM5720C: return "5720C";
17472 case TG3_PHY_ID_BCM5762: return "5762C";
17473 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17474 case 0: return "serdes";
17475 default: return "unknown";
17479 static char *tg3_bus_string(struct tg3 *tp, char *str)
17481 if (tg3_flag(tp, PCI_EXPRESS)) {
17482 strcpy(str, "PCI Express");
17484 } else if (tg3_flag(tp, PCIX_MODE)) {
17485 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17487 strcpy(str, "PCIX:");
17489 if ((clock_ctrl == 7) ||
17490 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17491 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17492 strcat(str, "133MHz");
17493 else if (clock_ctrl == 0)
17494 strcat(str, "33MHz");
17495 else if (clock_ctrl == 2)
17496 strcat(str, "50MHz");
17497 else if (clock_ctrl == 4)
17498 strcat(str, "66MHz");
17499 else if (clock_ctrl == 6)
17500 strcat(str, "100MHz");
17502 strcpy(str, "PCI:");
17503 if (tg3_flag(tp, PCI_HIGH_SPEED))
17504 strcat(str, "66MHz");
17506 strcat(str, "33MHz");
17508 if (tg3_flag(tp, PCI_32BIT))
17509 strcat(str, ":32-bit");
17511 strcat(str, ":64-bit");
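/* Usage sketch: tg3_bus_string() fills a caller-supplied scratch buffer
 * and returns it, as the probe-time banner in tg3_init_one() does.  The
 * wrapper below is hypothetical.
 */
static void tg3_log_bus(struct tg3 *tp, struct net_device *dev)
{
	char str[40];	/* long enough for "PCIX:133MHz:64-bit" etc. */

	netdev_info(dev, "bus: %s\n", tg3_bus_string(tp, str));
}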
17515 static void tg3_init_coal(struct tg3 *tp)
17517 struct ethtool_coalesce *ec = &tp->coal;
17519 memset(ec, 0, sizeof(*ec));
17520 ec->cmd = ETHTOOL_GCOALESCE;
17521 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17522 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17523 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17524 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17525 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17526 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17527 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17528 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17529 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17531 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17532 HOSTCC_MODE_CLRTICK_TXBD)) {
17533 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17534 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17535 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17536 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17539 if (tg3_flag(tp, 5705_PLUS)) {
17540 ec->rx_coalesce_usecs_irq = 0;
17541 ec->tx_coalesce_usecs_irq = 0;
17542 ec->stats_block_coalesce_usecs = 0;
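/* Side note (assumption): the defaults built above are what the ethtool
 * get_coalesce() op reports until userspace overrides them; a minimal
 * handler can simply copy the cached structure.  The function below is a
 * sketch, not the driver's actual ethtool hook.
 */
static int tg3_get_coalesce_sketch(struct net_device *dev,
				   struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}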
17546 static int tg3_init_one(struct pci_dev *pdev,
17547 const struct pci_device_id *ent)
17549 struct net_device *dev;
17552 u32 sndmbx, rcvmbx, intmbx;
17554 u64 dma_mask, persist_dma_mask;
17555 netdev_features_t features = 0;
17556 u8 addr[ETH_ALEN] __aligned(2);
17558 err = pci_enable_device(pdev);
17560 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17564 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17566 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17567 goto err_out_disable_pdev;
17570 pci_set_master(pdev);
17572 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17575 goto err_out_free_res;
17578 SET_NETDEV_DEV(dev, &pdev->dev);
17580 tp = netdev_priv(dev);
17583 tp->rx_mode = TG3_DEF_RX_MODE;
17584 tp->tx_mode = TG3_DEF_TX_MODE;
17586 tp->pcierr_recovery = false;
17589 tp->msg_enable = tg3_debug;
17591 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17593 if (pdev_is_ssb_gige_core(pdev)) {
17594 tg3_flag_set(tp, IS_SSB_CORE);
17595 if (ssb_gige_must_flush_posted_writes(pdev))
17596 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17597 if (ssb_gige_one_dma_at_once(pdev))
17598 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17599 if (ssb_gige_have_roboswitch(pdev)) {
17600 tg3_flag_set(tp, USE_PHYLIB);
17601 tg3_flag_set(tp, ROBOSWITCH);
17603 if (ssb_gige_is_rgmii(pdev))
17604 tg3_flag_set(tp, RGMII_MODE);
17607 /* The word/byte swap controls here control register access byte
17608 * swapping. DMA data byte swapping is controlled in the GRC_MODE register below.
17611 tp->misc_host_ctrl =
17612 MISC_HOST_CTRL_MASK_PCI_INT |
17613 MISC_HOST_CTRL_WORD_SWAP |
17614 MISC_HOST_CTRL_INDIR_ACCESS |
17615 MISC_HOST_CTRL_PCISTATE_RW;
17617 /* The NONFRM (non-frame) byte/word swap controls take effect
17618 * on descriptor entries, anything which isn't packet data.
17620 * The StrongARM chips on the board (one for tx, one for rx)
17621 * are running in big-endian mode.
17623 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17624 GRC_MODE_WSWAP_NONFRM_DATA);
17625 #ifdef __BIG_ENDIAN
17626 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17628 spin_lock_init(&tp->lock);
17629 spin_lock_init(&tp->indirect_lock);
17630 INIT_WORK(&tp->reset_task, tg3_reset_task);
17632 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17634 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17636 goto err_out_free_dev;
17639 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17640 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17641 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17642 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17651 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17652 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17653 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17654 tg3_flag_set(tp, ENABLE_APE);
17655 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17656 if (!tp->aperegs) {
17657 dev_err(&pdev->dev,
17658 "Cannot map APE registers, aborting\n");
17660 goto err_out_iounmap;
17664 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17665 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17667 dev->ethtool_ops = &tg3_ethtool_ops;
17668 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17669 dev->netdev_ops = &tg3_netdev_ops;
17670 dev->irq = pdev->irq;
17672 err = tg3_get_invariants(tp, ent);
17674 dev_err(&pdev->dev,
17675 "Problem fetching invariants of chip, aborting\n");
17676 goto err_out_apeunmap;
17679 /* The EPB bridge inside 5714, 5715, and 5780 and any
17680 * device behind the EPB cannot support DMA addresses > 40-bit.
17681 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17682 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17683 * do DMA address check in tg3_start_xmit().
17685 if (tg3_flag(tp, IS_5788))
17686 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17687 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17688 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17689 #ifdef CONFIG_HIGHMEM
17690 dma_mask = DMA_BIT_MASK(64);
17693 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17695 /* Configure DMA attributes. */
17696 if (dma_mask > DMA_BIT_MASK(32)) {
17697 err = dma_set_mask(&pdev->dev, dma_mask);
17699 features |= NETIF_F_HIGHDMA;
17700 err = dma_set_coherent_mask(&pdev->dev,
17703 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17704 "DMA for consistent allocations\n");
17705 goto err_out_apeunmap;
17709 if (err || dma_mask == DMA_BIT_MASK(32)) {
17710 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17712 dev_err(&pdev->dev,
17713 "No usable DMA configuration, aborting\n");
17714 goto err_out_apeunmap;
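	/* Added note: on current kernels the mask-then-coherent-mask dance
	 * above can be collapsed into a single call, e.g.
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, dma_mask))
	 *		dma_mask = DMA_BIT_MASK(32);
	 *
	 * (sketch only; the explicit two-step form is kept here).
	 */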
17718 tg3_init_bufmgr_config(tp);
17720 /* 5700 B0 chips do not support checksumming correctly due
17721 * to hardware bugs.
17723 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17724 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17726 if (tg3_flag(tp, 5755_PLUS))
17727 features |= NETIF_F_IPV6_CSUM;
17730 /* TSO is on by default on chips that support hardware TSO.
17731 * Firmware TSO on older chips gives lower performance, so it
17732 * is off by default, but can be enabled using ethtool.
17734 if ((tg3_flag(tp, HW_TSO_1) ||
17735 tg3_flag(tp, HW_TSO_2) ||
17736 tg3_flag(tp, HW_TSO_3)) &&
17737 (features & NETIF_F_IP_CSUM))
17738 features |= NETIF_F_TSO;
17739 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17740 if (features & NETIF_F_IPV6_CSUM)
17741 features |= NETIF_F_TSO6;
17742 if (tg3_flag(tp, HW_TSO_3) ||
17743 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17744 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17745 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17746 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17747 tg3_asic_rev(tp) == ASIC_REV_57780)
17748 features |= NETIF_F_TSO_ECN;
17751 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17752 NETIF_F_HW_VLAN_CTAG_RX;
17753 dev->vlan_features |= features;
17756 * Add loopback capability only for a subset of devices that support
17757 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17758 * loopback for the remaining devices.
17760 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17761 !tg3_flag(tp, CPMU_PRESENT))
17762 /* Add the loopback capability */
17763 features |= NETIF_F_LOOPBACK;
17765 dev->hw_features |= features;
17766 dev->priv_flags |= IFF_UNICAST_FLT;
17768 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17769 dev->min_mtu = TG3_MIN_MTU;
17770 dev->max_mtu = TG3_MAX_MTU(tp);
17772 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17773 !tg3_flag(tp, TSO_CAPABLE) &&
17774 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17775 tg3_flag_set(tp, MAX_RXPEND_64);
17776 tp->rx_pending = 63;
17779 err = tg3_get_device_address(tp, addr);
17781 dev_err(&pdev->dev,
17782 "Could not obtain valid ethernet address, aborting\n");
17783 goto err_out_apeunmap;
17785 eth_hw_addr_set(dev, addr);
17787 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17788 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17789 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17790 for (i = 0; i < tp->irq_max; i++) {
17791 struct tg3_napi *tnapi = &tp->napi[i];
17794 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17796 tnapi->int_mbox = intmbx;
17799 tnapi->consmbox = rcvmbx;
17800 tnapi->prodmbox = sndmbx;
17803 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17805 tnapi->coal_now = HOSTCC_MODE_NOW;
17807 if (!tg3_flag(tp, SUPPORT_MSIX))
17811 * If we support MSIX, we'll be using RSS. If we're using
17812 * RSS, the first vector only handles link interrupts and the
17813 * remaining vectors handle rx and tx interrupts. Reuse the
17814 * mailbox values for the next iteration. The values we setup
17815 * above are still useful for the single vectored mode.
17829 * Reset the chip in case an UNDI or EFI driver did not shut it down
17830 * cleanly; the DMA self test will enable WDMAC and we'll see (spurious)
17831 * pending DMA on the PCI bus at that point.
17833 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17834 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17835 tg3_full_lock(tp, 0);
17836 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17837 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17838 tg3_full_unlock(tp);
17841 err = tg3_test_dma(tp);
17843 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17844 goto err_out_apeunmap;
17849 pci_set_drvdata(pdev, dev);
17851 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17852 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17853 tg3_asic_rev(tp) == ASIC_REV_5762)
17854 tg3_flag_set(tp, PTP_CAPABLE);
17856 tg3_timer_init(tp);
17858 tg3_carrier_off(tp);
17860 err = register_netdev(dev);
17862 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17863 goto err_out_apeunmap;
17866 if (tg3_flag(tp, PTP_CAPABLE)) {
17868 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17870 if (IS_ERR(tp->ptp_clock))
17871 tp->ptp_clock = NULL;
17874 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17875 tp->board_part_number,
17876 tg3_chip_rev_id(tp),
17877 tg3_bus_string(tp, str),
17880 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17883 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17884 ethtype = "10/100Base-TX";
17885 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17886 ethtype = "1000Base-SX";
17888 ethtype = "10/100/1000Base-T";
17890 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17891 "(WireSpeed[%d], EEE[%d])\n",
17892 tg3_phy_string(tp), ethtype,
17893 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17894 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17897 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17898 (dev->features & NETIF_F_RXCSUM) != 0,
17899 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17900 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17901 tg3_flag(tp, ENABLE_ASF) != 0,
17902 tg3_flag(tp, TSO_CAPABLE) != 0);
17903 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17905 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17906 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17908 pci_save_state(pdev);
17914 iounmap(tp->aperegs);
17915 tp->aperegs = NULL;
17928 pci_release_regions(pdev);
17930 err_out_disable_pdev:
17931 if (pci_is_enabled(pdev))
17932 pci_disable_device(pdev);
17936 static void tg3_remove_one(struct pci_dev *pdev)
17938 struct net_device *dev = pci_get_drvdata(pdev);
17941 struct tg3 *tp = netdev_priv(dev);
17945 release_firmware(tp->fw);
17947 tg3_reset_task_cancel(tp);
17949 if (tg3_flag(tp, USE_PHYLIB)) {
17954 unregister_netdev(dev);
17956 iounmap(tp->aperegs);
17957 tp->aperegs = NULL;
17964 pci_release_regions(pdev);
17965 pci_disable_device(pdev);
17969 #ifdef CONFIG_PM_SLEEP
17970 static int tg3_suspend(struct device *device)
17972 struct net_device *dev = dev_get_drvdata(device);
17973 struct tg3 *tp = netdev_priv(dev);
17978 if (!netif_running(dev))
17981 tg3_reset_task_cancel(tp);
17983 tg3_netif_stop(tp);
17985 tg3_timer_stop(tp);
17987 tg3_full_lock(tp, 1);
17988 tg3_disable_ints(tp);
17989 tg3_full_unlock(tp);
17991 netif_device_detach(dev);
17993 tg3_full_lock(tp, 0);
17994 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17995 tg3_flag_clear(tp, INIT_COMPLETE);
17996 tg3_full_unlock(tp);
17998 err = tg3_power_down_prepare(tp);
18002 tg3_full_lock(tp, 0);
18004 tg3_flag_set(tp, INIT_COMPLETE);
18005 err2 = tg3_restart_hw(tp, true);
18009 tg3_timer_start(tp);
18011 netif_device_attach(dev);
18012 tg3_netif_start(tp);
18015 tg3_full_unlock(tp);
18026 static int tg3_resume(struct device *device)
18028 struct net_device *dev = dev_get_drvdata(device);
18029 struct tg3 *tp = netdev_priv(dev);
18034 if (!netif_running(dev))
18037 netif_device_attach(dev);
18039 tg3_full_lock(tp, 0);
18041 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18043 tg3_flag_set(tp, INIT_COMPLETE);
18044 err = tg3_restart_hw(tp,
18045 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18049 tg3_timer_start(tp);
18051 tg3_netif_start(tp);
18054 tg3_full_unlock(tp);
18063 #endif /* CONFIG_PM_SLEEP */
18065 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18067 static void tg3_shutdown(struct pci_dev *pdev)
18069 struct net_device *dev = pci_get_drvdata(pdev);
18070 struct tg3 *tp = netdev_priv(dev);
18072 tg3_reset_task_cancel(tp);
18076 netif_device_detach(dev);
18078 if (netif_running(dev))
18081 tg3_power_down(tp);
18085 pci_disable_device(pdev);
18089 * tg3_io_error_detected - called when PCI error is detected
18090 * @pdev: Pointer to PCI device
18091 * @state: The current pci connection state
18093 * This function is called after a PCI bus error affecting
18094 * this device has been detected.
18096 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18097 pci_channel_state_t state)
18099 struct net_device *netdev = pci_get_drvdata(pdev);
18100 struct tg3 *tp = netdev_priv(netdev);
18101 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18103 netdev_info(netdev, "PCI I/O error detected\n");
18105 /* Want to make sure that the reset task doesn't run */
18106 tg3_reset_task_cancel(tp);
18110 /* Could be a second call, or maybe we don't have a netdev yet */
18111 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18114 /* We needn't recover from permanent error */
18115 if (state == pci_channel_io_frozen)
18116 tp->pcierr_recovery = true;
18120 tg3_netif_stop(tp);
18122 tg3_timer_stop(tp);
18124 netif_device_detach(netdev);
18126 /* Clean up software state, even if MMIO is blocked */
18127 tg3_full_lock(tp, 0);
18128 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18129 tg3_full_unlock(tp);
18132 if (state == pci_channel_io_perm_failure) {
18134 tg3_napi_enable(tp);
18137 err = PCI_ERS_RESULT_DISCONNECT;
18139 pci_disable_device(pdev);
18148 * tg3_io_slot_reset - called after the pci bus has been reset.
18149 * @pdev: Pointer to PCI device
18151 * Restart the card from scratch, as if from a cold boot.
18152 * At this point, the card has experienced a hard reset,
18153 * followed by fixups by the BIOS, and has its config space
18154 * set up identically to what it was at cold boot.
18156 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18158 struct net_device *netdev = pci_get_drvdata(pdev);
18159 struct tg3 *tp = netdev_priv(netdev);
18160 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18165 if (pci_enable_device(pdev)) {
18166 dev_err(&pdev->dev,
18167 "Cannot re-enable PCI device after reset.\n");
18171 pci_set_master(pdev);
18172 pci_restore_state(pdev);
18173 pci_save_state(pdev);
18175 if (!netdev || !netif_running(netdev)) {
18176 rc = PCI_ERS_RESULT_RECOVERED;
18180 err = tg3_power_up(tp);
18184 rc = PCI_ERS_RESULT_RECOVERED;
18187 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18188 tg3_napi_enable(tp);
18197 * tg3_io_resume - called when traffic can start flowing again.
18198 * @pdev: Pointer to PCI device
18200 * This callback is called when the error recovery driver tells
18201 * us that it's OK to resume normal operation.
18203 static void tg3_io_resume(struct pci_dev *pdev)
18205 struct net_device *netdev = pci_get_drvdata(pdev);
18206 struct tg3 *tp = netdev_priv(netdev);
18211 if (!netdev || !netif_running(netdev))
18214 tg3_full_lock(tp, 0);
18215 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18216 tg3_flag_set(tp, INIT_COMPLETE);
18217 err = tg3_restart_hw(tp, true);
18219 tg3_full_unlock(tp);
18220 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18224 netif_device_attach(netdev);
18226 tg3_timer_start(tp);
18228 tg3_netif_start(tp);
18230 tg3_full_unlock(tp);
18235 tp->pcierr_recovery = false;
18239 static const struct pci_error_handlers tg3_err_handler = {
18240 .error_detected = tg3_io_error_detected,
18241 .slot_reset = tg3_io_slot_reset,
18242 .resume = tg3_io_resume
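/* Added note: for a recoverable error the AER core invokes these hooks
 * in order -- error_detected() to quiesce the device, slot_reset() after
 * the link/bus reset, then resume() once traffic may flow again.
 */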
18245 static struct pci_driver tg3_driver = {
18246 .name = DRV_MODULE_NAME,
18247 .id_table = tg3_pci_tbl,
18248 .probe = tg3_init_one,
18249 .remove = tg3_remove_one,
18250 .err_handler = &tg3_err_handler,
18251 .driver.pm = &tg3_pm_ops,
18252 .shutdown = tg3_shutdown,
18255 module_pci_driver(tg3_driver);