/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
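/* Example usage, as seen throughout this driver:
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 *
 * The TG3_FLAG_##flag token pasting keeps call sites terse while
 * test_bit()/set_bit()/clear_bit() supply atomic bit operations on
 * the tp->tg3_flags bitmap.
 */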
#define DRV_MODULE_NAME		"tg3"

#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
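/* NEXT_TX() is exactly the '& (foo - 1)' trick described above: because
 * TG3_TX_RING_SIZE is the power of two 512, ((N) + 1) & 511 computes
 * ((N) + 1) % 512 without a hardware divide.
 */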
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
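/* Net effect of the #if above: on platforms with efficient unaligned
 * access (e.g. x86, where NET_IP_ALIGN is 0), TG3_RX_COPY_THRESH(tp)
 * folds to the constant 256 at compile time, so the hot rx-path
 * comparison needs no tp dereference at all.
 */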
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
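/* Worked example: with the default tx_pending of TG3_DEF_TX_RING_PENDING
 * (511), the queue is woken once 511 / 4 = 127 descriptors are free.
 */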
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
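/* Usage sketch (assuming the usual module name "tg3"): the debug mask can
 * be set at load time, e.g.
 *
 *	modprobe tg3 tg3_debug=0x0007
 *
 * where the value is a NETIF_MSG_* bitmask; the default of -1 selects
 * TG3_DEF_MSG_ENABLE above.
 */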
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
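/* Example from later in this file: tg3_switch_clocks() uses
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * i.e. a flushed write that also honors a 40 usec settling time, per the
 * usec_wait rules documented above _tw32_flush().
 */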
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
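/* Note: on success (return 0) tg3_ape_event_lock() returns with
 * TG3_APE_LOCK_MEM still held; the caller is expected to drop it with
 * tg3_ape_unlock(tp, TG3_APE_LOCK_MEM) once it has queued its event,
 * as tg3_ape_scratchpad_read() and tg3_ape_send_event() below do.
 */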
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
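/* With the udelay(10) polling loops below, PHY_BUSY_LOOPS bounds each MDIO
 * transaction to roughly 5000 * 10 usec = 50 ms before -EBUSY is returned.
 */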
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
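/* The pacing above works in ~8 usec slices: delay_cnt is the remaining
 * budget in usec divided by 8 (plus one), and each loop iteration burns
 * 8 usec via udelay(8), so the wait never substantially overshoots
 * TG3_FW_EVENT_TIMEOUT_USEC.
 */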
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
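/* The resolution above follows the IEEE 802.3x/1000BASE-X pause rules:
 * both sides symmetric -> TX+RX pause; otherwise an asymmetric match
 * gives the local side RX pause (we advertised PAUSE) or TX pause (the
 * link partner did), and anything else resolves to no pause at all.
 */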
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	phy_attached_info(phydev);

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2415 tg3_phy_toggle_auxctl_smdsp(tp, false);
2418 val = tr32(TG3_CPMU_EEE_MODE);
2419 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2423 static void tg3_phy_eee_enable(struct tg3 *tp)
2427 if (tp->link_config.active_speed == SPEED_1000 &&
2428 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2429 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2430 tg3_flag(tp, 57765_CLASS)) &&
2431 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2432 val = MII_TG3_DSP_TAP26_ALNOKO |
2433 MII_TG3_DSP_TAP26_RMRXSTO;
2434 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2435 tg3_phy_toggle_auxctl_smdsp(tp, false);
2438 val = tr32(TG3_CPMU_EEE_MODE);
2439 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2442 static int tg3_wait_macro_done(struct tg3 *tp)
2449 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2450 if ((tmp32 & 0x1000) == 0)
2460 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2462 static const u32 test_pat[4][6] = {
2463 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2464 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2465 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2466 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2470 for (chan = 0; chan < 4; chan++) {
2473 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2474 (chan * 0x2000) | 0x0200);
2475 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2477 for (i = 0; i < 6; i++)
2478 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2481 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482 if (tg3_wait_macro_done(tp)) {
2487 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2488 (chan * 0x2000) | 0x0200);
2489 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2490 if (tg3_wait_macro_done(tp)) {
2495 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2496 if (tg3_wait_macro_done(tp)) {
2501 for (i = 0; i < 6; i += 2) {
2504 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2505 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2506 tg3_wait_macro_done(tp)) {
2512 if (low != test_pat[chan][i] ||
2513 high != test_pat[chan][i+1]) {
2514 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2515 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2516 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2526 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2530 for (chan = 0; chan < 4; chan++) {
2533 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2534 (chan * 0x2000) | 0x0200);
2535 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2536 for (i = 0; i < 6; i++)
2537 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2538 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2539 if (tg3_wait_macro_done(tp))
2546 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2548 u32 reg32, phy9_orig;
2549 int retries, do_phy_reset, err;
2555 err = tg3_bmcr_reset(tp);
2561 /* Disable transmitter and interrupt. */
2562 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2566 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2568 /* Set full-duplex, 1000 mbps. */
2569 tg3_writephy(tp, MII_BMCR,
2570 BMCR_FULLDPLX | BMCR_SPEED1000);
2572 /* Set to master mode. */
2573 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2576 tg3_writephy(tp, MII_CTRL1000,
2577 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2579 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2583 /* Block the PHY control access. */
2584 tg3_phydsp_write(tp, 0x8005, 0x0800);
2586 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2589 } while (--retries);
2591 err = tg3_phy_reset_chanpat(tp);
2595 tg3_phydsp_write(tp, 0x8005, 0x0000);
2597 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2598 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2600 tg3_phy_toggle_auxctl_smdsp(tp, false);
2602 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2604 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2609 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2614 static void tg3_carrier_off(struct tg3 *tp)
2616 netif_carrier_off(tp->dev);
2617 tp->link_up = false;
2620 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2622 if (tg3_flag(tp, ENABLE_ASF))
2623 netdev_warn(tp->dev,
2624 "Management side-band traffic will be interrupted during phy settings change\n");
2627 /* This will reset the tigon3 PHY and reapply all of the
2628 * chip-specific workarounds; the reset is unconditional.
2629 */
2630 static int tg3_phy_reset(struct tg3 *tp)
2635 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2636 val = tr32(GRC_MISC_CFG);
2637 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2640 err = tg3_readphy(tp, MII_BMSR, &val);
2641 err |= tg3_readphy(tp, MII_BMSR, &val);
2645 if (netif_running(tp->dev) && tp->link_up) {
2646 netif_carrier_off(tp->dev);
2647 tg3_link_report(tp);
2650 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2651 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2652 tg3_asic_rev(tp) == ASIC_REV_5705) {
2653 err = tg3_phy_reset_5703_4_5(tp);
2660 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2661 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2662 cpmuctrl = tr32(TG3_CPMU_CTRL);
2663 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2665 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2668 err = tg3_bmcr_reset(tp);
2672 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2673 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2674 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2676 tw32(TG3_CPMU_CTRL, cpmuctrl);
2679 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2680 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2681 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2682 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2683 CPMU_LSPD_1000MB_MACCLK_12_5) {
2684 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2686 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2690 if (tg3_flag(tp, 5717_PLUS) &&
2691 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2694 tg3_phy_apply_otp(tp);
2696 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2697 tg3_phy_toggle_apd(tp, true);
2699 tg3_phy_toggle_apd(tp, false);
2702 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2703 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2704 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2705 tg3_phydsp_write(tp, 0x000a, 0x0323);
2706 tg3_phy_toggle_auxctl_smdsp(tp, false);
2709 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2710 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2715 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2716 tg3_phydsp_write(tp, 0x000a, 0x310b);
2717 tg3_phydsp_write(tp, 0x201f, 0x9506);
2718 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2719 tg3_phy_toggle_auxctl_smdsp(tp, false);
2721 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2722 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2723 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2724 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2725 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2726 tg3_writephy(tp, MII_TG3_TEST1,
2727 MII_TG3_TEST1_TRIM_EN | 0x4);
2729 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2731 tg3_phy_toggle_auxctl_smdsp(tp, false);
2735 /* Set Extended packet length bit (bit 14) on all chips that
2736 * support jumbo frames. */
2737 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2738 /* Cannot do read-modify-write on 5401 */
2739 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2740 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2741 /* Set bit 14 with read-modify-write to preserve other bits */
2742 err = tg3_phy_auxctl_read(tp,
2743 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2745 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2746 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2749 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2750 * jumbo frames transmission.
2752 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2753 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2754 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2755 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2758 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2759 /* adjust output voltage */
2760 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2763 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2764 tg3_phydsp_write(tp, 0xffb, 0x4000);
2766 tg3_phy_toggle_automdix(tp, true);
2767 tg3_phy_set_wirespeed(tp);
2771 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2772 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2773 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2774 TG3_GPIO_MSG_NEED_VAUX)
2775 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2776 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2778 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2779 (TG3_GPIO_MSG_DRVR_PRES << 12))
2781 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2782 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2784 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2785 (TG3_GPIO_MSG_NEED_VAUX << 12))
2787 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2791 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792 tg3_asic_rev(tp) == ASIC_REV_5719)
2793 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2795 status = tr32(TG3_CPMU_DRV_STATUS);
2797 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2798 status &= ~(TG3_GPIO_MSG_MASK << shift);
2799 status |= (newstat << shift);
2801 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2802 tg3_asic_rev(tp) == ASIC_REV_5719)
2803 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2805 tw32(TG3_CPMU_DRV_STATUS, status);
2807 return status >> TG3_APE_GPIO_MSG_SHIFT;
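/* Illustrative sketch (not part of the driver): how the TG3_GPIO_MSG_*
 * bits pack. Each PCI function owns a 4-bit lane in the status word,
 * so function n's DRVR_PRES/NEED_VAUX pair sits at bit offset 4 * n
 * (offset further by TG3_APE_GPIO_MSG_SHIFT in the APE register, as
 * tg3_set_function_status() shows). The helper is hypothetical and
 * only demonstrates the shift arithmetic.
 */
static u32 tg3_gpio_msg_for_fn_sketch(u32 pci_fn, u32 msg)
{
	/* e.g. pci_fn == 2 and msg == TG3_GPIO_MSG_NEED_VAUX yields
	 * 0x00000200: NEED_VAUX in function 2's lane.
	 */
	return (msg & TG3_GPIO_MSG_MASK) << (4 * pci_fn);
}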
2810 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2812 if (!tg3_flag(tp, IS_NIC))
2815 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2816 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2817 tg3_asic_rev(tp) == ASIC_REV_5720) {
2818 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2821 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2823 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2824 TG3_GRC_LCLCTL_PWRSW_DELAY);
2826 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2828 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2835 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2839 if (!tg3_flag(tp, IS_NIC) ||
2840 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2841 tg3_asic_rev(tp) == ASIC_REV_5701)
2844 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2846 tw32_wait_f(GRC_LOCAL_CTRL,
2847 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2848 TG3_GRC_LCLCTL_PWRSW_DELAY);
2850 tw32_wait_f(GRC_LOCAL_CTRL,
2852 TG3_GRC_LCLCTL_PWRSW_DELAY);
2854 tw32_wait_f(GRC_LOCAL_CTRL,
2855 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2856 TG3_GRC_LCLCTL_PWRSW_DELAY);
2859 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2861 if (!tg3_flag(tp, IS_NIC))
2864 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2865 tg3_asic_rev(tp) == ASIC_REV_5701) {
2866 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2867 (GRC_LCLCTRL_GPIO_OE0 |
2868 GRC_LCLCTRL_GPIO_OE1 |
2869 GRC_LCLCTRL_GPIO_OE2 |
2870 GRC_LCLCTRL_GPIO_OUTPUT0 |
2871 GRC_LCLCTRL_GPIO_OUTPUT1),
2872 TG3_GRC_LCLCTL_PWRSW_DELAY);
2873 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2874 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2875 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2876 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2877 GRC_LCLCTRL_GPIO_OE1 |
2878 GRC_LCLCTRL_GPIO_OE2 |
2879 GRC_LCLCTRL_GPIO_OUTPUT0 |
2880 GRC_LCLCTRL_GPIO_OUTPUT1 |
2882 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2883 TG3_GRC_LCLCTL_PWRSW_DELAY);
2885 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2886 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887 TG3_GRC_LCLCTL_PWRSW_DELAY);
2889 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2890 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891 TG3_GRC_LCLCTL_PWRSW_DELAY);
2894 u32 grc_local_ctrl = 0;
2896 /* Workaround to prevent overdrawing Amps. */
2897 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2899 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2901 TG3_GRC_LCLCTL_PWRSW_DELAY);
2904 /* On 5753 and variants, GPIO2 cannot be used. */
2905 no_gpio2 = tp->nic_sram_data_cfg &
2906 NIC_SRAM_DATA_CFG_NO_GPIO2;
2908 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2909 GRC_LCLCTRL_GPIO_OE1 |
2910 GRC_LCLCTRL_GPIO_OE2 |
2911 GRC_LCLCTRL_GPIO_OUTPUT1 |
2912 GRC_LCLCTRL_GPIO_OUTPUT2;
2914 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2915 GRC_LCLCTRL_GPIO_OUTPUT2);
2917 tw32_wait_f(GRC_LOCAL_CTRL,
2918 tp->grc_local_ctrl | grc_local_ctrl,
2919 TG3_GRC_LCLCTL_PWRSW_DELAY);
2921 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2923 tw32_wait_f(GRC_LOCAL_CTRL,
2924 tp->grc_local_ctrl | grc_local_ctrl,
2925 TG3_GRC_LCLCTL_PWRSW_DELAY);
2928 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2929 tw32_wait_f(GRC_LOCAL_CTRL,
2930 tp->grc_local_ctrl | grc_local_ctrl,
2931 TG3_GRC_LCLCTL_PWRSW_DELAY);
2936 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2940 /* Serialize power state transitions */
2941 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2944 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2945 msg = TG3_GPIO_MSG_NEED_VAUX;
2947 msg = tg3_set_function_status(tp, msg);
2949 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2952 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2953 tg3_pwrsrc_switch_to_vaux(tp);
2955 tg3_pwrsrc_die_with_vmain(tp);
2958 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2961 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2963 bool need_vaux = false;
2965 /* The GPIOs do something completely different on 57765. */
2966 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2969 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2970 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2971 tg3_asic_rev(tp) == ASIC_REV_5720) {
2972 tg3_frob_aux_power_5717(tp, include_wol ?
2973 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2977 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2978 struct net_device *dev_peer;
2980 dev_peer = pci_get_drvdata(tp->pdev_peer);
2982 /* remove_one() may have been run on the peer. */
2984 struct tg3 *tp_peer = netdev_priv(dev_peer);
2986 if (tg3_flag(tp_peer, INIT_COMPLETE))
2989 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2990 tg3_flag(tp_peer, ENABLE_ASF))
2995 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2996 tg3_flag(tp, ENABLE_ASF))
3000 tg3_pwrsrc_switch_to_vaux(tp);
3002 tg3_pwrsrc_die_with_vmain(tp);
3005 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3007 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3009 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3010 if (speed != SPEED_10)
3012 } else if (speed == SPEED_10)
3018 static bool tg3_phy_power_bug(struct tg3 *tp)
3020 switch (tg3_asic_rev(tp)) {
3025 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3034 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3043 static bool tg3_phy_led_bug(struct tg3 *tp)
3045 switch (tg3_asic_rev(tp)) {
3048 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3057 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3061 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3064 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3065 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3066 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3067 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3070 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3071 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3072 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3077 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3079 val = tr32(GRC_MISC_CFG);
3080 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3083 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3085 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3088 tg3_writephy(tp, MII_ADVERTISE, 0);
3089 tg3_writephy(tp, MII_BMCR,
3090 BMCR_ANENABLE | BMCR_ANRESTART);
3092 tg3_writephy(tp, MII_TG3_FET_TEST,
3093 phytest | MII_TG3_FET_SHADOW_EN);
3094 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3095 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3097 MII_TG3_FET_SHDW_AUXMODE4,
3100 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3103 } else if (do_low_power) {
3104 if (!tg3_phy_led_bug(tp))
3105 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3106 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3108 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3109 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3110 MII_TG3_AUXCTL_PCTL_VREG_11V;
3111 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3114 /* The PHY should not be powered down on some chips because
3117 if (tg3_phy_power_bug(tp))
3120 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3121 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3122 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3123 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3124 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3125 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3128 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3131 /* tp->lock is held. */
3132 static int tg3_nvram_lock(struct tg3 *tp)
3134 if (tg3_flag(tp, NVRAM)) {
3137 if (tp->nvram_lock_cnt == 0) {
3138 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3139 for (i = 0; i < 8000; i++) {
3140 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3145 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3149 tp->nvram_lock_cnt++;
3154 /* tp->lock is held. */
3155 static void tg3_nvram_unlock(struct tg3 *tp)
3157 if (tg3_flag(tp, NVRAM)) {
3158 if (tp->nvram_lock_cnt > 0)
3159 tp->nvram_lock_cnt--;
3160 if (tp->nvram_lock_cnt == 0)
3161 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
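/* Illustrative sketch (not part of the driver): tg3_nvram_lock() and
 * tg3_nvram_unlock() nest. Only the outermost lock issues the SWARB
 * arbitration request and only the final unlock clears it;
 * nvram_lock_cnt tracks the depth. A hypothetical caller, with
 * tp->lock assumed held as the helpers require:
 */
static int tg3_nvram_lock_nesting_sketch(struct tg3 *tp)
{
	int err = tg3_nvram_lock(tp);	/* outermost: requests SWARB_REQ_SET1 */

	if (err)
		return err;

	tg3_nvram_lock(tp);	/* nested: only bumps the count */
	tg3_nvram_unlock(tp);	/* drops the count, keeps the grant */
	tg3_nvram_unlock(tp);	/* outermost: clears the arbitration */
	return 0;
}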
3165 /* tp->lock is held. */
3166 static void tg3_enable_nvram_access(struct tg3 *tp)
3168 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3169 u32 nvaccess = tr32(NVRAM_ACCESS);
3171 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3175 /* tp->lock is held. */
3176 static void tg3_disable_nvram_access(struct tg3 *tp)
3178 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3179 u32 nvaccess = tr32(NVRAM_ACCESS);
3181 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3185 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3186 u32 offset, u32 *val)
3191 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3194 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3195 EEPROM_ADDR_DEVID_MASK |
3197 tw32(GRC_EEPROM_ADDR,
3199 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3200 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3201 EEPROM_ADDR_ADDR_MASK) |
3202 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3204 for (i = 0; i < 1000; i++) {
3205 tmp = tr32(GRC_EEPROM_ADDR);
3207 if (tmp & EEPROM_ADDR_COMPLETE)
3211 if (!(tmp & EEPROM_ADDR_COMPLETE))
3214 tmp = tr32(GRC_EEPROM_DATA);
3216 /*
3217 * The data will always be opposite the native endian
3218 * format. Perform a blind byteswap to compensate.
3219 */
3220 *val = swab32(tmp);
3225 #define NVRAM_CMD_TIMEOUT 5000
3227 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3231 tw32(NVRAM_CMD, nvram_cmd);
3232 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3233 usleep_range(10, 40);
3234 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3240 if (i == NVRAM_CMD_TIMEOUT)
3246 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3248 if (tg3_flag(tp, NVRAM) &&
3249 tg3_flag(tp, NVRAM_BUFFERED) &&
3250 tg3_flag(tp, FLASH) &&
3251 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3252 (tp->nvram_jedecnum == JEDEC_ATMEL))
3254 addr = ((addr / tp->nvram_pagesize) <<
3255 ATMEL_AT45DB0X1B_PAGE_POS) +
3256 (addr % tp->nvram_pagesize);
3261 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3263 if (tg3_flag(tp, NVRAM) &&
3264 tg3_flag(tp, NVRAM_BUFFERED) &&
3265 tg3_flag(tp, FLASH) &&
3266 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3267 (tp->nvram_jedecnum == JEDEC_ATMEL))
3269 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3270 tp->nvram_pagesize) +
3271 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
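/* Illustrative sketch (not part of the driver): the Atmel AT45DB0x1B
 * translation above with concrete numbers. These parts are assumed to
 * use 264-byte pages addressed on a 1 << ATMEL_AT45DB0X1B_PAGE_POS
 * stride (512 bytes if the shift is 9), so physical and logical
 * addresses differ. All figures below are illustrative.
 */
static void tg3_nvram_addr_xlat_sketch(void)
{
	u32 pagesize = 264;	/* assumed AT45DB0x1B page size */
	u32 addr = 1000;	/* arbitrary logical byte offset */
	u32 phys = ((addr / pagesize) << ATMEL_AT45DB0X1B_PAGE_POS) +
		   (addr % pagesize);

	/* page 3, offset 208; tg3_nvram_logical_addr() inverts this. */
	pr_info("logical %u -> physical 0x%x\n", addr, phys);
}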
3276 /* NOTE: Data read in from NVRAM is byteswapped according to
3277 * the byteswapping settings for all other register accesses.
3278 * tg3 devices are BE devices, so on a BE machine, the data
3279 * returned will be exactly as it is seen in NVRAM. On a LE
3280 * machine, the 32-bit value will be byteswapped.
3282 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3286 if (!tg3_flag(tp, NVRAM))
3287 return tg3_nvram_read_using_eeprom(tp, offset, val);
3289 offset = tg3_nvram_phys_addr(tp, offset);
3291 if (offset > NVRAM_ADDR_MSK)
3294 ret = tg3_nvram_lock(tp);
3298 tg3_enable_nvram_access(tp);
3300 tw32(NVRAM_ADDR, offset);
3301 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3302 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3305 *val = tr32(NVRAM_RDDATA);
3307 tg3_disable_nvram_access(tp);
3309 tg3_nvram_unlock(tp);
3314 /* Ensures NVRAM data is in bytestream format. */
3315 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3318 int res = tg3_nvram_read(tp, offset, &v);
3320 *val = cpu_to_be32(v);
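/* Illustrative sketch (not part of the driver): reading NVRAM.
 * tg3_nvram_read() returns a value in register-access byte order,
 * while tg3_nvram_read_be32() always yields the bytestream
 * (big-endian) layout that checksum and parsing code wants. The
 * offset is arbitrary and tp->lock is assumed held.
 */
static int tg3_nvram_read_sketch(struct tg3 *tp)
{
	__be32 be_val;
	int err = tg3_nvram_read_be32(tp, 0x0, &be_val);

	if (!err)
		pr_info("first NVRAM word: %*phN\n", 4, &be_val);
	return err;
}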
3324 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3325 u32 offset, u32 len, u8 *buf)
3330 for (i = 0; i < len; i += 4) {
3336 memcpy(&data, buf + i, 4);
3339 * The SEEPROM interface expects the data to always be opposite
3340 * the native endian format. We accomplish this by reversing
3341 * all the operations that would have been performed on the
3342 * data from a call to tg3_nvram_read_be32().
3344 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3346 val = tr32(GRC_EEPROM_ADDR);
3347 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3349 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3351 tw32(GRC_EEPROM_ADDR, val |
3352 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3353 (addr & EEPROM_ADDR_ADDR_MASK) |
3357 for (j = 0; j < 1000; j++) {
3358 val = tr32(GRC_EEPROM_ADDR);
3360 if (val & EEPROM_ADDR_COMPLETE)
3364 if (!(val & EEPROM_ADDR_COMPLETE)) {
3373 /* offset and length are dword aligned */
3374 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3378 u32 pagesize = tp->nvram_pagesize;
3379 u32 pagemask = pagesize - 1;
3383 tmp = kmalloc(pagesize, GFP_KERNEL);
3389 u32 phy_addr, page_off, size;
3391 phy_addr = offset & ~pagemask;
3393 for (j = 0; j < pagesize; j += 4) {
3394 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3395 (__be32 *) (tmp + j));
3402 page_off = offset & pagemask;
3409 memcpy(tmp + page_off, buf, size);
3411 offset = offset + (pagesize - page_off);
3413 tg3_enable_nvram_access(tp);
3416 * Before we can erase the flash page, we need
3417 * to issue a special "write enable" command.
3419 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3421 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3424 /* Erase the target page */
3425 tw32(NVRAM_ADDR, phy_addr);
3427 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3428 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3430 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3433 /* Issue another write enable to start the write. */
3434 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3436 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3439 for (j = 0; j < pagesize; j += 4) {
3442 data = *((__be32 *) (tmp + j));
3444 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3446 tw32(NVRAM_ADDR, phy_addr + j);
3448 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3452 nvram_cmd |= NVRAM_CMD_FIRST;
3453 else if (j == (pagesize - 4))
3454 nvram_cmd |= NVRAM_CMD_LAST;
3456 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3464 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3465 tg3_nvram_exec_cmd(tp, nvram_cmd);
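/* Illustrative sketch (not part of the driver): the page arithmetic
 * behind the read-modify-write loop above. A write that starts
 * mid-page reads back the whole enclosing page, patches it, then
 * erases and reprograms it. The 256-byte page size and the min()
 * bound below are this sketch's own assumptions.
 */
static void tg3_nvram_page_rmw_sketch(void)
{
	u32 pagesize = 256, pagemask = pagesize - 1;
	u32 offset = 0x130, len = 16;	/* arbitrary request */
	u32 phy_addr = offset & ~pagemask;	/* 0x100: page base */
	u32 page_off = offset & pagemask;	/* 0x030: patch start */
	u32 size = min(len, pagesize - page_off); /* bytes patched here */

	pr_info("patch page 0x%x at +0x%x for %u bytes\n",
		phy_addr, page_off, size);
}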
3472 /* offset and length are dword aligned */
3473 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3478 for (i = 0; i < len; i += 4, offset += 4) {
3479 u32 page_off, phy_addr, nvram_cmd;
3482 memcpy(&data, buf + i, 4);
3483 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3485 page_off = offset % tp->nvram_pagesize;
3487 phy_addr = tg3_nvram_phys_addr(tp, offset);
3489 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3491 if (page_off == 0 || i == 0)
3492 nvram_cmd |= NVRAM_CMD_FIRST;
3493 if (page_off == (tp->nvram_pagesize - 4))
3494 nvram_cmd |= NVRAM_CMD_LAST;
3497 nvram_cmd |= NVRAM_CMD_LAST;
3499 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3500 !tg3_flag(tp, FLASH) ||
3501 !tg3_flag(tp, 57765_PLUS))
3502 tw32(NVRAM_ADDR, phy_addr);
3504 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3505 !tg3_flag(tp, 5755_PLUS) &&
3506 (tp->nvram_jedecnum == JEDEC_ST) &&
3507 (nvram_cmd & NVRAM_CMD_FIRST)) {
3510 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3511 ret = tg3_nvram_exec_cmd(tp, cmd);
3515 if (!tg3_flag(tp, FLASH)) {
3516 /* We always do complete word writes to eeprom. */
3517 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3520 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3527 /* offset and length are dword aligned */
3528 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3532 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3533 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3534 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3538 if (!tg3_flag(tp, NVRAM)) {
3539 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3543 ret = tg3_nvram_lock(tp);
3547 tg3_enable_nvram_access(tp);
3548 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3549 tw32(NVRAM_WRITE1, 0x406);
3551 grc_mode = tr32(GRC_MODE);
3552 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3554 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3555 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3558 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3562 grc_mode = tr32(GRC_MODE);
3563 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3565 tg3_disable_nvram_access(tp);
3566 tg3_nvram_unlock(tp);
3569 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3570 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
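/* Illustrative sketch (not part of the driver): NVRAM writes from a
 * caller's view. tg3_nvram_write_block() internally handles the
 * write-protect GPIO, arbitration, access enable and the
 * buffered/unbuffered split; callers supply dword-aligned offset and
 * length, with tp->lock assumed held. Offset and payload below are
 * arbitrary.
 */
static int tg3_nvram_write_sketch(struct tg3 *tp)
{
	u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };

	return tg3_nvram_write_block(tp, 0x80, sizeof(buf), buf);
}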
3577 #define RX_CPU_SCRATCH_BASE 0x30000
3578 #define RX_CPU_SCRATCH_SIZE 0x04000
3579 #define TX_CPU_SCRATCH_BASE 0x34000
3580 #define TX_CPU_SCRATCH_SIZE 0x04000
3582 /* tp->lock is held. */
3583 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3586 const int iters = 10000;
3588 for (i = 0; i < iters; i++) {
3589 tw32(cpu_base + CPU_STATE, 0xffffffff);
3590 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3591 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3593 if (pci_channel_offline(tp->pdev))
3597 return (i == iters) ? -EBUSY : 0;
3600 /* tp->lock is held. */
3601 static int tg3_rxcpu_pause(struct tg3 *tp)
3603 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3605 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3606 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3612 /* tp->lock is held. */
3613 static int tg3_txcpu_pause(struct tg3 *tp)
3615 return tg3_pause_cpu(tp, TX_CPU_BASE);
3618 /* tp->lock is held. */
3619 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3621 tw32(cpu_base + CPU_STATE, 0xffffffff);
3622 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3625 /* tp->lock is held. */
3626 static void tg3_rxcpu_resume(struct tg3 *tp)
3628 tg3_resume_cpu(tp, RX_CPU_BASE);
3631 /* tp->lock is held. */
3632 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3636 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3638 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3639 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3641 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3644 if (cpu_base == RX_CPU_BASE) {
3645 rc = tg3_rxcpu_pause(tp);
3648 * There is only an Rx CPU for the 5750 derivative in the
3649 * BCM4785.
3650 */
3651 if (tg3_flag(tp, IS_SSB_CORE))
3654 rc = tg3_txcpu_pause(tp);
3658 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3659 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3663 /* Clear firmware's nvram arbitration. */
3664 if (tg3_flag(tp, NVRAM))
3665 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3669 static int tg3_fw_data_len(struct tg3 *tp,
3670 const struct tg3_firmware_hdr *fw_hdr)
3674 /* Non-fragmented firmware has one firmware header followed by a
3675 * contiguous chunk of data to be written. The length field in that
3676 * header is not the length of data to be written but the complete
3677 * length of the bss. The data length is determined from
3678 * tp->fw->size minus the headers.
3680 * Fragmented firmware has a main header followed by multiple
3681 * fragments. Each fragment is identical to non-fragmented firmware,
3682 * with a firmware header followed by a contiguous chunk of data. In
3683 * the main header, the length field is unused and set to 0xffffffff.
3684 * In each fragment header the length is the entire size of that
3685 * fragment, i.e. fragment data + header length. The data length is
3686 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3688 if (tp->fw_len == 0xffffffff)
3689 fw_len = be32_to_cpu(fw_hdr->len);
3691 fw_len = tp->fw->size;
3693 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
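/* Illustrative sketch (not part of the driver): the two length
 * computations in tg3_fw_data_len(), with hypothetical numbers.
 * TG3_FW_HDR_LEN is assumed to be the size of the (version,
 * base_addr, len) header described above.
 */
static void tg3_fw_len_sketch(void)
{
	/* Non-fragmented: one header plus data, so the data length is
	 * tp->fw->size - TG3_FW_HDR_LEN. Fragmented (main header len
	 * of 0xffffffff): each fragment's len field covers header plus
	 * data, so the data length is that len minus TG3_FW_HDR_LEN.
	 */
	u32 frag_len = 0x10c;	/* hypothetical fragment len field */
	u32 data_words = (frag_len - TG3_FW_HDR_LEN) / sizeof(u32);

	pr_info("fragment carries %u data words\n", data_words);
}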
3696 /* tp->lock is held. */
3697 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3698 u32 cpu_scratch_base, int cpu_scratch_size,
3699 const struct tg3_firmware_hdr *fw_hdr)
3702 void (*write_op)(struct tg3 *, u32, u32);
3703 int total_len = tp->fw->size;
3705 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3707 "%s: Trying to load TX cpu firmware which is 5705\n",
3712 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3713 write_op = tg3_write_mem;
3715 write_op = tg3_write_indirect_reg32;
3717 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3718 /* It is possible that bootcode is still loading at this point.
3719 * Get the nvram lock before halting the cpu.
3721 int lock_err = tg3_nvram_lock(tp);
3722 err = tg3_halt_cpu(tp, cpu_base);
3724 tg3_nvram_unlock(tp);
3728 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3729 write_op(tp, cpu_scratch_base + i, 0);
3730 tw32(cpu_base + CPU_STATE, 0xffffffff);
3731 tw32(cpu_base + CPU_MODE,
3732 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3734 /* Subtract additional main header for fragmented firmware and
3735 * advance to the first fragment
3737 total_len -= TG3_FW_HDR_LEN;
3742 u32 *fw_data = (u32 *)(fw_hdr + 1);
3743 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3744 write_op(tp, cpu_scratch_base +
3745 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3747 be32_to_cpu(fw_data[i]));
3749 total_len -= be32_to_cpu(fw_hdr->len);
3751 /* Advance to next fragment */
3752 fw_hdr = (struct tg3_firmware_hdr *)
3753 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3754 } while (total_len > 0);
3762 /* tp->lock is held. */
3763 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3766 const int iters = 5;
3768 tw32(cpu_base + CPU_STATE, 0xffffffff);
3769 tw32_f(cpu_base + CPU_PC, pc);
3771 for (i = 0; i < iters; i++) {
3772 if (tr32(cpu_base + CPU_PC) == pc)
3774 tw32(cpu_base + CPU_STATE, 0xffffffff);
3775 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3776 tw32_f(cpu_base + CPU_PC, pc);
3780 return (i == iters) ? -EBUSY : 0;
3783 /* tp->lock is held. */
3784 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3786 const struct tg3_firmware_hdr *fw_hdr;
3789 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3791 /* Firmware blob starts with version numbers, followed by
3792 * start address and length. We are setting complete length.
3793 * length = end_address_of_bss - start_address_of_text.
3794 * Remainder is the blob to be loaded contiguously
3795 * from start address. */
3797 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3798 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3803 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3804 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3809 /* Now startup only the RX cpu. */
3810 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3811 be32_to_cpu(fw_hdr->base_addr));
3813 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3814 "should be %08x\n", __func__,
3815 tr32(RX_CPU_BASE + CPU_PC),
3816 be32_to_cpu(fw_hdr->base_addr));
3820 tg3_rxcpu_resume(tp);
3825 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3827 const int iters = 1000;
3831 /* Wait for boot code to complete initialization and enter service
3832 * loop. It is then safe to download service patches
3834 for (i = 0; i < iters; i++) {
3835 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3842 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3846 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3848 netdev_warn(tp->dev,
3849 "Other patches exist. Not downloading EEE patch\n");
3856 /* tp->lock is held. */
3857 static void tg3_load_57766_firmware(struct tg3 *tp)
3859 struct tg3_firmware_hdr *fw_hdr;
3861 if (!tg3_flag(tp, NO_NVRAM))
3864 if (tg3_validate_rxcpu_state(tp))
3870 /* This firmware blob has a different format from older firmware
3871 * releases, as described below. The main difference is that the
3872 * data is fragmented and written to non-contiguous locations.
3874 * It begins with a firmware header identical to other firmware,
3875 * consisting of version, base addr and length. The length field
3876 * here is unused and set to 0xffffffff.
3878 * This is followed by a series of firmware fragments, each
3879 * individually identical to the older firmware format, i.e. a
3880 * firmware header followed by the data for that fragment. The
3881 * version field of each fragment header is unused.
3884 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3885 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3888 if (tg3_rxcpu_pause(tp))
3891 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3892 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3894 tg3_rxcpu_resume(tp);
3897 /* tp->lock is held. */
3898 static int tg3_load_tso_firmware(struct tg3 *tp)
3900 const struct tg3_firmware_hdr *fw_hdr;
3901 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3904 if (!tg3_flag(tp, FW_TSO))
3907 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3909 /* Firmware blob starts with version numbers, followed by
3910 * start address and length. We are setting complete length.
3911 * length = end_address_of_bss - start_address_of_text.
3912 * Remainder is the blob to be loaded contiguously
3913 * from start address. */
3915 cpu_scratch_size = tp->fw_len;
3917 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3918 cpu_base = RX_CPU_BASE;
3919 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3921 cpu_base = TX_CPU_BASE;
3922 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3923 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3926 err = tg3_load_firmware_cpu(tp, cpu_base,
3927 cpu_scratch_base, cpu_scratch_size,
3932 /* Now startup the cpu. */
3933 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3934 be32_to_cpu(fw_hdr->base_addr));
3937 "%s fails to set CPU PC, is %08x should be %08x\n",
3938 __func__, tr32(cpu_base + CPU_PC),
3939 be32_to_cpu(fw_hdr->base_addr));
3943 tg3_resume_cpu(tp, cpu_base);
3947 /* tp->lock is held. */
3948 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3950 u32 addr_high, addr_low;
3952 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954 (mac_addr[4] << 8) | mac_addr[5]);
3957 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3961 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
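/* Illustrative sketch (not part of the driver): how a station address
 * splits across the high/low registers above. For 00:11:22:33:44:55
 * the high word carries the first two octets, the low word the
 * remaining four.
 */
static void tg3_mac_addr_pack_sketch(void)
{
	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u32 hi = (mac[0] << 8) | mac[1];	/* 0x00000011 */
	u32 lo = (mac[2] << 24) | (mac[3] << 16) |
		 (mac[4] << 8) | mac[5];	/* 0x22334455 */

	pr_info("MAC_ADDR high 0x%08x low 0x%08x\n", hi, lo);
}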
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3972 for (i = 0; i < 4; i++) {
3973 if (i == 1 && skip_mac_1)
3975 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3978 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979 tg3_asic_rev(tp) == ASIC_REV_5704) {
3980 for (i = 4; i < 16; i++)
3981 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3984 addr_high = (tp->dev->dev_addr[0] +
3985 tp->dev->dev_addr[1] +
3986 tp->dev->dev_addr[2] +
3987 tp->dev->dev_addr[3] +
3988 tp->dev->dev_addr[4] +
3989 tp->dev->dev_addr[5]) &
3990 TX_BACKOFF_SEED_MASK;
3991 tw32(MAC_TX_BACKOFF_SEED, addr_high);
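/* Illustrative sketch (not part of the driver): the TX backoff seed
 * above is just the byte-wise sum of the station address masked to
 * TX_BACKOFF_SEED_MASK, presumably so that stations with different
 * addresses seed their collision backoff differently.
 */
static u32 tg3_backoff_seed_sketch(const u8 *mac)
{
	return (mac[0] + mac[1] + mac[2] +
		mac[3] + mac[4] + mac[5]) & TX_BACKOFF_SEED_MASK;
}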
3994 static void tg3_enable_register_access(struct tg3 *tp)
3997 * Make sure register accesses (indirect or otherwise) will function
4000 pci_write_config_dword(tp->pdev,
4001 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4004 static int tg3_power_up(struct tg3 *tp)
4008 tg3_enable_register_access(tp);
4010 err = pci_set_power_state(tp->pdev, PCI_D0);
4012 /* Switch out of Vaux if it is a NIC */
4013 tg3_pwrsrc_switch_to_vmain(tp);
4015 netdev_err(tp->dev, "Transition to D0 failed\n");
4021 static int tg3_setup_phy(struct tg3 *, bool);
4023 static int tg3_power_down_prepare(struct tg3 *tp)
4026 bool device_should_wake, do_low_power;
4028 tg3_enable_register_access(tp);
4030 /* Restore the CLKREQ setting. */
4031 if (tg3_flag(tp, CLKREQ_BUG))
4032 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033 PCI_EXP_LNKCTL_CLKREQ_EN);
4035 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036 tw32(TG3PCI_MISC_HOST_CTRL,
4037 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4039 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040 tg3_flag(tp, WOL_ENABLE);
4042 if (tg3_flag(tp, USE_PHYLIB)) {
4043 do_low_power = false;
4044 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046 struct phy_device *phydev;
4047 u32 phyid, advertising;
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053 tp->link_config.speed = phydev->speed;
4054 tp->link_config.duplex = phydev->duplex;
4055 tp->link_config.autoneg = phydev->autoneg;
4056 tp->link_config.advertising = phydev->advertising;
4058 advertising = ADVERTISED_TP |
4060 ADVERTISED_Autoneg |
4061 ADVERTISED_10baseT_Half;
4063 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4064 if (tg3_flag(tp, WOL_SPEED_100MB))
4066 ADVERTISED_100baseT_Half |
4067 ADVERTISED_100baseT_Full |
4068 ADVERTISED_10baseT_Full;
4070 advertising |= ADVERTISED_10baseT_Full;
4073 phydev->advertising = advertising;
4075 phy_start_aneg(phydev);
4077 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4078 if (phyid != PHY_ID_BCMAC131) {
4079 phyid &= PHY_BCM_OUI_MASK;
4080 if (phyid == PHY_BCM_OUI_1 ||
4081 phyid == PHY_BCM_OUI_2 ||
4082 phyid == PHY_BCM_OUI_3)
4083 do_low_power = true;
4087 do_low_power = true;
4089 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4090 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4092 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4093 tg3_setup_phy(tp, false);
4096 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4099 val = tr32(GRC_VCPU_EXT_CTRL);
4100 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4101 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4105 for (i = 0; i < 200; i++) {
4106 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4107 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4112 if (tg3_flag(tp, WOL_CAP))
4113 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4114 WOL_DRV_STATE_SHUTDOWN |
4118 if (device_should_wake) {
4121 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4123 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4124 tg3_phy_auxctl_write(tp,
4125 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4126 MII_TG3_AUXCTL_PCTL_WOL_EN |
4127 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4128 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4132 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4133 mac_mode = MAC_MODE_PORT_MODE_GMII;
4134 else if (tp->phy_flags &
4135 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4136 if (tp->link_config.active_speed == SPEED_1000)
4137 mac_mode = MAC_MODE_PORT_MODE_GMII;
4139 mac_mode = MAC_MODE_PORT_MODE_MII;
4141 mac_mode = MAC_MODE_PORT_MODE_MII;
4143 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4144 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4145 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4146 SPEED_100 : SPEED_10;
4147 if (tg3_5700_link_polarity(tp, speed))
4148 mac_mode |= MAC_MODE_LINK_POLARITY;
4150 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4153 mac_mode = MAC_MODE_PORT_MODE_TBI;
4156 if (!tg3_flag(tp, 5750_PLUS))
4157 tw32(MAC_LED_CTRL, tp->led_ctrl);
4159 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4160 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4161 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4162 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4164 if (tg3_flag(tp, ENABLE_APE))
4165 mac_mode |= MAC_MODE_APE_TX_EN |
4166 MAC_MODE_APE_RX_EN |
4167 MAC_MODE_TDE_ENABLE;
4169 tw32_f(MAC_MODE, mac_mode);
4172 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4176 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4177 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4178 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4181 base_val = tp->pci_clock_ctrl;
4182 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4183 CLOCK_CTRL_TXCLK_DISABLE);
4185 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4186 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4187 } else if (tg3_flag(tp, 5780_CLASS) ||
4188 tg3_flag(tp, CPMU_PRESENT) ||
4189 tg3_asic_rev(tp) == ASIC_REV_5906) {
4191 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4192 u32 newbits1, newbits2;
4194 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4195 tg3_asic_rev(tp) == ASIC_REV_5701) {
4196 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4197 CLOCK_CTRL_TXCLK_DISABLE |
4199 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4200 } else if (tg3_flag(tp, 5705_PLUS)) {
4201 newbits1 = CLOCK_CTRL_625_CORE;
4202 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4204 newbits1 = CLOCK_CTRL_ALTCLK;
4205 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4211 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4214 if (!tg3_flag(tp, 5705_PLUS)) {
4217 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4218 tg3_asic_rev(tp) == ASIC_REV_5701) {
4219 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4220 CLOCK_CTRL_TXCLK_DISABLE |
4221 CLOCK_CTRL_44MHZ_CORE);
4223 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4226 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4227 tp->pci_clock_ctrl | newbits3, 40);
4231 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4232 tg3_power_down_phy(tp, do_low_power);
4234 tg3_frob_aux_power(tp, true);
4236 /* Workaround for unstable PLL clock */
4237 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4238 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4239 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4240 u32 val = tr32(0x7d00);
4242 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4244 if (!tg3_flag(tp, ENABLE_ASF)) {
4247 err = tg3_nvram_lock(tp);
4248 tg3_halt_cpu(tp, RX_CPU_BASE);
4250 tg3_nvram_unlock(tp);
4254 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4256 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4261 static void tg3_power_down(struct tg3 *tp)
4263 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4264 pci_set_power_state(tp->pdev, PCI_D3hot);
4267 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4269 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4270 case MII_TG3_AUX_STAT_10HALF:
4272 *duplex = DUPLEX_HALF;
4275 case MII_TG3_AUX_STAT_10FULL:
4277 *duplex = DUPLEX_FULL;
4280 case MII_TG3_AUX_STAT_100HALF:
4282 *duplex = DUPLEX_HALF;
4285 case MII_TG3_AUX_STAT_100FULL:
4287 *duplex = DUPLEX_FULL;
4290 case MII_TG3_AUX_STAT_1000HALF:
4291 *speed = SPEED_1000;
4292 *duplex = DUPLEX_HALF;
4295 case MII_TG3_AUX_STAT_1000FULL:
4296 *speed = SPEED_1000;
4297 *duplex = DUPLEX_FULL;
4301 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4302 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4304 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4308 *speed = SPEED_UNKNOWN;
4309 *duplex = DUPLEX_UNKNOWN;
4314 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4319 new_adv = ADVERTISE_CSMA;
4320 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4321 new_adv |= mii_advertise_flowctrl(flowctrl);
4323 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4327 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4328 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4330 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4331 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4332 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4334 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4339 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4342 tw32(TG3_CPMU_EEE_MODE,
4343 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4345 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4350 /* Advertise 100-BaseTX EEE ability */
4351 if (advertise & ADVERTISED_100baseT_Full)
4352 val |= MDIO_AN_EEE_ADV_100TX;
4353 /* Advertise 1000-BaseT EEE ability */
4354 if (advertise & ADVERTISED_1000baseT_Full)
4355 val |= MDIO_AN_EEE_ADV_1000T;
4357 if (!tp->eee.eee_enabled) {
4359 tp->eee.advertised = 0;
4361 tp->eee.advertised = advertise &
4362 (ADVERTISED_100baseT_Full |
4363 ADVERTISED_1000baseT_Full);
4366 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4370 switch (tg3_asic_rev(tp)) {
4372 case ASIC_REV_57765:
4373 case ASIC_REV_57766:
4375 /* If we advertised any eee advertisements above... */
4377 val = MII_TG3_DSP_TAP26_ALNOKO |
4378 MII_TG3_DSP_TAP26_RMRXSTO |
4379 MII_TG3_DSP_TAP26_OPCSINPT;
4380 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4384 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4385 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4386 MII_TG3_DSP_CH34TP2_HIBW01);
4389 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
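/* Illustrative sketch (not part of the driver): the ethtool-to-MDIO
 * EEE advertisement mapping used above. MDIO_AN_EEE_ADV (clause 45,
 * AN device, register 60) carries one bit per EEE-capable mode; this
 * driver only ever sets the 100TX and 1000T bits.
 */
static u16 tg3_eee_adv_sketch(u32 advertise)
{
	u16 val = 0;

	if (advertise & ADVERTISED_100baseT_Full)
		val |= MDIO_AN_EEE_ADV_100TX;
	if (advertise & ADVERTISED_1000baseT_Full)
		val |= MDIO_AN_EEE_ADV_1000T;
	return val;
}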
4398 static void tg3_phy_copper_begin(struct tg3 *tp)
4400 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4401 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4404 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4405 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4406 adv = ADVERTISED_10baseT_Half |
4407 ADVERTISED_10baseT_Full;
4408 if (tg3_flag(tp, WOL_SPEED_100MB))
4409 adv |= ADVERTISED_100baseT_Half |
4410 ADVERTISED_100baseT_Full;
4411 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4412 if (!(tp->phy_flags &
4413 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4414 adv |= ADVERTISED_1000baseT_Half;
4415 adv |= ADVERTISED_1000baseT_Full;
4418 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4420 adv = tp->link_config.advertising;
4421 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4422 adv &= ~(ADVERTISED_1000baseT_Half |
4423 ADVERTISED_1000baseT_Full);
4425 fc = tp->link_config.flowctrl;
4428 tg3_phy_autoneg_cfg(tp, adv, fc);
4430 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4431 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4432 /* Normally during power down we want to autonegotiate
4433 * the lowest possible speed for WOL. However, to avoid
4434 * link flap, we leave it untouched.
4439 tg3_writephy(tp, MII_BMCR,
4440 BMCR_ANENABLE | BMCR_ANRESTART);
4443 u32 bmcr, orig_bmcr;
4445 tp->link_config.active_speed = tp->link_config.speed;
4446 tp->link_config.active_duplex = tp->link_config.duplex;
4448 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4449 /* With autoneg disabled, 5715 only links up when the
4450 * advertisement register has the configured speed
4451 * enabled.
4452 */
4453 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4457 switch (tp->link_config.speed) {
4463 bmcr |= BMCR_SPEED100;
4467 bmcr |= BMCR_SPEED1000;
4471 if (tp->link_config.duplex == DUPLEX_FULL)
4472 bmcr |= BMCR_FULLDPLX;
4474 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4475 (bmcr != orig_bmcr)) {
4476 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4477 for (i = 0; i < 1500; i++) {
4481 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4482 tg3_readphy(tp, MII_BMSR, &tmp))
4484 if (!(tmp & BMSR_LSTATUS)) {
4489 tg3_writephy(tp, MII_BMCR, bmcr);
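/* Illustrative sketch (not part of the driver): the forced-mode BMCR
 * values the loop above programs. With autoneg off, speed and duplex
 * come solely from these bits; SPEED_10 leaves both speed bits clear.
 */
static u32 tg3_forced_bmcr_sketch(u32 speed, u8 duplex)
{
	u32 bmcr = 0;

	if (speed == SPEED_100)
		bmcr |= BMCR_SPEED100;
	else if (speed == SPEED_1000)
		bmcr |= BMCR_SPEED1000;
	if (duplex == DUPLEX_FULL)
		bmcr |= BMCR_FULLDPLX;
	return bmcr;	/* 100/full -> BMCR_SPEED100 | BMCR_FULLDPLX */
}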
4495 static int tg3_phy_pull_config(struct tg3 *tp)
4500 err = tg3_readphy(tp, MII_BMCR, &val);
4504 if (!(val & BMCR_ANENABLE)) {
4505 tp->link_config.autoneg = AUTONEG_DISABLE;
4506 tp->link_config.advertising = 0;
4507 tg3_flag_clear(tp, PAUSE_AUTONEG);
4511 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4513 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4516 tp->link_config.speed = SPEED_10;
4519 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522 tp->link_config.speed = SPEED_100;
4524 case BMCR_SPEED1000:
4525 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4526 tp->link_config.speed = SPEED_1000;
4534 if (val & BMCR_FULLDPLX)
4535 tp->link_config.duplex = DUPLEX_FULL;
4537 tp->link_config.duplex = DUPLEX_HALF;
4539 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4545 tp->link_config.autoneg = AUTONEG_ENABLE;
4546 tp->link_config.advertising = ADVERTISED_Autoneg;
4547 tg3_flag_set(tp, PAUSE_AUTONEG);
4549 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4552 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4556 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4557 tp->link_config.advertising |= adv | ADVERTISED_TP;
4559 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4561 tp->link_config.advertising |= ADVERTISED_FIBRE;
4564 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4567 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4568 err = tg3_readphy(tp, MII_CTRL1000, &val);
4572 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4574 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4578 adv = tg3_decode_flowctrl_1000X(val);
4579 tp->link_config.flowctrl = adv;
4581 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4582 adv = mii_adv_to_ethtool_adv_x(val);
4585 tp->link_config.advertising |= adv;
4592 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4596 /* Turn off tap power management. */
4597 /* Set Extended packet length bit */
4598 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4600 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4601 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4602 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4603 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4604 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4611 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4613 struct ethtool_eee eee;
4615 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4618 tg3_eee_pull_config(tp, &eee);
4620 if (tp->eee.eee_enabled) {
4621 if (tp->eee.advertised != eee.advertised ||
4622 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4623 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4626 /* EEE is disabled but we're advertising */
4634 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4636 u32 advmsk, tgtadv, advertising;
4638 advertising = tp->link_config.advertising;
4639 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4641 advmsk = ADVERTISE_ALL;
4642 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4643 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4644 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4647 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4650 if ((*lcladv & advmsk) != tgtadv)
4653 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4656 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4658 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4662 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4663 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4664 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4665 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4666 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4668 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4671 if (tg3_ctrl != tgtadv)
4678 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4682 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4685 if (tg3_readphy(tp, MII_STAT1000, &val))
4688 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4691 if (tg3_readphy(tp, MII_LPA, rmtadv))
4694 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4695 tp->link_config.rmt_adv = lpeth;
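/* Illustrative sketch (not part of the driver): the link-partner
 * decode above. MII_STAT1000 reports the partner's gigabit abilities
 * and MII_LPA its 10/100 and pause bits; both are folded into one
 * ethtool bitmap via the mii.h helpers.
 */
static u32 tg3_lp_adv_sketch(u32 stat1000, u32 lpa)
{
	return mii_stat1000_to_ethtool_lpa_t(stat1000) |
	       mii_lpa_to_ethtool_lpa_t(lpa);
}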
4700 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4702 if (curr_link_up != tp->link_up) {
4704 netif_carrier_on(tp->dev);
4706 netif_carrier_off(tp->dev);
4707 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4708 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4711 tg3_link_report(tp);
4718 static void tg3_clear_mac_status(struct tg3 *tp)
4723 MAC_STATUS_SYNC_CHANGED |
4724 MAC_STATUS_CFG_CHANGED |
4725 MAC_STATUS_MI_COMPLETION |
4726 MAC_STATUS_LNKSTATE_CHANGED);
4730 static void tg3_setup_eee(struct tg3 *tp)
4734 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4735 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4736 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4737 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4739 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4741 tw32_f(TG3_CPMU_EEE_CTRL,
4742 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4744 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4745 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4746 TG3_CPMU_EEEMD_LPI_IN_RX |
4747 TG3_CPMU_EEEMD_EEE_ENABLE;
4749 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4750 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4752 if (tg3_flag(tp, ENABLE_APE))
4753 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4755 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4757 tw32_f(TG3_CPMU_EEE_DBTMR1,
4758 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4759 (tp->eee.tx_lpi_timer & 0xffff));
4761 tw32_f(TG3_CPMU_EEE_DBTMR2,
4762 TG3_CPMU_DBTMR2_APE_TX_2047US |
4763 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4766 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4768 bool current_link_up;
4770 u32 lcl_adv, rmt_adv;
4775 tg3_clear_mac_status(tp);
4777 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4779 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4783 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4785 /* Some third-party PHYs need to be reset on link going
4788 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4789 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4790 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4792 tg3_readphy(tp, MII_BMSR, &bmsr);
4793 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4794 !(bmsr & BMSR_LSTATUS))
4800 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4801 tg3_readphy(tp, MII_BMSR, &bmsr);
4802 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4803 !tg3_flag(tp, INIT_COMPLETE))
4806 if (!(bmsr & BMSR_LSTATUS)) {
4807 err = tg3_init_5401phy_dsp(tp);
4811 tg3_readphy(tp, MII_BMSR, &bmsr);
4812 for (i = 0; i < 1000; i++) {
4814 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4815 (bmsr & BMSR_LSTATUS)) {
4821 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4822 TG3_PHY_REV_BCM5401_B0 &&
4823 !(bmsr & BMSR_LSTATUS) &&
4824 tp->link_config.active_speed == SPEED_1000) {
4825 err = tg3_phy_reset(tp);
4827 err = tg3_init_5401phy_dsp(tp);
4832 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4833 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4834 /* 5701 {A0,B0} CRC bug workaround */
4835 tg3_writephy(tp, 0x15, 0x0a75);
4836 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4837 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4838 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4841 /* Clear pending interrupts... */
4842 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4843 tg3_readphy(tp, MII_TG3_ISTAT, &val);
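/* MII_TG3_ISTAT is read-to-clear, and BMSR_LSTATUS is a latched bit,
 * which is why status registers are read twice back-to-back throughout
 * this driver: the first read returns (and clears) the latched value,
 * the second returns current state.
 */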
4845 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4846 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4847 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4848 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4850 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4851 tg3_asic_rev(tp) == ASIC_REV_5701) {
4852 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4853 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4854 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4855 else
4856 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4859 current_link_up = false;
4860 current_speed = SPEED_UNKNOWN;
4861 current_duplex = DUPLEX_UNKNOWN;
4862 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4863 tp->link_config.rmt_adv = 0;
4865 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4866 err = tg3_phy_auxctl_read(tp,
4867 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4869 if (!err && !(val & (1 << 10))) {
4870 tg3_phy_auxctl_write(tp,
4871 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4878 for (i = 0; i < 100; i++) {
4879 tg3_readphy(tp, MII_BMSR, &bmsr);
4880 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4881 (bmsr & BMSR_LSTATUS))
4886 if (bmsr & BMSR_LSTATUS) {
4889 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4890 for (i = 0; i < 2000; i++) {
4892 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4897 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4902 for (i = 0; i < 200; i++) {
4903 tg3_readphy(tp, MII_BMCR, &bmcr);
4904 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4905 continue;
4906 if (bmcr && bmcr != 0x7fff)
4907 break;
4908 udelay(10);
4914 tp->link_config.active_speed = current_speed;
4915 tp->link_config.active_duplex = current_duplex;
4917 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4918 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4920 if ((bmcr & BMCR_ANENABLE) &&
4922 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4923 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4924 current_link_up = true;
4926 /* EEE settings changes take effect only after a phy
4927 * reset. If we have skipped a reset due to Link Flap
4928 * Avoidance being enabled, do it now.
4930 if (!eee_config_ok &&
4931 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4937 if (!(bmcr & BMCR_ANENABLE) &&
4938 tp->link_config.speed == current_speed &&
4939 tp->link_config.duplex == current_duplex) {
4940 current_link_up = true;
4944 if (current_link_up &&
4945 tp->link_config.active_duplex == DUPLEX_FULL) {
4948 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4949 reg = MII_TG3_FET_GEN_STAT;
4950 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4952 reg = MII_TG3_EXT_STAT;
4953 bit = MII_TG3_EXT_STAT_MDIX;
4956 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4957 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4959 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4964 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4965 tg3_phy_copper_begin(tp);
4967 if (tg3_flag(tp, ROBOSWITCH)) {
4968 current_link_up = true;
4969 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4970 current_speed = SPEED_1000;
4971 current_duplex = DUPLEX_FULL;
4972 tp->link_config.active_speed = current_speed;
4973 tp->link_config.active_duplex = current_duplex;
4976 tg3_readphy(tp, MII_BMSR, &bmsr);
4977 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4978 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4979 current_link_up = true;
4982 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4983 if (current_link_up) {
4984 if (tp->link_config.active_speed == SPEED_100 ||
4985 tp->link_config.active_speed == SPEED_10)
4986 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4987 else
4988 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4989 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4990 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4991 else
4992 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4994 /* In order for the 5750 core in BCM4785 chip to work properly
4995 * in RGMII mode, the Led Control Register must be set up.
4997 if (tg3_flag(tp, RGMII_MODE)) {
4998 u32 led_ctrl = tr32(MAC_LED_CTRL);
4999 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5001 if (tp->link_config.active_speed == SPEED_10)
5002 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5003 else if (tp->link_config.active_speed == SPEED_100)
5004 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5005 LED_CTRL_100MBPS_ON);
5006 else if (tp->link_config.active_speed == SPEED_1000)
5007 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5008 LED_CTRL_1000MBPS_ON);
5010 tw32(MAC_LED_CTRL, led_ctrl);
5014 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5015 if (tp->link_config.active_duplex == DUPLEX_HALF)
5016 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5018 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5019 if (current_link_up &&
5020 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5021 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5023 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5026 /* ??? Without this setting Netgear GA302T PHY does not
5027 * ??? send/receive packets...
5029 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5030 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5031 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5032 tw32_f(MAC_MI_MODE, tp->mi_mode);
5036 tw32_f(MAC_MODE, tp->mac_mode);
5039 tg3_phy_eee_adjust(tp, current_link_up);
5041 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5042 /* Polled via timer. */
5043 tw32_f(MAC_EVENT, 0);
5044 } else {
5045 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5049 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5051 tp->link_config.active_speed == SPEED_1000 &&
5052 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5055 (MAC_STATUS_SYNC_CHANGED |
5056 MAC_STATUS_CFG_CHANGED));
5059 NIC_SRAM_FIRMWARE_MBOX,
5060 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5063 /* Prevent send BD corruption. */
5064 if (tg3_flag(tp, CLKREQ_BUG)) {
5065 if (tp->link_config.active_speed == SPEED_100 ||
5066 tp->link_config.active_speed == SPEED_10)
5067 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5068 PCI_EXP_LNKCTL_CLKREQ_EN);
5069 else
5070 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5071 PCI_EXP_LNKCTL_CLKREQ_EN);
5074 tg3_test_and_report_link_chg(tp, current_link_up);
5079 struct tg3_fiber_aneginfo {
5081 #define ANEG_STATE_UNKNOWN 0
5082 #define ANEG_STATE_AN_ENABLE 1
5083 #define ANEG_STATE_RESTART_INIT 2
5084 #define ANEG_STATE_RESTART 3
5085 #define ANEG_STATE_DISABLE_LINK_OK 4
5086 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5087 #define ANEG_STATE_ABILITY_DETECT 6
5088 #define ANEG_STATE_ACK_DETECT_INIT 7
5089 #define ANEG_STATE_ACK_DETECT 8
5090 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5091 #define ANEG_STATE_COMPLETE_ACK 10
5092 #define ANEG_STATE_IDLE_DETECT_INIT 11
5093 #define ANEG_STATE_IDLE_DETECT 12
5094 #define ANEG_STATE_LINK_OK 13
5095 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5096 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5099 #define MR_AN_ENABLE 0x00000001
5100 #define MR_RESTART_AN 0x00000002
5101 #define MR_AN_COMPLETE 0x00000004
5102 #define MR_PAGE_RX 0x00000008
5103 #define MR_NP_LOADED 0x00000010
5104 #define MR_TOGGLE_TX 0x00000020
5105 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5106 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5107 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5108 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5109 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5110 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5111 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5112 #define MR_TOGGLE_RX 0x00002000
5113 #define MR_NP_RX 0x00004000
5115 #define MR_LINK_OK 0x80000000
5117 unsigned long link_time, cur_time;
5119 u32 ability_match_cfg;
5120 int ability_match_count;
5122 char ability_match, idle_match, ack_match;
5124 u32 txconfig, rxconfig;
5125 #define ANEG_CFG_NP 0x00000080
5126 #define ANEG_CFG_ACK 0x00000040
5127 #define ANEG_CFG_RF2 0x00000020
5128 #define ANEG_CFG_RF1 0x00000010
5129 #define ANEG_CFG_PS2 0x00000001
5130 #define ANEG_CFG_PS1 0x00008000
5131 #define ANEG_CFG_HD 0x00004000
5132 #define ANEG_CFG_FD 0x00002000
5133 #define ANEG_CFG_INVAL 0x00001f06
5138 #define ANEG_TIMER_ENAB 2
5139 #define ANEG_FAILED -1
5141 #define ANEG_STATE_SETTLE_TIME 10000
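/* Software state machine for 1000BASE-X auto-negotiation, modeled on
 * the IEEE 802.3 clause 37 arbitration diagram: config words are
 * exchanged via MAC_TX_AUTO_NEG/MAC_RX_AUTO_NEG until ability match,
 * acknowledge and idle detection complete, then LINK_OK is declared.
 */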
5143 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5144 struct tg3_fiber_aneginfo *ap)
5147 unsigned long delta;
5151 if (ap->state == ANEG_STATE_UNKNOWN) {
5155 ap->ability_match_cfg = 0;
5156 ap->ability_match_count = 0;
5157 ap->ability_match = 0;
5163 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5164 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5166 if (rx_cfg_reg != ap->ability_match_cfg) {
5167 ap->ability_match_cfg = rx_cfg_reg;
5168 ap->ability_match = 0;
5169 ap->ability_match_count = 0;
5171 if (++ap->ability_match_count > 1) {
5172 ap->ability_match = 1;
5173 ap->ability_match_cfg = rx_cfg_reg;
5176 if (rx_cfg_reg & ANEG_CFG_ACK)
5184 ap->ability_match_cfg = 0;
5185 ap->ability_match_count = 0;
5186 ap->ability_match = 0;
5192 ap->rxconfig = rx_cfg_reg;
5195 switch (ap->state) {
5196 case ANEG_STATE_UNKNOWN:
5197 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5198 ap->state = ANEG_STATE_AN_ENABLE;
5201 case ANEG_STATE_AN_ENABLE:
5202 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5203 if (ap->flags & MR_AN_ENABLE) {
5206 ap->ability_match_cfg = 0;
5207 ap->ability_match_count = 0;
5208 ap->ability_match = 0;
5212 ap->state = ANEG_STATE_RESTART_INIT;
5214 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5218 case ANEG_STATE_RESTART_INIT:
5219 ap->link_time = ap->cur_time;
5220 ap->flags &= ~(MR_NP_LOADED);
5222 tw32(MAC_TX_AUTO_NEG, 0);
5223 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5224 tw32_f(MAC_MODE, tp->mac_mode);
5227 ret = ANEG_TIMER_ENAB;
5228 ap->state = ANEG_STATE_RESTART;
5231 case ANEG_STATE_RESTART:
5232 delta = ap->cur_time - ap->link_time;
5233 if (delta > ANEG_STATE_SETTLE_TIME)
5234 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5236 ret = ANEG_TIMER_ENAB;
5239 case ANEG_STATE_DISABLE_LINK_OK:
5240 ret = ANEG_DONE;
5241 break;
5243 case ANEG_STATE_ABILITY_DETECT_INIT:
5244 ap->flags &= ~(MR_TOGGLE_TX);
5245 ap->txconfig = ANEG_CFG_FD;
5246 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5247 if (flowctrl & ADVERTISE_1000XPAUSE)
5248 ap->txconfig |= ANEG_CFG_PS1;
5249 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5250 ap->txconfig |= ANEG_CFG_PS2;
5251 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5252 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5253 tw32_f(MAC_MODE, tp->mac_mode);
5256 ap->state = ANEG_STATE_ABILITY_DETECT;
5259 case ANEG_STATE_ABILITY_DETECT:
5260 if (ap->ability_match != 0 && ap->rxconfig != 0)
5261 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5264 case ANEG_STATE_ACK_DETECT_INIT:
5265 ap->txconfig |= ANEG_CFG_ACK;
5266 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5267 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5268 tw32_f(MAC_MODE, tp->mac_mode);
5271 ap->state = ANEG_STATE_ACK_DETECT;
5274 case ANEG_STATE_ACK_DETECT:
5275 if (ap->ack_match != 0) {
5276 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5277 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5278 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5280 ap->state = ANEG_STATE_AN_ENABLE;
5282 } else if (ap->ability_match != 0 &&
5283 ap->rxconfig == 0) {
5284 ap->state = ANEG_STATE_AN_ENABLE;
5288 case ANEG_STATE_COMPLETE_ACK_INIT:
5289 if (ap->rxconfig & ANEG_CFG_INVAL) {
5293 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5294 MR_LP_ADV_HALF_DUPLEX |
5295 MR_LP_ADV_SYM_PAUSE |
5296 MR_LP_ADV_ASYM_PAUSE |
5297 MR_LP_ADV_REMOTE_FAULT1 |
5298 MR_LP_ADV_REMOTE_FAULT2 |
5299 MR_LP_ADV_NEXT_PAGE |
5302 if (ap->rxconfig & ANEG_CFG_FD)
5303 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5304 if (ap->rxconfig & ANEG_CFG_HD)
5305 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5306 if (ap->rxconfig & ANEG_CFG_PS1)
5307 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5308 if (ap->rxconfig & ANEG_CFG_PS2)
5309 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5310 if (ap->rxconfig & ANEG_CFG_RF1)
5311 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5312 if (ap->rxconfig & ANEG_CFG_RF2)
5313 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5314 if (ap->rxconfig & ANEG_CFG_NP)
5315 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5317 ap->link_time = ap->cur_time;
5319 ap->flags ^= (MR_TOGGLE_TX);
5320 if (ap->rxconfig & 0x0008)
5321 ap->flags |= MR_TOGGLE_RX;
5322 if (ap->rxconfig & ANEG_CFG_NP)
5323 ap->flags |= MR_NP_RX;
5324 ap->flags |= MR_PAGE_RX;
5326 ap->state = ANEG_STATE_COMPLETE_ACK;
5327 ret = ANEG_TIMER_ENAB;
5330 case ANEG_STATE_COMPLETE_ACK:
5331 if (ap->ability_match != 0 &&
5332 ap->rxconfig == 0) {
5333 ap->state = ANEG_STATE_AN_ENABLE;
5336 delta = ap->cur_time - ap->link_time;
5337 if (delta > ANEG_STATE_SETTLE_TIME) {
5338 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5339 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5341 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5342 !(ap->flags & MR_NP_RX)) {
5343 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5351 case ANEG_STATE_IDLE_DETECT_INIT:
5352 ap->link_time = ap->cur_time;
5353 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5354 tw32_f(MAC_MODE, tp->mac_mode);
5357 ap->state = ANEG_STATE_IDLE_DETECT;
5358 ret = ANEG_TIMER_ENAB;
5361 case ANEG_STATE_IDLE_DETECT:
5362 if (ap->ability_match != 0 &&
5363 ap->rxconfig == 0) {
5364 ap->state = ANEG_STATE_AN_ENABLE;
5367 delta = ap->cur_time - ap->link_time;
5368 if (delta > ANEG_STATE_SETTLE_TIME) {
5369 /* XXX another gem from the Broadcom driver :( */
5370 ap->state = ANEG_STATE_LINK_OK;
5374 case ANEG_STATE_LINK_OK:
5375 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5379 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5380 /* ??? unimplemented */
5383 case ANEG_STATE_NEXT_PAGE_WAIT:
5384 /* ??? unimplemented */
5395 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5398 struct tg3_fiber_aneginfo aninfo;
5399 int status = ANEG_FAILED;
5403 tw32_f(MAC_TX_AUTO_NEG, 0);
5405 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5406 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5409 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5412 memset(&aninfo, 0, sizeof(aninfo));
5413 aninfo.flags |= MR_AN_ENABLE;
5414 aninfo.state = ANEG_STATE_UNKNOWN;
5415 aninfo.cur_time = 0;
5417 while (++tick < 195000) {
5418 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5419 if (status == ANEG_DONE || status == ANEG_FAILED)
5420 break;
5422 udelay(1);
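/* With the 1 us delay per pass, the ~195000-iteration bound gives the
 * state machine roughly 195 ms to reach ANEG_DONE or fail.
 */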
5425 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5426 tw32_f(MAC_MODE, tp->mac_mode);
5429 *txflags = aninfo.txconfig;
5430 *rxflags = aninfo.flags;
5432 if (status == ANEG_DONE &&
5433 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5434 MR_LP_ADV_FULL_DUPLEX)))
5440 static void tg3_init_bcm8002(struct tg3 *tp)
5442 u32 mac_status = tr32(MAC_STATUS);
5445 /* Reset when initializing for the first time or when we have a link. */
5446 if (tg3_flag(tp, INIT_COMPLETE) &&
5447 !(mac_status & MAC_STATUS_PCS_SYNCED))
5450 /* Set PLL lock range. */
5451 tg3_writephy(tp, 0x16, 0x8007);
5454 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5456 /* Wait for reset to complete. */
5457 /* XXX schedule_timeout() ... */
5458 for (i = 0; i < 500; i++)
5461 /* Config mode; select PMA/Ch 1 regs. */
5462 tg3_writephy(tp, 0x10, 0x8411);
5464 /* Enable auto-lock and comdet, select txclk for tx. */
5465 tg3_writephy(tp, 0x11, 0x0a10);
5467 tg3_writephy(tp, 0x18, 0x00a0);
5468 tg3_writephy(tp, 0x16, 0x41ff);
5470 /* Assert and deassert POR. */
5471 tg3_writephy(tp, 0x13, 0x0400);
5473 tg3_writephy(tp, 0x13, 0x0000);
5475 tg3_writephy(tp, 0x11, 0x0a50);
5477 tg3_writephy(tp, 0x11, 0x0a10);
5479 /* Wait for signal to stabilize */
5480 /* XXX schedule_timeout() ... */
5481 for (i = 0; i < 15000; i++)
5484 /* Deselect the channel register so we can read the PHYID
5487 tg3_writephy(tp, 0x10, 0x8011);
5490 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5493 bool current_link_up;
5494 u32 sg_dig_ctrl, sg_dig_status;
5495 u32 serdes_cfg, expected_sg_dig_ctrl;
5496 int workaround, port_a;
5499 expected_sg_dig_ctrl = 0;
5502 current_link_up = false;
5504 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5505 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5507 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5510 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5511 /* preserve bits 20-23 for voltage regulator */
5512 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5515 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5517 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5518 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5520 u32 val = serdes_cfg;
5526 tw32_f(MAC_SERDES_CFG, val);
5529 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5531 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5532 tg3_setup_flow_control(tp, 0, 0);
5533 current_link_up = true;
5538 /* Want auto-negotiation. */
5539 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5541 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5542 if (flowctrl & ADVERTISE_1000XPAUSE)
5543 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5544 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5545 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5547 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5548 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5549 tp->serdes_counter &&
5550 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5551 MAC_STATUS_RCVD_CFG)) ==
5552 MAC_STATUS_PCS_SYNCED)) {
5553 tp->serdes_counter--;
5554 current_link_up = true;
5559 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5560 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5562 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5564 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5565 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5566 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5567 MAC_STATUS_SIGNAL_DET)) {
5568 sg_dig_status = tr32(SG_DIG_STATUS);
5569 mac_status = tr32(MAC_STATUS);
5571 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5572 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5573 u32 local_adv = 0, remote_adv = 0;
5575 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5576 local_adv |= ADVERTISE_1000XPAUSE;
5577 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5578 local_adv |= ADVERTISE_1000XPSE_ASYM;
5580 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5581 remote_adv |= LPA_1000XPAUSE;
5582 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5583 remote_adv |= LPA_1000XPAUSE_ASYM;
5585 tp->link_config.rmt_adv =
5586 mii_adv_to_ethtool_adv_x(remote_adv);
5588 tg3_setup_flow_control(tp, local_adv, remote_adv);
5589 current_link_up = true;
5590 tp->serdes_counter = 0;
5591 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5592 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5593 if (tp->serdes_counter)
5594 tp->serdes_counter--;
5597 u32 val = serdes_cfg;
5604 tw32_f(MAC_SERDES_CFG, val);
5607 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5610 /* Link parallel detection - link is up */
5611 /* only if we have PCS_SYNC and not */
5612 /* receiving config code words */
5613 mac_status = tr32(MAC_STATUS);
5614 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5615 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5616 tg3_setup_flow_control(tp, 0, 0);
5617 current_link_up = true;
5619 TG3_PHYFLG_PARALLEL_DETECT;
5620 tp->serdes_counter =
5621 SERDES_PARALLEL_DET_TIMEOUT;
5623 goto restart_autoneg;
5627 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5628 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5632 return current_link_up;
5635 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5637 bool current_link_up = false;
5639 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5642 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5643 u32 txflags, rxflags;
5646 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5647 u32 local_adv = 0, remote_adv = 0;
5649 if (txflags & ANEG_CFG_PS1)
5650 local_adv |= ADVERTISE_1000XPAUSE;
5651 if (txflags & ANEG_CFG_PS2)
5652 local_adv |= ADVERTISE_1000XPSE_ASYM;
5654 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5655 remote_adv |= LPA_1000XPAUSE;
5656 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5657 remote_adv |= LPA_1000XPAUSE_ASYM;
5659 tp->link_config.rmt_adv =
5660 mii_adv_to_ethtool_adv_x(remote_adv);
5662 tg3_setup_flow_control(tp, local_adv, remote_adv);
5664 current_link_up = true;
5666 for (i = 0; i < 30; i++) {
5669 (MAC_STATUS_SYNC_CHANGED |
5670 MAC_STATUS_CFG_CHANGED));
5672 if ((tr32(MAC_STATUS) &
5673 (MAC_STATUS_SYNC_CHANGED |
5674 MAC_STATUS_CFG_CHANGED)) == 0)
5678 mac_status = tr32(MAC_STATUS);
5679 if (!current_link_up &&
5680 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5681 !(mac_status & MAC_STATUS_RCVD_CFG))
5682 current_link_up = true;
5684 tg3_setup_flow_control(tp, 0, 0);
5686 /* Forcing 1000FD link up. */
5687 current_link_up = true;
5689 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5692 tw32_f(MAC_MODE, tp->mac_mode);
5697 return current_link_up;
5700 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5703 u16 orig_active_speed;
5704 u8 orig_active_duplex;
5706 bool current_link_up;
5709 orig_pause_cfg = tp->link_config.active_flowctrl;
5710 orig_active_speed = tp->link_config.active_speed;
5711 orig_active_duplex = tp->link_config.active_duplex;
5713 if (!tg3_flag(tp, HW_AUTONEG) &&
5715 tg3_flag(tp, INIT_COMPLETE)) {
5716 mac_status = tr32(MAC_STATUS);
5717 mac_status &= (MAC_STATUS_PCS_SYNCED |
5718 MAC_STATUS_SIGNAL_DET |
5719 MAC_STATUS_CFG_CHANGED |
5720 MAC_STATUS_RCVD_CFG);
5721 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5722 MAC_STATUS_SIGNAL_DET)) {
5723 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5724 MAC_STATUS_CFG_CHANGED));
5729 tw32_f(MAC_TX_AUTO_NEG, 0);
5731 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5732 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5733 tw32_f(MAC_MODE, tp->mac_mode);
5736 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5737 tg3_init_bcm8002(tp);
5739 /* Enable link change event even when serdes polling. */
5740 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5743 current_link_up = false;
5744 tp->link_config.rmt_adv = 0;
5745 mac_status = tr32(MAC_STATUS);
5747 if (tg3_flag(tp, HW_AUTONEG))
5748 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5749 else
5750 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5752 tp->napi[0].hw_status->status =
5753 (SD_STATUS_UPDATED |
5754 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5756 for (i = 0; i < 100; i++) {
5757 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5758 MAC_STATUS_CFG_CHANGED));
5760 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5761 MAC_STATUS_CFG_CHANGED |
5762 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5766 mac_status = tr32(MAC_STATUS);
5767 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5768 current_link_up = false;
5769 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5770 tp->serdes_counter == 0) {
5771 tw32_f(MAC_MODE, (tp->mac_mode |
5772 MAC_MODE_SEND_CONFIGS));
5774 tw32_f(MAC_MODE, tp->mac_mode);
5778 if (current_link_up) {
5779 tp->link_config.active_speed = SPEED_1000;
5780 tp->link_config.active_duplex = DUPLEX_FULL;
5781 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5782 LED_CTRL_LNKLED_OVERRIDE |
5783 LED_CTRL_1000MBPS_ON));
5785 tp->link_config.active_speed = SPEED_UNKNOWN;
5786 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5787 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788 LED_CTRL_LNKLED_OVERRIDE |
5789 LED_CTRL_TRAFFIC_OVERRIDE));
5792 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5793 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5794 if (orig_pause_cfg != now_pause_cfg ||
5795 orig_active_speed != tp->link_config.active_speed ||
5796 orig_active_duplex != tp->link_config.active_duplex)
5797 tg3_link_report(tp);
5803 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5807 u16 current_speed = SPEED_UNKNOWN;
5808 u8 current_duplex = DUPLEX_UNKNOWN;
5809 bool current_link_up = false;
5810 u32 local_adv, remote_adv, sgsr;
5812 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5813 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5814 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5815 (sgsr & SERDES_TG3_SGMII_MODE)) {
5820 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5822 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5823 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5825 current_link_up = true;
5826 if (sgsr & SERDES_TG3_SPEED_1000) {
5827 current_speed = SPEED_1000;
5828 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829 } else if (sgsr & SERDES_TG3_SPEED_100) {
5830 current_speed = SPEED_100;
5831 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5833 current_speed = SPEED_10;
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5838 current_duplex = DUPLEX_FULL;
5839 else
5840 current_duplex = DUPLEX_HALF;
5843 tw32_f(MAC_MODE, tp->mac_mode);
5846 tg3_clear_mac_status(tp);
5848 goto fiber_setup_done;
5851 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5852 tw32_f(MAC_MODE, tp->mac_mode);
5855 tg3_clear_mac_status(tp);
5860 tp->link_config.rmt_adv = 0;
5862 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5863 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5864 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5865 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5866 bmsr |= BMSR_LSTATUS;
5867 else
5868 bmsr &= ~BMSR_LSTATUS;
5871 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5873 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5874 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5875 /* do nothing, just check for link up at the end */
5876 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5879 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5880 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5881 ADVERTISE_1000XPAUSE |
5882 ADVERTISE_1000XPSE_ASYM |
5885 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5886 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5888 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5889 tg3_writephy(tp, MII_ADVERTISE, newadv);
5890 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5891 tg3_writephy(tp, MII_BMCR, bmcr);
5893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5894 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5895 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5902 bmcr &= ~BMCR_SPEED1000;
5903 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5905 if (tp->link_config.duplex == DUPLEX_FULL)
5906 new_bmcr |= BMCR_FULLDPLX;
5908 if (new_bmcr != bmcr) {
5909 /* BMCR_SPEED1000 is a reserved bit that needs
5910 * to be set on write.
5912 new_bmcr |= BMCR_SPEED1000;
5914 /* Force a linkdown */
5918 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5919 adv &= ~(ADVERTISE_1000XFULL |
5920 ADVERTISE_1000XHALF |
5922 tg3_writephy(tp, MII_ADVERTISE, adv);
5923 tg3_writephy(tp, MII_BMCR, bmcr |
5927 tg3_carrier_off(tp);
5929 tg3_writephy(tp, MII_BMCR, new_bmcr);
5931 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5932 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5933 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5934 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5935 bmsr |= BMSR_LSTATUS;
5936 else
5937 bmsr &= ~BMSR_LSTATUS;
5939 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5943 if (bmsr & BMSR_LSTATUS) {
5944 current_speed = SPEED_1000;
5945 current_link_up = true;
5946 if (bmcr & BMCR_FULLDPLX)
5947 current_duplex = DUPLEX_FULL;
5948 else
5949 current_duplex = DUPLEX_HALF;
5954 if (bmcr & BMCR_ANENABLE) {
5957 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5958 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5959 common = local_adv & remote_adv;
5960 if (common & (ADVERTISE_1000XHALF |
5961 ADVERTISE_1000XFULL)) {
5962 if (common & ADVERTISE_1000XFULL)
5963 current_duplex = DUPLEX_FULL;
5964 else
5965 current_duplex = DUPLEX_HALF;
5967 tp->link_config.rmt_adv =
5968 mii_adv_to_ethtool_adv_x(remote_adv);
5969 } else if (!tg3_flag(tp, 5780_CLASS)) {
5970 /* Link is up via parallel detect */
5972 current_link_up = false;
5978 if (current_link_up && current_duplex == DUPLEX_FULL)
5979 tg3_setup_flow_control(tp, local_adv, remote_adv);
5981 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5982 if (tp->link_config.active_duplex == DUPLEX_HALF)
5983 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5985 tw32_f(MAC_MODE, tp->mac_mode);
5988 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5990 tp->link_config.active_speed = current_speed;
5991 tp->link_config.active_duplex = current_duplex;
5993 tg3_test_and_report_link_chg(tp, current_link_up);
5997 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5999 if (tp->serdes_counter) {
6000 /* Give autoneg time to complete. */
6001 tp->serdes_counter--;
6006 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6009 tg3_readphy(tp, MII_BMCR, &bmcr);
6010 if (bmcr & BMCR_ANENABLE) {
6013 /* Select shadow register 0x1f */
6014 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6015 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6017 /* Select expansion interrupt status register */
6018 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6019 MII_TG3_DSP_EXP1_INT_STAT);
6020 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6021 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6023 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6024 /* We have signal detect and not receiving
6025 * config code words, link is up by parallel
6029 bmcr &= ~BMCR_ANENABLE;
6030 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6031 tg3_writephy(tp, MII_BMCR, bmcr);
6032 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6035 } else if (tp->link_up &&
6036 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6037 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6040 /* Select expansion interrupt status register */
6041 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6042 MII_TG3_DSP_EXP1_INT_STAT);
6043 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6047 /* Config code words received, turn on autoneg. */
6048 tg3_readphy(tp, MII_BMCR, &bmcr);
6049 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6051 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6057 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6063 err = tg3_setup_fiber_phy(tp, force_reset);
6064 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6065 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6066 else
6067 err = tg3_setup_copper_phy(tp, force_reset);
6069 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6072 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6073 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6075 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6080 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6081 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6082 tw32(GRC_MISC_CFG, val);
6085 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6086 (6 << TX_LENGTHS_IPG_SHIFT);
6087 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6088 tg3_asic_rev(tp) == ASIC_REV_5762)
6089 val |= tr32(MAC_TX_LENGTHS) &
6090 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6091 TX_LENGTHS_CNT_DWN_VAL_MSK);
6093 if (tp->link_config.active_speed == SPEED_1000 &&
6094 tp->link_config.active_duplex == DUPLEX_HALF)
6095 tw32(MAC_TX_LENGTHS, val |
6096 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6097 else
6098 tw32(MAC_TX_LENGTHS, val |
6099 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
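/* The extended slot time (0xff) at 1000 Mb/s half duplex presumably
 * covers gigabit carrier extension; every other mode uses the standard
 * value of 32.
 */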
6101 if (!tg3_flag(tp, 5705_PLUS)) {
6102 if (tp->link_up)
6103 tw32(HOSTCC_STAT_COAL_TICKS,
6104 tp->coal.stats_block_coalesce_usecs);
6105 else
6106 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6110 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6111 val = tr32(PCIE_PWR_MGMT_THRESH);
6112 if (!tp->link_up)
6113 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6114 tp->pwrmgmt_thresh;
6115 else
6116 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6117 tw32(PCIE_PWR_MGMT_THRESH, val);
6123 /* tp->lock must be held */
6124 static u64 tg3_refclk_read(struct tg3 *tp)
6126 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6127 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6130 /* tp->lock must be held */
6131 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6133 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6135 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6136 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6137 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6138 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6141 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6142 static inline void tg3_full_unlock(struct tg3 *tp);
6143 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6145 struct tg3 *tp = netdev_priv(dev);
6147 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6148 SOF_TIMESTAMPING_RX_SOFTWARE |
6149 SOF_TIMESTAMPING_SOFTWARE;
6151 if (tg3_flag(tp, PTP_CAPABLE)) {
6152 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6153 SOF_TIMESTAMPING_RX_HARDWARE |
6154 SOF_TIMESTAMPING_RAW_HARDWARE;
6157 if (tp->ptp_clock)
6158 info->phc_index = ptp_clock_index(tp->ptp_clock);
6159 else
6160 info->phc_index = -1;
6162 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6164 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6165 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6166 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6167 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
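/* Hardware RX timestamping covers only PTP event packets (v1 over UDP,
 * v2 over L2 or UDP); all other traffic gets the software timestamps
 * advertised above.
 */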
6171 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6173 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6174 bool neg_adj = false;
6182 /* Frequency adjustment is performed using hardware with a 24 bit
6183 * accumulator and a programmable correction value. On each clk, the
6184 * correction value gets added to the accumulator and when it
6185 * overflows, the time counter is incremented/decremented.
6187 * So conversion from ppb to correction value is
6188 * ppb * (1 << 24) / 1000000000
6190 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6191 TG3_EAV_REF_CLK_CORRECT_MASK;
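/* Worked example: ppb = 1000 (+1 ppm) yields a correction of
 * 1000 * 2^24 / 10^9 = 16, so the 24-bit accumulator wraps once every
 * 2^24 / 16 = 2^20 clocks -- about one counter adjustment per million
 * cycles, i.e. roughly the requested 1 ppm.
 */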
6193 tg3_full_lock(tp, 0);
6196 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6197 TG3_EAV_REF_CLK_CORRECT_EN |
6198 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6200 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6202 tg3_full_unlock(tp);
6207 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6209 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6211 tg3_full_lock(tp, 0);
6212 tp->ptp_adjust += delta;
6213 tg3_full_unlock(tp);
6218 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6221 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6223 tg3_full_lock(tp, 0);
6224 ns = tg3_refclk_read(tp);
6225 ns += tp->ptp_adjust;
6226 tg3_full_unlock(tp);
6228 *ts = ns_to_timespec64(ns);
6233 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6234 const struct timespec64 *ts)
6237 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6239 ns = timespec64_to_ns(ts);
6241 tg3_full_lock(tp, 0);
6242 tg3_refclk_write(tp, ns);
6244 tg3_full_unlock(tp);
6249 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6250 struct ptp_clock_request *rq, int on)
6252 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6257 case PTP_CLK_REQ_PEROUT:
6258 if (rq->perout.index != 0)
6261 tg3_full_lock(tp, 0);
6262 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6263 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6268 nsec = rq->perout.start.sec * 1000000000ULL +
6269 rq->perout.start.nsec;
6271 if (rq->perout.period.sec || rq->perout.period.nsec) {
6272 netdev_warn(tp->dev,
6273 "Device supports only a one-shot timesync output, period must be 0\n");
6278 if (nsec & (1ULL << 63)) {
6279 netdev_warn(tp->dev,
6280 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6285 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6286 tw32(TG3_EAV_WATCHDOG0_MSB,
6287 TG3_EAV_WATCHDOG0_EN |
6288 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6290 tw32(TG3_EAV_REF_CLCK_CTL,
6291 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6293 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6294 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6298 tg3_full_unlock(tp);
6308 static const struct ptp_clock_info tg3_ptp_caps = {
6309 .owner = THIS_MODULE,
6310 .name = "tg3 clock",
6311 .max_adj = 250000000,
6312 .n_alarm = 0,
6313 .n_ext_ts = 0,
6314 .n_per_out = 1,
6315 .n_pins = 0,
6316 .pps = 0,
6317 .adjfreq = tg3_ptp_adjfreq,
6318 .adjtime = tg3_ptp_adjtime,
6319 .gettime64 = tg3_ptp_gettime,
6320 .settime64 = tg3_ptp_settime,
6321 .enable = tg3_ptp_enable,
6324 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6325 struct skb_shared_hwtstamps *timestamp)
6327 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6328 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6329 tp->ptp_adjust);
6332 /* tp->lock must be held */
6333 static void tg3_ptp_init(struct tg3 *tp)
6335 if (!tg3_flag(tp, PTP_CAPABLE))
6338 /* Initialize the hardware clock to the system time. */
6339 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6341 tp->ptp_info = tg3_ptp_caps;
6344 /* tp->lock must be held */
6345 static void tg3_ptp_resume(struct tg3 *tp)
6347 if (!tg3_flag(tp, PTP_CAPABLE))
6350 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6354 static void tg3_ptp_fini(struct tg3 *tp)
6356 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6359 ptp_clock_unregister(tp->ptp_clock);
6360 tp->ptp_clock = NULL;
6364 static inline int tg3_irq_sync(struct tg3 *tp)
6366 return tp->irq_sync;
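/* Read 'len' bytes of registers starting at MMIO offset 'off' into the
 * caller's buffer at that same byte offset, so the resulting dump
 * mirrors the device's register layout.
 */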
6369 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6373 dst = (u32 *)((u8 *)dst + off);
6374 for (i = 0; i < len; i += sizeof(u32))
6375 *dst++ = tr32(off + i);
6378 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6380 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6381 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6382 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6383 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6384 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6385 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6386 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6387 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6388 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6389 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6390 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6391 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6392 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6393 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6394 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6395 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6396 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6397 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6398 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6400 if (tg3_flag(tp, SUPPORT_MSIX))
6401 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6403 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6404 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6405 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6406 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6407 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6408 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6409 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6410 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6412 if (!tg3_flag(tp, 5705_PLUS)) {
6413 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6414 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6415 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6418 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6419 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6420 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6421 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6422 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6424 if (tg3_flag(tp, NVRAM))
6425 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6428 static void tg3_dump_state(struct tg3 *tp)
6433 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6437 if (tg3_flag(tp, PCI_EXPRESS)) {
6438 /* Read up to but not including private PCI registers */
6439 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6440 regs[i / sizeof(u32)] = tr32(i);
6442 tg3_dump_legacy_regs(tp, regs);
6444 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6445 if (!regs[i + 0] && !regs[i + 1] &&
6446 !regs[i + 2] && !regs[i + 3])
6449 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6451 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6456 for (i = 0; i < tp->irq_cnt; i++) {
6457 struct tg3_napi *tnapi = &tp->napi[i];
6459 /* SW status block */
6461 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6463 tnapi->hw_status->status,
6464 tnapi->hw_status->status_tag,
6465 tnapi->hw_status->rx_jumbo_consumer,
6466 tnapi->hw_status->rx_consumer,
6467 tnapi->hw_status->rx_mini_consumer,
6468 tnapi->hw_status->idx[0].rx_producer,
6469 tnapi->hw_status->idx[0].tx_consumer);
6472 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6474 tnapi->last_tag, tnapi->last_irq_tag,
6475 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6477 tnapi->prodring.rx_std_prod_idx,
6478 tnapi->prodring.rx_std_cons_idx,
6479 tnapi->prodring.rx_jmb_prod_idx,
6480 tnapi->prodring.rx_jmb_cons_idx);
6484 /* This is called whenever we suspect that the system chipset is re-
6485 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6486 * is bogus tx completions. We try to recover by setting the
6487 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6490 static void tg3_tx_recover(struct tg3 *tp)
6492 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6493 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6495 netdev_warn(tp->dev,
6496 "The system may be re-ordering memory-mapped I/O "
6497 "cycles to the network device, attempting to recover. "
6498 "Please report the problem to the driver maintainer "
6499 "and include system chipset information.\n");
6501 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6504 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6506 /* Tell compiler to fetch tx indices from memory. */
6508 return tnapi->tx_pending -
6509 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
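/* Example: with TG3_TX_RING_SIZE = 512, tx_prod = 10 and tx_cons = 500,
 * (10 - 500) & 511 = 22 descriptors are in flight, leaving
 * tx_pending - 22 slots available.
 */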
6512 /* Tigon3 never reports partial packet sends. So we do not
6513 * need special logic to handle SKBs that have not had all
6514 * of their frags sent yet, like SunGEM does.
6516 static void tg3_tx(struct tg3_napi *tnapi)
6518 struct tg3 *tp = tnapi->tp;
6519 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6520 u32 sw_idx = tnapi->tx_cons;
6521 struct netdev_queue *txq;
6522 int index = tnapi - tp->napi;
6523 unsigned int pkts_compl = 0, bytes_compl = 0;
6525 if (tg3_flag(tp, ENABLE_TSS))
6528 txq = netdev_get_tx_queue(tp->dev, index);
6530 while (sw_idx != hw_idx) {
6531 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6532 struct sk_buff *skb = ri->skb;
6535 if (unlikely(skb == NULL)) {
6536 tg3_tx_recover(tp);
6537 return;
6538 }
6540 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6541 struct skb_shared_hwtstamps timestamp;
6542 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6543 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6545 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6547 skb_tstamp_tx(skb, &timestamp);
6550 pci_unmap_single(tp->pdev,
6551 dma_unmap_addr(ri, mapping),
6557 while (ri->fragmented) {
6558 ri->fragmented = false;
6559 sw_idx = NEXT_TX(sw_idx);
6560 ri = &tnapi->tx_buffers[sw_idx];
6563 sw_idx = NEXT_TX(sw_idx);
6565 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6566 ri = &tnapi->tx_buffers[sw_idx];
6567 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6568 tx_bug = 1;
6570 pci_unmap_page(tp->pdev,
6571 dma_unmap_addr(ri, mapping),
6572 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6575 while (ri->fragmented) {
6576 ri->fragmented = false;
6577 sw_idx = NEXT_TX(sw_idx);
6578 ri = &tnapi->tx_buffers[sw_idx];
6581 sw_idx = NEXT_TX(sw_idx);
6584 pkts_compl++;
6585 bytes_compl += skb->len;
6587 dev_kfree_skb_any(skb);
6589 if (unlikely(tx_bug)) {
6590 tg3_tx_recover(tp);
6591 return;
6592 }
6595 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6597 tnapi->tx_cons = sw_idx;
6599 /* Need to make the tx_cons update visible to tg3_start_xmit()
6600 * before checking for netif_queue_stopped(). Without the
6601 * memory barrier, there is a small possibility that tg3_start_xmit()
6602 * will miss it and cause the queue to be stopped forever.
6603 */
6604 smp_mb();
6606 if (unlikely(netif_tx_queue_stopped(txq) &&
6607 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6608 __netif_tx_lock(txq, smp_processor_id());
6609 if (netif_tx_queue_stopped(txq) &&
6610 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6611 netif_tx_wake_queue(txq);
6612 __netif_tx_unlock(txq);
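/* The stopped-queue test is repeated under the tx lock to close the
 * race with tg3_start_xmit() stopping the queue concurrently.
 */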
6616 static void tg3_frag_free(bool is_frag, void *data)
6618 if (is_frag)
6619 skb_free_frag(data);
6620 else
6621 kfree(data);
6624 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6626 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6627 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6632 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6633 map_sz, PCI_DMA_FROMDEVICE);
6634 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6635 ri->data = NULL;
6639 /* Returns size of skb allocated or < 0 on error.
6641 * We only need to fill in the address because the other members
6642 * of the RX descriptor are invariant, see tg3_init_rings.
6644 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6645 * posting buffers we only dirty the first cache line of the RX
6646 * descriptor (containing the address). Whereas for the RX status
6647 * buffers the cpu only reads the last cacheline of the RX descriptor
6648 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6650 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6651 u32 opaque_key, u32 dest_idx_unmasked,
6652 unsigned int *frag_size)
6654 struct tg3_rx_buffer_desc *desc;
6655 struct ring_info *map;
6658 int skb_size, data_size, dest_idx;
6660 switch (opaque_key) {
6661 case RXD_OPAQUE_RING_STD:
6662 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6663 desc = &tpr->rx_std[dest_idx];
6664 map = &tpr->rx_std_buffers[dest_idx];
6665 data_size = tp->rx_pkt_map_sz;
6668 case RXD_OPAQUE_RING_JUMBO:
6669 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6670 desc = &tpr->rx_jmb[dest_idx].std;
6671 map = &tpr->rx_jmb_buffers[dest_idx];
6672 data_size = TG3_RX_JMB_MAP_SZ;
6679 /* Do not overwrite any of the map or rp information
6680 * until we are sure we can commit to a new buffer.
6682 * Callers depend upon this behavior and assume that
6683 * we leave everything unchanged if we fail.
6685 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6686 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6687 if (skb_size <= PAGE_SIZE) {
6688 data = netdev_alloc_frag(skb_size);
6689 *frag_size = skb_size;
6690 } else {
6691 data = kmalloc(skb_size, GFP_ATOMIC);
6692 *frag_size = 0;
6695 if (!data)
6696 return -ENOMEM;
6697 mapping = pci_map_single(tp->pdev,
6698 data + TG3_RX_OFFSET(tp),
6700 PCI_DMA_FROMDEVICE);
6701 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6702 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6707 dma_unmap_addr_set(map, mapping, mapping);
6709 desc->addr_hi = ((u64)mapping >> 32);
6710 desc->addr_lo = ((u64)mapping & 0xffffffff);
6715 /* We only need to move over in the address because the other
6716 * members of the RX descriptor are invariant. See notes above
6717 * tg3_alloc_rx_data for full details.
6719 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6720 struct tg3_rx_prodring_set *dpr,
6721 u32 opaque_key, int src_idx,
6722 u32 dest_idx_unmasked)
6724 struct tg3 *tp = tnapi->tp;
6725 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6726 struct ring_info *src_map, *dest_map;
6727 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6730 switch (opaque_key) {
6731 case RXD_OPAQUE_RING_STD:
6732 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6733 dest_desc = &dpr->rx_std[dest_idx];
6734 dest_map = &dpr->rx_std_buffers[dest_idx];
6735 src_desc = &spr->rx_std[src_idx];
6736 src_map = &spr->rx_std_buffers[src_idx];
6739 case RXD_OPAQUE_RING_JUMBO:
6740 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6741 dest_desc = &dpr->rx_jmb[dest_idx].std;
6742 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6743 src_desc = &spr->rx_jmb[src_idx].std;
6744 src_map = &spr->rx_jmb_buffers[src_idx];
6751 dest_map->data = src_map->data;
6752 dma_unmap_addr_set(dest_map, mapping,
6753 dma_unmap_addr(src_map, mapping));
6754 dest_desc->addr_hi = src_desc->addr_hi;
6755 dest_desc->addr_lo = src_desc->addr_lo;
6757 /* Ensure that the update to the skb happens after the physical
6758 * addresses have been transferred to the new BD location.
6759 */
6760 smp_wmb();
6762 src_map->data = NULL;
6765 /* The RX ring scheme is composed of multiple rings which post fresh
6766 * buffers to the chip, and one special ring the chip uses to report
6767 * status back to the host.
6769 * The special ring reports the status of received packets to the
6770 * host. The chip does not write into the original descriptor the
6771 * RX buffer was obtained from. The chip simply takes the original
6772 * descriptor as provided by the host, updates the status and length
6773 * field, then writes this into the next status ring entry.
6775 * Each ring the host uses to post buffers to the chip is described
6776 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6777 * it is first placed into the on-chip ram. When the packet's length
6778 * is known, it walks down the TG3_BDINFO entries to select the ring.
6779 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6780 * which is within the range of the new packet's length is chosen.
6782 * The "separate ring for rx status" scheme may sound odd, but it makes
6783 * sense from a cache coherency perspective. If only the host writes
6784 * to the buffer post rings, and only the chip writes to the rx status
6785 * rings, then cache lines never move beyond shared-modified state.
6786 * If both the host and chip were to write into the same ring, cache line
6787 * eviction could occur since both entities want it in an exclusive state.
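/* In short, each direction has exactly one writer:
 *
 *   host --(std/jumbo producer rings)--> chip
 *   chip --(rx return/status ring)-----> host
 *
 * so ownership of the ring cache lines never ping-pongs between the
 * CPU and the NIC.
 */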
6789 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6791 struct tg3 *tp = tnapi->tp;
6792 u32 work_mask, rx_std_posted = 0;
6793 u32 std_prod_idx, jmb_prod_idx;
6794 u32 sw_idx = tnapi->rx_rcb_ptr;
6797 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6799 hw_idx = *(tnapi->rx_rcb_prod_idx);
6800 /*
6801 * We need to order the read of hw_idx and the read of
6802 * the opaque cookie.
6803 */
6804 rmb();
6807 std_prod_idx = tpr->rx_std_prod_idx;
6808 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6809 while (sw_idx != hw_idx && budget > 0) {
6810 struct ring_info *ri;
6811 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6813 struct sk_buff *skb;
6814 dma_addr_t dma_addr;
6815 u32 opaque_key, desc_idx, *post_ptr;
6819 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6820 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6821 if (opaque_key == RXD_OPAQUE_RING_STD) {
6822 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6823 dma_addr = dma_unmap_addr(ri, mapping);
6825 post_ptr = &std_prod_idx;
6827 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6828 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6829 dma_addr = dma_unmap_addr(ri, mapping);
6831 post_ptr = &jmb_prod_idx;
6833 goto next_pkt_nopost;
6835 work_mask |= opaque_key;
6837 if (desc->err_vlan & RXD_ERR_MASK) {
6839 tg3_recycle_rx(tnapi, tpr, opaque_key,
6840 desc_idx, *post_ptr);
6842 /* Other statistics kept track of by card. */
6847 prefetch(data + TG3_RX_OFFSET(tp));
6848 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6851 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6852 RXD_FLAG_PTPSTAT_PTPV1 ||
6853 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6854 RXD_FLAG_PTPSTAT_PTPV2) {
6855 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6856 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6859 if (len > TG3_RX_COPY_THRESH(tp)) {
6861 unsigned int frag_size;
6863 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6864 *post_ptr, &frag_size);
6868 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6869 PCI_DMA_FROMDEVICE);
6871 /* Ensure that the update to the data happens
6872 * after the usage of the old DMA mapping.
6873 */
6874 smp_wmb();
6876 ri->data = NULL;
6878 skb = build_skb(data, frag_size);
6880 tg3_frag_free(frag_size != 0, data);
6881 goto drop_it_no_recycle;
6883 skb_reserve(skb, TG3_RX_OFFSET(tp));
6885 tg3_recycle_rx(tnapi, tpr, opaque_key,
6886 desc_idx, *post_ptr);
6888 skb = netdev_alloc_skb(tp->dev,
6889 len + TG3_RAW_IP_ALIGN);
6891 goto drop_it_no_recycle;
6893 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6894 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6896 data + TG3_RX_OFFSET(tp),
6898 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6903 tg3_hwclock_to_timestamp(tp, tstamp,
6904 skb_hwtstamps(skb));
6906 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6907 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6908 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6909 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6910 skb->ip_summed = CHECKSUM_UNNECESSARY;
6912 skb_checksum_none_assert(skb);
6914 skb->protocol = eth_type_trans(skb, tp->dev);
6916 if (len > (tp->dev->mtu + ETH_HLEN) &&
6917 skb->protocol != htons(ETH_P_8021Q) &&
6918 skb->protocol != htons(ETH_P_8021AD)) {
6919 dev_kfree_skb_any(skb);
6920 goto drop_it_no_recycle;
6923 if (desc->type_flags & RXD_FLAG_VLAN &&
6924 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6925 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6926 desc->err_vlan & RXD_VLAN_MASK);
6928 napi_gro_receive(&tnapi->napi, skb);
6936 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6937 tpr->rx_std_prod_idx = std_prod_idx &
6938 tp->rx_std_ring_mask;
6939 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6940 tpr->rx_std_prod_idx);
6941 work_mask &= ~RXD_OPAQUE_RING_STD;
6946 sw_idx &= tp->rx_ret_ring_mask;
6948 /* Refresh hw_idx to see if there is new work */
6949 if (sw_idx == hw_idx) {
6950 hw_idx = *(tnapi->rx_rcb_prod_idx);
6955 /* ACK the status ring. */
6956 tnapi->rx_rcb_ptr = sw_idx;
6957 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6959 /* Refill RX ring(s). */
6960 if (!tg3_flag(tp, ENABLE_RSS)) {
6961 /* Sync BD data before updating mailbox */
6962 wmb();
6964 if (work_mask & RXD_OPAQUE_RING_STD) {
6965 tpr->rx_std_prod_idx = std_prod_idx &
6966 tp->rx_std_ring_mask;
6967 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6968 tpr->rx_std_prod_idx);
6970 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6971 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6972 tp->rx_jmb_ring_mask;
6973 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6974 tpr->rx_jmb_prod_idx);
6977 } else if (work_mask) {
6978 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6979 * updated before the producer indices can be updated.
6980 */
6981 smp_wmb();
6983 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6984 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6986 if (tnapi != &tp->napi[1]) {
6987 tp->rx_refill = true;
6988 napi_schedule(&tp->napi[1].napi);
6995 static void tg3_poll_link(struct tg3 *tp)
6997 /* handle link change and other phy events */
6998 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6999 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7001 if (sblk->status & SD_STATUS_LINK_CHG) {
7002 sblk->status = SD_STATUS_UPDATED |
7003 (sblk->status & ~SD_STATUS_LINK_CHG);
7004 spin_lock(&tp->lock);
7005 if (tg3_flag(tp, USE_PHYLIB)) {
7007 (MAC_STATUS_SYNC_CHANGED |
7008 MAC_STATUS_CFG_CHANGED |
7009 MAC_STATUS_MI_COMPLETION |
7010 MAC_STATUS_LNKSTATE_CHANGED));
7013 tg3_setup_phy(tp, false);
7014 spin_unlock(&tp->lock);
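/* Move refilled rx buffers from a per-vector producer ring set (spr)
 * into the master set (dpr, tp->napi[0]'s prodring), the only one
 * actually posted to the chip; see the RSS branch of tg3_poll_work().
 */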
7019 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7020 struct tg3_rx_prodring_set *dpr,
7021 struct tg3_rx_prodring_set *spr)
7023 u32 si, di, cpycnt, src_prod_idx;
7027 src_prod_idx = spr->rx_std_prod_idx;
7029 /* Make sure updates to the rx_std_buffers[] entries and the
7030 * standard producer index are seen in the correct order.
7031 */
7032 smp_rmb();
7034 if (spr->rx_std_cons_idx == src_prod_idx)
7037 if (spr->rx_std_cons_idx < src_prod_idx)
7038 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7040 cpycnt = tp->rx_std_ring_mask + 1 -
7041 spr->rx_std_cons_idx;
7043 cpycnt = min(cpycnt,
7044 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7046 si = spr->rx_std_cons_idx;
7047 di = dpr->rx_std_prod_idx;
7049 for (i = di; i < di + cpycnt; i++) {
7050 if (dpr->rx_std_buffers[i].data) {
7060 /* Ensure that updates to the rx_std_buffers ring and the
7061 * shadowed hardware producer ring from tg3_recycle_rx() are
7062 * ordered correctly WRT the skb check above.
7063 */
7064 smp_rmb();
7066 memcpy(&dpr->rx_std_buffers[di],
7067 &spr->rx_std_buffers[si],
7068 cpycnt * sizeof(struct ring_info));
7070 for (i = 0; i < cpycnt; i++, di++, si++) {
7071 struct tg3_rx_buffer_desc *sbd, *dbd;
7072 sbd = &spr->rx_std[si];
7073 dbd = &dpr->rx_std[di];
7074 dbd->addr_hi = sbd->addr_hi;
7075 dbd->addr_lo = sbd->addr_lo;
7078 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7079 tp->rx_std_ring_mask;
7080 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7081 tp->rx_std_ring_mask;
7085 src_prod_idx = spr->rx_jmb_prod_idx;
7087 /* Make sure updates to the rx_jmb_buffers[] entries and
7088 * the jumbo producer index are seen in the correct order.
7089 */
7090 smp_rmb();
7092 if (spr->rx_jmb_cons_idx == src_prod_idx)
7095 if (spr->rx_jmb_cons_idx < src_prod_idx)
7096 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7098 cpycnt = tp->rx_jmb_ring_mask + 1 -
7099 spr->rx_jmb_cons_idx;
7101 cpycnt = min(cpycnt,
7102 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7104 si = spr->rx_jmb_cons_idx;
7105 di = dpr->rx_jmb_prod_idx;
7107 for (i = di; i < di + cpycnt; i++) {
7108 if (dpr->rx_jmb_buffers[i].data) {
7118 /* Ensure that updates to the rx_jmb_buffers ring and the
7119 * shadowed hardware producer ring from tg3_recycle_rx() are
7120 * ordered correctly WRT the skb check above.
7121 */
7122 smp_rmb();
7124 memcpy(&dpr->rx_jmb_buffers[di],
7125 &spr->rx_jmb_buffers[si],
7126 cpycnt * sizeof(struct ring_info));
7128 for (i = 0; i < cpycnt; i++, di++, si++) {
7129 struct tg3_rx_buffer_desc *sbd, *dbd;
7130 sbd = &spr->rx_jmb[si].std;
7131 dbd = &dpr->rx_jmb[di].std;
7132 dbd->addr_hi = sbd->addr_hi;
7133 dbd->addr_lo = sbd->addr_lo;
7136 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7137 tp->rx_jmb_ring_mask;
7138 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7139 tp->rx_jmb_ring_mask;
7145 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7147 struct tg3 *tp = tnapi->tp;
7149 /* run TX completion thread */
7150 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7152 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7156 if (!tnapi->rx_rcb_prod_idx)
7159 /* run RX thread, within the bounds set by NAPI.
7160 * All RX "locking" is done by ensuring outside
7161 * code synchronizes with tg3->napi.poll()
7163 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7164 work_done += tg3_rx(tnapi, budget - work_done);
7166 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7167 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7169 u32 std_prod_idx = dpr->rx_std_prod_idx;
7170 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7172 tp->rx_refill = false;
7173 for (i = 1; i <= tp->rxq_cnt; i++)
7174 err |= tg3_rx_prodring_xfer(tp, dpr,
7175 &tp->napi[i].prodring);
7179 if (std_prod_idx != dpr->rx_std_prod_idx)
7180 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7181 dpr->rx_std_prod_idx);
7183 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7184 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7185 dpr->rx_jmb_prod_idx);
7190 tw32_f(HOSTCC_MODE, tp->coal_now);
7196 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7198 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7199 schedule_work(&tp->reset_task);
7202 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7204 cancel_work_sync(&tp->reset_task);
7205 tg3_flag_clear(tp, RESET_TASK_PENDING);
7206 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7209 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7211 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7212 struct tg3 *tp = tnapi->tp;
7214 struct tg3_hw_status *sblk = tnapi->hw_status;
7217 work_done = tg3_poll_work(tnapi, work_done, budget);
7219 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7222 if (unlikely(work_done >= budget))
7225 /* tnapi->last_tag is used in tg3_int_reenable() below
7226 * to tell the hw how much work has been processed,
7227 * so we must read it before checking for more work.
7229 tnapi->last_tag = sblk->status_tag;
7230 tnapi->last_irq_tag = tnapi->last_tag;
7233 /* check for RX/TX work to do */
7234 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7235 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7237 /* This test is not race-free, but looping again here
7238 * reduces the number of interrupts taken.
7240 if (tnapi == &tp->napi[1] && tp->rx_refill)
7243 napi_complete_done(napi, work_done);
7244 /* Reenable interrupts. */
7245 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7247 /* This test here is synchronized by napi_schedule()
7248 * and napi_complete() to close the race condition.
7250 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7251 tw32(HOSTCC_MODE, tp->coalesce_mode |
7252 HOSTCC_MODE_ENABLE |
7263 /* work_done is guaranteed to be less than budget. */
7264 napi_complete(napi);
7265 tg3_reset_task_schedule(tp);
7269 static void tg3_process_error(struct tg3 *tp)
7272 bool real_error = false;
7274 if (tg3_flag(tp, ERROR_PROCESSED))
7277 /* Check Flow Attention register */
7278 val = tr32(HOSTCC_FLOW_ATTN);
7279 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7280 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7284 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7285 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7289 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7290 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7299 tg3_flag_set(tp, ERROR_PROCESSED);
7300 tg3_reset_task_schedule(tp);
7303 static int tg3_poll(struct napi_struct *napi, int budget)
7305 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7306 struct tg3 *tp = tnapi->tp;
7308 struct tg3_hw_status *sblk = tnapi->hw_status;
7311 if (sblk->status & SD_STATUS_ERROR)
7312 tg3_process_error(tp);
7316 work_done = tg3_poll_work(tnapi, work_done, budget);
7318 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7321 if (unlikely(work_done >= budget))
7324 if (tg3_flag(tp, TAGGED_STATUS)) {
7325 /* tnapi->last_tag is used in tg3_int_reenable() below
7326 * to tell the hw how much work has been processed,
7327 * so we must read it before checking for more work.
7329 tnapi->last_tag = sblk->status_tag;
7330 tnapi->last_irq_tag = tnapi->last_tag;
7333 sblk->status &= ~SD_STATUS_UPDATED;
7335 if (likely(!tg3_has_work(tnapi))) {
7336 napi_complete_done(napi, work_done);
7337 tg3_int_reenable(tnapi);
7345 /* work_done is guaranteed to be less than budget. */
7346 napi_complete(napi);
7347 tg3_reset_task_schedule(tp);
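/* Illustrative sketch (not part of the driver): the shape of the NAPI
 * contract that tg3_poll() and tg3_poll_msix() follow. A poll handler
 * may consume at most "budget" packets; only when it finishes early may
 * it call napi_complete_done() and re-enable the device interrupt. The
 * tg3_example_* names here are placeholders, not real tg3 symbols.
 */
static inline int tg3_example_napi_poll_shape(int work_done, int budget)
{
	/* 1. reap TX completions, then up to (budget - work_done) RX pkts
	 * 2. if work_done >= budget, return budget and stay in poll mode
	 * 3. else napi_complete_done() and re-arm the interrupt (here a
	 *    tag write to the interrupt mailbox)
	 */
	return work_done < budget ? work_done : budget;
}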
7351 static void tg3_napi_disable(struct tg3 *tp)
7355 for (i = tp->irq_cnt - 1; i >= 0; i--)
7356 napi_disable(&tp->napi[i].napi);
7359 static void tg3_napi_enable(struct tg3 *tp)
7363 for (i = 0; i < tp->irq_cnt; i++)
7364 napi_enable(&tp->napi[i].napi);
7367 static void tg3_napi_init(struct tg3 *tp)
7371 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7372 for (i = 1; i < tp->irq_cnt; i++)
7373 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7376 static void tg3_napi_fini(struct tg3 *tp)
7380 for (i = 0; i < tp->irq_cnt; i++)
7381 netif_napi_del(&tp->napi[i].napi);
7384 static inline void tg3_netif_stop(struct tg3 *tp)
7386 netif_trans_update(tp->dev); /* prevent tx timeout */
7387 tg3_napi_disable(tp);
7388 netif_carrier_off(tp->dev);
7389 netif_tx_disable(tp->dev);
7392 /* tp->lock must be held */
7393 static inline void tg3_netif_start(struct tg3 *tp)
7397 /* NOTE: unconditional netif_tx_wake_all_queues is only
7398 * appropriate so long as all callers are assured to
7399 * have free tx slots (such as after tg3_init_hw)
7401 netif_tx_wake_all_queues(tp->dev);
7404 netif_carrier_on(tp->dev);
7406 tg3_napi_enable(tp);
7407 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7408 tg3_enable_ints(tp);
7411 static void tg3_irq_quiesce(struct tg3 *tp)
7412 __releases(tp->lock)
7413 __acquires(tp->lock)
7417 BUG_ON(tp->irq_sync);
7422 spin_unlock_bh(&tp->lock);
7424 for (i = 0; i < tp->irq_cnt; i++)
7425 synchronize_irq(tp->napi[i].irq_vec);
7427 spin_lock_bh(&tp->lock);
7430 /* Fully shut down all tg3 driver activity elsewhere in the system.
7431 * If irq_sync is non-zero, then the IRQ handlers must also be
7432 * synchronized. Most of the time, this is not necessary except when
7433 * shutting down the device.
7435 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7437 spin_lock_bh(&tp->lock);
7439 tg3_irq_quiesce(tp);
7442 static inline void tg3_full_unlock(struct tg3 *tp)
7444 spin_unlock_bh(&tp->lock);
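/* Illustrative usage (not part of the driver): a typical configuration
 * path brackets hardware changes with the full lock, passing a non-zero
 * irq_sync when the IRQ handlers must not race with the update:
 *
 *	tg3_full_lock(tp, 1);
 *	... reprogram or reset the hardware ...
 *	tg3_full_unlock(tp);
 *
 * With irq_sync == 0 only the spinlock is taken, which is sufficient
 * for changes the interrupt path can tolerate seeing mid-flight.
 */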
7447 /* One-shot MSI handler - Chip automatically disables interrupt
7448 * after sending MSI so driver doesn't have to do it.
7450 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7452 struct tg3_napi *tnapi = dev_id;
7453 struct tg3 *tp = tnapi->tp;
7455 prefetch(tnapi->hw_status);
7457 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7459 if (likely(!tg3_irq_sync(tp)))
7460 napi_schedule(&tnapi->napi);
7465 /* MSI ISR - No need to check for interrupt sharing and no need to
7466 * flush status block and interrupt mailbox. PCI ordering rules
7467 * guarantee that MSI will arrive after the status block.
7469 static irqreturn_t tg3_msi(int irq, void *dev_id)
7471 struct tg3_napi *tnapi = dev_id;
7472 struct tg3 *tp = tnapi->tp;
7474 prefetch(tnapi->hw_status);
7476 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7478 * Writing any value to intr-mbox-0 clears PCI INTA# and
7479 * chip-internal interrupt pending events.
7480 * Writing non-zero to intr-mbox-0 additionally tells the
7481 * NIC to stop sending us irqs, engaging "in-intr-handler"
7484 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7485 if (likely(!tg3_irq_sync(tp)))
7486 napi_schedule(&tnapi->napi);
7488 return IRQ_RETVAL(1);
7491 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7493 struct tg3_napi *tnapi = dev_id;
7494 struct tg3 *tp = tnapi->tp;
7495 struct tg3_hw_status *sblk = tnapi->hw_status;
7496 unsigned int handled = 1;
7498 /* In INTx mode, it is possible for the interrupt to arrive at
7499 * the CPU before the status block that was posted prior to it.
7500 * Reading the PCI State register will confirm whether the
7501 * interrupt is ours and will flush the status block.
7503 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7504 if (tg3_flag(tp, CHIP_RESETTING) ||
7505 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7512 * Writing any value to intr-mbox-0 clears PCI INTA# and
7513 * chip-internal interrupt pending events.
7514 * Writing non-zero to intr-mbox-0 additionally tells the
7515 * NIC to stop sending us irqs, engaging "in-intr-handler"
7518 * Flush the mailbox to de-assert the IRQ immediately to prevent
7519 * spurious interrupts. The flush impacts performance but
7520 * excessive spurious interrupts can be worse in some cases.
7522 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7523 if (tg3_irq_sync(tp))
7525 sblk->status &= ~SD_STATUS_UPDATED;
7526 if (likely(tg3_has_work(tnapi))) {
7527 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7528 napi_schedule(&tnapi->napi);
7530 /* No work, shared interrupt perhaps? re-enable
7531 * interrupts, and flush that PCI write
7533 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7537 return IRQ_RETVAL(handled);
7540 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7542 struct tg3_napi *tnapi = dev_id;
7543 struct tg3 *tp = tnapi->tp;
7544 struct tg3_hw_status *sblk = tnapi->hw_status;
7545 unsigned int handled = 1;
7547 /* In INTx mode, it is possible for the interrupt to arrive at
7548 * the CPU before the status block that was posted prior to it.
7549 * Reading the PCI State register will confirm whether the
7550 * interrupt is ours and will flush the status block.
7552 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7553 if (tg3_flag(tp, CHIP_RESETTING) ||
7554 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7561 * writing any value to intr-mbox-0 clears PCI INTA# and
7562 * chip-internal interrupt pending events.
7563 * writing non-zero to intr-mbox-0 additionally tells the
7564 * NIC to stop sending us irqs, engaging "in-intr-handler"
7567 * Flush the mailbox to de-assert the IRQ immediately to prevent
7568 * spurious interrupts. The flush impacts performance but
7569 * excessive spurious interrupts can be worse in some cases.
7571 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7574 * In a shared interrupt configuration, sometimes other devices'
7575 * interrupts will scream. We record the current status tag here
7576 * so that the above check can report that the screaming interrupts
7577 * are unhandled. Eventually they will be silenced.
7579 tnapi->last_irq_tag = sblk->status_tag;
7581 if (tg3_irq_sync(tp))
7584 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7586 napi_schedule(&tnapi->napi);
7589 return IRQ_RETVAL(handled);
7592 /* ISR for interrupt test */
7593 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7595 struct tg3_napi *tnapi = dev_id;
7596 struct tg3 *tp = tnapi->tp;
7597 struct tg3_hw_status *sblk = tnapi->hw_status;
7599 if ((sblk->status & SD_STATUS_UPDATED) ||
7600 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7601 tg3_disable_ints(tp);
7602 return IRQ_RETVAL(1);
7604 return IRQ_RETVAL(0);
7607 #ifdef CONFIG_NET_POLL_CONTROLLER
7608 static void tg3_poll_controller(struct net_device *dev)
7611 struct tg3 *tp = netdev_priv(dev);
7613 if (tg3_irq_sync(tp))
7616 for (i = 0; i < tp->irq_cnt; i++)
7617 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7621 static void tg3_tx_timeout(struct net_device *dev)
7623 struct tg3 *tp = netdev_priv(dev);
7625 if (netif_msg_tx_err(tp)) {
7626 netdev_err(dev, "transmit timed out, resetting\n");
7630 tg3_reset_task_schedule(tp);
7633 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7634 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7636 u32 base = (u32) mapping & 0xffffffff;
7638 return base + len + 8 < base;
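/* Worked example (illustrative): with base = 0xfffffff0 and len = 0x100,
 * base + len + 8 wraps to 0xf8 in 32-bit arithmetic, which is < base, so
 * the buffer is flagged as crossing a 4GB boundary. The extra 8 bytes
 * appear to be conservative slack beyond the buffer end.
 */
static inline int tg3_example_4g_overflow(void)
{
	u32 base = 0xfffffff0;
	u32 len = 0x100;

	return base + len + 8 < base;	/* evaluates to 1 (true) */
}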
7641 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7642 * of any 4GB boundaries: 4G, 8G, etc
7644 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7647 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7648 u32 base = (u32) mapping & 0xffffffff;
7650 return ((base + len + (mss & 0x3fff)) < base);
7655 /* Test for DMA addresses > 40-bit */
7656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7660 if (tg3_flag(tp, 40BIT_DMA_BUG))
7661 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7668 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7669 dma_addr_t mapping, u32 len, u32 flags,
7672 txbd->addr_hi = ((u64) mapping >> 32);
7673 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7674 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7675 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
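/* Illustrative sketch: how tg3_tx_set_bd() splits a 64-bit DMA address
 * across the two 32-bit descriptor words. E.g. mapping = 0x123456789a
 * gives addr_hi = 0x12 and addr_lo = 0x3456789a.
 */
static inline void tg3_example_bd_split(dma_addr_t mapping, u32 *hi, u32 *lo)
{
	*hi = (u64) mapping >> 32;
	*lo = (u64) mapping & 0xffffffff;
}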
7678 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7679 dma_addr_t map, u32 len, u32 flags,
7682 struct tg3 *tp = tnapi->tp;
7685 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7688 if (tg3_4g_overflow_test(map, len))
7691 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7694 if (tg3_40bit_overflow_test(tp, map, len))
7697 if (tp->dma_limit) {
7698 u32 prvidx = *entry;
7699 u32 tmp_flag = flags & ~TXD_FLAG_END;
7700 while (len > tp->dma_limit && *budget) {
7701 u32 frag_len = tp->dma_limit;
7702 len -= tp->dma_limit;
7704 /* Avoid the 8-byte DMA problem */
7706 len += tp->dma_limit / 2;
7707 frag_len = tp->dma_limit / 2;
7710 tnapi->tx_buffers[*entry].fragmented = true;
7712 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7713 frag_len, tmp_flag, mss, vlan);
7716 *entry = NEXT_TX(*entry);
7723 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7724 len, flags, mss, vlan);
7726 *entry = NEXT_TX(*entry);
7729 tnapi->tx_buffers[prvidx].fragmented = false;
7733 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7734 len, flags, mss, vlan);
7735 *entry = NEXT_TX(*entry);
7741 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7744 struct sk_buff *skb;
7745 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7750 pci_unmap_single(tnapi->tp->pdev,
7751 dma_unmap_addr(txb, mapping),
7755 while (txb->fragmented) {
7756 txb->fragmented = false;
7757 entry = NEXT_TX(entry);
7758 txb = &tnapi->tx_buffers[entry];
7761 for (i = 0; i <= last; i++) {
7762 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7764 entry = NEXT_TX(entry);
7765 txb = &tnapi->tx_buffers[entry];
7767 pci_unmap_page(tnapi->tp->pdev,
7768 dma_unmap_addr(txb, mapping),
7769 skb_frag_size(frag), PCI_DMA_TODEVICE);
7771 while (txb->fragmented) {
7772 txb->fragmented = false;
7773 entry = NEXT_TX(entry);
7774 txb = &tnapi->tx_buffers[entry];
7779 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7780 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7781 struct sk_buff **pskb,
7782 u32 *entry, u32 *budget,
7783 u32 base_flags, u32 mss, u32 vlan)
7785 struct tg3 *tp = tnapi->tp;
7786 struct sk_buff *new_skb, *skb = *pskb;
7787 dma_addr_t new_addr = 0;
7790 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7791 new_skb = skb_copy(skb, GFP_ATOMIC);
7793 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7795 new_skb = skb_copy_expand(skb,
7796 skb_headroom(skb) + more_headroom,
7797 skb_tailroom(skb), GFP_ATOMIC);
7803 /* New SKB is guaranteed to be linear. */
7804 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7806 /* Make sure the mapping succeeded */
7807 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7808 dev_kfree_skb_any(new_skb);
7811 u32 save_entry = *entry;
7813 base_flags |= TXD_FLAG_END;
7815 tnapi->tx_buffers[*entry].skb = new_skb;
7816 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7819 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7820 new_skb->len, base_flags,
7822 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7823 dev_kfree_skb_any(new_skb);
7829 dev_kfree_skb_any(skb);
7834 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7836 /* Check if we will never have enough descriptors,
7837 * as gso_segs can be more than the current ring size.
7839 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
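/* Worked example (illustrative, not real defaults): each GSO segment can
 * cost roughly three descriptors in the worst case (see frag_cnt_est in
 * tg3_tso_bug() below), so with, say, tx_pending = 511 the check above
 * only accepts packets with gso_segs < 511 / 3 = 170.
 */
static inline bool tg3_example_gso_fits(u32 gso_segs, u32 tx_pending)
{
	return gso_segs < tx_pending / 3;	/* e.g. 100 < 170 -> true */
}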
7842 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7844 /* Use GSO to work around all TSO packets that meet HW bug conditions
7845 * indicated in tg3_tx_frag_set()
7847 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7848 struct netdev_queue *txq, struct sk_buff *skb)
7850 struct sk_buff *segs, *nskb;
7851 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7853 /* Estimate the number of fragments in the worst case */
7854 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7855 netif_tx_stop_queue(txq);
7857 /* netif_tx_stop_queue() must be done before checking
7858 * the tx index in tg3_tx_avail() below, because in
7859 * tg3_tx(), we update tx index before checking for
7860 * netif_tx_queue_stopped().
7863 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7864 return NETDEV_TX_BUSY;
7866 netif_tx_wake_queue(txq);
7869 segs = skb_gso_segment(skb, tp->dev->features &
7870 ~(NETIF_F_TSO | NETIF_F_TSO6));
7871 if (IS_ERR(segs) || !segs)
7872 goto tg3_tso_bug_end;
7878 tg3_start_xmit(nskb, tp->dev);
7882 dev_kfree_skb_any(skb);
7884 return NETDEV_TX_OK;
7887 /* hard_start_xmit for all devices */
7888 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7890 struct tg3 *tp = netdev_priv(dev);
7891 u32 len, entry, base_flags, mss, vlan = 0;
7893 int i = -1, would_hit_hwbug;
7895 struct tg3_napi *tnapi;
7896 struct netdev_queue *txq;
7898 struct iphdr *iph = NULL;
7899 struct tcphdr *tcph = NULL;
7900 __sum16 tcp_csum = 0, ip_csum = 0;
7901 __be16 ip_tot_len = 0;
7903 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7904 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7905 if (tg3_flag(tp, ENABLE_TSS))
7908 budget = tg3_tx_avail(tnapi);
7910 /* We are running in BH disabled context with netif_tx_lock
7911 * and TX reclaim runs via tp->napi.poll inside of a software
7912 * interrupt. Furthermore, IRQ processing runs lockless so we have
7913 * no IRQ context deadlocks to worry about either. Rejoice!
7915 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7916 if (!netif_tx_queue_stopped(txq)) {
7917 netif_tx_stop_queue(txq);
7919 /* This is a hard error, log it. */
7921 "BUG! Tx Ring full when queue awake!\n");
7923 return NETDEV_TX_BUSY;
7926 entry = tnapi->tx_prod;
7929 mss = skb_shinfo(skb)->gso_size;
7931 u32 tcp_opt_len, hdr_len;
7933 if (skb_cow_head(skb, 0))
7937 tcp_opt_len = tcp_optlen(skb);
7939 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7941 /* HW/FW cannot correctly segment packets that have been
7942 * vlan encapsulated.
7944 if (skb->protocol == htons(ETH_P_8021Q) ||
7945 skb->protocol == htons(ETH_P_8021AD)) {
7946 if (tg3_tso_bug_gso_check(tnapi, skb))
7947 return tg3_tso_bug(tp, tnapi, txq, skb);
7951 if (!skb_is_gso_v6(skb)) {
7952 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7953 tg3_flag(tp, TSO_BUG)) {
7954 if (tg3_tso_bug_gso_check(tnapi, skb))
7955 return tg3_tso_bug(tp, tnapi, txq, skb);
7958 ip_csum = iph->check;
7959 ip_tot_len = iph->tot_len;
7961 iph->tot_len = htons(mss + hdr_len);
7964 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7965 TXD_FLAG_CPU_POST_DMA);
7967 tcph = tcp_hdr(skb);
7968 tcp_csum = tcph->check;
7970 if (tg3_flag(tp, HW_TSO_1) ||
7971 tg3_flag(tp, HW_TSO_2) ||
7972 tg3_flag(tp, HW_TSO_3)) {
7974 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7976 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7980 if (tg3_flag(tp, HW_TSO_3)) {
7981 mss |= (hdr_len & 0xc) << 12;
7983 base_flags |= 0x00000010;
7984 base_flags |= (hdr_len & 0x3e0) << 5;
7985 } else if (tg3_flag(tp, HW_TSO_2))
7986 mss |= hdr_len << 9;
7987 else if (tg3_flag(tp, HW_TSO_1) ||
7988 tg3_asic_rev(tp) == ASIC_REV_5705) {
7989 if (tcp_opt_len || iph->ihl > 5) {
7992 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7993 mss |= (tsflags << 11);
7996 if (tcp_opt_len || iph->ihl > 5) {
7999 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8000 base_flags |= tsflags << 12;
8003 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8004 /* HW/FW cannot correctly checksum packets that have been
8005 * vlan encapsulated.
8007 if (skb->protocol == htons(ETH_P_8021Q) ||
8008 skb->protocol == htons(ETH_P_8021AD)) {
8009 if (skb_checksum_help(skb))
8012 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8016 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8017 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8018 base_flags |= TXD_FLAG_JMB_PKT;
8020 if (skb_vlan_tag_present(skb)) {
8021 base_flags |= TXD_FLAG_VLAN;
8022 vlan = skb_vlan_tag_get(skb);
8025 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8026 tg3_flag(tp, TX_TSTAMP_EN)) {
8027 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8028 base_flags |= TXD_FLAG_HWTSTAMP;
8031 len = skb_headlen(skb);
8033 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8034 if (pci_dma_mapping_error(tp->pdev, mapping))
8038 tnapi->tx_buffers[entry].skb = skb;
8039 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8041 would_hit_hwbug = 0;
8043 if (tg3_flag(tp, 5701_DMA_BUG))
8044 would_hit_hwbug = 1;
8046 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8047 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8049 would_hit_hwbug = 1;
8050 } else if (skb_shinfo(skb)->nr_frags > 0) {
8053 if (!tg3_flag(tp, HW_TSO_1) &&
8054 !tg3_flag(tp, HW_TSO_2) &&
8055 !tg3_flag(tp, HW_TSO_3))
8058 /* Now loop through additional data
8059 * fragments, and queue them.
8061 last = skb_shinfo(skb)->nr_frags - 1;
8062 for (i = 0; i <= last; i++) {
8063 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8065 len = skb_frag_size(frag);
8066 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8067 len, DMA_TO_DEVICE);
8069 tnapi->tx_buffers[entry].skb = NULL;
8070 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8072 if (dma_mapping_error(&tp->pdev->dev, mapping))
8076 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8078 ((i == last) ? TXD_FLAG_END : 0),
8080 would_hit_hwbug = 1;
8086 if (would_hit_hwbug) {
8087 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8089 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8090 /* If it's a TSO packet, do GSO instead of
8091 * allocating and copying to a large linear SKB
8094 iph->check = ip_csum;
8095 iph->tot_len = ip_tot_len;
8097 tcph->check = tcp_csum;
8098 return tg3_tso_bug(tp, tnapi, txq, skb);
8101 /* If the workaround fails due to memory/mapping
8102 * failure, silently drop this packet.
8104 entry = tnapi->tx_prod;
8105 budget = tg3_tx_avail(tnapi);
8106 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8107 base_flags, mss, vlan))
8111 skb_tx_timestamp(skb);
8112 netdev_tx_sent_queue(txq, skb->len);
8114 /* Sync BD data before updating mailbox */
8117 tnapi->tx_prod = entry;
8118 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8119 netif_tx_stop_queue(txq);
8121 /* netif_tx_stop_queue() must be done before checking
8122 * the tx index in tg3_tx_avail() below, because in
8123 * tg3_tx(), we update tx index before checking for
8124 * netif_tx_queue_stopped().
8127 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8128 netif_tx_wake_queue(txq);
8131 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8132 /* Packets are ready, update Tx producer idx on card. */
8133 tw32_tx_mbox(tnapi->prodmbox, entry);
8137 return NETDEV_TX_OK;
8140 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8141 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8143 dev_kfree_skb_any(skb);
8146 return NETDEV_TX_OK;
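/* Illustrative sketch (not part of the driver): the stop-then-recheck
 * protocol used in tg3_start_xmit() to close the race against the
 * tg3_tx() completion path. Publishing the stopped state *before*
 * re-reading the available count guarantees that a concurrent completion
 * either observes the stopped queue and wakes it, or frees enough slots
 * for the recheck here to wake it. Names are placeholders.
 */
static inline bool tg3_example_stop_then_recheck(u32 avail_after_stop,
						 u32 wakeup_thresh)
{
	/* caller has already done netif_tx_stop_queue(txq) */
	if (avail_after_stop > wakeup_thresh) {
		/* netif_tx_wake_queue(txq): the recheck saved us */
		return true;
	}
	return false;	/* stay stopped; the completion path wakes us */
}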
8149 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8152 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8153 MAC_MODE_PORT_MODE_MASK);
8155 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8157 if (!tg3_flag(tp, 5705_PLUS))
8158 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8160 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8161 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8163 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8165 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8167 if (tg3_flag(tp, 5705_PLUS) ||
8168 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8169 tg3_asic_rev(tp) == ASIC_REV_5700)
8170 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8173 tw32(MAC_MODE, tp->mac_mode);
8177 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8179 u32 val, bmcr, mac_mode, ptest = 0;
8181 tg3_phy_toggle_apd(tp, false);
8182 tg3_phy_toggle_automdix(tp, false);
8184 if (extlpbk && tg3_phy_set_extloopbk(tp))
8187 bmcr = BMCR_FULLDPLX;
8192 bmcr |= BMCR_SPEED100;
8196 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8198 bmcr |= BMCR_SPEED100;
8201 bmcr |= BMCR_SPEED1000;
8206 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8207 tg3_readphy(tp, MII_CTRL1000, &val);
8208 val |= CTL1000_AS_MASTER |
8209 CTL1000_ENABLE_MASTER;
8210 tg3_writephy(tp, MII_CTRL1000, val);
8212 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8213 MII_TG3_FET_PTEST_TRIM_2;
8214 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8217 bmcr |= BMCR_LOOPBACK;
8219 tg3_writephy(tp, MII_BMCR, bmcr);
8221 /* The write needs to be flushed for the FETs */
8222 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8223 tg3_readphy(tp, MII_BMCR, &bmcr);
8227 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8228 tg3_asic_rev(tp) == ASIC_REV_5785) {
8229 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8230 MII_TG3_FET_PTEST_FRC_TX_LINK |
8231 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8233 /* The write needs to be flushed for the AC131 */
8234 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8237 /* Reset to prevent losing 1st rx packet intermittently */
8238 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8239 tg3_flag(tp, 5780_CLASS)) {
8240 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8242 tw32_f(MAC_RX_MODE, tp->rx_mode);
8245 mac_mode = tp->mac_mode &
8246 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8247 if (speed == SPEED_1000)
8248 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8250 mac_mode |= MAC_MODE_PORT_MODE_MII;
8252 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8253 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8255 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8256 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8257 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8258 mac_mode |= MAC_MODE_LINK_POLARITY;
8260 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8261 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8264 tw32(MAC_MODE, mac_mode);
8270 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8272 struct tg3 *tp = netdev_priv(dev);
8274 if (features & NETIF_F_LOOPBACK) {
8275 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8278 spin_lock_bh(&tp->lock);
8279 tg3_mac_loopback(tp, true);
8280 netif_carrier_on(tp->dev);
8281 spin_unlock_bh(&tp->lock);
8282 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8284 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8287 spin_lock_bh(&tp->lock);
8288 tg3_mac_loopback(tp, false);
8289 /* Force link status check */
8290 tg3_setup_phy(tp, true);
8291 spin_unlock_bh(&tp->lock);
8292 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8296 static netdev_features_t tg3_fix_features(struct net_device *dev,
8297 netdev_features_t features)
8299 struct tg3 *tp = netdev_priv(dev);
8301 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8302 features &= ~NETIF_F_ALL_TSO;
8307 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8309 netdev_features_t changed = dev->features ^ features;
8311 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8312 tg3_set_loopback(dev, features);
8317 static void tg3_rx_prodring_free(struct tg3 *tp,
8318 struct tg3_rx_prodring_set *tpr)
8322 if (tpr != &tp->napi[0].prodring) {
8323 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8324 i = (i + 1) & tp->rx_std_ring_mask)
8325 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8328 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8329 for (i = tpr->rx_jmb_cons_idx;
8330 i != tpr->rx_jmb_prod_idx;
8331 i = (i + 1) & tp->rx_jmb_ring_mask) {
8332 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8340 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8341 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8344 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8345 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8346 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8351 /* Initialize rx rings for packet processing.
8353 * The chip has been shut down and the driver detached from
8354 * the networking, so no interrupts or new tx packets will
8355 * end up in the driver. tp->{tx,}lock are held and thus
8356 * we may not sleep.
8358 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8359 struct tg3_rx_prodring_set *tpr)
8361 u32 i, rx_pkt_dma_sz;
8363 tpr->rx_std_cons_idx = 0;
8364 tpr->rx_std_prod_idx = 0;
8365 tpr->rx_jmb_cons_idx = 0;
8366 tpr->rx_jmb_prod_idx = 0;
8368 if (tpr != &tp->napi[0].prodring) {
8369 memset(&tpr->rx_std_buffers[0], 0,
8370 TG3_RX_STD_BUFF_RING_SIZE(tp));
8371 if (tpr->rx_jmb_buffers)
8372 memset(&tpr->rx_jmb_buffers[0], 0,
8373 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8377 /* Zero out all descriptors. */
8378 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8380 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8381 if (tg3_flag(tp, 5780_CLASS) &&
8382 tp->dev->mtu > ETH_DATA_LEN)
8383 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8384 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8386 /* Initialize invariants of the rings; we only set this
8387 * stuff once. This works because the card does not
8388 * write into the rx buffer posting rings.
8390 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8391 struct tg3_rx_buffer_desc *rxd;
8393 rxd = &tpr->rx_std[i];
8394 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8395 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8396 rxd->opaque = (RXD_OPAQUE_RING_STD |
8397 (i << RXD_OPAQUE_INDEX_SHIFT));
8400 /* Now allocate fresh SKBs for each rx ring. */
8401 for (i = 0; i < tp->rx_pending; i++) {
8402 unsigned int frag_size;
8404 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8406 netdev_warn(tp->dev,
8407 "Using a smaller RX standard ring. Only "
8408 "%d out of %d buffers were allocated "
8409 "successfully\n", i, tp->rx_pending);
8417 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8420 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8422 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8425 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8426 struct tg3_rx_buffer_desc *rxd;
8428 rxd = &tpr->rx_jmb[i].std;
8429 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8430 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8432 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8433 (i << RXD_OPAQUE_INDEX_SHIFT));
8436 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8437 unsigned int frag_size;
8439 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8441 netdev_warn(tp->dev,
8442 "Using a smaller RX jumbo ring. Only %d "
8443 "out of %d buffers were allocated "
8444 "successfully\n", i, tp->rx_jumbo_pending);
8447 tp->rx_jumbo_pending = i;
8456 tg3_rx_prodring_free(tp, tpr);
8460 static void tg3_rx_prodring_fini(struct tg3 *tp,
8461 struct tg3_rx_prodring_set *tpr)
8463 kfree(tpr->rx_std_buffers);
8464 tpr->rx_std_buffers = NULL;
8465 kfree(tpr->rx_jmb_buffers);
8466 tpr->rx_jmb_buffers = NULL;
8468 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8469 tpr->rx_std, tpr->rx_std_mapping);
8473 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8474 tpr->rx_jmb, tpr->rx_jmb_mapping);
8479 static int tg3_rx_prodring_init(struct tg3 *tp,
8480 struct tg3_rx_prodring_set *tpr)
8482 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8484 if (!tpr->rx_std_buffers)
8487 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8488 TG3_RX_STD_RING_BYTES(tp),
8489 &tpr->rx_std_mapping,
8494 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8495 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8497 if (!tpr->rx_jmb_buffers)
8500 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8501 TG3_RX_JMB_RING_BYTES(tp),
8502 &tpr->rx_jmb_mapping,
8511 tg3_rx_prodring_fini(tp, tpr);
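/* Illustrative note (not part of the driver): the init/fini pair above
 * follows the usual kernel unwind idiom. On any allocation failure,
 * tg3_rx_prodring_init() simply calls tg3_rx_prodring_fini(), which can
 * safely run against a partially constructed set: kfree(NULL) is a
 * no-op and the DMA frees are skipped for rings that were never
 * allocated, so only the pieces that exist are released.
 */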
8515 /* Free up pending packets in all rx/tx rings.
8517 * The chip has been shut down and the driver detached from
8518 * the networking, so no interrupts or new tx packets will
8519 * end up in the driver. tp->{tx,}lock is not held and we are not
8520 * in an interrupt context and thus may sleep.
8522 static void tg3_free_rings(struct tg3 *tp)
8526 for (j = 0; j < tp->irq_cnt; j++) {
8527 struct tg3_napi *tnapi = &tp->napi[j];
8529 tg3_rx_prodring_free(tp, &tnapi->prodring);
8531 if (!tnapi->tx_buffers)
8534 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8535 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8540 tg3_tx_skb_unmap(tnapi, i,
8541 skb_shinfo(skb)->nr_frags - 1);
8543 dev_kfree_skb_any(skb);
8545 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8549 /* Initialize tx/rx rings for packet processing.
8551 * The chip has been shut down and the driver detached from
8552 * the networking, so no interrupts or new tx packets will
8553 * end up in the driver. tp->{tx,}lock are held and thus
8554 * we may not sleep.
8556 static int tg3_init_rings(struct tg3 *tp)
8560 /* Free up all the SKBs. */
8563 for (i = 0; i < tp->irq_cnt; i++) {
8564 struct tg3_napi *tnapi = &tp->napi[i];
8566 tnapi->last_tag = 0;
8567 tnapi->last_irq_tag = 0;
8568 tnapi->hw_status->status = 0;
8569 tnapi->hw_status->status_tag = 0;
8570 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8575 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8577 tnapi->rx_rcb_ptr = 0;
8579 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8581 if (tnapi->prodring.rx_std &&
8582 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8591 static void tg3_mem_tx_release(struct tg3 *tp)
8595 for (i = 0; i < tp->irq_max; i++) {
8596 struct tg3_napi *tnapi = &tp->napi[i];
8598 if (tnapi->tx_ring) {
8599 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8600 tnapi->tx_ring, tnapi->tx_desc_mapping);
8601 tnapi->tx_ring = NULL;
8604 kfree(tnapi->tx_buffers);
8605 tnapi->tx_buffers = NULL;
8609 static int tg3_mem_tx_acquire(struct tg3 *tp)
8612 struct tg3_napi *tnapi = &tp->napi[0];
8614 /* If multivector TSS is enabled, vector 0 does not handle
8615 * tx interrupts. Don't allocate any resources for it.
8617 if (tg3_flag(tp, ENABLE_TSS))
8620 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8621 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8622 TG3_TX_RING_SIZE, GFP_KERNEL);
8623 if (!tnapi->tx_buffers)
8626 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8628 &tnapi->tx_desc_mapping,
8630 if (!tnapi->tx_ring)
8637 tg3_mem_tx_release(tp);
8641 static void tg3_mem_rx_release(struct tg3 *tp)
8645 for (i = 0; i < tp->irq_max; i++) {
8646 struct tg3_napi *tnapi = &tp->napi[i];
8648 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8653 dma_free_coherent(&tp->pdev->dev,
8654 TG3_RX_RCB_RING_BYTES(tp),
8656 tnapi->rx_rcb_mapping);
8657 tnapi->rx_rcb = NULL;
8661 static int tg3_mem_rx_acquire(struct tg3 *tp)
8663 unsigned int i, limit;
8665 limit = tp->rxq_cnt;
8667 /* If RSS is enabled, we need a (dummy) producer ring
8668 * set on vector zero. This is the true hw prodring.
8670 if (tg3_flag(tp, ENABLE_RSS))
8673 for (i = 0; i < limit; i++) {
8674 struct tg3_napi *tnapi = &tp->napi[i];
8676 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8679 /* If multivector RSS is enabled, vector 0
8680 * does not handle rx or tx interrupts.
8681 * Don't allocate any resources for it.
8683 if (!i && tg3_flag(tp, ENABLE_RSS))
8686 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8687 TG3_RX_RCB_RING_BYTES(tp),
8688 &tnapi->rx_rcb_mapping,
8697 tg3_mem_rx_release(tp);
8702 * Must not be invoked with interrupt sources disabled and
8703 * the hardware shut down.
8705 static void tg3_free_consistent(struct tg3 *tp)
8709 for (i = 0; i < tp->irq_cnt; i++) {
8710 struct tg3_napi *tnapi = &tp->napi[i];
8712 if (tnapi->hw_status) {
8713 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8715 tnapi->status_mapping);
8716 tnapi->hw_status = NULL;
8720 tg3_mem_rx_release(tp);
8721 tg3_mem_tx_release(tp);
8724 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8725 tp->hw_stats, tp->stats_mapping);
8726 tp->hw_stats = NULL;
8731 * Must not be invoked with interrupt sources disabled and
8732 * the hardware shut down. Can sleep.
8734 static int tg3_alloc_consistent(struct tg3 *tp)
8738 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8739 sizeof(struct tg3_hw_stats),
8740 &tp->stats_mapping, GFP_KERNEL);
8744 for (i = 0; i < tp->irq_cnt; i++) {
8745 struct tg3_napi *tnapi = &tp->napi[i];
8746 struct tg3_hw_status *sblk;
8748 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8750 &tnapi->status_mapping,
8752 if (!tnapi->hw_status)
8755 sblk = tnapi->hw_status;
8757 if (tg3_flag(tp, ENABLE_RSS)) {
8758 u16 *prodptr = NULL;
8761 * When RSS is enabled, the status block format changes
8762 * slightly. The "rx_jumbo_consumer", "reserved",
8763 * and "rx_mini_consumer" members get mapped to the
8764 * other three rx return ring producer indexes.
8768 prodptr = &sblk->idx[0].rx_producer;
8771 prodptr = &sblk->rx_jumbo_consumer;
8774 prodptr = &sblk->reserved;
8777 prodptr = &sblk->rx_mini_consumer;
8780 tnapi->rx_rcb_prod_idx = prodptr;
8782 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8786 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8792 tg3_free_consistent(tp);
8796 #define MAX_WAIT_CNT 1000
8798 /* To stop a block, clear the enable bit and poll till it
8799 * clears. tp->lock is held.
8801 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8806 if (tg3_flag(tp, 5705_PLUS)) {
8813 /* We can't enable/disable these bits of the
8814 * 5705/5750, just say success.
8827 for (i = 0; i < MAX_WAIT_CNT; i++) {
8828 if (pci_channel_offline(tp->pdev)) {
8829 dev_err(&tp->pdev->dev,
8830 "tg3_stop_block device offline, "
8831 "ofs=%lx enable_bit=%x\n",
8838 if ((val & enable_bit) == 0)
8842 if (i == MAX_WAIT_CNT && !silent) {
8843 dev_err(&tp->pdev->dev,
8844 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8852 /* tp->lock is held. */
8853 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8857 tg3_disable_ints(tp);
8859 if (pci_channel_offline(tp->pdev)) {
8860 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8861 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8866 tp->rx_mode &= ~RX_MODE_ENABLE;
8867 tw32_f(MAC_RX_MODE, tp->rx_mode);
8870 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8871 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8872 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8873 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8874 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8875 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8877 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8878 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8879 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8880 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8881 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8882 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8883 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8885 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8886 tw32_f(MAC_MODE, tp->mac_mode);
8889 tp->tx_mode &= ~TX_MODE_ENABLE;
8890 tw32_f(MAC_TX_MODE, tp->tx_mode);
8892 for (i = 0; i < MAX_WAIT_CNT; i++) {
8894 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8897 if (i >= MAX_WAIT_CNT) {
8898 dev_err(&tp->pdev->dev,
8899 "%s timed out, TX_MODE_ENABLE will not clear "
8900 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8904 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8905 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8906 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8908 tw32(FTQ_RESET, 0xffffffff);
8909 tw32(FTQ_RESET, 0x00000000);
8911 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8912 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8915 for (i = 0; i < tp->irq_cnt; i++) {
8916 struct tg3_napi *tnapi = &tp->napi[i];
8917 if (tnapi->hw_status)
8918 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8924 /* Save PCI command register before chip reset */
8925 static void tg3_save_pci_state(struct tg3 *tp)
8927 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8930 /* Restore PCI state after chip reset */
8931 static void tg3_restore_pci_state(struct tg3 *tp)
8935 /* Re-enable indirect register accesses. */
8936 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8937 tp->misc_host_ctrl);
8939 /* Set MAX PCI retry to zero. */
8940 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8941 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8942 tg3_flag(tp, PCIX_MODE))
8943 val |= PCISTATE_RETRY_SAME_DMA;
8944 /* Allow reads and writes to the APE register and memory space. */
8945 if (tg3_flag(tp, ENABLE_APE))
8946 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8947 PCISTATE_ALLOW_APE_SHMEM_WR |
8948 PCISTATE_ALLOW_APE_PSPACE_WR;
8949 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8951 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8953 if (!tg3_flag(tp, PCI_EXPRESS)) {
8954 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8955 tp->pci_cacheline_sz);
8956 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8960 /* Make sure PCI-X relaxed ordering bit is clear. */
8961 if (tg3_flag(tp, PCIX_MODE)) {
8964 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8966 pcix_cmd &= ~PCI_X_CMD_ERO;
8967 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8971 if (tg3_flag(tp, 5780_CLASS)) {
8973 /* Chip reset on 5780 will reset MSI enable bit,
8974 * so we need to restore it.
8976 if (tg3_flag(tp, USING_MSI)) {
8979 pci_read_config_word(tp->pdev,
8980 tp->msi_cap + PCI_MSI_FLAGS,
8982 pci_write_config_word(tp->pdev,
8983 tp->msi_cap + PCI_MSI_FLAGS,
8984 ctrl | PCI_MSI_FLAGS_ENABLE);
8985 val = tr32(MSGINT_MODE);
8986 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8991 static void tg3_override_clk(struct tg3 *tp)
8995 switch (tg3_asic_rev(tp)) {
8997 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8998 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8999 TG3_CPMU_MAC_ORIDE_ENABLE);
9004 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9012 static void tg3_restore_clk(struct tg3 *tp)
9016 switch (tg3_asic_rev(tp)) {
9018 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9019 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9020 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9025 val = tr32(TG3_CPMU_CLCK_ORIDE);
9026 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9034 /* tp->lock is held. */
9035 static int tg3_chip_reset(struct tg3 *tp)
9036 __releases(tp->lock)
9037 __acquires(tp->lock)
9040 void (*write_op)(struct tg3 *, u32, u32);
9043 if (!pci_device_is_present(tp->pdev))
9048 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9050 /* No matching tg3_nvram_unlock() after this because
9051 * chip reset below will undo the nvram lock.
9053 tp->nvram_lock_cnt = 0;
9055 /* GRC_MISC_CFG core clock reset will clear the memory
9056 * enable bit in PCI register 4 and the MSI enable bit
9057 * on some chips, so we save relevant registers here.
9059 tg3_save_pci_state(tp);
9061 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9062 tg3_flag(tp, 5755_PLUS))
9063 tw32(GRC_FASTBOOT_PC, 0);
9066 * We must avoid the readl() that normally takes place.
9067 * It locks machines, causes machine checks, and other
9068 * fun things. So, temporarily disable the 5701
9069 * hardware workaround, while we do the reset.
9071 write_op = tp->write32;
9072 if (write_op == tg3_write_flush_reg32)
9073 tp->write32 = tg3_write32;
9075 /* Prevent the irq handler from reading or writing PCI registers
9076 * during chip reset when the memory enable bit in the PCI command
9077 * register may be cleared. The chip does not generate interrupts
9078 * at this time, but the irq handler may still be called due to irq
9079 * sharing or irqpoll.
9081 tg3_flag_set(tp, CHIP_RESETTING);
9082 for (i = 0; i < tp->irq_cnt; i++) {
9083 struct tg3_napi *tnapi = &tp->napi[i];
9084 if (tnapi->hw_status) {
9085 tnapi->hw_status->status = 0;
9086 tnapi->hw_status->status_tag = 0;
9088 tnapi->last_tag = 0;
9089 tnapi->last_irq_tag = 0;
9093 tg3_full_unlock(tp);
9095 for (i = 0; i < tp->irq_cnt; i++)
9096 synchronize_irq(tp->napi[i].irq_vec);
9098 tg3_full_lock(tp, 0);
9100 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9101 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9102 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9106 val = GRC_MISC_CFG_CORECLK_RESET;
9108 if (tg3_flag(tp, PCI_EXPRESS)) {
9109 /* Force PCIe 1.0a mode */
9110 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9111 !tg3_flag(tp, 57765_PLUS) &&
9112 tr32(TG3_PCIE_PHY_TSTCTL) ==
9113 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9114 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9116 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9117 tw32(GRC_MISC_CFG, (1 << 29));
9122 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9123 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9124 tw32(GRC_VCPU_EXT_CTRL,
9125 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9128 /* Set the clock to the highest frequency to avoid timeouts. With link
9129 * aware mode, the clock speed could be slow and bootcode does not
9130 * complete within the expected time. Override the clock to allow the
9131 * bootcode to finish sooner and then restore it.
9133 tg3_override_clk(tp);
9135 /* Manage gphy power for all CPMU absent PCIe devices. */
9136 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9137 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9139 tw32(GRC_MISC_CFG, val);
9141 /* restore 5701 hardware bug workaround write method */
9142 tp->write32 = write_op;
9144 /* Unfortunately, we have to delay before the PCI read back.
9145 * Some 575X chips will not even respond to a PCI cfg access
9146 * when the reset command is given to the chip.
9148 * How do these hardware designers expect things to work
9149 * properly if the PCI write is posted for a long period
9150 * of time? It is always necessary to have some method by
9151 * which a register read back can occur to push out the write
9152 * that does the reset.
9154 * For most tg3 variants the trick below was working.
9159 /* Flush PCI posted writes. The normal MMIO registers
9160 * are inaccessible at this time so this is the only
9161 * way to do this reliably (actually, this is no longer
9162 * the case, see above). I tried to use indirect
9163 * register read/write but this upset some 5701 variants.
9165 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9169 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9172 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9176 /* Wait for link training to complete. */
9177 for (j = 0; j < 5000; j++)
9180 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9181 pci_write_config_dword(tp->pdev, 0xc4,
9182 cfg_val | (1 << 15));
9185 /* Clear the "no snoop" and "relaxed ordering" bits. */
9186 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9188 * Older PCIe devices only support the 128-byte
9189 * MPS setting. Enforce the restriction.
9191 if (!tg3_flag(tp, CPMU_PRESENT))
9192 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9193 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9195 /* Clear error status */
9196 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9197 PCI_EXP_DEVSTA_CED |
9198 PCI_EXP_DEVSTA_NFED |
9199 PCI_EXP_DEVSTA_FED |
9200 PCI_EXP_DEVSTA_URD);
9203 tg3_restore_pci_state(tp);
9205 tg3_flag_clear(tp, CHIP_RESETTING);
9206 tg3_flag_clear(tp, ERROR_PROCESSED);
9209 if (tg3_flag(tp, 5780_CLASS))
9210 val = tr32(MEMARB_MODE);
9211 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9213 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9215 tw32(0x5000, 0x400);
9218 if (tg3_flag(tp, IS_SSB_CORE)) {
9220 * BCM4785: In order to avoid repercussions from using
9221 * potentially defective internal ROM, stop the Rx RISC CPU,
9222 * which is not required for normal operation.
9225 tg3_halt_cpu(tp, RX_CPU_BASE);
9228 err = tg3_poll_fw(tp);
9232 tw32(GRC_MODE, tp->grc_mode);
9234 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9237 tw32(0xc4, val | (1 << 15));
9240 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9241 tg3_asic_rev(tp) == ASIC_REV_5705) {
9242 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9243 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9244 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9245 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9248 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9249 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9251 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9252 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9257 tw32_f(MAC_MODE, val);
9260 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9264 if (tg3_flag(tp, PCI_EXPRESS) &&
9265 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9266 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9267 !tg3_flag(tp, 57765_PLUS)) {
9270 tw32(0x7c00, val | (1 << 25));
9273 tg3_restore_clk(tp);
9275 /* Reprobe ASF enable state. */
9276 tg3_flag_clear(tp, ENABLE_ASF);
9277 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9278 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9280 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9281 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9282 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9285 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9286 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9287 tg3_flag_set(tp, ENABLE_ASF);
9288 tp->last_event_jiffies = jiffies;
9289 if (tg3_flag(tp, 5750_PLUS))
9290 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9292 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9293 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9294 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9295 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9296 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
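/* Illustrative sketch: the posted-write problem described in
 * tg3_chip_reset() above. A write to a chip register may sit in a PCI
 * bridge queue indefinitely; performing a read on the same path forces
 * the write out first, which is why the driver pairs its reset write
 * with a config-space read (and why the flushing tw32_f() accessor
 * exists):
 */
static inline void tg3_example_flush_posted_write(struct tg3 *tp,
						  u32 off, u32 val)
{
	tw32(off, val);		/* may be posted */
	(void) tr32(off);	/* read back forces the write to complete */
}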
9303 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9304 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9305 static void __tg3_set_rx_mode(struct net_device *);
9307 /* tp->lock is held. */
9308 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9314 tg3_write_sig_pre_reset(tp, kind);
9316 tg3_abort_hw(tp, silent);
9317 err = tg3_chip_reset(tp);
9319 __tg3_set_mac_addr(tp, false);
9321 tg3_write_sig_legacy(tp, kind);
9322 tg3_write_sig_post_reset(tp, kind);
9325 /* Save the stats across chip resets... */
9326 tg3_get_nstats(tp, &tp->net_stats_prev);
9327 tg3_get_estats(tp, &tp->estats_prev);
9329 /* And make sure the next sample is new data */
9330 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9336 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9338 struct tg3 *tp = netdev_priv(dev);
9339 struct sockaddr *addr = p;
9341 bool skip_mac_1 = false;
9343 if (!is_valid_ether_addr(addr->sa_data))
9344 return -EADDRNOTAVAIL;
9346 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9348 if (!netif_running(dev))
9351 if (tg3_flag(tp, ENABLE_ASF)) {
9352 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9354 addr0_high = tr32(MAC_ADDR_0_HIGH);
9355 addr0_low = tr32(MAC_ADDR_0_LOW);
9356 addr1_high = tr32(MAC_ADDR_1_HIGH);
9357 addr1_low = tr32(MAC_ADDR_1_LOW);
9359 /* Skip MAC addr 1 if ASF is using it. */
9360 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9361 !(addr1_high == 0 && addr1_low == 0))
9364 spin_lock_bh(&tp->lock);
9365 __tg3_set_mac_addr(tp, skip_mac_1);
9366 __tg3_set_rx_mode(dev);
9367 spin_unlock_bh(&tp->lock);
9372 /* tp->lock is held. */
9373 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9374 dma_addr_t mapping, u32 maxlen_flags,
9378 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9379 ((u64) mapping >> 32));
9381 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9382 ((u64) mapping & 0xffffffff));
9384 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9387 if (!tg3_flag(tp, 5705_PLUS))
9389 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9394 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9398 if (!tg3_flag(tp, ENABLE_TSS)) {
9399 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9400 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9401 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9403 tw32(HOSTCC_TXCOL_TICKS, 0);
9404 tw32(HOSTCC_TXMAX_FRAMES, 0);
9405 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9407 for (; i < tp->txq_cnt; i++) {
9410 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9411 tw32(reg, ec->tx_coalesce_usecs);
9412 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9413 tw32(reg, ec->tx_max_coalesced_frames);
9414 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9415 tw32(reg, ec->tx_max_coalesced_frames_irq);
9419 for (; i < tp->irq_max - 1; i++) {
9420 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9421 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9422 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
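/* Illustrative sketch: the per-vector coalescing registers sit at a
 * fixed 0x18-byte stride from the *_VEC1 base, so the loops above derive
 * the register for extra vector i as base + i * 0x18.
 */
static inline unsigned long tg3_example_coal_vec_reg(unsigned long vec1_base,
						     unsigned int i)
{
	return vec1_base + i * 0x18;	/* i == 0 addresses the VEC1 slot */
}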
9426 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9429 u32 limit = tp->rxq_cnt;
9431 if (!tg3_flag(tp, ENABLE_RSS)) {
9432 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9433 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9434 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9437 tw32(HOSTCC_RXCOL_TICKS, 0);
9438 tw32(HOSTCC_RXMAX_FRAMES, 0);
9439 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9442 for (; i < limit; i++) {
9445 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9446 tw32(reg, ec->rx_coalesce_usecs);
9447 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9448 tw32(reg, ec->rx_max_coalesced_frames);
9449 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9450 tw32(reg, ec->rx_max_coalesced_frames_irq);
9453 for (; i < tp->irq_max - 1; i++) {
9454 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9455 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9456 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9460 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9462 tg3_coal_tx_init(tp, ec);
9463 tg3_coal_rx_init(tp, ec);
9465 if (!tg3_flag(tp, 5705_PLUS)) {
9466 u32 val = ec->stats_block_coalesce_usecs;
9468 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9469 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9474 tw32(HOSTCC_STAT_COAL_TICKS, val);
9478 /* tp->lock is held. */
9479 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9483 /* Disable all transmit rings but the first. */
9484 if (!tg3_flag(tp, 5705_PLUS))
9485 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9486 else if (tg3_flag(tp, 5717_PLUS))
9487 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9488 else if (tg3_flag(tp, 57765_CLASS) ||
9489 tg3_asic_rev(tp) == ASIC_REV_5762)
9490 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9492 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9494 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9495 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9496 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9497 BDINFO_FLAGS_DISABLED);
9500 /* tp->lock is held. */
9501 static void tg3_tx_rcbs_init(struct tg3 *tp)
9504 u32 txrcb = NIC_SRAM_SEND_RCB;
9506 if (tg3_flag(tp, ENABLE_TSS))
9509 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9510 struct tg3_napi *tnapi = &tp->napi[i];
9512 if (!tnapi->tx_ring)
9515 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9516 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9517 NIC_SRAM_TX_BUFFER_DESC);
9521 /* tp->lock is held. */
9522 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9526 /* Disable all receive return rings but the first. */
9527 if (tg3_flag(tp, 5717_PLUS))
9528 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9529 else if (!tg3_flag(tp, 5705_PLUS))
9530 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9531 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9532 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9533 tg3_flag(tp, 57765_CLASS))
9534 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9536 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9538 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9539 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9540 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9541 BDINFO_FLAGS_DISABLED);
9544 /* tp->lock is held. */
9545 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9548 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9550 if (tg3_flag(tp, ENABLE_RSS))
9553 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9554 struct tg3_napi *tnapi = &tp->napi[i];
9559 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9560 (tp->rx_ret_ring_mask + 1) <<
9561 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9565 /* tp->lock is held. */
9566 static void tg3_rings_reset(struct tg3 *tp)
9570 struct tg3_napi *tnapi = &tp->napi[0];
9572 tg3_tx_rcbs_disable(tp);
9574 tg3_rx_ret_rcbs_disable(tp);
9576 /* Disable interrupts */
9577 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9578 tp->napi[0].chk_msi_cnt = 0;
9579 tp->napi[0].last_rx_cons = 0;
9580 tp->napi[0].last_tx_cons = 0;
9582 /* Zero mailbox registers. */
9583 if (tg3_flag(tp, SUPPORT_MSIX)) {
9584 for (i = 1; i < tp->irq_max; i++) {
9585 tp->napi[i].tx_prod = 0;
9586 tp->napi[i].tx_cons = 0;
9587 if (tg3_flag(tp, ENABLE_TSS))
9588 tw32_mailbox(tp->napi[i].prodmbox, 0);
9589 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9590 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9591 tp->napi[i].chk_msi_cnt = 0;
9592 tp->napi[i].last_rx_cons = 0;
9593 tp->napi[i].last_tx_cons = 0;
9595 if (!tg3_flag(tp, ENABLE_TSS))
9596 tw32_mailbox(tp->napi[0].prodmbox, 0);
9598 tp->napi[0].tx_prod = 0;
9599 tp->napi[0].tx_cons = 0;
9600 tw32_mailbox(tp->napi[0].prodmbox, 0);
9601 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9604 /* Make sure the NIC-based send BD rings are disabled. */
9605 if (!tg3_flag(tp, 5705_PLUS)) {
9606 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9607 for (i = 0; i < 16; i++)
9608 tw32_tx_mbox(mbox + i * 8, 0);
9611 /* Clear status block in ram. */
9612 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9614 /* Set status block DMA address */
9615 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9616 ((u64) tnapi->status_mapping >> 32));
9617 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9618 ((u64) tnapi->status_mapping & 0xffffffff));
9620 stblk = HOSTCC_STATBLCK_RING1;
9622 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9623 u64 mapping = (u64)tnapi->status_mapping;
9624 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9625 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9628 /* Clear status block in ram. */
9629 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9632 tg3_tx_rcbs_init(tp);
9633 tg3_rx_ret_rcbs_init(tp);
9636 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9638 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9640 if (!tg3_flag(tp, 5750_PLUS) ||
9641 tg3_flag(tp, 5780_CLASS) ||
9642 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9643 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9644 tg3_flag(tp, 57765_PLUS))
9645 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9646 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9647 tg3_asic_rev(tp) == ASIC_REV_5787)
9648 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9650 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9652 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9653 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9655 val = min(nic_rep_thresh, host_rep_thresh);
9656 tw32(RCVBDI_STD_THRESH, val);
9658 if (tg3_flag(tp, 57765_PLUS))
9659 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9661 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9664 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9666 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9668 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9669 tw32(RCVBDI_JUMBO_THRESH, val);
9671 if (tg3_flag(tp, 57765_PLUS))
9672 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
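/* Worked example for the standard ring (a sketch; the numbers are
 * illustrative, not quoted from tg3.h): with bdcache_maxcnt = 64,
 * rx_std_max_post = 32 and rx_pending = 200, we get
 * nic_rep_thresh = min(64 / 2, 32) = 32 and
 * host_rep_thresh = max(200 / 8, 1) = 25, so RCVBDI_STD_THRESH is
 * programmed with min(32, 25) = 25: the NIC asks for replenishment
 * once it has consumed 25 standard receive BDs.
 */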
9675 static inline u32 calc_crc(unsigned char *buf, int len)
9683 for (j = 0; j < len; j++) {
9686 for (k = 0; k < 8; k++) {
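/* The body of calc_crc is elided in this listing.  As a self-contained
 * sketch (not verbatim driver code), it computes the standard
 * bit-reversed IEEE 802.3 CRC-32 over the buffer, the same CRC the
 * multicast hash filter below is built from:
 */
static inline u32 calc_crc_sketch(const unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;	/* initial remainder */
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			u32 lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320;	/* reflected CRC-32 polynomial */
		}
	}
	return ~reg;
}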
9699 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9701 /* accept or reject all multicast frames */
9702 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9703 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9704 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9705 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9708 static void __tg3_set_rx_mode(struct net_device *dev)
9710 struct tg3 *tp = netdev_priv(dev);
9713 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9714 RX_MODE_KEEP_VLAN_TAG);
9716 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9717 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9718 * flag clear.
9719 */
9720 if (!tg3_flag(tp, ENABLE_ASF))
9721 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9724 if (dev->flags & IFF_PROMISC) {
9725 /* Promiscuous mode. */
9726 rx_mode |= RX_MODE_PROMISC;
9727 } else if (dev->flags & IFF_ALLMULTI) {
9728 /* Accept all multicast. */
9729 tg3_set_multi(tp, 1);
9730 } else if (netdev_mc_empty(dev)) {
9731 /* Reject all multicast. */
9732 tg3_set_multi(tp, 0);
9734 /* Accept one or more multicast(s). */
9735 struct netdev_hw_addr *ha;
9736 u32 mc_filter[4] = { 0, };
9741 netdev_for_each_mc_addr(ha, dev) {
9742 crc = calc_crc(ha->addr, ETH_ALEN);
9744 regidx = (bit & 0x60) >> 5;
9746 mc_filter[regidx] |= (1 << bit);
9749 tw32(MAC_HASH_REG_0, mc_filter[0]);
9750 tw32(MAC_HASH_REG_1, mc_filter[1]);
9751 tw32(MAC_HASH_REG_2, mc_filter[2]);
9752 tw32(MAC_HASH_REG_3, mc_filter[3]);
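/* Worked example of the hash above (the "bit = ~crc & 0x7f" and
 * "bit &= 0x1f" steps are elided in this listing): the CRC is inverted
 * and truncated to 7 bits, giving a hash in 0..127.  Bits 6:5 select
 * one of the four 32-bit MAC_HASH_REG_* registers and bits 4:0 select
 * the bit within it, e.g. a hash of 0x43 sets bit 3 of MAC_HASH_REG_2.
 */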
9755 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9756 rx_mode |= RX_MODE_PROMISC;
9757 } else if (!(dev->flags & IFF_PROMISC)) {
9758 /* Add all entries to the MAC address filter list */
9760 struct netdev_hw_addr *ha;
9762 netdev_for_each_uc_addr(ha, dev) {
9763 __tg3_set_one_mac_addr(tp, ha->addr,
9764 i + TG3_UCAST_ADDR_IDX(tp));
9769 if (rx_mode != tp->rx_mode) {
9770 tp->rx_mode = rx_mode;
9771 tw32_f(MAC_RX_MODE, rx_mode);
9776 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9780 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9781 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
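/* ethtool_rxfh_indir_default(i, qcnt) is the kernel's round-robin
 * default, i.e. i % qcnt, so a 4-queue device gets the repeating
 * pattern 0, 1, 2, 3, 0, 1, ... across the indirection table.
 */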
9784 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9788 if (!tg3_flag(tp, SUPPORT_MSIX))
9791 if (tp->rxq_cnt == 1) {
9792 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9796 /* Validate table against current IRQ count */
9797 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9798 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9802 if (i != TG3_RSS_INDIR_TBL_SIZE)
9803 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9806 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9809 u32 reg = MAC_RSS_INDIR_TBL_0;
9811 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9812 u32 val = tp->rss_ind_tbl[i];
9814 for (; i % 8; i++) {
9816 val |= tp->rss_ind_tbl[i];
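/* The store lines (a "val <<= 4" between reads, then a tw32(reg, val)
 * and "reg += 4" per group of eight) are elided in this listing.  A
 * sketch of the packing, using the hypothetical helper name
 * rss_pack_sketch(): eight 4-bit ring indices are packed per 32-bit
 * register, with the first entry in the most significant nibble.
 */
static void rss_pack_sketch(const u8 *tbl, u32 *regs, int nregs)
{
	int i, r;

	for (r = 0; r < nregs; r++) {
		u32 val = 0;

		for (i = 0; i < 8; i++)
			val = (val << 4) | (tbl[r * 8 + i] & 0xf);
		regs[r] = val;	/* the driver would tw32() this */
	}
}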
9823 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9825 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9826 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9828 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9831 /* tp->lock is held. */
9832 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9834 u32 val, rdmac_mode;
9836 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9838 tg3_disable_ints(tp);
9842 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9844 if (tg3_flag(tp, INIT_COMPLETE))
9845 tg3_abort_hw(tp, 1);
9847 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9848 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9849 tg3_phy_pull_config(tp);
9850 tg3_eee_pull_config(tp, NULL);
9851 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9854 /* Enable MAC control of LPI */
9855 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9861 err = tg3_chip_reset(tp);
9865 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9867 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9868 val = tr32(TG3_CPMU_CTRL);
9869 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9870 tw32(TG3_CPMU_CTRL, val);
9872 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9873 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9874 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9875 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9877 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9878 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9879 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9880 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9882 val = tr32(TG3_CPMU_HST_ACC);
9883 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9884 val |= CPMU_HST_ACC_MACCLK_6_25;
9885 tw32(TG3_CPMU_HST_ACC, val);
9888 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9889 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9890 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9891 PCIE_PWR_MGMT_L1_THRESH_4MS;
9892 tw32(PCIE_PWR_MGMT_THRESH, val);
9894 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9895 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9897 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9899 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9900 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9903 if (tg3_flag(tp, L1PLLPD_EN)) {
9904 u32 grc_mode = tr32(GRC_MODE);
9906 /* Access the lower 1K of PL PCIE block registers. */
9907 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9908 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9910 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9911 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9912 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9914 tw32(GRC_MODE, grc_mode);
9917 if (tg3_flag(tp, 57765_CLASS)) {
9918 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9919 u32 grc_mode = tr32(GRC_MODE);
9921 /* Access the lower 1K of PL PCIE block registers. */
9922 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9923 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9925 val = tr32(TG3_PCIE_TLDLPL_PORT +
9926 TG3_PCIE_PL_LO_PHYCTL5);
9927 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9928 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9930 tw32(GRC_MODE, grc_mode);
9933 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9936 /* Fix transmit hangs */
9937 val = tr32(TG3_CPMU_PADRNG_CTL);
9938 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9939 tw32(TG3_CPMU_PADRNG_CTL, val);
9941 grc_mode = tr32(GRC_MODE);
9943 /* Access the lower 1K of DL PCIE block registers. */
9944 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9945 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9947 val = tr32(TG3_PCIE_TLDLPL_PORT +
9948 TG3_PCIE_DL_LO_FTSMAX);
9949 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9950 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9951 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9953 tw32(GRC_MODE, grc_mode);
9956 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9957 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9958 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9959 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9962 /* This works around an issue with Athlon chipsets on
9963 * B3 tigon3 silicon. This bit has no effect on any
9964 * other revision. But do not set this on PCI Express
9965 * chips and don't even touch the clocks if the CPMU is present.
9967 if (!tg3_flag(tp, CPMU_PRESENT)) {
9968 if (!tg3_flag(tp, PCI_EXPRESS))
9969 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9970 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9973 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9974 tg3_flag(tp, PCIX_MODE)) {
9975 val = tr32(TG3PCI_PCISTATE);
9976 val |= PCISTATE_RETRY_SAME_DMA;
9977 tw32(TG3PCI_PCISTATE, val);
9980 if (tg3_flag(tp, ENABLE_APE)) {
9981 /* Allow reads and writes to the
9982 * APE register and memory space.
9984 val = tr32(TG3PCI_PCISTATE);
9985 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9986 PCISTATE_ALLOW_APE_SHMEM_WR |
9987 PCISTATE_ALLOW_APE_PSPACE_WR;
9988 tw32(TG3PCI_PCISTATE, val);
9991 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9992 /* Enable some hw fixes. */
9993 val = tr32(TG3PCI_MSI_DATA);
9994 val |= (1 << 26) | (1 << 28) | (1 << 29);
9995 tw32(TG3PCI_MSI_DATA, val);
9998 /* Descriptor ring init may make accesses to the
9999 * NIC SRAM area to setup the TX descriptors, so we
10000 * can only do this after the hardware has been
10001 * successfully reset.
10003 err = tg3_init_rings(tp);
10007 if (tg3_flag(tp, 57765_PLUS)) {
10008 val = tr32(TG3PCI_DMA_RW_CTRL) &
10009 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10010 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10011 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10012 if (!tg3_flag(tp, 57765_CLASS) &&
10013 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10014 tg3_asic_rev(tp) != ASIC_REV_5762)
10015 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10016 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10017 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10018 tg3_asic_rev(tp) != ASIC_REV_5761) {
10019 /* This value is determined during the probe time DMA
10020 * engine test, tg3_test_dma.
10022 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10025 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10026 GRC_MODE_4X_NIC_SEND_RINGS |
10027 GRC_MODE_NO_TX_PHDR_CSUM |
10028 GRC_MODE_NO_RX_PHDR_CSUM);
10029 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10031 /* Pseudo-header checksum is done by hardware logic and not
10032 * the offload processors, so make the chip do the pseudo-
10033 * header checksums on receive. For transmit it is more
10034 * convenient to do the pseudo-header checksum in software
10035 * as Linux does that on transmit for us in all cases.
10037 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10039 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10041 tw32(TG3_RX_PTP_CTL,
10042 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10044 if (tg3_flag(tp, PTP_CAPABLE))
10045 val |= GRC_MODE_TIME_SYNC_ENABLE;
10047 tw32(GRC_MODE, tp->grc_mode | val);
10049 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10050 val = tr32(GRC_MISC_CFG);
10052 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10053 tw32(GRC_MISC_CFG, val);
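/* Presumably 65 yields a divide-by-66: the 66 MHz core clock divided
 * by (65 + 1) gives a 1 MHz (1 usec per tick) timebase, which is the
 * unit the host coalescing tick counters are specified in.
 */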
10055 /* Initialize MBUF/DESC pool. */
10056 if (tg3_flag(tp, 5750_PLUS)) {
10058 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10059 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10060 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10061 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10063 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10064 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10065 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10066 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10069 fw_len = tp->fw_len;
10070 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10071 tw32(BUFMGR_MB_POOL_ADDR,
10072 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10073 tw32(BUFMGR_MB_POOL_SIZE,
10074 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10077 if (tp->dev->mtu <= ETH_DATA_LEN) {
10078 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10079 tp->bufmgr_config.mbuf_read_dma_low_water);
10080 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10081 tp->bufmgr_config.mbuf_mac_rx_low_water);
10082 tw32(BUFMGR_MB_HIGH_WATER,
10083 tp->bufmgr_config.mbuf_high_water);
10085 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10086 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10087 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10088 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10089 tw32(BUFMGR_MB_HIGH_WATER,
10090 tp->bufmgr_config.mbuf_high_water_jumbo);
10092 tw32(BUFMGR_DMA_LOW_WATER,
10093 tp->bufmgr_config.dma_low_water);
10094 tw32(BUFMGR_DMA_HIGH_WATER,
10095 tp->bufmgr_config.dma_high_water);
10097 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10098 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10099 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10100 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10101 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10102 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10103 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10104 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10105 tw32(BUFMGR_MODE, val);
10106 for (i = 0; i < 2000; i++) {
10107 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10112 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10116 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10117 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10119 tg3_setup_rxbd_thresholds(tp);
10121 /* Initialize TG3_BDINFO's at:
10122 * RCVDBDI_STD_BD: standard eth size rx ring
10123 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10124 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10127 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10128 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10129 * ring attribute flags
10130 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10132 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10133 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10135 * The size of each ring is fixed in the firmware, but the location is
10136 * configurable.
10137 */
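/* A sketch of what tg3_set_bdinfo() (used by the RCB init helpers
 * earlier) presumably writes for each TG3_BDINFO block; the offsets
 * are the TG3_BDINFO_* constants used throughout this function:
 *
 *	bdinfo + TG3_BDINFO_HOST_ADDR + HIGH : upper 32 bits of DMA addr
 *	bdinfo + TG3_BDINFO_HOST_ADDR + LOW  : lower 32 bits of DMA addr
 *	bdinfo + TG3_BDINFO_MAXLEN_FLAGS     : (ring size << 16) | flags
 *	bdinfo + TG3_BDINFO_NIC_ADDR         : ring location in NIC SRAM
 */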
10138 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10139 ((u64) tpr->rx_std_mapping >> 32));
10140 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10141 ((u64) tpr->rx_std_mapping & 0xffffffff));
10142 if (!tg3_flag(tp, 5717_PLUS))
10143 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10144 NIC_SRAM_RX_BUFFER_DESC);
10146 /* Disable the mini ring */
10147 if (!tg3_flag(tp, 5705_PLUS))
10148 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10149 BDINFO_FLAGS_DISABLED);
10151 /* Program the jumbo buffer descriptor ring control
10152 * blocks on those devices that have them.
10154 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10155 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10157 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10158 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10159 ((u64) tpr->rx_jmb_mapping >> 32));
10160 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10161 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10162 val = TG3_RX_JMB_RING_SIZE(tp) <<
10163 BDINFO_FLAGS_MAXLEN_SHIFT;
10164 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10165 val | BDINFO_FLAGS_USE_EXT_RECV);
10166 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10167 tg3_flag(tp, 57765_CLASS) ||
10168 tg3_asic_rev(tp) == ASIC_REV_5762)
10169 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10170 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10172 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10173 BDINFO_FLAGS_DISABLED);
10176 if (tg3_flag(tp, 57765_PLUS)) {
10177 val = TG3_RX_STD_RING_SIZE(tp);
10178 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10179 val |= (TG3_RX_STD_DMA_SZ << 2);
10181 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10183 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10185 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10187 tpr->rx_std_prod_idx = tp->rx_pending;
10188 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10190 tpr->rx_jmb_prod_idx =
10191 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10192 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10194 tg3_rings_reset(tp);
10196 /* Initialize MAC address and backoff seed. */
10197 __tg3_set_mac_addr(tp, false);
10199 /* MTU + ethernet header + FCS + optional VLAN tag */
10200 tw32(MAC_RX_MTU_SIZE,
10201 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
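/* Worked example with a standard 1500-byte MTU:
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes,
 * the classic maximum size of a VLAN-tagged Ethernet frame.
 */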
10203 /* The slot time is changed by tg3_setup_phy if we
10204 * run at gigabit with half duplex.
10206 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10207 (6 << TX_LENGTHS_IPG_SHIFT) |
10208 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10210 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10211 tg3_asic_rev(tp) == ASIC_REV_5762)
10212 val |= tr32(MAC_TX_LENGTHS) &
10213 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10214 TX_LENGTHS_CNT_DWN_VAL_MSK);
10216 tw32(MAC_TX_LENGTHS, val);
10218 /* Receive rules. */
10219 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10220 tw32(RCVLPC_CONFIG, 0x0181);
10222 /* Calculate RDMAC_MODE setting early, we need it to determine
10223 * the RCVLPC_STATE_ENABLE mask.
10225 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10226 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10227 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10228 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10229 RDMAC_MODE_LNGREAD_ENAB);
10231 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10232 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10234 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10235 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10236 tg3_asic_rev(tp) == ASIC_REV_57780)
10237 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10238 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10239 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10241 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10242 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10243 if (tg3_flag(tp, TSO_CAPABLE) &&
10244 tg3_asic_rev(tp) == ASIC_REV_5705) {
10245 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10246 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10247 !tg3_flag(tp, IS_5788)) {
10248 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10252 if (tg3_flag(tp, PCI_EXPRESS))
10253 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10255 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10257 if (tp->dev->mtu <= ETH_DATA_LEN) {
10258 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10259 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10263 if (tg3_flag(tp, HW_TSO_1) ||
10264 tg3_flag(tp, HW_TSO_2) ||
10265 tg3_flag(tp, HW_TSO_3))
10266 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10268 if (tg3_flag(tp, 57765_PLUS) ||
10269 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10270 tg3_asic_rev(tp) == ASIC_REV_57780)
10271 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10273 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10274 tg3_asic_rev(tp) == ASIC_REV_5762)
10275 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10277 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10278 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10279 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10280 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10281 tg3_flag(tp, 57765_PLUS)) {
10284 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10285 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10287 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10289 val = tr32(tgtreg);
10290 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10291 tg3_asic_rev(tp) == ASIC_REV_5762) {
10292 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10293 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10294 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10295 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10296 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10297 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10299 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10302 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10303 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10304 tg3_asic_rev(tp) == ASIC_REV_5762) {
10307 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10308 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10310 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10312 val = tr32(tgtreg);
10314 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10315 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10318 /* Receive/send statistics. */
10319 if (tg3_flag(tp, 5750_PLUS)) {
10320 val = tr32(RCVLPC_STATS_ENABLE);
10321 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10322 tw32(RCVLPC_STATS_ENABLE, val);
10323 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10324 tg3_flag(tp, TSO_CAPABLE)) {
10325 val = tr32(RCVLPC_STATS_ENABLE);
10326 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10327 tw32(RCVLPC_STATS_ENABLE, val);
10329 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10331 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10332 tw32(SNDDATAI_STATSENAB, 0xffffff);
10333 tw32(SNDDATAI_STATSCTRL,
10334 (SNDDATAI_SCTRL_ENABLE |
10335 SNDDATAI_SCTRL_FASTUPD));
10337 /* Setup host coalescing engine. */
10338 tw32(HOSTCC_MODE, 0);
10339 for (i = 0; i < 2000; i++) {
10340 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10345 __tg3_set_coalesce(tp, &tp->coal);
10347 if (!tg3_flag(tp, 5705_PLUS)) {
10348 /* Status/statistics block address. See tg3_timer,
10349 * the tg3_periodic_fetch_stats call there, and
10350 * tg3_get_stats to see how this works for 5705/5750 chips.
10352 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10353 ((u64) tp->stats_mapping >> 32));
10354 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10355 ((u64) tp->stats_mapping & 0xffffffff));
10356 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10358 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10360 /* Clear statistics and status block memory areas */
10361 for (i = NIC_SRAM_STATS_BLK;
10362 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10363 i += sizeof(u32)) {
10364 tg3_write_mem(tp, i, 0);
10369 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10371 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10372 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10373 if (!tg3_flag(tp, 5705_PLUS))
10374 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10376 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10377 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10378 /* reset to prevent losing 1st rx packet intermittently */
10379 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10383 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10384 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10385 MAC_MODE_FHDE_ENABLE;
10386 if (tg3_flag(tp, ENABLE_APE))
10387 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10388 if (!tg3_flag(tp, 5705_PLUS) &&
10389 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10390 tg3_asic_rev(tp) != ASIC_REV_5700)
10391 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10392 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10395 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10396 * If TG3_FLAG_IS_NIC is zero, we should read the
10397 * register to preserve the GPIO settings for LOMs. The GPIOs,
10398 * whether used as inputs or outputs, are set by boot code after
10399 * reset.
10400 */
10401 if (!tg3_flag(tp, IS_NIC)) {
10404 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10405 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10406 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10408 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10409 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10410 GRC_LCLCTRL_GPIO_OUTPUT3;
10412 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10413 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10415 tp->grc_local_ctrl &= ~gpio_mask;
10416 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10418 /* GPIO1 must be driven high for eeprom write protect */
10419 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10420 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10421 GRC_LCLCTRL_GPIO_OUTPUT1);
10423 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10426 if (tg3_flag(tp, USING_MSIX)) {
10427 val = tr32(MSGINT_MODE);
10428 val |= MSGINT_MODE_ENABLE;
10429 if (tp->irq_cnt > 1)
10430 val |= MSGINT_MODE_MULTIVEC_EN;
10431 if (!tg3_flag(tp, 1SHOT_MSI))
10432 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10433 tw32(MSGINT_MODE, val);
10436 if (!tg3_flag(tp, 5705_PLUS)) {
10437 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10441 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10442 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10443 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10444 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10445 WDMAC_MODE_LNGREAD_ENAB);
10447 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10448 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10449 if (tg3_flag(tp, TSO_CAPABLE) &&
10450 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10451 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10453 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10454 !tg3_flag(tp, IS_5788)) {
10455 val |= WDMAC_MODE_RX_ACCEL;
10459 /* Enable host coalescing bug fix */
10460 if (tg3_flag(tp, 5755_PLUS))
10461 val |= WDMAC_MODE_STATUS_TAG_FIX;
10463 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10464 val |= WDMAC_MODE_BURST_ALL_DATA;
10466 tw32_f(WDMAC_MODE, val);
10469 if (tg3_flag(tp, PCIX_MODE)) {
10472 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10474 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10475 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10476 pcix_cmd |= PCI_X_CMD_READ_2K;
10477 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10478 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10479 pcix_cmd |= PCI_X_CMD_READ_2K;
10481 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10485 tw32_f(RDMAC_MODE, rdmac_mode);
10488 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10489 tg3_asic_rev(tp) == ASIC_REV_5720) {
10490 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10491 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10494 if (i < TG3_NUM_RDMA_CHANNELS) {
10495 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10496 val |= tg3_lso_rd_dma_workaround_bit(tp);
10497 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10498 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10502 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10503 if (!tg3_flag(tp, 5705_PLUS))
10504 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10506 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10507 tw32(SNDDATAC_MODE,
10508 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10510 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10512 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10513 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10514 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10515 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10516 val |= RCVDBDI_MODE_LRG_RING_SZ;
10517 tw32(RCVDBDI_MODE, val);
10518 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10519 if (tg3_flag(tp, HW_TSO_1) ||
10520 tg3_flag(tp, HW_TSO_2) ||
10521 tg3_flag(tp, HW_TSO_3))
10522 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10523 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10524 if (tg3_flag(tp, ENABLE_TSS))
10525 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10526 tw32(SNDBDI_MODE, val);
10527 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10529 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10530 err = tg3_load_5701_a0_firmware_fix(tp);
10535 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10536 /* Ignore any errors for the firmware download. If download
10537 * fails, the device will operate with EEE disabled
10539 tg3_load_57766_firmware(tp);
10542 if (tg3_flag(tp, TSO_CAPABLE)) {
10543 err = tg3_load_tso_firmware(tp);
10548 tp->tx_mode = TX_MODE_ENABLE;
10550 if (tg3_flag(tp, 5755_PLUS) ||
10551 tg3_asic_rev(tp) == ASIC_REV_5906)
10552 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10554 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10555 tg3_asic_rev(tp) == ASIC_REV_5762) {
10556 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10557 tp->tx_mode &= ~val;
10558 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10561 tw32_f(MAC_TX_MODE, tp->tx_mode);
10564 if (tg3_flag(tp, ENABLE_RSS)) {
10567 tg3_rss_write_indir_tbl(tp);
10569 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10571 for (i = 0; i < 10 ; i++)
10572 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10575 tp->rx_mode = RX_MODE_ENABLE;
10576 if (tg3_flag(tp, 5755_PLUS))
10577 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10579 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10580 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10582 if (tg3_flag(tp, ENABLE_RSS))
10583 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10584 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10585 RX_MODE_RSS_IPV6_HASH_EN |
10586 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10587 RX_MODE_RSS_IPV4_HASH_EN |
10588 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10590 tw32_f(MAC_RX_MODE, tp->rx_mode);
10593 tw32(MAC_LED_CTRL, tp->led_ctrl);
10595 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10596 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10597 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10600 tw32_f(MAC_RX_MODE, tp->rx_mode);
10603 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10604 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10605 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10606 /* Set drive transmission level to 1.2V */
10607 /* only if the signal pre-emphasis bit is not set */
10608 val = tr32(MAC_SERDES_CFG);
10611 tw32(MAC_SERDES_CFG, val);
10613 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10614 tw32(MAC_SERDES_CFG, 0x616000);
10617 /* Prevent chip from dropping frames when flow control
10618 * is enabled.
10619 */
10620 if (tg3_flag(tp, 57765_CLASS))
10624 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10626 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10627 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10628 /* Use hardware link auto-negotiation */
10629 tg3_flag_set(tp, HW_AUTONEG);
10632 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10633 tg3_asic_rev(tp) == ASIC_REV_5714) {
10636 tmp = tr32(SERDES_RX_CTRL);
10637 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10638 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10639 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10640 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10643 if (!tg3_flag(tp, USE_PHYLIB)) {
10644 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10645 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10647 err = tg3_setup_phy(tp, false);
10651 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10652 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10655 /* Clear CRC stats. */
10656 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10657 tg3_writephy(tp, MII_TG3_TEST1,
10658 tmp | MII_TG3_TEST1_CRC_EN);
10659 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10664 __tg3_set_rx_mode(tp->dev);
10666 /* Initialize receive rules. */
10667 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10668 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10669 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10670 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10672 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10676 if (tg3_flag(tp, ENABLE_ASF))
10680 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10682 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10684 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10686 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10688 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10690 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10692 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10694 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10696 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10698 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10700 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10702 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10704 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10706 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10714 if (tg3_flag(tp, ENABLE_APE))
10715 /* Write our heartbeat update interval to APE. */
10716 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10717 APE_HOST_HEARTBEAT_INT_DISABLE);
10719 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10724 /* Called at device open time to get the chip ready for
10725 * packet processing. Invoked with tp->lock held.
10727 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10729 /* Chip may have been just powered on. If so, the boot code may still
10730 * be running initialization. Wait for it to finish to avoid races in
10731 * accessing the hardware.
10733 tg3_enable_register_access(tp);
10736 tg3_switch_clocks(tp);
10738 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10740 return tg3_reset_hw(tp, reset_phy);
10743 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10747 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10748 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10750 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10753 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10754 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10755 memset(ocir, 0, TG3_OCIR_LEN);
10759 /* sysfs attributes for hwmon */
10760 static ssize_t tg3_show_temp(struct device *dev,
10761 struct device_attribute *devattr, char *buf)
10763 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10764 struct tg3 *tp = dev_get_drvdata(dev);
10767 spin_lock_bh(&tp->lock);
10768 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10769 sizeof(temperature));
10770 spin_unlock_bh(&tp->lock);
10771 return sprintf(buf, "%u\n", temperature * 1000);
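/* The hwmon sysfs ABI reports temperatures in millidegrees Celsius,
 * hence the multiplication by 1000 above (the APE scratchpad value is
 * apparently in whole degrees).
 */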
10775 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10776 TG3_TEMP_SENSOR_OFFSET);
10777 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10778 TG3_TEMP_CAUTION_OFFSET);
10779 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10780 TG3_TEMP_MAX_OFFSET);
10782 static struct attribute *tg3_attrs[] = {
10783 &sensor_dev_attr_temp1_input.dev_attr.attr,
10784 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10785 &sensor_dev_attr_temp1_max.dev_attr.attr,
10788 ATTRIBUTE_GROUPS(tg3);
10790 static void tg3_hwmon_close(struct tg3 *tp)
10792 if (tp->hwmon_dev) {
10793 hwmon_device_unregister(tp->hwmon_dev);
10794 tp->hwmon_dev = NULL;
10798 static void tg3_hwmon_open(struct tg3 *tp)
10802 struct pci_dev *pdev = tp->pdev;
10803 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10805 tg3_sd_scan_scratchpad(tp, ocirs);
10807 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10808 if (!ocirs[i].src_data_length)
10811 size += ocirs[i].src_hdr_length;
10812 size += ocirs[i].src_data_length;
10818 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10820 if (IS_ERR(tp->hwmon_dev)) {
10821 tp->hwmon_dev = NULL;
10822 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10827 #define TG3_STAT_ADD32(PSTAT, REG) \
10828 do { u32 __val = tr32(REG); \
10829 (PSTAT)->low += __val; \
10830 if ((PSTAT)->low < __val) \
10831 (PSTAT)->high += 1; \
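/* Example of the wrap detection above: with (PSTAT)->low = 0xffffff00
 * and a register snapshot __val = 0x200, low becomes 0x100, which is
 * less than __val, so high is carried up by one.  This folds the
 * chip's free-running 32-bit counters into stable 64-bit totals.
 */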
10834 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10836 struct tg3_hw_stats *sp = tp->hw_stats;
10841 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10842 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10843 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10844 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10845 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10846 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10847 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10848 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10849 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10850 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10851 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10852 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10853 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10854 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10855 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10856 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10859 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10860 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10861 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10862 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10865 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10866 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10867 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10868 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10869 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10870 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10871 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10872 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10873 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10874 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10875 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10876 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10877 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10878 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10880 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10881 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10882 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10883 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10884 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10885 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10887 u32 val = tr32(HOSTCC_FLOW_ATTN);
10888 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10890 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10891 sp->rx_discards.low += val;
10892 if (sp->rx_discards.low < val)
10893 sp->rx_discards.high += 1;
10895 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10897 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10900 static void tg3_chk_missed_msi(struct tg3 *tp)
10904 for (i = 0; i < tp->irq_cnt; i++) {
10905 struct tg3_napi *tnapi = &tp->napi[i];
10907 if (tg3_has_work(tnapi)) {
10908 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10909 tnapi->last_tx_cons == tnapi->tx_cons) {
10910 if (tnapi->chk_msi_cnt < 1) {
10911 tnapi->chk_msi_cnt++;
10917 tnapi->chk_msi_cnt = 0;
10918 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10919 tnapi->last_tx_cons = tnapi->tx_cons;
10923 static void tg3_timer(unsigned long __opaque)
10925 struct tg3 *tp = (struct tg3 *) __opaque;
10927 spin_lock(&tp->lock);
10929 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10930 spin_unlock(&tp->lock);
10931 goto restart_timer;
10934 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10935 tg3_flag(tp, 57765_CLASS))
10936 tg3_chk_missed_msi(tp);
10938 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10939 /* BCM4785: Flush posted writes from GbE to host memory. */
10943 if (!tg3_flag(tp, TAGGED_STATUS)) {
10944 /* All of this garbage exists because, when using non-tagged
10945 * IRQ status, the mailbox/status_block protocol the chip
10946 * uses with the CPU is race prone.
10947 */
10948 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10949 tw32(GRC_LOCAL_CTRL,
10950 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10952 tw32(HOSTCC_MODE, tp->coalesce_mode |
10953 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10956 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10957 spin_unlock(&tp->lock);
10958 tg3_reset_task_schedule(tp);
10959 goto restart_timer;
10963 /* This part only runs once per second. */
10964 if (!--tp->timer_counter) {
10965 if (tg3_flag(tp, 5705_PLUS))
10966 tg3_periodic_fetch_stats(tp);
10968 if (tp->setlpicnt && !--tp->setlpicnt)
10969 tg3_phy_eee_enable(tp);
10971 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10975 mac_stat = tr32(MAC_STATUS);
10978 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10979 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10981 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10985 tg3_setup_phy(tp, false);
10986 } else if (tg3_flag(tp, POLL_SERDES)) {
10987 u32 mac_stat = tr32(MAC_STATUS);
10988 int need_setup = 0;
10991 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10994 if (!tp->link_up &&
10995 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10996 MAC_STATUS_SIGNAL_DET))) {
11000 if (!tp->serdes_counter) {
11003 ~MAC_MODE_PORT_MODE_MASK));
11005 tw32_f(MAC_MODE, tp->mac_mode);
11008 tg3_setup_phy(tp, false);
11010 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11011 tg3_flag(tp, 5780_CLASS)) {
11012 tg3_serdes_parallel_detect(tp);
11013 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11014 u32 cpmu = tr32(TG3_CPMU_STATUS);
11015 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11016 TG3_CPMU_STATUS_LINK_MASK);
11018 if (link_up != tp->link_up)
11019 tg3_setup_phy(tp, false);
11022 tp->timer_counter = tp->timer_multiplier;
11025 /* Heartbeat is only sent once every 2 seconds.
11027 * The heartbeat is to tell the ASF firmware that the host
11028 * driver is still alive. In the event that the OS crashes,
11029 * ASF needs to reset the hardware to free up the FIFO space
11030 * that may be filled with rx packets destined for the host.
11031 * If the FIFO is full, ASF will no longer function properly.
11033 * Unintended resets have been reported on real time kernels
11034 * where the timer doesn't run on time. Netpoll will also have
11035 * the same problem.
11036 *
11037 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11038 * to check the ring condition when the heartbeat is expiring
11039 * before doing the reset. This will prevent most unintended
11040 * resets.
11041 */
11042 if (!--tp->asf_counter) {
11043 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11044 tg3_wait_for_event_ack(tp);
11046 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11047 FWCMD_NICDRV_ALIVE3);
11048 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11049 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11050 TG3_FW_UPDATE_TIMEOUT_SEC);
11052 tg3_generate_fw_event(tp);
11054 tp->asf_counter = tp->asf_multiplier;
11057 spin_unlock(&tp->lock);
11060 tp->timer.expires = jiffies + tp->timer_offset;
11061 add_timer(&tp->timer);
11064 static void tg3_timer_init(struct tg3 *tp)
11066 if (tg3_flag(tp, TAGGED_STATUS) &&
11067 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11068 !tg3_flag(tp, 57765_CLASS))
11069 tp->timer_offset = HZ;
11071 tp->timer_offset = HZ / 10;
11073 BUG_ON(tp->timer_offset > HZ);
11075 tp->timer_multiplier = (HZ / tp->timer_offset);
11076 tp->asf_multiplier = (HZ / tp->timer_offset) *
11077 TG3_FW_UPDATE_FREQ_SEC;
11079 init_timer(&tp->timer);
11080 tp->timer.data = (unsigned long) tp;
11081 tp->timer.function = tg3_timer;
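/* Worked example: with tagged status on a chip that needs no missed-MSI
 * workaround, timer_offset = HZ (the timer fires once per second) and
 * timer_multiplier = 1; otherwise timer_offset = HZ / 10 (ten fires a
 * second) and timer_multiplier = 10, so the "once per second" block in
 * tg3_timer() still runs at 1 Hz.  asf_multiplier scales the same tick
 * rate up to the firmware heartbeat period (TG3_FW_UPDATE_FREQ_SEC).
 */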
11084 static void tg3_timer_start(struct tg3 *tp)
11086 tp->asf_counter = tp->asf_multiplier;
11087 tp->timer_counter = tp->timer_multiplier;
11089 tp->timer.expires = jiffies + tp->timer_offset;
11090 add_timer(&tp->timer);
11093 static void tg3_timer_stop(struct tg3 *tp)
11095 del_timer_sync(&tp->timer);
11098 /* Restart hardware after configuration changes, self-test, etc.
11099 * Invoked with tp->lock held.
11101 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11102 __releases(tp->lock)
11103 __acquires(tp->lock)
11107 err = tg3_init_hw(tp, reset_phy);
11109 netdev_err(tp->dev,
11110 "Failed to re-initialize device, aborting\n");
11111 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11112 tg3_full_unlock(tp);
11113 tg3_timer_stop(tp);
11115 tg3_napi_enable(tp);
11116 dev_close(tp->dev);
11117 tg3_full_lock(tp, 0);
11122 static void tg3_reset_task(struct work_struct *work)
11124 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11128 tg3_full_lock(tp, 0);
11130 if (!netif_running(tp->dev)) {
11131 tg3_flag_clear(tp, RESET_TASK_PENDING);
11132 tg3_full_unlock(tp);
11137 tg3_full_unlock(tp);
11141 tg3_netif_stop(tp);
11143 tg3_full_lock(tp, 1);
11145 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11146 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11147 tp->write32_rx_mbox = tg3_write_flush_reg32;
11148 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11149 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11152 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11153 err = tg3_init_hw(tp, true);
11157 tg3_netif_start(tp);
11160 tg3_full_unlock(tp);
11165 tg3_flag_clear(tp, RESET_TASK_PENDING);
11169 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11172 unsigned long flags;
11174 struct tg3_napi *tnapi = &tp->napi[irq_num];
11176 if (tp->irq_cnt == 1)
11177 name = tp->dev->name;
11179 name = &tnapi->irq_lbl[0];
11180 if (tnapi->tx_buffers && tnapi->rx_rcb)
11181 snprintf(name, IFNAMSIZ,
11182 "%s-txrx-%d", tp->dev->name, irq_num);
11183 else if (tnapi->tx_buffers)
11184 snprintf(name, IFNAMSIZ,
11185 "%s-tx-%d", tp->dev->name, irq_num);
11186 else if (tnapi->rx_rcb)
11187 snprintf(name, IFNAMSIZ,
11188 "%s-rx-%d", tp->dev->name, irq_num);
11190 snprintf(name, IFNAMSIZ,
11191 "%s-%d", tp->dev->name, irq_num);
11192 name[IFNAMSIZ-1] = 0;
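/* Example: the second vector of a device named eth0 that handles both
 * rx and tx work would show up as "eth0-txrx-1" in /proc/interrupts.
 */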
11195 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11197 if (tg3_flag(tp, 1SHOT_MSI))
11198 fn = tg3_msi_1shot;
11201 fn = tg3_interrupt;
11202 if (tg3_flag(tp, TAGGED_STATUS))
11203 fn = tg3_interrupt_tagged;
11204 flags = IRQF_SHARED;
11207 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11210 static int tg3_test_interrupt(struct tg3 *tp)
11212 struct tg3_napi *tnapi = &tp->napi[0];
11213 struct net_device *dev = tp->dev;
11214 int err, i, intr_ok = 0;
11217 if (!netif_running(dev))
11220 tg3_disable_ints(tp);
11222 free_irq(tnapi->irq_vec, tnapi);
11225 * Turn off MSI one shot mode. Otherwise this test has no
11226 * observable way to know whether the interrupt was delivered.
11228 if (tg3_flag(tp, 57765_PLUS)) {
11229 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11230 tw32(MSGINT_MODE, val);
11233 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11234 IRQF_SHARED, dev->name, tnapi);
11238 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11239 tg3_enable_ints(tp);
11241 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11244 for (i = 0; i < 5; i++) {
11245 u32 int_mbox, misc_host_ctrl;
11247 int_mbox = tr32_mailbox(tnapi->int_mbox);
11248 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11250 if ((int_mbox != 0) ||
11251 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11256 if (tg3_flag(tp, 57765_PLUS) &&
11257 tnapi->hw_status->status_tag != tnapi->last_tag)
11258 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11263 tg3_disable_ints(tp);
11265 free_irq(tnapi->irq_vec, tnapi);
11267 err = tg3_request_irq(tp, 0);
11273 /* Reenable MSI one shot mode. */
11274 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11275 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11276 tw32(MSGINT_MODE, val);
11284 /* Returns 0 if the MSI test succeeds, or if it fails and INTx mode
11285 * is successfully restored.
11286 */
11287 static int tg3_test_msi(struct tg3 *tp)
11292 if (!tg3_flag(tp, USING_MSI))
11295 /* Turn off SERR reporting in case MSI terminates with Master
11298 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11299 pci_write_config_word(tp->pdev, PCI_COMMAND,
11300 pci_cmd & ~PCI_COMMAND_SERR);
11302 err = tg3_test_interrupt(tp);
11304 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11309 /* other failures */
11313 /* MSI test failed, go back to INTx mode */
11314 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11315 "to INTx mode. Please report this failure to the PCI "
11316 "maintainer and include system chipset information\n");
11318 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11320 pci_disable_msi(tp->pdev);
11322 tg3_flag_clear(tp, USING_MSI);
11323 tp->napi[0].irq_vec = tp->pdev->irq;
11325 err = tg3_request_irq(tp, 0);
11329 /* Need to reset the chip because the MSI cycle may have terminated
11330 * with Master Abort.
11332 tg3_full_lock(tp, 1);
11334 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11335 err = tg3_init_hw(tp, true);
11337 tg3_full_unlock(tp);
11340 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11345 static int tg3_request_firmware(struct tg3 *tp)
11347 const struct tg3_firmware_hdr *fw_hdr;
11349 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11350 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11355 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11357 /* Firmware blob starts with version numbers, followed by
11358 * start address and _full_ length including BSS sections
11359 * (which must be longer than the actual data, of course).
11360 */
11362 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11363 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11364 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11365 tp->fw_len, tp->fw_needed);
11366 release_firmware(tp->fw);
11371 /* We no longer need firmware; we have it. */
11372 tp->fw_needed = NULL;
11376 static u32 tg3_irq_count(struct tg3 *tp)
11378 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11381 /* We want as many rx rings enabled as there are cpus.
11382 * In multiqueue MSI-X mode, the first MSI-X vector
11383 * only deals with link interrupts, etc, so we add
11384 * one to the number of vectors we are requesting.
11386 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
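/* Worked example: with rxq_cnt = 4 and txq_cnt = 1, irq_cnt starts at
 * max(4, 1) = 4; since vector 0 handles only link interrupts in
 * multiqueue MSI-X mode, one extra vector is requested, giving
 * min(5, tp->irq_max).
 */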
11392 static bool tg3_enable_msix(struct tg3 *tp)
11395 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11397 tp->txq_cnt = tp->txq_req;
11398 tp->rxq_cnt = tp->rxq_req;
11400 tp->rxq_cnt = netif_get_num_default_rss_queues();
11401 if (tp->rxq_cnt > tp->rxq_max)
11402 tp->rxq_cnt = tp->rxq_max;
11404 /* Disable multiple TX rings by default. Simple round-robin hardware
11405 * scheduling of the TX rings can cause starvation of rings with
11406 * small packets when other rings have TSO or jumbo packets.
11411 tp->irq_cnt = tg3_irq_count(tp);
11413 for (i = 0; i < tp->irq_max; i++) {
11414 msix_ent[i].entry = i;
11415 msix_ent[i].vector = 0;
11418 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11421 } else if (rc < tp->irq_cnt) {
11422 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11425 tp->rxq_cnt = max(rc - 1, 1);
11427 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11430 for (i = 0; i < tp->irq_max; i++)
11431 tp->napi[i].irq_vec = msix_ent[i].vector;
11433 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11434 pci_disable_msix(tp->pdev);
11438 if (tp->irq_cnt == 1)
11441 tg3_flag_set(tp, ENABLE_RSS);
11443 if (tp->txq_cnt > 1)
11444 tg3_flag_set(tp, ENABLE_TSS);
11446 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11451 static void tg3_ints_init(struct tg3 *tp)
11453 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11454 !tg3_flag(tp, TAGGED_STATUS)) {
11455 /* All MSI supporting chips should support tagged
11456 * status. Assert that this is the case.
11458 netdev_warn(tp->dev,
11459 "MSI without TAGGED_STATUS? Not using MSI\n");
11463 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11464 tg3_flag_set(tp, USING_MSIX);
11465 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11466 tg3_flag_set(tp, USING_MSI);
11468 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11469 u32 msi_mode = tr32(MSGINT_MODE);
11470 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11471 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11472 if (!tg3_flag(tp, 1SHOT_MSI))
11473 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11474 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11477 if (!tg3_flag(tp, USING_MSIX)) {
11479 tp->napi[0].irq_vec = tp->pdev->irq;
11482 if (tp->irq_cnt == 1) {
11485 netif_set_real_num_tx_queues(tp->dev, 1);
11486 netif_set_real_num_rx_queues(tp->dev, 1);
11490 static void tg3_ints_fini(struct tg3 *tp)
11492 if (tg3_flag(tp, USING_MSIX))
11493 pci_disable_msix(tp->pdev);
11494 else if (tg3_flag(tp, USING_MSI))
11495 pci_disable_msi(tp->pdev);
11496 tg3_flag_clear(tp, USING_MSI);
11497 tg3_flag_clear(tp, USING_MSIX);
11498 tg3_flag_clear(tp, ENABLE_RSS);
11499 tg3_flag_clear(tp, ENABLE_TSS);
11502 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11505 struct net_device *dev = tp->dev;
11509 * Setup interrupts first so we know how
11510 * many NAPI resources to allocate
11514 tg3_rss_check_indir_tbl(tp);
11516 /* The placement of this call is tied
11517 * to the setup and use of Host TX descriptors.
11519 err = tg3_alloc_consistent(tp);
11521 goto out_ints_fini;
11525 tg3_napi_enable(tp);
11527 for (i = 0; i < tp->irq_cnt; i++) {
11528 struct tg3_napi *tnapi = &tp->napi[i];
11529 err = tg3_request_irq(tp, i);
11531 for (i--; i >= 0; i--) {
11532 tnapi = &tp->napi[i];
11533 free_irq(tnapi->irq_vec, tnapi);
11535 goto out_napi_fini;
11539 tg3_full_lock(tp, 0);
11542 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11544 err = tg3_init_hw(tp, reset_phy);
11546 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11547 tg3_free_rings(tp);
11550 tg3_full_unlock(tp);
11555 if (test_irq && tg3_flag(tp, USING_MSI)) {
11556 err = tg3_test_msi(tp);
11559 tg3_full_lock(tp, 0);
11560 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11561 tg3_free_rings(tp);
11562 tg3_full_unlock(tp);
11564 goto out_napi_fini;
11567 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11568 u32 val = tr32(PCIE_TRANSACTION_CFG);
11570 tw32(PCIE_TRANSACTION_CFG,
11571 val | PCIE_TRANS_CFG_1SHOT_MSI);
11577 tg3_hwmon_open(tp);
11579 tg3_full_lock(tp, 0);
11581 tg3_timer_start(tp);
11582 tg3_flag_set(tp, INIT_COMPLETE);
11583 tg3_enable_ints(tp);
11585 tg3_ptp_resume(tp);
11587 tg3_full_unlock(tp);
11589 netif_tx_start_all_queues(dev);
11592 * Reset the loopback feature if it was turned on while the device
11593 * was down, to make sure that it is installed properly now.
11594 */
11595 if (dev->features & NETIF_F_LOOPBACK)
11596 tg3_set_loopback(dev, dev->features);
11601 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11602 struct tg3_napi *tnapi = &tp->napi[i];
11603 free_irq(tnapi->irq_vec, tnapi);
11607 tg3_napi_disable(tp);
11609 tg3_free_consistent(tp);
11617 static void tg3_stop(struct tg3 *tp)
11621 tg3_reset_task_cancel(tp);
11622 tg3_netif_stop(tp);
11624 tg3_timer_stop(tp);
11626 tg3_hwmon_close(tp);
11630 tg3_full_lock(tp, 1);
11632 tg3_disable_ints(tp);
11634 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11635 tg3_free_rings(tp);
11636 tg3_flag_clear(tp, INIT_COMPLETE);
11638 tg3_full_unlock(tp);
11640 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11641 struct tg3_napi *tnapi = &tp->napi[i];
11642 free_irq(tnapi->irq_vec, tnapi);
11649 tg3_free_consistent(tp);
11652 static int tg3_open(struct net_device *dev)
11654 struct tg3 *tp = netdev_priv(dev);
11657 if (tp->pcierr_recovery) {
11658 netdev_err(dev, "Failed to open device. PCI error recovery "
11659 "in progress\n");
11660 return -EAGAIN;
11663 if (tp->fw_needed) {
11664 err = tg3_request_firmware(tp);
11665 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11667 netdev_warn(tp->dev, "EEE capability disabled\n");
11668 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11669 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11670 netdev_warn(tp->dev, "EEE capability restored\n");
11671 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11673 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11677 netdev_warn(tp->dev, "TSO capability disabled\n");
11678 tg3_flag_clear(tp, TSO_CAPABLE);
11679 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11680 netdev_notice(tp->dev, "TSO capability restored\n");
11681 tg3_flag_set(tp, TSO_CAPABLE);
11685 tg3_carrier_off(tp);
11687 err = tg3_power_up(tp);
11691 tg3_full_lock(tp, 0);
11693 tg3_disable_ints(tp);
11694 tg3_flag_clear(tp, INIT_COMPLETE);
11696 tg3_full_unlock(tp);
11698 err = tg3_start(tp,
11699 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11702 tg3_frob_aux_power(tp, false);
11703 pci_set_power_state(tp->pdev, PCI_D3hot);
11709 static int tg3_close(struct net_device *dev)
11711 struct tg3 *tp = netdev_priv(dev);
11713 if (tp->pcierr_recovery) {
11714 netdev_err(dev, "Failed to close device. PCI error recovery "
11715 "in progress\n");
11716 return -EAGAIN;
11721 /* Clear stats across close / open calls */
11722 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11723 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11725 if (pci_device_is_present(tp->pdev)) {
11726 tg3_power_down_prepare(tp);
11728 tg3_carrier_off(tp);
11733 static inline u64 get_stat64(tg3_stat64_t *val)
11735 return ((u64)val->high << 32) | ((u64)val->low);
11738 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11740 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11742 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11743 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11744 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11747 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11748 tg3_writephy(tp, MII_TG3_TEST1,
11749 val | MII_TG3_TEST1_CRC_EN);
11750 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11754 tp->phy_crc_errors += val;
11756 return tp->phy_crc_errors;
11759 return get_stat64(&hw_stats->rx_fcs_errors);
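/* Rationale (hedged annotation): on 5700/5701 copper parts the CRC error
 * count is taken from a PHY test register rather than the MAC's FCS
 * counter and accumulated in tp->phy_crc_errors; the running sum implies
 * the hardware counter resets as it is read.
 */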
11762 #define ESTAT_ADD(member) \
11763 estats->member = old_estats->member + \
11764 get_stat64(&hw_stats->member)
11766 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11768 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11769 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11771 ESTAT_ADD(rx_octets);
11772 ESTAT_ADD(rx_fragments);
11773 ESTAT_ADD(rx_ucast_packets);
11774 ESTAT_ADD(rx_mcast_packets);
11775 ESTAT_ADD(rx_bcast_packets);
11776 ESTAT_ADD(rx_fcs_errors);
11777 ESTAT_ADD(rx_align_errors);
11778 ESTAT_ADD(rx_xon_pause_rcvd);
11779 ESTAT_ADD(rx_xoff_pause_rcvd);
11780 ESTAT_ADD(rx_mac_ctrl_rcvd);
11781 ESTAT_ADD(rx_xoff_entered);
11782 ESTAT_ADD(rx_frame_too_long_errors);
11783 ESTAT_ADD(rx_jabbers);
11784 ESTAT_ADD(rx_undersize_packets);
11785 ESTAT_ADD(rx_in_length_errors);
11786 ESTAT_ADD(rx_out_length_errors);
11787 ESTAT_ADD(rx_64_or_less_octet_packets);
11788 ESTAT_ADD(rx_65_to_127_octet_packets);
11789 ESTAT_ADD(rx_128_to_255_octet_packets);
11790 ESTAT_ADD(rx_256_to_511_octet_packets);
11791 ESTAT_ADD(rx_512_to_1023_octet_packets);
11792 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11793 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11794 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11795 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11796 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11798 ESTAT_ADD(tx_octets);
11799 ESTAT_ADD(tx_collisions);
11800 ESTAT_ADD(tx_xon_sent);
11801 ESTAT_ADD(tx_xoff_sent);
11802 ESTAT_ADD(tx_flow_control);
11803 ESTAT_ADD(tx_mac_errors);
11804 ESTAT_ADD(tx_single_collisions);
11805 ESTAT_ADD(tx_mult_collisions);
11806 ESTAT_ADD(tx_deferred);
11807 ESTAT_ADD(tx_excessive_collisions);
11808 ESTAT_ADD(tx_late_collisions);
11809 ESTAT_ADD(tx_collide_2times);
11810 ESTAT_ADD(tx_collide_3times);
11811 ESTAT_ADD(tx_collide_4times);
11812 ESTAT_ADD(tx_collide_5times);
11813 ESTAT_ADD(tx_collide_6times);
11814 ESTAT_ADD(tx_collide_7times);
11815 ESTAT_ADD(tx_collide_8times);
11816 ESTAT_ADD(tx_collide_9times);
11817 ESTAT_ADD(tx_collide_10times);
11818 ESTAT_ADD(tx_collide_11times);
11819 ESTAT_ADD(tx_collide_12times);
11820 ESTAT_ADD(tx_collide_13times);
11821 ESTAT_ADD(tx_collide_14times);
11822 ESTAT_ADD(tx_collide_15times);
11823 ESTAT_ADD(tx_ucast_packets);
11824 ESTAT_ADD(tx_mcast_packets);
11825 ESTAT_ADD(tx_bcast_packets);
11826 ESTAT_ADD(tx_carrier_sense_errors);
11827 ESTAT_ADD(tx_discards);
11828 ESTAT_ADD(tx_errors);
11830 ESTAT_ADD(dma_writeq_full);
11831 ESTAT_ADD(dma_write_prioq_full);
11832 ESTAT_ADD(rxbds_empty);
11833 ESTAT_ADD(rx_discards);
11834 ESTAT_ADD(rx_errors);
11835 ESTAT_ADD(rx_threshold_hit);
11837 ESTAT_ADD(dma_readq_full);
11838 ESTAT_ADD(dma_read_prioq_full);
11839 ESTAT_ADD(tx_comp_queue_full);
11841 ESTAT_ADD(ring_set_send_prod_index);
11842 ESTAT_ADD(ring_status_update);
11843 ESTAT_ADD(nic_irqs);
11844 ESTAT_ADD(nic_avoided_irqs);
11845 ESTAT_ADD(nic_tx_threshold_hit);
11847 ESTAT_ADD(mbuf_lwm_thresh_hit);
11850 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11852 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11853 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11855 stats->rx_packets = old_stats->rx_packets +
11856 get_stat64(&hw_stats->rx_ucast_packets) +
11857 get_stat64(&hw_stats->rx_mcast_packets) +
11858 get_stat64(&hw_stats->rx_bcast_packets);
11860 stats->tx_packets = old_stats->tx_packets +
11861 get_stat64(&hw_stats->tx_ucast_packets) +
11862 get_stat64(&hw_stats->tx_mcast_packets) +
11863 get_stat64(&hw_stats->tx_bcast_packets);
11865 stats->rx_bytes = old_stats->rx_bytes +
11866 get_stat64(&hw_stats->rx_octets);
11867 stats->tx_bytes = old_stats->tx_bytes +
11868 get_stat64(&hw_stats->tx_octets);
11870 stats->rx_errors = old_stats->rx_errors +
11871 get_stat64(&hw_stats->rx_errors);
11872 stats->tx_errors = old_stats->tx_errors +
11873 get_stat64(&hw_stats->tx_errors) +
11874 get_stat64(&hw_stats->tx_mac_errors) +
11875 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11876 get_stat64(&hw_stats->tx_discards);
11878 stats->multicast = old_stats->multicast +
11879 get_stat64(&hw_stats->rx_mcast_packets);
11880 stats->collisions = old_stats->collisions +
11881 get_stat64(&hw_stats->tx_collisions);
11883 stats->rx_length_errors = old_stats->rx_length_errors +
11884 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11885 get_stat64(&hw_stats->rx_undersize_packets);
11887 stats->rx_frame_errors = old_stats->rx_frame_errors +
11888 get_stat64(&hw_stats->rx_align_errors);
11889 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11890 get_stat64(&hw_stats->tx_discards);
11891 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11892 get_stat64(&hw_stats->tx_carrier_sense_errors);
11894 stats->rx_crc_errors = old_stats->rx_crc_errors +
11895 tg3_calc_crc_errors(tp);
11897 stats->rx_missed_errors = old_stats->rx_missed_errors +
11898 get_stat64(&hw_stats->rx_discards);
11900 stats->rx_dropped = tp->rx_dropped;
11901 stats->tx_dropped = tp->tx_dropped;
11904 static int tg3_get_regs_len(struct net_device *dev)
11906 return TG3_REG_BLK_SIZE;
11909 static void tg3_get_regs(struct net_device *dev,
11910 struct ethtool_regs *regs, void *_p)
11912 struct tg3 *tp = netdev_priv(dev);
11916 memset(_p, 0, TG3_REG_BLK_SIZE);
11918 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11921 tg3_full_lock(tp, 0);
11923 tg3_dump_legacy_regs(tp, (u32 *)_p);
11925 tg3_full_unlock(tp);
11928 static int tg3_get_eeprom_len(struct net_device *dev)
11930 struct tg3 *tp = netdev_priv(dev);
11932 return tp->nvram_size;
11935 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11937 struct tg3 *tp = netdev_priv(dev);
11938 int ret, cpmu_restore = 0;
11940 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11943 if (tg3_flag(tp, NO_NVRAM))
11946 offset = eeprom->offset;
11950 eeprom->magic = TG3_EEPROM_MAGIC;
11952 /* Override clock, link aware and link idle modes */
11953 if (tg3_flag(tp, CPMU_PRESENT)) {
11954 cpmu_val = tr32(TG3_CPMU_CTRL);
11955 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11956 CPMU_CTRL_LINK_IDLE_MODE)) {
11957 tw32(TG3_CPMU_CTRL, cpmu_val &
11958 ~(CPMU_CTRL_LINK_AWARE_MODE |
11959 CPMU_CTRL_LINK_IDLE_MODE));
11963 tg3_override_clk(tp);
11966 /* adjustments to start on required 4 byte boundary */
11967 b_offset = offset & 3;
11968 b_count = 4 - b_offset;
11969 if (b_count > len) {
11970 /* i.e. offset=1 len=2 */
11973 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11976 memcpy(data, ((char *)&val) + b_offset, b_count);
11979 eeprom->len += b_count;
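/* Worked example (an illustration only): for offset = 5 and len = 8 the
 * code above computes b_offset = 5 & 3 = 1 and b_count = 4 - 1 = 3, reads
 * the aligned word at NVRAM offset 4, and copies its bytes 1..3 into
 * data[], so the main loop below can continue on a 4-byte boundary.
 */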
11982 /* read bytes up to the last 4 byte boundary */
11983 pd = &data[eeprom->len];
11984 for (i = 0; i < (len - (len & 3)); i += 4) {
11985 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11992 memcpy(pd + i, &val, 4);
11993 if (need_resched()) {
11994 if (signal_pending(current)) {
12005 /* read last bytes not ending on 4 byte boundary */
12006 pd = &data[eeprom->len];
12008 b_offset = offset + len - b_count;
12009 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12012 memcpy(pd, &val, b_count);
12013 eeprom->len += b_count;
12018 /* Restore clock, link aware and link idle modes */
12019 tg3_restore_clk(tp);
12021 tw32(TG3_CPMU_CTRL, cpmu_val);
12026 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12028 struct tg3 *tp = netdev_priv(dev);
12030 u32 offset, len, b_offset, odd_len;
12032 __be32 start = 0, end;
12034 if (tg3_flag(tp, NO_NVRAM) ||
12035 eeprom->magic != TG3_EEPROM_MAGIC)
12038 offset = eeprom->offset;
12041 if ((b_offset = (offset & 3))) {
12042 /* adjustments to start on required 4 byte boundary */
12043 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12054 /* adjustments to end on required 4 byte boundary */
12056 len = (len + 3) & ~3;
12057 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12063 if (b_offset || odd_len) {
12064 buf = kmalloc(len, GFP_KERNEL);
12068 memcpy(buf, &start, 4);
12070 memcpy(buf+len-4, &end, 4);
12071 memcpy(buf + b_offset, data, eeprom->len);
12074 ret = tg3_nvram_write_block(tp, offset, len, buf);
12082 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12084 struct tg3 *tp = netdev_priv(dev);
12086 if (tg3_flag(tp, USE_PHYLIB)) {
12087 struct phy_device *phydev;
12088 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12090 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12091 return phy_ethtool_gset(phydev, cmd);
12094 cmd->supported = (SUPPORTED_Autoneg);
12096 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12097 cmd->supported |= (SUPPORTED_1000baseT_Half |
12098 SUPPORTED_1000baseT_Full);
12100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12101 cmd->supported |= (SUPPORTED_100baseT_Half |
12102 SUPPORTED_100baseT_Full |
12103 SUPPORTED_10baseT_Half |
12104 SUPPORTED_10baseT_Full |
12106 cmd->port = PORT_TP;
12108 cmd->supported |= SUPPORTED_FIBRE;
12109 cmd->port = PORT_FIBRE;
12112 cmd->advertising = tp->link_config.advertising;
12113 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12114 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12115 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12116 cmd->advertising |= ADVERTISED_Pause;
12118 cmd->advertising |= ADVERTISED_Pause |
12119 ADVERTISED_Asym_Pause;
12121 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12122 cmd->advertising |= ADVERTISED_Asym_Pause;
12125 if (netif_running(dev) && tp->link_up) {
12126 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
12127 cmd->duplex = tp->link_config.active_duplex;
12128 cmd->lp_advertising = tp->link_config.rmt_adv;
12129 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12130 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12131 cmd->eth_tp_mdix = ETH_TP_MDI_X;
12133 cmd->eth_tp_mdix = ETH_TP_MDI;
12136 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
12137 cmd->duplex = DUPLEX_UNKNOWN;
12138 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
12140 cmd->phy_address = tp->phy_addr;
12141 cmd->transceiver = XCVR_INTERNAL;
12142 cmd->autoneg = tp->link_config.autoneg;
12148 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12150 struct tg3 *tp = netdev_priv(dev);
12151 u32 speed = ethtool_cmd_speed(cmd);
12153 if (tg3_flag(tp, USE_PHYLIB)) {
12154 struct phy_device *phydev;
12155 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12157 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12158 return phy_ethtool_sset(phydev, cmd);
12161 if (cmd->autoneg != AUTONEG_ENABLE &&
12162 cmd->autoneg != AUTONEG_DISABLE)
12165 if (cmd->autoneg == AUTONEG_DISABLE &&
12166 cmd->duplex != DUPLEX_FULL &&
12167 cmd->duplex != DUPLEX_HALF)
12170 if (cmd->autoneg == AUTONEG_ENABLE) {
12171 u32 mask = ADVERTISED_Autoneg |
12173 ADVERTISED_Asym_Pause;
12175 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12176 mask |= ADVERTISED_1000baseT_Half |
12177 ADVERTISED_1000baseT_Full;
12179 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12180 mask |= ADVERTISED_100baseT_Half |
12181 ADVERTISED_100baseT_Full |
12182 ADVERTISED_10baseT_Half |
12183 ADVERTISED_10baseT_Full |
12186 mask |= ADVERTISED_FIBRE;
12188 if (cmd->advertising & ~mask)
12191 mask &= (ADVERTISED_1000baseT_Half |
12192 ADVERTISED_1000baseT_Full |
12193 ADVERTISED_100baseT_Half |
12194 ADVERTISED_100baseT_Full |
12195 ADVERTISED_10baseT_Half |
12196 ADVERTISED_10baseT_Full);
12198 cmd->advertising &= mask;
12200 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12201 if (speed != SPEED_1000)
12204 if (cmd->duplex != DUPLEX_FULL)
12207 if (speed != SPEED_100 &&
12213 tg3_full_lock(tp, 0);
12215 tp->link_config.autoneg = cmd->autoneg;
12216 if (cmd->autoneg == AUTONEG_ENABLE) {
12217 tp->link_config.advertising = (cmd->advertising |
12218 ADVERTISED_Autoneg);
12219 tp->link_config.speed = SPEED_UNKNOWN;
12220 tp->link_config.duplex = DUPLEX_UNKNOWN;
12222 tp->link_config.advertising = 0;
12223 tp->link_config.speed = speed;
12224 tp->link_config.duplex = cmd->duplex;
12227 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12229 tg3_warn_mgmt_link_flap(tp);
12231 if (netif_running(dev))
12232 tg3_setup_phy(tp, true);
12234 tg3_full_unlock(tp);
12239 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12241 struct tg3 *tp = netdev_priv(dev);
12243 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12244 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12245 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12246 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12249 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12251 struct tg3 *tp = netdev_priv(dev);
12253 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12254 wol->supported = WAKE_MAGIC;
12256 wol->supported = 0;
12258 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12259 wol->wolopts = WAKE_MAGIC;
12260 memset(&wol->sopass, 0, sizeof(wol->sopass));
12263 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12265 struct tg3 *tp = netdev_priv(dev);
12266 struct device *dp = &tp->pdev->dev;
12268 if (wol->wolopts & ~WAKE_MAGIC)
12270 if ((wol->wolopts & WAKE_MAGIC) &&
12271 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12274 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12276 if (device_may_wakeup(dp))
12277 tg3_flag_set(tp, WOL_ENABLE);
12279 tg3_flag_clear(tp, WOL_ENABLE);
12284 static u32 tg3_get_msglevel(struct net_device *dev)
12286 struct tg3 *tp = netdev_priv(dev);
12287 return tp->msg_enable;
12290 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12292 struct tg3 *tp = netdev_priv(dev);
12293 tp->msg_enable = value;
12296 static int tg3_nway_reset(struct net_device *dev)
12298 struct tg3 *tp = netdev_priv(dev);
12301 if (!netif_running(dev))
12304 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12307 tg3_warn_mgmt_link_flap(tp);
12309 if (tg3_flag(tp, USE_PHYLIB)) {
12310 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12312 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12316 spin_lock_bh(&tp->lock);
12318 tg3_readphy(tp, MII_BMCR, &bmcr);
12319 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12320 ((bmcr & BMCR_ANENABLE) ||
12321 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12322 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12326 spin_unlock_bh(&tp->lock);
12332 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12334 struct tg3 *tp = netdev_priv(dev);
12336 ering->rx_max_pending = tp->rx_std_ring_mask;
12337 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12338 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12340 ering->rx_jumbo_max_pending = 0;
12342 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12344 ering->rx_pending = tp->rx_pending;
12345 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12346 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12348 ering->rx_jumbo_pending = 0;
12350 ering->tx_pending = tp->napi[0].tx_pending;
12353 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12355 struct tg3 *tp = netdev_priv(dev);
12356 int i, irq_sync = 0, err = 0;
12358 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12359 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12360 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12361 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12362 (tg3_flag(tp, TSO_BUG) &&
12363 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12366 if (netif_running(dev)) {
12368 tg3_netif_stop(tp);
12372 tg3_full_lock(tp, irq_sync);
12374 tp->rx_pending = ering->rx_pending;
12376 if (tg3_flag(tp, MAX_RXPEND_64) &&
12377 tp->rx_pending > 63)
12378 tp->rx_pending = 63;
12380 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12381 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12383 for (i = 0; i < tp->irq_max; i++)
12384 tp->napi[i].tx_pending = ering->tx_pending;
12386 if (netif_running(dev)) {
12387 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12388 err = tg3_restart_hw(tp, false);
12390 tg3_netif_start(tp);
12393 tg3_full_unlock(tp);
12395 if (irq_sync && !err)
12401 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12403 struct tg3 *tp = netdev_priv(dev);
12405 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12407 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12408 epause->rx_pause = 1;
12410 epause->rx_pause = 0;
12412 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12413 epause->tx_pause = 1;
12415 epause->tx_pause = 0;
12418 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12420 struct tg3 *tp = netdev_priv(dev);
12423 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12424 tg3_warn_mgmt_link_flap(tp);
12426 if (tg3_flag(tp, USE_PHYLIB)) {
12428 struct phy_device *phydev;
12430 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12432 if (!(phydev->supported & SUPPORTED_Pause) ||
12433 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12434 (epause->rx_pause != epause->tx_pause)))
12437 tp->link_config.flowctrl = 0;
12438 if (epause->rx_pause) {
12439 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12441 if (epause->tx_pause) {
12442 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12443 newadv = ADVERTISED_Pause;
12445 newadv = ADVERTISED_Pause |
12446 ADVERTISED_Asym_Pause;
12447 } else if (epause->tx_pause) {
12448 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12449 newadv = ADVERTISED_Asym_Pause;
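/* Summary of the branches above (IEEE 802.3 pause advertisement), not new
 * logic:
 *
 *   rx_pause  tx_pause   newadv
 *      1         1       ADVERTISED_Pause
 *      1         0       ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *      0         1       ADVERTISED_Asym_Pause
 *      0         0       0
 */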
12453 if (epause->autoneg)
12454 tg3_flag_set(tp, PAUSE_AUTONEG);
12456 tg3_flag_clear(tp, PAUSE_AUTONEG);
12458 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12459 u32 oldadv = phydev->advertising &
12460 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12461 if (oldadv != newadv) {
12462 phydev->advertising &=
12463 ~(ADVERTISED_Pause |
12464 ADVERTISED_Asym_Pause);
12465 phydev->advertising |= newadv;
12466 if (phydev->autoneg) {
12468 * Always renegotiate the link to
12469 * inform our link partner of our
12470 * flow control settings, even if the
12471 * flow control is forced. Let
12472 * tg3_adjust_link() do the final
12473 * flow control setup.
12475 return phy_start_aneg(phydev);
12479 if (!epause->autoneg)
12480 tg3_setup_flow_control(tp, 0, 0);
12482 tp->link_config.advertising &=
12483 ~(ADVERTISED_Pause |
12484 ADVERTISED_Asym_Pause);
12485 tp->link_config.advertising |= newadv;
12490 if (netif_running(dev)) {
12491 tg3_netif_stop(tp);
12495 tg3_full_lock(tp, irq_sync);
12497 if (epause->autoneg)
12498 tg3_flag_set(tp, PAUSE_AUTONEG);
12500 tg3_flag_clear(tp, PAUSE_AUTONEG);
12501 if (epause->rx_pause)
12502 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12504 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12505 if (epause->tx_pause)
12506 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12508 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12510 if (netif_running(dev)) {
12511 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12512 err = tg3_restart_hw(tp, false);
12514 tg3_netif_start(tp);
12517 tg3_full_unlock(tp);
12520 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12525 static int tg3_get_sset_count(struct net_device *dev, int sset)
12529 return TG3_NUM_TEST;
12531 return TG3_NUM_STATS;
12533 return -EOPNOTSUPP;
12537 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12538 u32 *rules __always_unused)
12540 struct tg3 *tp = netdev_priv(dev);
12542 if (!tg3_flag(tp, SUPPORT_MSIX))
12543 return -EOPNOTSUPP;
12545 switch (info->cmd) {
12546 case ETHTOOL_GRXRINGS:
12547 if (netif_running(tp->dev))
12548 info->data = tp->rxq_cnt;
12550 info->data = num_online_cpus();
12551 if (info->data > TG3_RSS_MAX_NUM_QS)
12552 info->data = TG3_RSS_MAX_NUM_QS;
12555 /* The first interrupt vector only
12556 * handles link interrupts.
12562 return -EOPNOTSUPP;
12566 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12569 struct tg3 *tp = netdev_priv(dev);
12571 if (tg3_flag(tp, SUPPORT_MSIX))
12572 size = TG3_RSS_INDIR_TBL_SIZE;
12577 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12579 struct tg3 *tp = netdev_priv(dev);
12583 *hfunc = ETH_RSS_HASH_TOP;
12587 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12588 indir[i] = tp->rss_ind_tbl[i];
12593 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12596 struct tg3 *tp = netdev_priv(dev);
12599 /* We require at least one supported parameter to be changed and no
12600 * change in any of the unsupported parameters
12603 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12604 return -EOPNOTSUPP;
12609 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12610 tp->rss_ind_tbl[i] = indir[i];
12612 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12615 /* It is legal to write the indirection
12616 * table while the device is running.
12618 tg3_full_lock(tp, 0);
12619 tg3_rss_write_indir_tbl(tp);
12620 tg3_full_unlock(tp);
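/* From user space this path is typically reached through ethtool; a
 * hedged usage sketch (device name and queue count are examples only):
 *
 *   ethtool -X eth0 equal 4    # spread rx hash buckets over 4 queues
 *
 * Each indir[] entry names the rx queue serving one hash bucket, which is
 * why the table may be rewritten while traffic is flowing.
 */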
12625 static void tg3_get_channels(struct net_device *dev,
12626 struct ethtool_channels *channel)
12628 struct tg3 *tp = netdev_priv(dev);
12629 u32 deflt_qs = netif_get_num_default_rss_queues();
12631 channel->max_rx = tp->rxq_max;
12632 channel->max_tx = tp->txq_max;
12634 if (netif_running(dev)) {
12635 channel->rx_count = tp->rxq_cnt;
12636 channel->tx_count = tp->txq_cnt;
12639 channel->rx_count = tp->rxq_req;
12641 channel->rx_count = min(deflt_qs, tp->rxq_max);
12644 channel->tx_count = tp->txq_req;
12646 channel->tx_count = min(deflt_qs, tp->txq_max);
12650 static int tg3_set_channels(struct net_device *dev,
12651 struct ethtool_channels *channel)
12653 struct tg3 *tp = netdev_priv(dev);
12655 if (!tg3_flag(tp, SUPPORT_MSIX))
12656 return -EOPNOTSUPP;
12658 if (channel->rx_count > tp->rxq_max ||
12659 channel->tx_count > tp->txq_max)
12662 tp->rxq_req = channel->rx_count;
12663 tp->txq_req = channel->tx_count;
12665 if (!netif_running(dev))
12670 tg3_carrier_off(tp);
12672 tg3_start(tp, true, false, false);
12677 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12679 switch (stringset) {
12681 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12684 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12687 WARN_ON(1); /* we need a WARN() */
12692 static int tg3_set_phys_id(struct net_device *dev,
12693 enum ethtool_phys_id_state state)
12695 struct tg3 *tp = netdev_priv(dev);
12697 if (!netif_running(tp->dev))
12701 case ETHTOOL_ID_ACTIVE:
12702 return 1; /* cycle on/off once per second */
12704 case ETHTOOL_ID_ON:
12705 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12706 LED_CTRL_1000MBPS_ON |
12707 LED_CTRL_100MBPS_ON |
12708 LED_CTRL_10MBPS_ON |
12709 LED_CTRL_TRAFFIC_OVERRIDE |
12710 LED_CTRL_TRAFFIC_BLINK |
12711 LED_CTRL_TRAFFIC_LED);
12714 case ETHTOOL_ID_OFF:
12715 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12716 LED_CTRL_TRAFFIC_OVERRIDE);
12719 case ETHTOOL_ID_INACTIVE:
12720 tw32(MAC_LED_CTRL, tp->led_ctrl);
12727 static void tg3_get_ethtool_stats(struct net_device *dev,
12728 struct ethtool_stats *estats, u64 *tmp_stats)
12730 struct tg3 *tp = netdev_priv(dev);
12733 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12735 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12738 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12742 u32 offset = 0, len = 0;
12745 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12748 if (magic == TG3_EEPROM_MAGIC) {
12749 for (offset = TG3_NVM_DIR_START;
12750 offset < TG3_NVM_DIR_END;
12751 offset += TG3_NVM_DIRENT_SIZE) {
12752 if (tg3_nvram_read(tp, offset, &val))
12755 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12756 TG3_NVM_DIRTYPE_EXTVPD)
12760 if (offset != TG3_NVM_DIR_END) {
12761 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12762 if (tg3_nvram_read(tp, offset + 4, &offset))
12765 offset = tg3_nvram_logical_addr(tp, offset);
12769 if (!offset || !len) {
12770 offset = TG3_NVM_VPD_OFF;
12771 len = TG3_NVM_VPD_LEN;
12774 buf = kmalloc(len, GFP_KERNEL);
12778 if (magic == TG3_EEPROM_MAGIC) {
12779 for (i = 0; i < len; i += 4) {
12780 /* The data is in little-endian format in NVRAM.
12781 * Use the big-endian read routines to preserve
12782 * the byte order as it exists in NVRAM.
12784 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12790 unsigned int pos = 0;
12792 ptr = (u8 *)&buf[0];
12793 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12794 cnt = pci_read_vpd(tp->pdev, pos,
12796 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12814 #define NVRAM_TEST_SIZE 0x100
12815 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12816 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12817 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12818 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12819 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12820 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12821 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12822 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12824 static int tg3_test_nvram(struct tg3 *tp)
12826 u32 csum, magic, len;
12828 int i, j, k, err = 0, size;
12830 if (tg3_flag(tp, NO_NVRAM))
12833 if (tg3_nvram_read(tp, 0, &magic) != 0)
12836 if (magic == TG3_EEPROM_MAGIC)
12837 size = NVRAM_TEST_SIZE;
12838 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12839 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12840 TG3_EEPROM_SB_FORMAT_1) {
12841 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12842 case TG3_EEPROM_SB_REVISION_0:
12843 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12845 case TG3_EEPROM_SB_REVISION_2:
12846 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12848 case TG3_EEPROM_SB_REVISION_3:
12849 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12851 case TG3_EEPROM_SB_REVISION_4:
12852 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12854 case TG3_EEPROM_SB_REVISION_5:
12855 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12857 case TG3_EEPROM_SB_REVISION_6:
12858 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12865 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12866 size = NVRAM_SELFBOOT_HW_SIZE;
12870 buf = kmalloc(size, GFP_KERNEL);
12875 for (i = 0, j = 0; i < size; i += 4, j++) {
12876 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12883 /* Selfboot format */
12884 magic = be32_to_cpu(buf[0]);
12885 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12886 TG3_EEPROM_MAGIC_FW) {
12887 u8 *buf8 = (u8 *) buf, csum8 = 0;
12889 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12890 TG3_EEPROM_SB_REVISION_2) {
12891 /* For rev 2, the csum doesn't include the MBA. */
12892 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12894 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12897 for (i = 0; i < size; i++)
12910 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12911 TG3_EEPROM_MAGIC_HW) {
12912 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12913 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12914 u8 *buf8 = (u8 *) buf;
12916 /* Separate the parity bits and the data bytes. */
12917 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12918 if ((i == 0) || (i == 8)) {
12922 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12923 parity[k++] = buf8[i] & msk;
12925 } else if (i == 16) {
12929 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12930 parity[k++] = buf8[i] & msk;
12933 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12934 parity[k++] = buf8[i] & msk;
12937 data[j++] = buf8[i];
12941 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12942 u8 hw8 = hweight8(data[i]);
12944 if ((hw8 & 0x1) && parity[i])
12946 else if (!(hw8 & 0x1) && !parity[i])
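/* Parity example (an illustration only): the selfboot image stores odd
 * parity, so a data byte of 0x03 (hweight8() == 2, even) must carry a set
 * parity bit while 0x07 (weight 3, odd) must carry a clear one; the two
 * branches above reject any byte that violates this rule.
 */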
12955 /* Bootstrap checksum at offset 0x10 */
12956 csum = calc_crc((unsigned char *) buf, 0x10);
12957 if (csum != le32_to_cpu(buf[0x10/4]))
12960 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12961 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12962 if (csum != le32_to_cpu(buf[0xfc/4]))
12967 buf = tg3_vpd_readblock(tp, &len);
12971 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12973 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12977 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12980 i += PCI_VPD_LRDT_TAG_SIZE;
12981 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12982 PCI_VPD_RO_KEYWORD_CHKSUM);
12986 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12988 for (i = 0; i <= j; i++)
12989 csum8 += ((u8 *)buf)[i];
13003 #define TG3_SERDES_TIMEOUT_SEC 2
13004 #define TG3_COPPER_TIMEOUT_SEC 6
13006 static int tg3_test_link(struct tg3 *tp)
13010 if (!netif_running(tp->dev))
13013 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13014 max = TG3_SERDES_TIMEOUT_SEC;
13016 max = TG3_COPPER_TIMEOUT_SEC;
13018 for (i = 0; i < max; i++) {
13022 if (msleep_interruptible(1000))
13029 /* Only test the commonly used registers */
13030 static int tg3_test_registers(struct tg3 *tp)
13032 int i, is_5705, is_5750;
13033 u32 offset, read_mask, write_mask, val, save_val, read_val;
13037 #define TG3_FL_5705 0x1
13038 #define TG3_FL_NOT_5705 0x2
13039 #define TG3_FL_NOT_5788 0x4
13040 #define TG3_FL_NOT_5750 0x8
13044 /* MAC Control Registers */
13045 { MAC_MODE, TG3_FL_NOT_5705,
13046 0x00000000, 0x00ef6f8c },
13047 { MAC_MODE, TG3_FL_5705,
13048 0x00000000, 0x01ef6b8c },
13049 { MAC_STATUS, TG3_FL_NOT_5705,
13050 0x03800107, 0x00000000 },
13051 { MAC_STATUS, TG3_FL_5705,
13052 0x03800100, 0x00000000 },
13053 { MAC_ADDR_0_HIGH, 0x0000,
13054 0x00000000, 0x0000ffff },
13055 { MAC_ADDR_0_LOW, 0x0000,
13056 0x00000000, 0xffffffff },
13057 { MAC_RX_MTU_SIZE, 0x0000,
13058 0x00000000, 0x0000ffff },
13059 { MAC_TX_MODE, 0x0000,
13060 0x00000000, 0x00000070 },
13061 { MAC_TX_LENGTHS, 0x0000,
13062 0x00000000, 0x00003fff },
13063 { MAC_RX_MODE, TG3_FL_NOT_5705,
13064 0x00000000, 0x000007fc },
13065 { MAC_RX_MODE, TG3_FL_5705,
13066 0x00000000, 0x000007dc },
13067 { MAC_HASH_REG_0, 0x0000,
13068 0x00000000, 0xffffffff },
13069 { MAC_HASH_REG_1, 0x0000,
13070 0x00000000, 0xffffffff },
13071 { MAC_HASH_REG_2, 0x0000,
13072 0x00000000, 0xffffffff },
13073 { MAC_HASH_REG_3, 0x0000,
13074 0x00000000, 0xffffffff },
13076 /* Receive Data and Receive BD Initiator Control Registers. */
13077 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13078 0x00000000, 0xffffffff },
13079 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13080 0x00000000, 0xffffffff },
13081 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13082 0x00000000, 0x00000003 },
13083 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13084 0x00000000, 0xffffffff },
13085 { RCVDBDI_STD_BD+0, 0x0000,
13086 0x00000000, 0xffffffff },
13087 { RCVDBDI_STD_BD+4, 0x0000,
13088 0x00000000, 0xffffffff },
13089 { RCVDBDI_STD_BD+8, 0x0000,
13090 0x00000000, 0xffff0002 },
13091 { RCVDBDI_STD_BD+0xc, 0x0000,
13092 0x00000000, 0xffffffff },
13094 /* Receive BD Initiator Control Registers. */
13095 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13096 0x00000000, 0xffffffff },
13097 { RCVBDI_STD_THRESH, TG3_FL_5705,
13098 0x00000000, 0x000003ff },
13099 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13100 0x00000000, 0xffffffff },
13102 /* Host Coalescing Control Registers. */
13103 { HOSTCC_MODE, TG3_FL_NOT_5705,
13104 0x00000000, 0x00000004 },
13105 { HOSTCC_MODE, TG3_FL_5705,
13106 0x00000000, 0x000000f6 },
13107 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13108 0x00000000, 0xffffffff },
13109 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13110 0x00000000, 0x000003ff },
13111 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13112 0x00000000, 0xffffffff },
13113 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13114 0x00000000, 0x000003ff },
13115 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13116 0x00000000, 0xffffffff },
13117 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13118 0x00000000, 0x000000ff },
13119 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13120 0x00000000, 0xffffffff },
13121 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13122 0x00000000, 0x000000ff },
13123 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13124 0x00000000, 0xffffffff },
13125 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13126 0x00000000, 0xffffffff },
13127 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13128 0x00000000, 0xffffffff },
13129 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13130 0x00000000, 0x000000ff },
13131 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13132 0x00000000, 0xffffffff },
13133 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13134 0x00000000, 0x000000ff },
13135 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13136 0x00000000, 0xffffffff },
13137 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13138 0x00000000, 0xffffffff },
13139 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13140 0x00000000, 0xffffffff },
13141 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13142 0x00000000, 0xffffffff },
13143 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13144 0x00000000, 0xffffffff },
13145 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13146 0xffffffff, 0x00000000 },
13147 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13148 0xffffffff, 0x00000000 },
13150 /* Buffer Manager Control Registers. */
13151 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13152 0x00000000, 0x007fff80 },
13153 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13154 0x00000000, 0x007fffff },
13155 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13156 0x00000000, 0x0000003f },
13157 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13158 0x00000000, 0x000001ff },
13159 { BUFMGR_MB_HIGH_WATER, 0x0000,
13160 0x00000000, 0x000001ff },
13161 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13162 0xffffffff, 0x00000000 },
13163 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13164 0xffffffff, 0x00000000 },
13166 /* Mailbox Registers */
13167 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13168 0x00000000, 0x000001ff },
13169 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13170 0x00000000, 0x000001ff },
13171 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13172 0x00000000, 0x000007ff },
13173 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13174 0x00000000, 0x000001ff },
13176 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13179 is_5705 = is_5750 = 0;
13180 if (tg3_flag(tp, 5705_PLUS)) {
13182 if (tg3_flag(tp, 5750_PLUS))
13186 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13187 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13190 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13193 if (tg3_flag(tp, IS_5788) &&
13194 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13197 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13200 offset = (u32) reg_tbl[i].offset;
13201 read_mask = reg_tbl[i].read_mask;
13202 write_mask = reg_tbl[i].write_mask;
13204 /* Save the original register content */
13205 save_val = tr32(offset);
13207 /* Determine the read-only value. */
13208 read_val = save_val & read_mask;
13210 /* Write zero to the register, then make sure the read-only bits
13211 * are not changed and the read/write bits are all zeros.
13215 val = tr32(offset);
13217 /* Test the read-only and read/write bits. */
13218 if (((val & read_mask) != read_val) || (val & write_mask))
13221 /* Write ones to all the bits defined by RdMask and WrMask, then
13222 * make sure the read-only bits are not changed and the
13223 * read/write bits are all ones.
13225 tw32(offset, read_mask | write_mask);
13227 val = tr32(offset);
13229 /* Test the read-only bits. */
13230 if ((val & read_mask) != read_val)
13233 /* Test the read/write bits. */
13234 if ((val & write_mask) != write_mask)
13237 tw32(offset, save_val);
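/* Worked example (an illustration only): MAC_STATUS on non-5705 parts has
 * read_mask = 0x03800107 and write_mask = 0x00000000, so after writing 0
 * the bits under read_mask must still equal their saved value, and after
 * writing read_mask | write_mask every writable bit must read back as 1;
 * any other outcome lands in the failure path below.
 */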
13243 if (netif_msg_hw(tp))
13244 netdev_err(tp->dev,
13245 "Register test failed at offset %x\n", offset);
13246 tw32(offset, save_val);
13250 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13252 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
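/* The three patterns exercise both polarities: all zeros, all ones, and
 * 0xaa55a55a, whose alternating bits help catch stuck or shorted data
 * lines in the memory region under test.
 */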
13256 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13257 for (j = 0; j < len; j += 4) {
13260 tg3_write_mem(tp, offset + j, test_pattern[i]);
13261 tg3_read_mem(tp, offset + j, &val);
13262 if (val != test_pattern[i])
13269 static int tg3_test_memory(struct tg3 *tp)
13271 static struct mem_entry {
13274 } mem_tbl_570x[] = {
13275 { 0x00000000, 0x00b50},
13276 { 0x00002000, 0x1c000},
13277 { 0xffffffff, 0x00000}
13278 }, mem_tbl_5705[] = {
13279 { 0x00000100, 0x0000c},
13280 { 0x00000200, 0x00008},
13281 { 0x00004000, 0x00800},
13282 { 0x00006000, 0x01000},
13283 { 0x00008000, 0x02000},
13284 { 0x00010000, 0x0e000},
13285 { 0xffffffff, 0x00000}
13286 }, mem_tbl_5755[] = {
13287 { 0x00000200, 0x00008},
13288 { 0x00004000, 0x00800},
13289 { 0x00006000, 0x00800},
13290 { 0x00008000, 0x02000},
13291 { 0x00010000, 0x0c000},
13292 { 0xffffffff, 0x00000}
13293 }, mem_tbl_5906[] = {
13294 { 0x00000200, 0x00008},
13295 { 0x00004000, 0x00400},
13296 { 0x00006000, 0x00400},
13297 { 0x00008000, 0x01000},
13298 { 0x00010000, 0x01000},
13299 { 0xffffffff, 0x00000}
13300 }, mem_tbl_5717[] = {
13301 { 0x00000200, 0x00008},
13302 { 0x00010000, 0x0a000},
13303 { 0x00020000, 0x13c00},
13304 { 0xffffffff, 0x00000}
13305 }, mem_tbl_57765[] = {
13306 { 0x00000200, 0x00008},
13307 { 0x00004000, 0x00800},
13308 { 0x00006000, 0x09800},
13309 { 0x00010000, 0x0a000},
13310 { 0xffffffff, 0x00000}
13312 struct mem_entry *mem_tbl;
13316 if (tg3_flag(tp, 5717_PLUS))
13317 mem_tbl = mem_tbl_5717;
13318 else if (tg3_flag(tp, 57765_CLASS) ||
13319 tg3_asic_rev(tp) == ASIC_REV_5762)
13320 mem_tbl = mem_tbl_57765;
13321 else if (tg3_flag(tp, 5755_PLUS))
13322 mem_tbl = mem_tbl_5755;
13323 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13324 mem_tbl = mem_tbl_5906;
13325 else if (tg3_flag(tp, 5705_PLUS))
13326 mem_tbl = mem_tbl_5705;
13328 mem_tbl = mem_tbl_570x;
13330 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13331 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13339 #define TG3_TSO_MSS 500
13341 #define TG3_TSO_IP_HDR_LEN 20
13342 #define TG3_TSO_TCP_HDR_LEN 20
13343 #define TG3_TSO_TCP_OPT_LEN 12
13345 static const u8 tg3_tso_header[] = {
13347 0x45, 0x00, 0x00, 0x00,
13348 0x00, 0x00, 0x40, 0x00,
13349 0x40, 0x06, 0x00, 0x00,
13350 0x0a, 0x00, 0x00, 0x01,
13351 0x0a, 0x00, 0x00, 0x02,
13352 0x0d, 0x00, 0xe0, 0x00,
13353 0x00, 0x00, 0x01, 0x00,
13354 0x00, 0x00, 0x02, 0x00,
13355 0x80, 0x10, 0x10, 0x00,
13356 0x14, 0x09, 0x00, 0x00,
13357 0x01, 0x01, 0x08, 0x0a,
13358 0x11, 0x11, 0x11, 0x11,
13359 0x11, 0x11, 0x11, 0x11,
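/* Decoded view of tg3_tso_header (annotation only): bytes 0..19 form an
 * IPv4 header -- 0x45 is version 4 with a 5-word IHL, protocol 0x06 is
 * TCP, and 0x0a000001/0x0a000002 are source 10.0.0.1 and destination
 * 10.0.0.2.  The rest is a TCP header whose 0x80 data-offset nibble
 * declares 32 bytes: the 20-byte base header plus TG3_TSO_TCP_OPT_LEN
 * (12) bytes of options (two 0x01 NOPs, then a kind 0x08, length 0x0a
 * timestamp option).
 */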
13362 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13364 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13365 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13367 struct sk_buff *skb;
13368 u8 *tx_data, *rx_data;
13370 int num_pkts, tx_len, rx_len, i, err;
13371 struct tg3_rx_buffer_desc *desc;
13372 struct tg3_napi *tnapi, *rnapi;
13373 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13375 tnapi = &tp->napi[0];
13376 rnapi = &tp->napi[0];
13377 if (tp->irq_cnt > 1) {
13378 if (tg3_flag(tp, ENABLE_RSS))
13379 rnapi = &tp->napi[1];
13380 if (tg3_flag(tp, ENABLE_TSS))
13381 tnapi = &tp->napi[1];
13383 coal_now = tnapi->coal_now | rnapi->coal_now;
13388 skb = netdev_alloc_skb(tp->dev, tx_len);
13392 tx_data = skb_put(skb, tx_len);
13393 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13394 memset(tx_data + ETH_ALEN, 0x0, 8);
13396 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13398 if (tso_loopback) {
13399 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13401 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13402 TG3_TSO_TCP_OPT_LEN;
13404 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13405 sizeof(tg3_tso_header));
13408 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13409 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13411 /* Set the total length field in the IP header */
13412 iph->tot_len = htons((u16)(mss + hdr_len));
13414 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13415 TXD_FLAG_CPU_POST_DMA);
13417 if (tg3_flag(tp, HW_TSO_1) ||
13418 tg3_flag(tp, HW_TSO_2) ||
13419 tg3_flag(tp, HW_TSO_3)) {
13421 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13422 th = (struct tcphdr *)&tx_data[val];
13425 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13427 if (tg3_flag(tp, HW_TSO_3)) {
13428 mss |= (hdr_len & 0xc) << 12;
13429 if (hdr_len & 0x10)
13430 base_flags |= 0x00000010;
13431 base_flags |= (hdr_len & 0x3e0) << 5;
13432 } else if (tg3_flag(tp, HW_TSO_2))
13433 mss |= hdr_len << 9;
13434 else if (tg3_flag(tp, HW_TSO_1) ||
13435 tg3_asic_rev(tp) == ASIC_REV_5705) {
13436 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13438 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13441 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13444 data_off = ETH_HLEN;
13446 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13447 tx_len > VLAN_ETH_FRAME_LEN)
13448 base_flags |= TXD_FLAG_JMB_PKT;
13451 for (i = data_off; i < tx_len; i++)
13452 tx_data[i] = (u8) (i & 0xff);
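/* Payload pattern (annotation only): byte i of the frame body is filled
 * with (i & 0xff), and the receive-side loop further down replays the
 * same running counter so that any corrupted byte fails the comparison.
 */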
13454 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13455 if (pci_dma_mapping_error(tp->pdev, map)) {
13456 dev_kfree_skb(skb);
13460 val = tnapi->tx_prod;
13461 tnapi->tx_buffers[val].skb = skb;
13462 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13464 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13469 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13471 budget = tg3_tx_avail(tnapi);
13472 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13473 base_flags | TXD_FLAG_END, mss, 0)) {
13474 tnapi->tx_buffers[val].skb = NULL;
13475 dev_kfree_skb(skb);
13481 /* Sync BD data before updating mailbox */
13484 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13485 tr32_mailbox(tnapi->prodmbox);
13489 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13490 for (i = 0; i < 35; i++) {
13491 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13496 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13497 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13498 if ((tx_idx == tnapi->tx_prod) &&
13499 (rx_idx == (rx_start_idx + num_pkts)))
13503 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13504 dev_kfree_skb(skb);
13506 if (tx_idx != tnapi->tx_prod)
13509 if (rx_idx != rx_start_idx + num_pkts)
13513 while (rx_idx != rx_start_idx) {
13514 desc = &rnapi->rx_rcb[rx_start_idx++];
13515 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13516 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13518 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13519 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13522 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13525 if (!tso_loopback) {
13526 if (rx_len != tx_len)
13529 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13530 if (opaque_key != RXD_OPAQUE_RING_STD)
13533 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13536 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13537 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13538 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13542 if (opaque_key == RXD_OPAQUE_RING_STD) {
13543 rx_data = tpr->rx_std_buffers[desc_idx].data;
13544 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13546 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13547 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13548 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13553 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13554 PCI_DMA_FROMDEVICE);
13556 rx_data += TG3_RX_OFFSET(tp);
13557 for (i = data_off; i < rx_len; i++, val++) {
13558 if (*(rx_data + i) != (u8) (val & 0xff))
13565 /* tg3_free_rings will unmap and free the rx_data */
13570 #define TG3_STD_LOOPBACK_FAILED 1
13571 #define TG3_JMB_LOOPBACK_FAILED 2
13572 #define TG3_TSO_LOOPBACK_FAILED 4
13573 #define TG3_LOOPBACK_FAILED \
13574 (TG3_STD_LOOPBACK_FAILED | \
13575 TG3_JMB_LOOPBACK_FAILED | \
13576 TG3_TSO_LOOPBACK_FAILED)
13578 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13582 u32 jmb_pkt_sz = 9000;
13585 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13587 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13588 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13590 if (!netif_running(tp->dev)) {
13591 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13592 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13594 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13598 err = tg3_reset_hw(tp, true);
13600 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13601 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13603 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13607 if (tg3_flag(tp, ENABLE_RSS)) {
13610 /* Reroute all rx packets to the 1st queue */
13611 for (i = MAC_RSS_INDIR_TBL_0;
13612 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13616 /* HW errata - mac loopback fails in some cases on 5780.
13617 * Normal traffic and PHY loopback are not affected by
13618 * errata. Also, the MAC loopback test is deprecated for
13619 * all newer ASIC revisions.
13621 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13622 !tg3_flag(tp, CPMU_PRESENT)) {
13623 tg3_mac_loopback(tp, true);
13625 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13626 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13628 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13629 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13630 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13632 tg3_mac_loopback(tp, false);
13635 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13636 !tg3_flag(tp, USE_PHYLIB)) {
13639 tg3_phy_lpbk_set(tp, 0, false);
13641 /* Wait for link */
13642 for (i = 0; i < 100; i++) {
13643 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13648 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13649 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13650 if (tg3_flag(tp, TSO_CAPABLE) &&
13651 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13652 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13653 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13654 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13655 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13658 tg3_phy_lpbk_set(tp, 0, true);
13660 /* All link indications report up, but the hardware
13661 * isn't really ready for about 20 msec. Double it to be sure.
13666 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13667 data[TG3_EXT_LOOPB_TEST] |=
13668 TG3_STD_LOOPBACK_FAILED;
13669 if (tg3_flag(tp, TSO_CAPABLE) &&
13670 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13671 data[TG3_EXT_LOOPB_TEST] |=
13672 TG3_TSO_LOOPBACK_FAILED;
13673 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13674 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13675 data[TG3_EXT_LOOPB_TEST] |=
13676 TG3_JMB_LOOPBACK_FAILED;
13679 /* Re-enable gphy autopowerdown. */
13680 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13681 tg3_phy_toggle_apd(tp, true);
13684 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13685 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13688 tp->phy_flags |= eee_cap;
13693 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13696 struct tg3 *tp = netdev_priv(dev);
13697 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13699 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13700 if (tg3_power_up(tp)) {
13701 etest->flags |= ETH_TEST_FL_FAILED;
13702 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13705 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13708 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13710 if (tg3_test_nvram(tp) != 0) {
13711 etest->flags |= ETH_TEST_FL_FAILED;
13712 data[TG3_NVRAM_TEST] = 1;
13714 if (!doextlpbk && tg3_test_link(tp)) {
13715 etest->flags |= ETH_TEST_FL_FAILED;
13716 data[TG3_LINK_TEST] = 1;
13718 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13719 int err, err2 = 0, irq_sync = 0;
13721 if (netif_running(dev)) {
13723 tg3_netif_stop(tp);
13727 tg3_full_lock(tp, irq_sync);
13728 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13729 err = tg3_nvram_lock(tp);
13730 tg3_halt_cpu(tp, RX_CPU_BASE);
13731 if (!tg3_flag(tp, 5705_PLUS))
13732 tg3_halt_cpu(tp, TX_CPU_BASE);
13734 tg3_nvram_unlock(tp);
13736 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13739 if (tg3_test_registers(tp) != 0) {
13740 etest->flags |= ETH_TEST_FL_FAILED;
13741 data[TG3_REGISTER_TEST] = 1;
13744 if (tg3_test_memory(tp) != 0) {
13745 etest->flags |= ETH_TEST_FL_FAILED;
13746 data[TG3_MEMORY_TEST] = 1;
13750 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13752 if (tg3_test_loopback(tp, data, doextlpbk))
13753 etest->flags |= ETH_TEST_FL_FAILED;
13755 tg3_full_unlock(tp);
13757 if (tg3_test_interrupt(tp) != 0) {
13758 etest->flags |= ETH_TEST_FL_FAILED;
13759 data[TG3_INTERRUPT_TEST] = 1;
13762 tg3_full_lock(tp, 0);
13764 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13765 if (netif_running(dev)) {
13766 tg3_flag_set(tp, INIT_COMPLETE);
13767 err2 = tg3_restart_hw(tp, true);
13769 tg3_netif_start(tp);
13772 tg3_full_unlock(tp);
13774 if (irq_sync && !err2)
13777 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13778 tg3_power_down_prepare(tp);
13782 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13784 struct tg3 *tp = netdev_priv(dev);
13785 struct hwtstamp_config stmpconf;
13787 if (!tg3_flag(tp, PTP_CAPABLE))
13788 return -EOPNOTSUPP;
13790 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13793 if (stmpconf.flags)
13796 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13797 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13800 switch (stmpconf.rx_filter) {
13801 case HWTSTAMP_FILTER_NONE:
13804 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13805 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13806 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13808 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13809 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13810 TG3_RX_PTP_CTL_SYNC_EVNT;
13812 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13813 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13814 TG3_RX_PTP_CTL_DELAY_REQ;
13816 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13817 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13818 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13820 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13821 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13822 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13824 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13825 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13826 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13828 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13829 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13830 TG3_RX_PTP_CTL_SYNC_EVNT;
13832 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13833 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13834 TG3_RX_PTP_CTL_SYNC_EVNT;
13836 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13837 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13838 TG3_RX_PTP_CTL_SYNC_EVNT;
13840 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13841 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13842 TG3_RX_PTP_CTL_DELAY_REQ;
13844 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13845 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13846 TG3_RX_PTP_CTL_DELAY_REQ;
13848 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13849 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13850 TG3_RX_PTP_CTL_DELAY_REQ;
13856 if (netif_running(dev) && tp->rxptpctl)
13857 tw32(TG3_RX_PTP_CTL,
13858 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13860 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13861 tg3_flag_set(tp, TX_TSTAMP_EN);
13863 tg3_flag_clear(tp, TX_TSTAMP_EN);
13865 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13869 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13871 struct tg3 *tp = netdev_priv(dev);
13872 struct hwtstamp_config stmpconf;
13874 if (!tg3_flag(tp, PTP_CAPABLE))
13875 return -EOPNOTSUPP;
13877 stmpconf.flags = 0;
13878 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13879 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13881 switch (tp->rxptpctl) {
13883 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13885 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13886 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13888 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13889 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13891 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13892 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13894 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13895 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13897 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13898 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13900 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13901 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13903 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13904 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13906 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13907 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13909 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13910 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13912 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13913 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13915 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13916 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13918 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13919 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13926 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13930 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13932 struct mii_ioctl_data *data = if_mii(ifr);
13933 struct tg3 *tp = netdev_priv(dev);
13936 if (tg3_flag(tp, USE_PHYLIB)) {
13937 struct phy_device *phydev;
13938 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13940 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13941 return phy_mii_ioctl(phydev, ifr, cmd);
13946 data->phy_id = tp->phy_addr;
13949 case SIOCGMIIREG: {
13952 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13953 break; /* We have no PHY */
13955 if (!netif_running(dev))
13958 spin_lock_bh(&tp->lock);
13959 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13960 data->reg_num & 0x1f, &mii_regval);
13961 spin_unlock_bh(&tp->lock);
13963 data->val_out = mii_regval;
13969 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13970 break; /* We have no PHY */
13972 if (!netif_running(dev))
13975 spin_lock_bh(&tp->lock);
13976 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13977 data->reg_num & 0x1f, data->val_in);
13978 spin_unlock_bh(&tp->lock);
13982 case SIOCSHWTSTAMP:
13983 return tg3_hwtstamp_set(dev, ifr);
13985 case SIOCGHWTSTAMP:
13986 return tg3_hwtstamp_get(dev, ifr);
13992 return -EOPNOTSUPP;
13995 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13997 struct tg3 *tp = netdev_priv(dev);
13999 memcpy(ec, &tp->coal, sizeof(*ec));
14003 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14005 struct tg3 *tp = netdev_priv(dev);
14006 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14007 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14009 if (!tg3_flag(tp, 5705_PLUS)) {
14010 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14011 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14012 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14013 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14016 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14017 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14018 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14019 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14020 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14021 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14022 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14023 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14024 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14025 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14028 /* No rx interrupts will be generated if both are zero */
14029 if ((ec->rx_coalesce_usecs == 0) &&
14030 (ec->rx_max_coalesced_frames == 0))
14033 /* No tx interrupts will be generated if both are zero */
14034 if ((ec->tx_coalesce_usecs == 0) &&
14035 (ec->tx_max_coalesced_frames == 0))
14038 /* Only copy relevant parameters, ignore all others. */
14039 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14040 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14041 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14042 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14043 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14044 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14045 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14046 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14047 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14049 if (netif_running(dev)) {
14050 tg3_full_lock(tp, 0);
14051 __tg3_set_coalesce(tp, &tp->coal);
14052 tg3_full_unlock(tp);
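/* A hedged usage sketch (values are examples only): the bounds checked
 * above correspond to what a user requests via ethtool, e.g.
 *
 *   ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * and each rx/tx pair must keep at least one member nonzero, or the NIC
 * would never raise the corresponding interrupt.
 */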
14057 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14059 struct tg3 *tp = netdev_priv(dev);
14061 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14062 netdev_warn(tp->dev, "Board does not support EEE!\n");
14063 return -EOPNOTSUPP;
14066 if (edata->advertised != tp->eee.advertised) {
14067 netdev_warn(tp->dev,
14068 "Direct manipulation of EEE advertisement is not supported\n");
14072 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14073 netdev_warn(tp->dev,
14074 "Maximal Tx Lpi timer supported is %#x(u)\n",
14075 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14081 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14082 tg3_warn_mgmt_link_flap(tp);
14084 if (netif_running(tp->dev)) {
14085 tg3_full_lock(tp, 0);
14088 tg3_full_unlock(tp);
14089 }
14091 return err;
14092 }
14094 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14096 struct tg3 *tp = netdev_priv(dev);
14098 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14099 netdev_warn(tp->dev,
14100 "Board does not support EEE!\n");
14101 return -EOPNOTSUPP;
14102 }
14104 *edata = tp->eee;
14106 return 0;
14107 }
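/* All of the ethtool entry points above are wired into the core through
 * the single ops table below; any handler left unset falls back to the
 * ethtool core defaults.
 */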
14108 static const struct ethtool_ops tg3_ethtool_ops = {
14109 .get_settings = tg3_get_settings,
14110 .set_settings = tg3_set_settings,
14111 .get_drvinfo = tg3_get_drvinfo,
14112 .get_regs_len = tg3_get_regs_len,
14113 .get_regs = tg3_get_regs,
14114 .get_wol = tg3_get_wol,
14115 .set_wol = tg3_set_wol,
14116 .get_msglevel = tg3_get_msglevel,
14117 .set_msglevel = tg3_set_msglevel,
14118 .nway_reset = tg3_nway_reset,
14119 .get_link = ethtool_op_get_link,
14120 .get_eeprom_len = tg3_get_eeprom_len,
14121 .get_eeprom = tg3_get_eeprom,
14122 .set_eeprom = tg3_set_eeprom,
14123 .get_ringparam = tg3_get_ringparam,
14124 .set_ringparam = tg3_set_ringparam,
14125 .get_pauseparam = tg3_get_pauseparam,
14126 .set_pauseparam = tg3_set_pauseparam,
14127 .self_test = tg3_self_test,
14128 .get_strings = tg3_get_strings,
14129 .set_phys_id = tg3_set_phys_id,
14130 .get_ethtool_stats = tg3_get_ethtool_stats,
14131 .get_coalesce = tg3_get_coalesce,
14132 .set_coalesce = tg3_set_coalesce,
14133 .get_sset_count = tg3_get_sset_count,
14134 .get_rxnfc = tg3_get_rxnfc,
14135 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14136 .get_rxfh = tg3_get_rxfh,
14137 .set_rxfh = tg3_set_rxfh,
14138 .get_channels = tg3_get_channels,
14139 .set_channels = tg3_set_channels,
14140 .get_ts_info = tg3_get_ts_info,
14141 .get_eee = tg3_get_eee,
14142 .set_eee = tg3_set_eee,
14143 };
14145 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14146 struct rtnl_link_stats64 *stats)
14148 struct tg3 *tp = netdev_priv(dev);
14150 spin_lock_bh(&tp->lock);
14151 if (!tp->hw_stats) {
14152 *stats = tp->net_stats_prev;
14153 spin_unlock_bh(&tp->lock);
14154 return stats;
14155 }
14157 tg3_get_nstats(tp, stats);
14158 spin_unlock_bh(&tp->lock);
14160 return stats;
14161 }
14163 static void tg3_set_rx_mode(struct net_device *dev)
14165 struct tg3 *tp = netdev_priv(dev);
14167 if (!netif_running(dev))
14168 return;
14170 tg3_full_lock(tp, 0);
14171 __tg3_set_rx_mode(dev);
14172 tg3_full_unlock(tp);
14173 }
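/* MTU changes: on 5780-class chips jumbo frames and TSO are mutually
 * exclusive, so tg3_set_mtu() below trades the TSO_CAPABLE flag for
 * jumbo support there, while all other jumbo-capable chips simply
 * toggle the dedicated jumbo producer ring.
 */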
14175 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14176 int new_mtu)
14178 dev->mtu = new_mtu;
14180 if (new_mtu > ETH_DATA_LEN) {
14181 if (tg3_flag(tp, 5780_CLASS)) {
14182 netdev_update_features(dev);
14183 tg3_flag_clear(tp, TSO_CAPABLE);
14184 } else {
14185 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14186 }
14187 } else {
14188 if (tg3_flag(tp, 5780_CLASS)) {
14189 tg3_flag_set(tp, TSO_CAPABLE);
14190 netdev_update_features(dev);
14192 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14196 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14198 struct tg3 *tp = netdev_priv(dev);
14199 int err;
14200 bool reset_phy = false;
14202 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14203 return -EINVAL;
14205 if (!netif_running(dev)) {
14206 /* We'll just catch it later when the device is brought up. */
14209 tg3_set_mtu(dev, tp, new_mtu);
14210 return 0;
14211 }
14213 tg3_phy_stop(tp);
14215 tg3_netif_stop(tp);
14217 tg3_set_mtu(dev, tp, new_mtu);
14219 tg3_full_lock(tp, 1);
14221 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14223 /* Reset the PHY, otherwise the read DMA engine will be left in a
14224 * mode that limits all requests to 256 bytes.
14225 */
14226 if (tg3_asic_rev(tp) == ASIC_REV_57766)
14227 reset_phy = true;
14229 err = tg3_restart_hw(tp, reset_phy);
14231 if (!err)
14232 tg3_netif_start(tp);
14234 tg3_full_unlock(tp);
14236 if (!err)
14237 tg3_phy_start(tp);
14239 return err;
14240 }
14242 static const struct net_device_ops tg3_netdev_ops = {
14243 .ndo_open = tg3_open,
14244 .ndo_stop = tg3_close,
14245 .ndo_start_xmit = tg3_start_xmit,
14246 .ndo_get_stats64 = tg3_get_stats64,
14247 .ndo_validate_addr = eth_validate_addr,
14248 .ndo_set_rx_mode = tg3_set_rx_mode,
14249 .ndo_set_mac_address = tg3_set_mac_addr,
14250 .ndo_do_ioctl = tg3_ioctl,
14251 .ndo_tx_timeout = tg3_tx_timeout,
14252 .ndo_change_mtu = tg3_change_mtu,
14253 .ndo_fix_features = tg3_fix_features,
14254 .ndo_set_features = tg3_set_features,
14255 #ifdef CONFIG_NET_POLL_CONTROLLER
14256 .ndo_poll_controller = tg3_poll_controller,
14257 #endif
14258 };
14260 static void tg3_get_eeprom_size(struct tg3 *tp)
14262 u32 cursize, val, magic;
14264 tp->nvram_size = EEPROM_CHIP_SIZE;
14266 if (tg3_nvram_read(tp, 0, &magic) != 0)
14267 return;
14269 if ((magic != TG3_EEPROM_MAGIC) &&
14270 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14271 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14272 return;
14274 /*
14275 * Size the chip by reading offsets at increasing powers of two.
14276 * When we encounter our validation signature, we know the addressing
14277 * has wrapped around, and thus have our chip size.
14278 */
14280 cursize = 0x10;
14281 while (cursize < tp->nvram_size) {
14282 if (tg3_nvram_read(tp, cursize, &val) != 0)
14283 return;
14285 if (val == magic)
14286 break;
14288 cursize <<= 1;
14289 }
14291 tp->nvram_size = cursize;
14292 }
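/* Illustrative walk of the sizing loop above, assuming a 128KB part:
 * the reads at 0x10, 0x20, 0x40, ... keep returning data that is not
 * the magic value until the address wraps, so the read at 0x20000
 * returns the signature stored at offset 0 and cursize (0x20000, i.e.
 * 128KB) is taken as the device size.
 */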
14294 static void tg3_get_nvram_size(struct tg3 *tp)
14298 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14299 return;
14301 /* Selfboot format */
14302 if (val != TG3_EEPROM_MAGIC) {
14303 tg3_get_eeprom_size(tp);
14304 return;
14305 }
14307 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14309 /* This is confusing. We want to operate on the
14310 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14311 * call will read from NVRAM and byteswap the data
14312 * according to the byteswapping settings for all
14313 * other register accesses. This ensures the data we
14314 * want will always reside in the lower 16-bits.
14315 * However, the data in NVRAM is in LE format, which
14316 * means the data from the NVRAM read will always be
14317 * opposite the endianness of the CPU. The 16-bit
14318 * byteswap then brings the data to CPU endianness.
14319 */
14320 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14321 return;
14322 }
14324 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14325 }
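/* Worked example for the swab16() above: if the 16-bit value read back
 * for offset 0xf2 is 0x0200 in CPU order, swab16(0x0200) = 0x0002 and
 * the NVRAM size is reported as 2 * 1024 bytes. The 512KB default is
 * only used when the read at 0xf0 fails.
 */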
14327 static void tg3_get_nvram_info(struct tg3 *tp)
14331 nvcfg1 = tr32(NVRAM_CFG1);
14332 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14333 tg3_flag_set(tp, FLASH);
14334 } else {
14335 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14336 tw32(NVRAM_CFG1, nvcfg1);
14337 }
14339 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14340 tg3_flag(tp, 5780_CLASS)) {
14341 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14342 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14343 tp->nvram_jedecnum = JEDEC_ATMEL;
14344 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14345 tg3_flag_set(tp, NVRAM_BUFFERED);
14346 break;
14347 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14348 tp->nvram_jedecnum = JEDEC_ATMEL;
14349 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14350 break;
14351 case FLASH_VENDOR_ATMEL_EEPROM:
14352 tp->nvram_jedecnum = JEDEC_ATMEL;
14353 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14354 tg3_flag_set(tp, NVRAM_BUFFERED);
14355 break;
14356 case FLASH_VENDOR_ST:
14357 tp->nvram_jedecnum = JEDEC_ST;
14358 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14359 tg3_flag_set(tp, NVRAM_BUFFERED);
14360 break;
14361 case FLASH_VENDOR_SAIFUN:
14362 tp->nvram_jedecnum = JEDEC_SAIFUN;
14363 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14364 break;
14365 case FLASH_VENDOR_SST_SMALL:
14366 case FLASH_VENDOR_SST_LARGE:
14367 tp->nvram_jedecnum = JEDEC_SST;
14368 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14369 break;
14370 }
14371 } else {
14372 tp->nvram_jedecnum = JEDEC_ATMEL;
14373 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14374 tg3_flag_set(tp, NVRAM_BUFFERED);
14375 }
14376 }
14378 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14380 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14381 case FLASH_5752PAGE_SIZE_256:
14382 tp->nvram_pagesize = 256;
14383 break;
14384 case FLASH_5752PAGE_SIZE_512:
14385 tp->nvram_pagesize = 512;
14386 break;
14387 case FLASH_5752PAGE_SIZE_1K:
14388 tp->nvram_pagesize = 1024;
14389 break;
14390 case FLASH_5752PAGE_SIZE_2K:
14391 tp->nvram_pagesize = 2048;
14392 break;
14393 case FLASH_5752PAGE_SIZE_4K:
14394 tp->nvram_pagesize = 4096;
14395 break;
14396 case FLASH_5752PAGE_SIZE_264:
14397 tp->nvram_pagesize = 264;
14398 break;
14399 case FLASH_5752PAGE_SIZE_528:
14400 tp->nvram_pagesize = 528;
14401 break;
14402 }
14403 }
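/* Each tg3_get_*_nvram_info() variant below decodes the NVRAM_CFG1
 * strapping for one ASIC family into three pieces of state: the JEDEC
 * vendor, the page size, and (where the pinstrap encodes it) the total
 * part size; anything unrecognized in the newer variants falls back to
 * the NO_NVRAM flag.
 */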
14405 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14409 nvcfg1 = tr32(NVRAM_CFG1);
14411 /* NVRAM protection for TPM */
14412 if (nvcfg1 & (1 << 27))
14413 tg3_flag_set(tp, PROTECTED_NVRAM);
14415 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14416 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14417 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14418 tp->nvram_jedecnum = JEDEC_ATMEL;
14419 tg3_flag_set(tp, NVRAM_BUFFERED);
14420 break;
14421 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14422 tp->nvram_jedecnum = JEDEC_ATMEL;
14423 tg3_flag_set(tp, NVRAM_BUFFERED);
14424 tg3_flag_set(tp, FLASH);
14425 break;
14426 case FLASH_5752VENDOR_ST_M45PE10:
14427 case FLASH_5752VENDOR_ST_M45PE20:
14428 case FLASH_5752VENDOR_ST_M45PE40:
14429 tp->nvram_jedecnum = JEDEC_ST;
14430 tg3_flag_set(tp, NVRAM_BUFFERED);
14431 tg3_flag_set(tp, FLASH);
14432 break;
14433 }
14435 if (tg3_flag(tp, FLASH)) {
14436 tg3_nvram_get_pagesize(tp, nvcfg1);
14437 } else {
14438 /* For eeprom, set pagesize to maximum eeprom size */
14439 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14441 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14442 tw32(NVRAM_CFG1, nvcfg1);
14446 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14448 u32 nvcfg1, protect = 0;
14450 nvcfg1 = tr32(NVRAM_CFG1);
14452 /* NVRAM protection for TPM */
14453 if (nvcfg1 & (1 << 27)) {
14454 tg3_flag_set(tp, PROTECTED_NVRAM);
14455 protect = 1;
14456 }
14458 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14459 switch (nvcfg1) {
14460 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14461 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14462 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14463 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14464 tp->nvram_jedecnum = JEDEC_ATMEL;
14465 tg3_flag_set(tp, NVRAM_BUFFERED);
14466 tg3_flag_set(tp, FLASH);
14467 tp->nvram_pagesize = 264;
14468 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14469 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14470 tp->nvram_size = (protect ? 0x3e200 :
14471 TG3_NVRAM_SIZE_512KB);
14472 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14473 tp->nvram_size = (protect ? 0x1f200 :
14474 TG3_NVRAM_SIZE_256KB);
14476 tp->nvram_size = (protect ? 0x1f200 :
14477 TG3_NVRAM_SIZE_128KB);
14478 break;
14479 case FLASH_5752VENDOR_ST_M45PE10:
14480 case FLASH_5752VENDOR_ST_M45PE20:
14481 case FLASH_5752VENDOR_ST_M45PE40:
14482 tp->nvram_jedecnum = JEDEC_ST;
14483 tg3_flag_set(tp, NVRAM_BUFFERED);
14484 tg3_flag_set(tp, FLASH);
14485 tp->nvram_pagesize = 256;
14486 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14487 tp->nvram_size = (protect ?
14488 TG3_NVRAM_SIZE_64KB :
14489 TG3_NVRAM_SIZE_128KB);
14490 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14491 tp->nvram_size = (protect ?
14492 TG3_NVRAM_SIZE_64KB :
14493 TG3_NVRAM_SIZE_256KB);
14495 tp->nvram_size = (protect ?
14496 TG3_NVRAM_SIZE_128KB :
14497 TG3_NVRAM_SIZE_512KB);
14498 break;
14499 }
14500 }
14502 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14506 nvcfg1 = tr32(NVRAM_CFG1);
14508 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14509 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14510 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14511 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14512 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14513 tp->nvram_jedecnum = JEDEC_ATMEL;
14514 tg3_flag_set(tp, NVRAM_BUFFERED);
14515 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14517 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14518 tw32(NVRAM_CFG1, nvcfg1);
14519 break;
14520 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14521 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14522 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14523 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14524 tp->nvram_jedecnum = JEDEC_ATMEL;
14525 tg3_flag_set(tp, NVRAM_BUFFERED);
14526 tg3_flag_set(tp, FLASH);
14527 tp->nvram_pagesize = 264;
14528 break;
14529 case FLASH_5752VENDOR_ST_M45PE10:
14530 case FLASH_5752VENDOR_ST_M45PE20:
14531 case FLASH_5752VENDOR_ST_M45PE40:
14532 tp->nvram_jedecnum = JEDEC_ST;
14533 tg3_flag_set(tp, NVRAM_BUFFERED);
14534 tg3_flag_set(tp, FLASH);
14535 tp->nvram_pagesize = 256;
14536 break;
14537 }
14538 }
14540 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14542 u32 nvcfg1, protect = 0;
14544 nvcfg1 = tr32(NVRAM_CFG1);
14546 /* NVRAM protection for TPM */
14547 if (nvcfg1 & (1 << 27)) {
14548 tg3_flag_set(tp, PROTECTED_NVRAM);
14549 protect = 1;
14550 }
14552 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14553 switch (nvcfg1) {
14554 case FLASH_5761VENDOR_ATMEL_ADB021D:
14555 case FLASH_5761VENDOR_ATMEL_ADB041D:
14556 case FLASH_5761VENDOR_ATMEL_ADB081D:
14557 case FLASH_5761VENDOR_ATMEL_ADB161D:
14558 case FLASH_5761VENDOR_ATMEL_MDB021D:
14559 case FLASH_5761VENDOR_ATMEL_MDB041D:
14560 case FLASH_5761VENDOR_ATMEL_MDB081D:
14561 case FLASH_5761VENDOR_ATMEL_MDB161D:
14562 tp->nvram_jedecnum = JEDEC_ATMEL;
14563 tg3_flag_set(tp, NVRAM_BUFFERED);
14564 tg3_flag_set(tp, FLASH);
14565 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14566 tp->nvram_pagesize = 256;
14567 break;
14568 case FLASH_5761VENDOR_ST_A_M45PE20:
14569 case FLASH_5761VENDOR_ST_A_M45PE40:
14570 case FLASH_5761VENDOR_ST_A_M45PE80:
14571 case FLASH_5761VENDOR_ST_A_M45PE16:
14572 case FLASH_5761VENDOR_ST_M_M45PE20:
14573 case FLASH_5761VENDOR_ST_M_M45PE40:
14574 case FLASH_5761VENDOR_ST_M_M45PE80:
14575 case FLASH_5761VENDOR_ST_M_M45PE16:
14576 tp->nvram_jedecnum = JEDEC_ST;
14577 tg3_flag_set(tp, NVRAM_BUFFERED);
14578 tg3_flag_set(tp, FLASH);
14579 tp->nvram_pagesize = 256;
14580 break;
14581 }
14583 if (protect) {
14584 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14585 } else {
14586 switch (nvcfg1) {
14587 case FLASH_5761VENDOR_ATMEL_ADB161D:
14588 case FLASH_5761VENDOR_ATMEL_MDB161D:
14589 case FLASH_5761VENDOR_ST_A_M45PE16:
14590 case FLASH_5761VENDOR_ST_M_M45PE16:
14591 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14592 break;
14593 case FLASH_5761VENDOR_ATMEL_ADB081D:
14594 case FLASH_5761VENDOR_ATMEL_MDB081D:
14595 case FLASH_5761VENDOR_ST_A_M45PE80:
14596 case FLASH_5761VENDOR_ST_M_M45PE80:
14597 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14598 break;
14599 case FLASH_5761VENDOR_ATMEL_ADB041D:
14600 case FLASH_5761VENDOR_ATMEL_MDB041D:
14601 case FLASH_5761VENDOR_ST_A_M45PE40:
14602 case FLASH_5761VENDOR_ST_M_M45PE40:
14603 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14604 break;
14605 case FLASH_5761VENDOR_ATMEL_ADB021D:
14606 case FLASH_5761VENDOR_ATMEL_MDB021D:
14607 case FLASH_5761VENDOR_ST_A_M45PE20:
14608 case FLASH_5761VENDOR_ST_M_M45PE20:
14609 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14610 break;
14611 }
14612 }
14613 }
14615 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14617 tp->nvram_jedecnum = JEDEC_ATMEL;
14618 tg3_flag_set(tp, NVRAM_BUFFERED);
14619 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14622 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14626 nvcfg1 = tr32(NVRAM_CFG1);
14628 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14629 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14630 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14631 tp->nvram_jedecnum = JEDEC_ATMEL;
14632 tg3_flag_set(tp, NVRAM_BUFFERED);
14633 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14635 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14636 tw32(NVRAM_CFG1, nvcfg1);
14637 break;
14638 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14639 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14640 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14641 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14642 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14643 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14644 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14645 tp->nvram_jedecnum = JEDEC_ATMEL;
14646 tg3_flag_set(tp, NVRAM_BUFFERED);
14647 tg3_flag_set(tp, FLASH);
14649 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14650 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14651 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14652 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14653 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14654 break;
14655 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14656 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14657 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14658 break;
14659 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14660 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14661 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14662 break;
14663 }
14664 break;
14665 case FLASH_5752VENDOR_ST_M45PE10:
14666 case FLASH_5752VENDOR_ST_M45PE20:
14667 case FLASH_5752VENDOR_ST_M45PE40:
14668 tp->nvram_jedecnum = JEDEC_ST;
14669 tg3_flag_set(tp, NVRAM_BUFFERED);
14670 tg3_flag_set(tp, FLASH);
14672 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14673 case FLASH_5752VENDOR_ST_M45PE10:
14674 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14675 break;
14676 case FLASH_5752VENDOR_ST_M45PE20:
14677 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14678 break;
14679 case FLASH_5752VENDOR_ST_M45PE40:
14680 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14681 break;
14682 }
14683 break;
14684 default:
14685 tg3_flag_set(tp, NO_NVRAM);
14686 return;
14687 }
14689 tg3_nvram_get_pagesize(tp, nvcfg1);
14690 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14691 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14695 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14699 nvcfg1 = tr32(NVRAM_CFG1);
14701 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14702 case FLASH_5717VENDOR_ATMEL_EEPROM:
14703 case FLASH_5717VENDOR_MICRO_EEPROM:
14704 tp->nvram_jedecnum = JEDEC_ATMEL;
14705 tg3_flag_set(tp, NVRAM_BUFFERED);
14706 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14708 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14709 tw32(NVRAM_CFG1, nvcfg1);
14710 break;
14711 case FLASH_5717VENDOR_ATMEL_MDB011D:
14712 case FLASH_5717VENDOR_ATMEL_ADB011B:
14713 case FLASH_5717VENDOR_ATMEL_ADB011D:
14714 case FLASH_5717VENDOR_ATMEL_MDB021D:
14715 case FLASH_5717VENDOR_ATMEL_ADB021B:
14716 case FLASH_5717VENDOR_ATMEL_ADB021D:
14717 case FLASH_5717VENDOR_ATMEL_45USPT:
14718 tp->nvram_jedecnum = JEDEC_ATMEL;
14719 tg3_flag_set(tp, NVRAM_BUFFERED);
14720 tg3_flag_set(tp, FLASH);
14722 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14723 case FLASH_5717VENDOR_ATMEL_MDB021D:
14724 /* Detect size with tg3_nvram_get_size() */
14725 break;
14726 case FLASH_5717VENDOR_ATMEL_ADB021B:
14727 case FLASH_5717VENDOR_ATMEL_ADB021D:
14728 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14729 break;
14730 default:
14731 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14732 break;
14733 }
14734 break;
14735 case FLASH_5717VENDOR_ST_M_M25PE10:
14736 case FLASH_5717VENDOR_ST_A_M25PE10:
14737 case FLASH_5717VENDOR_ST_M_M45PE10:
14738 case FLASH_5717VENDOR_ST_A_M45PE10:
14739 case FLASH_5717VENDOR_ST_M_M25PE20:
14740 case FLASH_5717VENDOR_ST_A_M25PE20:
14741 case FLASH_5717VENDOR_ST_M_M45PE20:
14742 case FLASH_5717VENDOR_ST_A_M45PE20:
14743 case FLASH_5717VENDOR_ST_25USPT:
14744 case FLASH_5717VENDOR_ST_45USPT:
14745 tp->nvram_jedecnum = JEDEC_ST;
14746 tg3_flag_set(tp, NVRAM_BUFFERED);
14747 tg3_flag_set(tp, FLASH);
14749 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14750 case FLASH_5717VENDOR_ST_M_M25PE20:
14751 case FLASH_5717VENDOR_ST_M_M45PE20:
14752 /* Detect size with tg3_nvram_get_size() */
14753 break;
14754 case FLASH_5717VENDOR_ST_A_M25PE20:
14755 case FLASH_5717VENDOR_ST_A_M45PE20:
14756 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14757 break;
14758 default:
14759 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14760 break;
14761 }
14762 break;
14763 default:
14764 tg3_flag_set(tp, NO_NVRAM);
14765 return;
14766 }
14768 tg3_nvram_get_pagesize(tp, nvcfg1);
14769 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14770 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14773 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14775 u32 nvcfg1, nvmpinstrp;
14777 nvcfg1 = tr32(NVRAM_CFG1);
14778 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14780 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14781 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14782 tg3_flag_set(tp, NO_NVRAM);
14786 switch (nvmpinstrp) {
14787 case FLASH_5762_EEPROM_HD:
14788 nvmpinstrp = FLASH_5720_EEPROM_HD;
14789 break;
14790 case FLASH_5762_EEPROM_LD:
14791 nvmpinstrp = FLASH_5720_EEPROM_LD;
14792 break;
14793 case FLASH_5720VENDOR_M_ST_M45PE20:
14794 /* This pinstrap supports multiple sizes, so force it
14795 * to read the actual size from location 0xf0.
14797 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14798 break;
14799 }
14800 }
14802 switch (nvmpinstrp) {
14803 case FLASH_5720_EEPROM_HD:
14804 case FLASH_5720_EEPROM_LD:
14805 tp->nvram_jedecnum = JEDEC_ATMEL;
14806 tg3_flag_set(tp, NVRAM_BUFFERED);
14808 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14809 tw32(NVRAM_CFG1, nvcfg1);
14810 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14811 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14813 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14814 break;
14815 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14816 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14817 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14818 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14819 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14820 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14821 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14822 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14823 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14824 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14825 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14826 case FLASH_5720VENDOR_ATMEL_45USPT:
14827 tp->nvram_jedecnum = JEDEC_ATMEL;
14828 tg3_flag_set(tp, NVRAM_BUFFERED);
14829 tg3_flag_set(tp, FLASH);
14831 switch (nvmpinstrp) {
14832 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14833 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14834 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14835 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14836 break;
14837 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14838 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14839 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14840 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14841 break;
14842 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14843 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14844 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14845 break;
14846 default:
14847 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14848 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14849 break;
14850 }
14851 break;
14852 case FLASH_5720VENDOR_M_ST_M25PE10:
14853 case FLASH_5720VENDOR_M_ST_M45PE10:
14854 case FLASH_5720VENDOR_A_ST_M25PE10:
14855 case FLASH_5720VENDOR_A_ST_M45PE10:
14856 case FLASH_5720VENDOR_M_ST_M25PE20:
14857 case FLASH_5720VENDOR_M_ST_M45PE20:
14858 case FLASH_5720VENDOR_A_ST_M25PE20:
14859 case FLASH_5720VENDOR_A_ST_M45PE20:
14860 case FLASH_5720VENDOR_M_ST_M25PE40:
14861 case FLASH_5720VENDOR_M_ST_M45PE40:
14862 case FLASH_5720VENDOR_A_ST_M25PE40:
14863 case FLASH_5720VENDOR_A_ST_M45PE40:
14864 case FLASH_5720VENDOR_M_ST_M25PE80:
14865 case FLASH_5720VENDOR_M_ST_M45PE80:
14866 case FLASH_5720VENDOR_A_ST_M25PE80:
14867 case FLASH_5720VENDOR_A_ST_M45PE80:
14868 case FLASH_5720VENDOR_ST_25USPT:
14869 case FLASH_5720VENDOR_ST_45USPT:
14870 tp->nvram_jedecnum = JEDEC_ST;
14871 tg3_flag_set(tp, NVRAM_BUFFERED);
14872 tg3_flag_set(tp, FLASH);
14874 switch (nvmpinstrp) {
14875 case FLASH_5720VENDOR_M_ST_M25PE20:
14876 case FLASH_5720VENDOR_M_ST_M45PE20:
14877 case FLASH_5720VENDOR_A_ST_M25PE20:
14878 case FLASH_5720VENDOR_A_ST_M45PE20:
14879 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14880 break;
14881 case FLASH_5720VENDOR_M_ST_M25PE40:
14882 case FLASH_5720VENDOR_M_ST_M45PE40:
14883 case FLASH_5720VENDOR_A_ST_M25PE40:
14884 case FLASH_5720VENDOR_A_ST_M45PE40:
14885 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14886 break;
14887 case FLASH_5720VENDOR_M_ST_M25PE80:
14888 case FLASH_5720VENDOR_M_ST_M45PE80:
14889 case FLASH_5720VENDOR_A_ST_M25PE80:
14890 case FLASH_5720VENDOR_A_ST_M45PE80:
14891 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14892 break;
14893 default:
14894 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14895 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14896 break;
14897 }
14898 break;
14899 default:
14900 tg3_flag_set(tp, NO_NVRAM);
14901 return;
14902 }
14904 tg3_nvram_get_pagesize(tp, nvcfg1);
14905 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14906 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14908 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14909 u32 val;
14911 if (tg3_nvram_read(tp, 0, &val))
14912 return;
14914 if (val != TG3_EEPROM_MAGIC &&
14915 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14916 tg3_flag_set(tp, NO_NVRAM);
14917 }
14918 }
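/* tg3_nvram_init() below is the single dispatch point that picks one of
 * the per-ASIC decoders above: everything from the 5752 onward has its
 * own variant, while older parts go through tg3_get_nvram_info().
 */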
14920 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14921 static void tg3_nvram_init(struct tg3 *tp)
14923 if (tg3_flag(tp, IS_SSB_CORE)) {
14924 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14925 tg3_flag_clear(tp, NVRAM);
14926 tg3_flag_clear(tp, NVRAM_BUFFERED);
14927 tg3_flag_set(tp, NO_NVRAM);
14928 return;
14929 }
14931 tw32_f(GRC_EEPROM_ADDR,
14932 (EEPROM_ADDR_FSM_RESET |
14933 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14934 EEPROM_ADDR_CLKPERD_SHIFT)));
14938 /* Enable seeprom accesses. */
14939 tw32_f(GRC_LOCAL_CTRL,
14940 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14943 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14944 tg3_asic_rev(tp) != ASIC_REV_5701) {
14945 tg3_flag_set(tp, NVRAM);
14947 if (tg3_nvram_lock(tp)) {
14948 netdev_warn(tp->dev,
14949 "Cannot get nvram lock, %s failed\n",
14953 tg3_enable_nvram_access(tp);
14955 tp->nvram_size = 0;
14957 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14958 tg3_get_5752_nvram_info(tp);
14959 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14960 tg3_get_5755_nvram_info(tp);
14961 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14962 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14963 tg3_asic_rev(tp) == ASIC_REV_5785)
14964 tg3_get_5787_nvram_info(tp);
14965 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14966 tg3_get_5761_nvram_info(tp);
14967 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14968 tg3_get_5906_nvram_info(tp);
14969 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14970 tg3_flag(tp, 57765_CLASS))
14971 tg3_get_57780_nvram_info(tp);
14972 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14973 tg3_asic_rev(tp) == ASIC_REV_5719)
14974 tg3_get_5717_nvram_info(tp);
14975 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14976 tg3_asic_rev(tp) == ASIC_REV_5762)
14977 tg3_get_5720_nvram_info(tp);
14979 tg3_get_nvram_info(tp);
14981 if (tp->nvram_size == 0)
14982 tg3_get_nvram_size(tp);
14984 tg3_disable_nvram_access(tp);
14985 tg3_nvram_unlock(tp);
14987 } else {
14988 tg3_flag_clear(tp, NVRAM);
14989 tg3_flag_clear(tp, NVRAM_BUFFERED);
14991 tg3_get_eeprom_size(tp);
14992 }
14993 }
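/* The table below maps PCI subsystem IDs of known OEM boards to the PHY
 * actually soldered onto them, for boards whose EEPROM carries no usable
 * PHY ID; a zero phy_id entry denotes a serdes board with no copper PHY.
 * Roughly, the consumer (tg3_phy_probe() further down) does:
 *
 *	struct subsys_tbl_ent *p = tg3_lookup_by_subsys(tp);
 *	if (p)
 *		tp->phy_id = p->phy_id;
 *
 * (an illustrative sketch of the fallback path, not additional logic).
 */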
14995 struct subsys_tbl_ent {
14996 u16 subsys_vendor, subsys_devid;
14997 u32 phy_id;
14998 };
15000 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15001 /* Broadcom boards. */
15002 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15003 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15004 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15005 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15006 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15007 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15008 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15009 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15010 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15011 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15012 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15013 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15014 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15015 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15016 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15017 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15018 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15019 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15020 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15021 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15022 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15023 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15025 /* 3com boards. */
15026 { TG3PCI_SUBVENDOR_ID_3COM,
15027 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15028 { TG3PCI_SUBVENDOR_ID_3COM,
15029 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15030 { TG3PCI_SUBVENDOR_ID_3COM,
15031 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15032 { TG3PCI_SUBVENDOR_ID_3COM,
15033 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15034 { TG3PCI_SUBVENDOR_ID_3COM,
15035 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15037 /* DELL boards. */
15038 { TG3PCI_SUBVENDOR_ID_DELL,
15039 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15040 { TG3PCI_SUBVENDOR_ID_DELL,
15041 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15042 { TG3PCI_SUBVENDOR_ID_DELL,
15043 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15044 { TG3PCI_SUBVENDOR_ID_DELL,
15045 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15047 /* Compaq boards. */
15048 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15049 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15050 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15051 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15052 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15053 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15054 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15055 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15056 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15057 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15059 /* IBM boards. */
15060 { TG3PCI_SUBVENDOR_ID_IBM,
15061 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15062 };
15064 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15068 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15069 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15070 tp->pdev->subsystem_vendor) &&
15071 (subsys_id_to_phy_id[i].subsys_devid ==
15072 tp->pdev->subsystem_device))
15073 return &subsys_id_to_phy_id[i];
15074 }
15076 return NULL;
15077 }
15078 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15082 tp->phy_id = TG3_PHY_ID_INVALID;
15083 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15085 /* Assume an onboard device and WOL capable by default. */
15086 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15087 tg3_flag_set(tp, WOL_CAP);
15089 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15090 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15091 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15092 tg3_flag_set(tp, IS_NIC);
15094 val = tr32(VCPU_CFGSHDW);
15095 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15096 tg3_flag_set(tp, ASPM_WORKAROUND);
15097 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15098 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15099 tg3_flag_set(tp, WOL_ENABLE);
15100 device_set_wakeup_enable(&tp->pdev->dev, true);
15101 }
15102 goto done;
15103 }
15105 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15106 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15107 u32 nic_cfg, led_cfg;
15108 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15109 u32 nic_phy_id, ver, eeprom_phy_id;
15110 int eeprom_phy_serdes = 0;
15112 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15113 tp->nic_sram_data_cfg = nic_cfg;
15115 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15116 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15117 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15118 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15119 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15120 (ver > 0) && (ver < 0x100))
15121 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15123 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15124 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15126 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15127 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15128 tg3_asic_rev(tp) == ASIC_REV_5720)
15129 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15131 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15132 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15133 eeprom_phy_serdes = 1;
15135 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15136 if (nic_phy_id != 0) {
15137 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15138 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15140 eeprom_phy_id = (id1 >> 16) << 10;
15141 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15142 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15143 } else
15144 eeprom_phy_id = 0;
15146 tp->phy_id = eeprom_phy_id;
15147 if (eeprom_phy_serdes) {
15148 if (!tg3_flag(tp, 5705_PLUS))
15149 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15151 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15154 if (tg3_flag(tp, 5750_PLUS))
15155 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15156 SHASTA_EXT_LED_MODE_MASK);
15157 else
15158 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15160 switch (led_cfg) {
15161 default:
15162 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15163 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15164 break;
15166 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15167 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15168 break;
15170 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15171 tp->led_ctrl = LED_CTRL_MODE_MAC;
15173 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15174 * read on some older 5700/5701 bootcode.
15176 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15177 tg3_asic_rev(tp) == ASIC_REV_5701)
15178 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15179 break;
15182 case SHASTA_EXT_LED_SHARED:
15183 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15184 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15185 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15186 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15187 LED_CTRL_MODE_PHY_2);
15189 if (tg3_flag(tp, 5717_PLUS) ||
15190 tg3_asic_rev(tp) == ASIC_REV_5762)
15191 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15192 LED_CTRL_BLINK_RATE_MASK;
15193 break;
15196 case SHASTA_EXT_LED_MAC:
15197 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15198 break;
15200 case SHASTA_EXT_LED_COMBO:
15201 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15202 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15203 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15204 LED_CTRL_MODE_PHY_2);
15205 break;
15206 }
15209 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15210 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15211 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15212 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15214 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15215 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15217 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15218 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15219 if ((tp->pdev->subsystem_vendor ==
15220 PCI_VENDOR_ID_ARIMA) &&
15221 (tp->pdev->subsystem_device == 0x205a ||
15222 tp->pdev->subsystem_device == 0x2063))
15223 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15225 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15226 tg3_flag_set(tp, IS_NIC);
15229 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15230 tg3_flag_set(tp, ENABLE_ASF);
15231 if (tg3_flag(tp, 5750_PLUS))
15232 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15235 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15236 tg3_flag(tp, 5750_PLUS))
15237 tg3_flag_set(tp, ENABLE_APE);
15239 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15240 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15241 tg3_flag_clear(tp, WOL_CAP);
15243 if (tg3_flag(tp, WOL_CAP) &&
15244 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15245 tg3_flag_set(tp, WOL_ENABLE);
15246 device_set_wakeup_enable(&tp->pdev->dev, true);
15249 if (cfg2 & (1 << 17))
15250 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15252 /* serdes signal pre-emphasis in register 0x590 set by */
15253 /* bootcode if bit 18 is set */
15254 if (cfg2 & (1 << 18))
15255 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15257 if ((tg3_flag(tp, 57765_PLUS) ||
15258 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15259 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15260 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15261 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15263 if (tg3_flag(tp, PCI_EXPRESS)) {
15264 u32 cfg3;
15266 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15267 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15268 !tg3_flag(tp, 57765_PLUS) &&
15269 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15270 tg3_flag_set(tp, ASPM_WORKAROUND);
15271 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15272 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15273 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15274 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15277 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15278 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15279 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15280 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15281 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15282 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15284 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15285 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15286 }
15287 done:
15288 if (tg3_flag(tp, WOL_CAP))
15289 device_set_wakeup_enable(&tp->pdev->dev,
15290 tg3_flag(tp, WOL_ENABLE));
15291 else
15292 device_set_wakeup_capable(&tp->pdev->dev, false);
15295 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15298 u32 val2, off = offset * 8;
15300 err = tg3_nvram_lock(tp);
15301 if (err)
15302 return err;
15304 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15305 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15306 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15307 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15310 for (i = 0; i < 100; i++) {
15311 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15312 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15313 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15319 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15321 tg3_nvram_unlock(tp);
15322 if (val2 & APE_OTP_STATUS_CMD_DONE)
15323 return 0;
15325 return -EBUSY;
15326 }
15328 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15333 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15334 tw32(OTP_CTRL, cmd);
15336 /* Wait for up to 1 ms for command to execute. */
15337 for (i = 0; i < 100; i++) {
15338 val = tr32(OTP_STATUS);
15339 if (val & OTP_STATUS_CMD_DONE)
15340 break;
15341 udelay(10);
15342 }
15344 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15347 /* Read the gphy configuration from the OTP region of the chip. The gphy
15348 * configuration is a 32-bit value that straddles the alignment boundary.
15349 * We do two 32-bit reads and then shift and merge the results.
15351 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15353 u32 bhalf_otp, thalf_otp;
15355 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15357 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15358 return 0;
15360 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15362 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15363 return 0;
15365 thalf_otp = tr32(OTP_READ_DATA);
15367 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15369 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15370 return 0;
15372 bhalf_otp = tr32(OTP_READ_DATA);
15374 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15375 }
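/* Worked example of the merge above: with thalf_otp = 0xaaaa1234 and
 * bhalf_otp = 0x5678bbbb the result is
 * ((0xaaaa1234 & 0x0000ffff) << 16) | (0x5678bbbb >> 16) = 0x12345678,
 * i.e. the 32-bit gphy config that straddles the two OTP words.
 */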
15377 static void tg3_phy_init_link_config(struct tg3 *tp)
15379 u32 adv = ADVERTISED_Autoneg;
15381 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15382 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15383 adv |= ADVERTISED_1000baseT_Half;
15384 adv |= ADVERTISED_1000baseT_Full;
15387 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15388 adv |= ADVERTISED_100baseT_Half |
15389 ADVERTISED_100baseT_Full |
15390 ADVERTISED_10baseT_Half |
15391 ADVERTISED_10baseT_Full |
15392 ADVERTISED_TP;
15393 else
15394 adv |= ADVERTISED_FIBRE;
15396 tp->link_config.advertising = adv;
15397 tp->link_config.speed = SPEED_UNKNOWN;
15398 tp->link_config.duplex = DUPLEX_UNKNOWN;
15399 tp->link_config.autoneg = AUTONEG_ENABLE;
15400 tp->link_config.active_speed = SPEED_UNKNOWN;
15401 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15402 tp->link_config.rmt_adv = 0;
15403 tp->old_link = -1;
15404 }
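/* tg3_phy_probe() below establishes the PHY identity in decreasing order
 * of trust: the ID read over MII from the PHY itself (skipped when ASF or
 * APE firmware owns the PHY), then the ID cached from the EEPROM by
 * tg3_get_eeprom_hw_cfg(), and finally the hard-coded subsystem-ID table.
 */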
15406 static int tg3_phy_probe(struct tg3 *tp)
15408 u32 hw_phy_id_1, hw_phy_id_2;
15409 u32 hw_phy_id, hw_phy_id_masked;
15412 /* flow control autonegotiation is default behavior */
15413 tg3_flag_set(tp, PAUSE_AUTONEG);
15414 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15416 if (tg3_flag(tp, ENABLE_APE)) {
15417 switch (tp->pci_fn) {
15418 case 0:
15419 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15420 break;
15421 case 1:
15422 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15423 break;
15424 case 2:
15425 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15426 break;
15427 case 3:
15428 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15429 break;
15430 }
15431 }
15433 if (!tg3_flag(tp, ENABLE_ASF) &&
15434 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15435 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15436 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15437 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15439 if (tg3_flag(tp, USE_PHYLIB))
15440 return tg3_phy_init(tp);
15442 /* Reading the PHY ID register can conflict with ASF
15443 * firmware access to the PHY hardware.
15444 */
15446 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15447 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15448 } else {
15449 /* Now read the physical PHY_ID from the chip and verify
15450 * that it is sane. If it doesn't look good, we fall back
15451 * to either the hard-coded table based PHY_ID and failing
15452 * that the value found in the eeprom area.
15453 */
15454 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15455 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15457 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15458 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15459 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15461 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15464 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15465 tp->phy_id = hw_phy_id;
15466 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15467 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15468 else
15469 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15470 } else {
15471 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15472 /* Do nothing, phy ID already set up in
15473 * tg3_get_eeprom_hw_cfg().
15474 */
15475 } else {
15476 struct subsys_tbl_ent *p;
15478 /* No eeprom signature? Try the hardcoded
15479 * subsys device table.
15481 p = tg3_lookup_by_subsys(tp);
15482 if (p) {
15483 tp->phy_id = p->phy_id;
15484 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15485 /* For now we saw the IDs 0xbc050cd0,
15486 * 0xbc050f80 and 0xbc050c30 on devices
15487 * connected to a BCM4785, and there are
15488 * probably more. Just assume that the phy is
15489 * supported when it is connected to an SSB core
15490 * for now.
15491 */
15492 return -ENODEV;
15493 }
15494 }
15495 if (!tp->phy_id ||
15496 tp->phy_id == TG3_PHY_ID_BCM8002)
15497 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15501 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15502 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15503 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15504 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15505 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15506 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15507 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15508 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15509 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15510 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15512 tp->eee.supported = SUPPORTED_100baseT_Full |
15513 SUPPORTED_1000baseT_Full;
15514 tp->eee.advertised = ADVERTISED_100baseT_Full |
15515 ADVERTISED_1000baseT_Full;
15516 tp->eee.eee_enabled = 1;
15517 tp->eee.tx_lpi_enabled = 1;
15518 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15521 tg3_phy_init_link_config(tp);
15523 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15524 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15525 !tg3_flag(tp, ENABLE_APE) &&
15526 !tg3_flag(tp, ENABLE_ASF)) {
15527 u32 bmsr, dummy;
15529 tg3_readphy(tp, MII_BMSR, &bmsr);
15530 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15531 (bmsr & BMSR_LSTATUS))
15532 goto skip_phy_reset;
15534 err = tg3_phy_reset(tp);
15535 if (err)
15536 return err;
15538 tg3_phy_set_wirespeed(tp);
15540 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15541 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15542 tp->link_config.flowctrl);
15544 tg3_writephy(tp, MII_BMCR,
15545 BMCR_ANENABLE | BMCR_ANRESTART);
15546 }
15547 }
15549 skip_phy_reset:
15550 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15551 err = tg3_init_5401phy_dsp(tp);
15552 if (err)
15553 return err;
15555 err = tg3_init_5401phy_dsp(tp);
15556 }
15558 return err;
15559 }
15561 static void tg3_read_vpd(struct tg3 *tp)
15563 u8 *vpd_data;
15564 unsigned int block_end, rosize, len;
15565 u32 vpdlen;
15566 int j, i;
15568 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15569 if (!vpd_data)
15570 goto out_no_vpd;
15572 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15574 goto out_not_found;
15576 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15577 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15578 i += PCI_VPD_LRDT_TAG_SIZE;
15580 if (block_end > vpdlen)
15581 goto out_not_found;
15583 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15584 PCI_VPD_RO_KEYWORD_MFR_ID);
15585 if (j > 0) {
15586 len = pci_vpd_info_field_size(&vpd_data[j]);
15588 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15589 if (j + len > block_end || len != 4 ||
15590 memcmp(&vpd_data[j], "1028", 4))
15591 goto partno;
15593 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15594 PCI_VPD_RO_KEYWORD_VENDOR0);
15595 if (j < 0)
15596 goto partno;
15598 len = pci_vpd_info_field_size(&vpd_data[j]);
15600 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15601 if (j + len > block_end)
15602 goto partno;
15604 if (len >= sizeof(tp->fw_ver))
15605 len = sizeof(tp->fw_ver) - 1;
15606 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15607 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15608 &vpd_data[j]);
15609 }
15611 partno:
15612 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15613 PCI_VPD_RO_KEYWORD_PARTNO);
15615 goto out_not_found;
15617 len = pci_vpd_info_field_size(&vpd_data[i]);
15619 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15620 if (len > TG3_BPN_SIZE ||
15621 (len + i) > vpdlen)
15622 goto out_not_found;
15624 memcpy(tp->board_part_number, &vpd_data[i], len);
15626 out_not_found:
15627 kfree(vpd_data);
15628 if (tp->board_part_number[0])
15629 return;
15631 out_no_vpd:
15632 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15633 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15634 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15635 strcpy(tp->board_part_number, "BCM5717");
15636 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15637 strcpy(tp->board_part_number, "BCM5718");
15640 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15641 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15642 strcpy(tp->board_part_number, "BCM57780");
15643 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15644 strcpy(tp->board_part_number, "BCM57760");
15645 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15646 strcpy(tp->board_part_number, "BCM57790");
15647 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15648 strcpy(tp->board_part_number, "BCM57788");
15651 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15652 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15653 strcpy(tp->board_part_number, "BCM57761");
15654 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15655 strcpy(tp->board_part_number, "BCM57765");
15656 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15657 strcpy(tp->board_part_number, "BCM57781");
15658 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15659 strcpy(tp->board_part_number, "BCM57785");
15660 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15661 strcpy(tp->board_part_number, "BCM57791");
15662 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15663 strcpy(tp->board_part_number, "BCM57795");
15666 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15667 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15668 strcpy(tp->board_part_number, "BCM57762");
15669 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15670 strcpy(tp->board_part_number, "BCM57766");
15671 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15672 strcpy(tp->board_part_number, "BCM57782");
15673 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15674 strcpy(tp->board_part_number, "BCM57786");
15677 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15678 strcpy(tp->board_part_number, "BCM95906");
15681 strcpy(tp->board_part_number, "none");
15685 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15689 if (tg3_nvram_read(tp, offset, &val) ||
15690 (val & 0xfc000000) != 0x0c000000 ||
15691 tg3_nvram_read(tp, offset + 4, &val) ||
15692 (val & 0xe0000000) != 0)
15693 return 0;
15695 return 1;
15696 }
15698 static void tg3_read_bc_ver(struct tg3 *tp)
15700 u32 val, offset, start, ver_offset;
15702 bool newver = false;
15704 if (tg3_nvram_read(tp, 0xc, &offset) ||
15705 tg3_nvram_read(tp, 0x4, &start))
15706 return;
15708 offset = tg3_nvram_logical_addr(tp, offset);
15710 if (tg3_nvram_read(tp, offset, &val))
15711 return;
15713 if ((val & 0xfc000000) == 0x0c000000) {
15714 if (tg3_nvram_read(tp, offset + 4, &val))
15715 return;
15717 if (val == 0)
15718 newver = true;
15719 }
15721 dst_off = strlen(tp->fw_ver);
15723 if (newver) {
15724 if (TG3_VER_SIZE - dst_off < 16 ||
15725 tg3_nvram_read(tp, offset + 8, &ver_offset))
15726 return;
15728 offset = offset + ver_offset - start;
15729 for (i = 0; i < 16; i += 4) {
15731 if (tg3_nvram_read_be32(tp, offset + i, &v))
15732 return;
15734 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15735 }
15736 } else {
15737 u32 major, minor;
15739 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15742 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15743 TG3_NVM_BCVER_MAJSFT;
15744 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15745 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15746 "v%d.%02d", major, minor);
15750 static void tg3_read_hwsb_ver(struct tg3 *tp)
15752 u32 val, major, minor;
15754 /* Use native endian representation */
15755 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15756 return;
15758 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15759 TG3_NVM_HWSB_CFG1_MAJSFT;
15760 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15761 TG3_NVM_HWSB_CFG1_MINSFT;
15763 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15766 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15768 u32 offset, major, minor, build;
15770 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15772 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15773 return;
15775 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15776 case TG3_EEPROM_SB_REVISION_0:
15777 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15778 break;
15779 case TG3_EEPROM_SB_REVISION_2:
15780 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15781 break;
15782 case TG3_EEPROM_SB_REVISION_3:
15783 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15784 break;
15785 case TG3_EEPROM_SB_REVISION_4:
15786 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15787 break;
15788 case TG3_EEPROM_SB_REVISION_5:
15789 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15790 break;
15791 case TG3_EEPROM_SB_REVISION_6:
15792 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15793 break;
15794 default:
15795 return;
15796 }
15798 if (tg3_nvram_read(tp, offset, &val))
15799 return;
15801 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15802 TG3_EEPROM_SB_EDH_BLD_SHFT;
15803 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15804 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15805 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15807 if (minor > 99 || build > 26)
15808 return;
15810 offset = strlen(tp->fw_ver);
15811 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15812 " v%d.%02d", major, minor);
15815 offset = strlen(tp->fw_ver);
15816 if (offset < TG3_VER_SIZE - 1)
15817 tp->fw_ver[offset] = 'a' + build - 1;
15818 }
15819 }
15821 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15823 u32 val, offset, start;
15826 for (offset = TG3_NVM_DIR_START;
15827 offset < TG3_NVM_DIR_END;
15828 offset += TG3_NVM_DIRENT_SIZE) {
15829 if (tg3_nvram_read(tp, offset, &val))
15830 return;
15832 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15833 break;
15834 }
15836 if (offset == TG3_NVM_DIR_END)
15837 return;
15839 if (!tg3_flag(tp, 5705_PLUS))
15840 start = 0x08000000;
15841 else if (tg3_nvram_read(tp, offset - 4, &start))
15842 return;
15844 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15845 !tg3_fw_img_is_valid(tp, offset) ||
15846 tg3_nvram_read(tp, offset + 8, &val))
15847 return;
15849 offset += val - start;
15851 vlen = strlen(tp->fw_ver);
15853 tp->fw_ver[vlen++] = ',';
15854 tp->fw_ver[vlen++] = ' ';
15856 for (i = 0; i < 4; i++) {
15858 if (tg3_nvram_read_be32(tp, offset, &v))
15859 return;
15861 offset += sizeof(v);
15863 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15864 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15865 break;
15866 }
15868 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15869 vlen += sizeof(v);
15870 }
15871 }
15873 static void tg3_probe_ncsi(struct tg3 *tp)
15877 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15878 if (apedata != APE_SEG_SIG_MAGIC)
15879 return;
15881 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15882 if (!(apedata & APE_FW_STATUS_READY))
15883 return;
15885 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15886 tg3_flag_set(tp, APE_HAS_NCSI);
15889 static void tg3_read_dash_ver(struct tg3 *tp)
15895 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15897 if (tg3_flag(tp, APE_HAS_NCSI))
15898 fwtype = "NCSI";
15899 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15900 fwtype = "SMASH";
15901 else
15902 fwtype = "DASH";
15904 vlen = strlen(tp->fw_ver);
15906 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15907 fwtype,
15908 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15909 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15910 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15911 (apedata & APE_FW_VERSION_BLDMSK));
15914 static void tg3_read_otp_ver(struct tg3 *tp)
15918 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15919 return;
15921 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15922 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15923 TG3_OTP_MAGIC0_VALID(val)) {
15924 u64 val64 = (u64) val << 32 | val2;
15925 u32 ver = 0;
15926 int i, vlen;
15928 for (i = 0; i < 7; i++) {
15929 if ((val64 & 0xff) == 0)
15930 break;
15931 ver = val64 & 0xff;
15932 val64 >>= 8;
15933 }
15934 vlen = strlen(tp->fw_ver);
15935 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15939 static void tg3_read_fw_ver(struct tg3 *tp)
15942 bool vpd_vers = false;
15944 if (tp->fw_ver[0] != 0)
15945 vpd_vers = true;
15947 if (tg3_flag(tp, NO_NVRAM)) {
15948 strcat(tp->fw_ver, "sb");
15949 tg3_read_otp_ver(tp);
15950 return;
15951 }
15953 if (tg3_nvram_read(tp, 0, &val))
15954 return;
15956 if (val == TG3_EEPROM_MAGIC)
15957 tg3_read_bc_ver(tp);
15958 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15959 tg3_read_sb_ver(tp, val);
15960 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15961 tg3_read_hwsb_ver(tp);
15963 if (tg3_flag(tp, ENABLE_ASF)) {
15964 if (tg3_flag(tp, ENABLE_APE)) {
15965 tg3_probe_ncsi(tp);
15967 tg3_read_dash_ver(tp);
15968 } else if (!vpd_vers) {
15969 tg3_read_mgmtfw_ver(tp);
15970 }
15971 }
15973 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15976 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15978 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15979 return TG3_RX_RET_MAX_SIZE_5717;
15980 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15981 return TG3_RX_RET_MAX_SIZE_5700;
15982 else
15983 return TG3_RX_RET_MAX_SIZE_5705;
15984 }
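/* The PCI IDs below name host bridges known to reorder posted writes;
 * when one of them is found on the bus the driver must flush register
 * writes more aggressively (see the write-reorder handling elsewhere in
 * this file).
 */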
15986 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15987 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15988 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15989 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15990 { },
15991 };
15993 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15995 struct pci_dev *peer;
15996 unsigned int func, devnr = tp->pdev->devfn & ~7;
15998 for (func = 0; func < 8; func++) {
15999 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16000 if (peer && peer != tp->pdev)
16001 break;
16002 }
16004 /* 5704 can be configured in single-port mode, set peer to
16005 * tp->pdev in that case.
16006 */
16007 if (!peer) {
16008 peer = tp->pdev;
16009 return peer;
16010 }
16012 /*
16013 * We don't need to keep the refcount elevated; there's no way
16014 * to remove one half of this device without removing the other
16021 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16023 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16024 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16025 u32 reg;
16027 /* All devices that use the alternate
16028 * ASIC REV location have a CPMU.
16030 tg3_flag_set(tp, CPMU_PRESENT);
16032 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16034 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16042 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16043 reg = TG3PCI_GEN2_PRODID_ASICREV;
16044 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16045 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16046 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16047 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16048 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16049 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16050 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16051 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16053 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16054 reg = TG3PCI_GEN15_PRODID_ASICREV;
16056 reg = TG3PCI_PRODID_ASICREV;
16058 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16061 /* Wrong chip ID in 5752 A0. This code can be removed later
16062 * as A0 is not in production.
16064 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16065 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16067 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16068 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16070 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16071 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16072 tg3_asic_rev(tp) == ASIC_REV_5720)
16073 tg3_flag_set(tp, 5717_PLUS);
16075 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16076 tg3_asic_rev(tp) == ASIC_REV_57766)
16077 tg3_flag_set(tp, 57765_CLASS);
16079 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16080 tg3_asic_rev(tp) == ASIC_REV_5762)
16081 tg3_flag_set(tp, 57765_PLUS);
16083 /* Intentionally exclude ASIC_REV_5906 */
16084 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16085 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16086 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16087 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16088 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16089 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16090 tg3_flag(tp, 57765_PLUS))
16091 tg3_flag_set(tp, 5755_PLUS);
16093 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16094 tg3_asic_rev(tp) == ASIC_REV_5714)
16095 tg3_flag_set(tp, 5780_CLASS);
16097 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16098 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16099 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16100 tg3_flag(tp, 5755_PLUS) ||
16101 tg3_flag(tp, 5780_CLASS))
16102 tg3_flag_set(tp, 5750_PLUS);
16104 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16105 tg3_flag(tp, 5750_PLUS))
16106 tg3_flag_set(tp, 5705_PLUS);
16107 }
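/* The flag cascade above builds a strict superset chain over device
 * generations, roughly:
 *
 *	5705_PLUS > 5750_PLUS > 5755_PLUS > 57765_PLUS > 5717_PLUS
 *
 * so later code can test the weakest property it needs instead of
 * enumerating ASIC revs; e.g. tg3_flag(tp, 5750_PLUS) is true for
 * every 5755-plus chip as well.
 */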
16109 static bool tg3_10_100_only_device(struct tg3 *tp,
16110 const struct pci_device_id *ent)
16112 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16114 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16115 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16116 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16117 return true;
16119 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16120 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16121 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16122 return true;
16123 } else {
16124 return true;
16125 }
16126 }
16128 return false;
16129 }
16131 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16133 u32 misc_ctrl_reg;
16134 u32 pci_state_reg, grc_misc_cfg;
16136 u16 pci_cmd;
16139 /* Force memory write invalidate off. If we leave it on,
16140 * then on 5700_BX chips we have to enable a workaround.
16141 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16142 * to match the cacheline size. The Broadcom driver has this
16143 * workaround but turns MWI off at all times, so it never uses
16144 * it. This seems to suggest that the workaround is insufficient.
16145 */
16146 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16147 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16148 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16150 /* Important! -- Make sure register accesses are byteswapped
16151 * correctly. Also, for those chips that require it, make
16152 * sure that indirect register accesses are enabled before
16153 * the first operation.
16155 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16156 &misc_ctrl_reg);
16157 tp->misc_host_ctrl |= (misc_ctrl_reg &
16158 MISC_HOST_CTRL_CHIPREV);
16159 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16160 tp->misc_host_ctrl);
16162 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16164 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16165 * we need to disable memory and use config. cycles
16166 * only to access all registers. The 5702/03 chips
16167 * can mistakenly decode the special cycles from the
16168 * ICH chipsets as memory write cycles, causing corruption
16169 * of register and memory space. Only certain ICH bridges
16170 * will drive special cycles with non-zero data during the
16171 * address phase which can fall within the 5703's address
16172 * range. This is not an ICH bug as the PCI spec allows
16173 * non-zero address during special cycles. However, only
16174 * these ICH bridges are known to drive non-zero addresses
16175 * during special cycles.
16177 * Since special cycles do not cross PCI bridges, we only
16178 * enable this workaround if the 5703 is on the secondary
16179 * bus of these ICH bridges.
16181 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16182 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16183 static struct tg3_dev_id {
16187 } ich_chipsets[] = {
16188 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16190 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16192 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16194 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16198 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16199 struct pci_dev *bridge = NULL;
16201 while (pci_id->vendor != 0) {
16202 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16208 if (pci_id->rev != PCI_ANY_ID) {
16209 if (bridge->revision > pci_id->rev)
16212 if (bridge->subordinate &&
16213 (bridge->subordinate->number ==
16214 tp->pdev->bus->number)) {
16215 tg3_flag_set(tp, ICH_WORKAROUND);
16216 pci_dev_put(bridge);
16222 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16223 static struct tg3_dev_id {
16226 } bridge_chipsets[] = {
16227 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16228 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16231 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16232 struct pci_dev *bridge = NULL;
16234 while (pci_id->vendor != 0) {
16235 bridge = pci_get_device(pci_id->vendor,
16242 if (bridge->subordinate &&
16243 (bridge->subordinate->number <=
16244 tp->pdev->bus->number) &&
16245 (bridge->subordinate->busn_res.end >=
16246 tp->pdev->bus->number)) {
16247 tg3_flag_set(tp, 5701_DMA_BUG);
16248 pci_dev_put(bridge);
16254 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16255 * DMA addresses > 40-bit. This bridge may have other additional
16256 * 57xx devices behind it in some 4-port NIC designs for example.
16257 * Any tg3 device found behind the bridge will also need the 40-bit
16260 if (tg3_flag(tp, 5780_CLASS)) {
16261 tg3_flag_set(tp, 40BIT_DMA_BUG);
16262 tp->msi_cap = tp->pdev->msi_cap;
16264 struct pci_dev *bridge = NULL;
16267 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16268 PCI_DEVICE_ID_SERVERWORKS_EPB,
16270 if (bridge && bridge->subordinate &&
16271 (bridge->subordinate->number <=
16272 tp->pdev->bus->number) &&
16273 (bridge->subordinate->busn_res.end >=
16274 tp->pdev->bus->number)) {
16275 tg3_flag_set(tp, 40BIT_DMA_BUG);
16276 pci_dev_put(bridge);
16282 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16283 tg3_asic_rev(tp) == ASIC_REV_5714)
16284 tp->pdev_peer = tg3_find_peer(tp);
16286 /* Determine TSO capabilities */
16287 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16288 ; /* Do nothing. HW bug. */
16289 else if (tg3_flag(tp, 57765_PLUS))
16290 tg3_flag_set(tp, HW_TSO_3);
16291 else if (tg3_flag(tp, 5755_PLUS) ||
16292 tg3_asic_rev(tp) == ASIC_REV_5906)
16293 tg3_flag_set(tp, HW_TSO_2);
16294 else if (tg3_flag(tp, 5750_PLUS)) {
16295 tg3_flag_set(tp, HW_TSO_1);
16296 tg3_flag_set(tp, TSO_BUG);
16297 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16298 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16299 tg3_flag_clear(tp, TSO_BUG);
16300 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16301 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16302 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16303 tg3_flag_set(tp, FW_TSO);
16304 tg3_flag_set(tp, TSO_BUG);
16305 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16306 tp->fw_needed = FIRMWARE_TG3TSO5;
16308 tp->fw_needed = FIRMWARE_TG3TSO;
16311 /* Selectively allow TSO based on operating conditions */
16312 if (tg3_flag(tp, HW_TSO_1) ||
16313 tg3_flag(tp, HW_TSO_2) ||
16314 tg3_flag(tp, HW_TSO_3) ||
16315 tg3_flag(tp, FW_TSO)) {
16316 /* For firmware TSO, assume ASF is disabled.
16317 * We'll disable TSO later if we discover ASF
16318 * is enabled in tg3_get_eeprom_hw_cfg().
16320 tg3_flag_set(tp, TSO_CAPABLE);
16322 tg3_flag_clear(tp, TSO_CAPABLE);
16323 tg3_flag_clear(tp, TSO_BUG);
16324 tp->fw_needed = NULL;
16327 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16328 tp->fw_needed = FIRMWARE_TG3;
16330 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16331 tp->fw_needed = FIRMWARE_TG357766;
16335 if (tg3_flag(tp, 5750_PLUS)) {
16336 tg3_flag_set(tp, SUPPORT_MSI);
16337 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16338 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16339 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16340 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16341 tp->pdev_peer == tp->pdev))
16342 tg3_flag_clear(tp, SUPPORT_MSI);
16344 if (tg3_flag(tp, 5755_PLUS) ||
16345 tg3_asic_rev(tp) == ASIC_REV_5906) {
16346 tg3_flag_set(tp, 1SHOT_MSI);
16349 if (tg3_flag(tp, 57765_PLUS)) {
16350 tg3_flag_set(tp, SUPPORT_MSIX);
16351 tp->irq_max = TG3_IRQ_MAX_VECS;
16357 if (tp->irq_max > 1) {
16358 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16359 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16361 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16362 tg3_asic_rev(tp) == ASIC_REV_5720)
16363 tp->txq_max = tp->irq_max - 1;
16366 if (tg3_flag(tp, 5755_PLUS) ||
16367 tg3_asic_rev(tp) == ASIC_REV_5906)
16368 tg3_flag_set(tp, SHORT_DMA_BUG);
16370 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16371 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16373 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16374 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16375 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16376 tg3_asic_rev(tp) == ASIC_REV_5762)
16377 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16379 if (tg3_flag(tp, 57765_PLUS) &&
16380 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16381 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16383 if (!tg3_flag(tp, 5705_PLUS) ||
16384 tg3_flag(tp, 5780_CLASS) ||
16385 tg3_flag(tp, USE_JUMBO_BDFLAG))
16386 tg3_flag_set(tp, JUMBO_CAPABLE);
16388 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16391 if (pci_is_pcie(tp->pdev)) {
16394 tg3_flag_set(tp, PCI_EXPRESS);
16396 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16397 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16398 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16399 tg3_flag_clear(tp, HW_TSO_2);
16400 tg3_flag_clear(tp, TSO_CAPABLE);
16402 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16403 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16404 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16405 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16406 tg3_flag_set(tp, CLKREQ_BUG);
16407 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16408 tg3_flag_set(tp, L1PLLPD_EN);
16410 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16411 /* BCM5785 devices are effectively PCIe devices, and should
16412 * follow PCIe codepaths, but do not have a PCIe capabilities
16415 tg3_flag_set(tp, PCI_EXPRESS);
16416 } else if (!tg3_flag(tp, 5705_PLUS) ||
16417 tg3_flag(tp, 5780_CLASS)) {
16418 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16419 if (!tp->pcix_cap) {
16420 dev_err(&tp->pdev->dev,
16421 "Cannot find PCI-X capability, aborting\n");
16425 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16426 tg3_flag_set(tp, PCIX_MODE);
16429 /* If we have an AMD 762 or VIA K8T800 chipset, write
16430 * reordering to the mailbox registers done by the host
16431 * controller can cause major troubles. We read back from
16432 * every mailbox register write to force the writes to be
16433 * posted to the chip in order.
16435 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16436 !tg3_flag(tp, PCI_EXPRESS))
16437 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16439 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16440 &tp->pci_cacheline_sz);
16441 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16442 &tp->pci_lat_timer);
16443 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16444 tp->pci_lat_timer < 64) {
16445 tp->pci_lat_timer = 64;
16446 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16447 tp->pci_lat_timer);
16450 /* Important! -- It is critical that the PCI-X hw workaround
16451 * situation is decided before the first MMIO register access.
16453 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16454 /* 5700 BX chips need to have their TX producer index
16455 * mailboxes written twice to workaround a bug.
16457 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16459 /* If we are in PCI-X mode, enable register write workaround.
16461 * The workaround is to use indirect register accesses
16462 * for all chip writes not to mailbox registers.
16464 if (tg3_flag(tp, PCIX_MODE)) {
16467 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16469 /* The chip can have it's power management PCI config
16470 * space registers clobbered due to this bug.
16471 * So explicitly force the chip into D0 here.
16473 pci_read_config_dword(tp->pdev,
16474 tp->pdev->pm_cap + PCI_PM_CTRL,
16476 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16477 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16478 pci_write_config_dword(tp->pdev,
16479 tp->pdev->pm_cap + PCI_PM_CTRL,
16482 /* Also, force SERR#/PERR# in PCI command. */
16483 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16484 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16485 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16489 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16490 tg3_flag_set(tp, PCI_HIGH_SPEED);
16491 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16492 tg3_flag_set(tp, PCI_32BIT);
16494 /* Chip-specific fixup from Broadcom driver */
16495 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16496 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16497 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16498 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16501 /* Default fast path register access methods */
16502 tp->read32 = tg3_read32;
16503 tp->write32 = tg3_write32;
16504 tp->read32_mbox = tg3_read32;
16505 tp->write32_mbox = tg3_write32;
16506 tp->write32_tx_mbox = tg3_write32;
16507 tp->write32_rx_mbox = tg3_write32;
16509 /* Various workaround register access methods */
16510 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16511 tp->write32 = tg3_write_indirect_reg32;
16512 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16513 (tg3_flag(tp, PCI_EXPRESS) &&
16514 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16516 * Back to back register writes can cause problems on these
16517 * chips, the workaround is to read back all reg writes
16518 * except those to mailbox regs.
16520 * See tg3_write_indirect_reg32().
16522 tp->write32 = tg3_write_flush_reg32;
16525 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16526 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16527 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16528 tp->write32_rx_mbox = tg3_write_flush_reg32;
16531 if (tg3_flag(tp, ICH_WORKAROUND)) {
16532 tp->read32 = tg3_read_indirect_reg32;
16533 tp->write32 = tg3_write_indirect_reg32;
16534 tp->read32_mbox = tg3_read_indirect_mbox;
16535 tp->write32_mbox = tg3_write_indirect_mbox;
16536 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16537 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16542 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16543 pci_cmd &= ~PCI_COMMAND_MEMORY;
16544 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16546 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16547 tp->read32_mbox = tg3_read32_mbox_5906;
16548 tp->write32_mbox = tg3_write32_mbox_5906;
16549 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16550 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16553 if (tp->write32 == tg3_write_indirect_reg32 ||
16554 (tg3_flag(tp, PCIX_MODE) &&
16555 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16556 tg3_asic_rev(tp) == ASIC_REV_5701)))
16557 tg3_flag_set(tp, SRAM_USE_CONFIG);
16559 /* The memory arbiter has to be enabled in order for SRAM accesses
16560 * to succeed. Normally on powerup the tg3 chip firmware will make
16561 * sure it is enabled, but other entities such as system netboot
16562 * code might disable it.
16564 val = tr32(MEMARB_MODE);
16565 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16567 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16568 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16569 tg3_flag(tp, 5780_CLASS)) {
16570 if (tg3_flag(tp, PCIX_MODE)) {
16571 pci_read_config_dword(tp->pdev,
16572 tp->pcix_cap + PCI_X_STATUS,
16574 tp->pci_fn = val & 0x7;
16576 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16577 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16578 tg3_asic_rev(tp) == ASIC_REV_5720) {
16579 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16580 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16581 val = tr32(TG3_CPMU_STATUS);
16583 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16584 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16586 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16587 TG3_CPMU_STATUS_FSHFT_5719;
16590 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16591 tp->write32_tx_mbox = tg3_write_flush_reg32;
16592 tp->write32_rx_mbox = tg3_write_flush_reg32;
16595 /* Get eeprom hw config before calling tg3_set_power_state().
16596 * In particular, the TG3_FLAG_IS_NIC flag must be
16597 * determined before calling tg3_set_power_state() so that
16598 * we know whether or not to switch out of Vaux power.
16599 * When the flag is set, it means that GPIO1 is used for eeprom
16600 * write protect and also implies that it is a LOM where GPIOs
16601 * are not used to switch power.
16603 tg3_get_eeprom_hw_cfg(tp);
16605 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16606 tg3_flag_clear(tp, TSO_CAPABLE);
16607 tg3_flag_clear(tp, TSO_BUG);
16608 tp->fw_needed = NULL;
16611 if (tg3_flag(tp, ENABLE_APE)) {
16612 /* Allow reads and writes to the
16613 * APE register and memory space.
16615 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16616 PCISTATE_ALLOW_APE_SHMEM_WR |
16617 PCISTATE_ALLOW_APE_PSPACE_WR;
16618 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16621 tg3_ape_lock_init(tp);
16624 /* Set up tp->grc_local_ctrl before calling
16625 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16626 * will bring 5700's external PHY out of reset.
16627 * It is also used as eeprom write protect on LOMs.
16629 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16630 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16631 tg3_flag(tp, EEPROM_WRITE_PROT))
16632 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16633 GRC_LCLCTRL_GPIO_OUTPUT1);
16634 /* Unused GPIO3 must be driven as output on 5752 because there
16635 * are no pull-up resistors on unused GPIO pins.
16637 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16638 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16640 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16641 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16642 tg3_flag(tp, 57765_CLASS))
16643 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16645 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16647 /* Turn off the debug UART. */
16648 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16649 if (tg3_flag(tp, IS_NIC))
16650 /* Keep VMain power. */
16651 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16652 GRC_LCLCTRL_GPIO_OUTPUT0;
16655 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16656 tp->grc_local_ctrl |=
16657 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16659 /* Switch out of Vaux if it is a NIC */
16660 tg3_pwrsrc_switch_to_vmain(tp);
16662 /* Derive initial jumbo mode from MTU assigned in
16663 * ether_setup() via the alloc_etherdev() call
16665 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16666 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16668 /* Determine WakeOnLan speed to use. */
16669 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16670 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16672 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16673 tg3_flag_clear(tp, WOL_SPEED_100MB);
16675 tg3_flag_set(tp, WOL_SPEED_100MB);
16678 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16679 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16681 /* A few boards don't want Ethernet@WireSpeed phy feature */
16682 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16683 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16684 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16685 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16686 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16687 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16688 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16690 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16691 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16692 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16693 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16694 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16696 if (tg3_flag(tp, 5705_PLUS) &&
16697 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16698 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16699 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16700 !tg3_flag(tp, 57765_PLUS)) {
16701 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16702 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16703 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16704 tg3_asic_rev(tp) == ASIC_REV_5761) {
16705 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16706 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16707 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16708 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16709 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16711 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16714 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16715 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16716 tp->phy_otp = tg3_read_otp_phycfg(tp);
16717 if (tp->phy_otp == 0)
16718 tp->phy_otp = TG3_OTP_DEFAULT;
16721 if (tg3_flag(tp, CPMU_PRESENT))
16722 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16724 tp->mi_mode = MAC_MI_MODE_BASE;
16726 tp->coalesce_mode = 0;
16727 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16728 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16729 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16731 /* Set these bits to enable statistics workaround. */
16732 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16733 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16734 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16735 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16736 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16737 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16740 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16741 tg3_asic_rev(tp) == ASIC_REV_57780)
16742 tg3_flag_set(tp, USE_PHYLIB);
16744 err = tg3_mdio_init(tp);
16748 /* Initialize data/descriptor byte/word swapping. */
16749 val = tr32(GRC_MODE);
16750 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16751 tg3_asic_rev(tp) == ASIC_REV_5762)
16752 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16753 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16754 GRC_MODE_B2HRX_ENABLE |
16755 GRC_MODE_HTX2B_ENABLE |
16756 GRC_MODE_HOST_STACKUP);
16758 val &= GRC_MODE_HOST_STACKUP;
16760 tw32(GRC_MODE, val | tp->grc_mode);
16762 tg3_switch_clocks(tp);
16764 /* Clear this out for sanity. */
16765 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16767 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16768 tw32(TG3PCI_REG_BASE_ADDR, 0);
16770 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16772 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16773 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16774 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16775 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16776 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16777 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16778 void __iomem *sram_base;
16780 /* Write some dummy words into the SRAM status block
16781 * area, see if it reads back correctly. If the return
16782 * value is bad, force enable the PCIX workaround.
16784 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16786 writel(0x00000000, sram_base);
16787 writel(0x00000000, sram_base + 4);
16788 writel(0xffffffff, sram_base + 4);
16789 if (readl(sram_base) != 0x00000000)
16790 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16795 tg3_nvram_init(tp);
16797 /* If the device has an NVRAM, no need to load patch firmware */
16798 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16799 !tg3_flag(tp, NO_NVRAM))
16800 tp->fw_needed = NULL;
16802 grc_misc_cfg = tr32(GRC_MISC_CFG);
16803 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16805 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16806 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16807 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16808 tg3_flag_set(tp, IS_5788);
16810 if (!tg3_flag(tp, IS_5788) &&
16811 tg3_asic_rev(tp) != ASIC_REV_5700)
16812 tg3_flag_set(tp, TAGGED_STATUS);
16813 if (tg3_flag(tp, TAGGED_STATUS)) {
16814 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16815 HOSTCC_MODE_CLRTICK_TXBD);
16817 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16818 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16819 tp->misc_host_ctrl);
16822 /* Preserve the APE MAC_MODE bits */
16823 if (tg3_flag(tp, ENABLE_APE))
16824 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16828 if (tg3_10_100_only_device(tp, ent))
16829 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16831 err = tg3_phy_probe(tp);
16833 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16834 /* ... but do not return immediately ... */
16839 tg3_read_fw_ver(tp);
16841 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16842 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16844 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16845 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16847 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16850 /* 5700 {AX,BX} chips have a broken status block link
16851 * change bit implementation, so we must use the
16852 * status register in those cases.
16854 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16855 tg3_flag_set(tp, USE_LINKCHG_REG);
16857 tg3_flag_clear(tp, USE_LINKCHG_REG);
16859 /* The led_ctrl is set during tg3_phy_probe, here we might
16860 * have to force the link status polling mechanism based
16861 * upon subsystem IDs.
16863 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16864 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16865 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16866 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16867 tg3_flag_set(tp, USE_LINKCHG_REG);
16870 /* For all SERDES we poll the MAC status register. */
16871 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16872 tg3_flag_set(tp, POLL_SERDES);
16874 tg3_flag_clear(tp, POLL_SERDES);
16876 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16877 tg3_flag_set(tp, POLL_CPMU_LINK);
16879 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16880 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16881 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16882 tg3_flag(tp, PCIX_MODE)) {
16883 tp->rx_offset = NET_SKB_PAD;
16884 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16885 tp->rx_copy_thresh = ~(u16)0;
16889 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16890 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16891 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16893 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16895 /* Increment the rx prod index on the rx std ring by at most
16896 * 8 for these chips to workaround hw errata.
16898 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16899 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16900 tg3_asic_rev(tp) == ASIC_REV_5755)
16901 tp->rx_std_max_post = 8;
16903 if (tg3_flag(tp, ASPM_WORKAROUND))
16904 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16905 PCIE_PWR_MGMT_L1_THRESH_MSK;
16910 #ifdef CONFIG_SPARC
16911 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16913 struct net_device *dev = tp->dev;
16914 struct pci_dev *pdev = tp->pdev;
16915 struct device_node *dp = pci_device_to_OF_node(pdev);
16916 const unsigned char *addr;
16919 addr = of_get_property(dp, "local-mac-address", &len);
16920 if (addr && len == ETH_ALEN) {
16921 memcpy(dev->dev_addr, addr, ETH_ALEN);
16927 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16929 struct net_device *dev = tp->dev;
16931 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16936 static int tg3_get_device_address(struct tg3 *tp)
16938 struct net_device *dev = tp->dev;
16939 u32 hi, lo, mac_offset;
16943 #ifdef CONFIG_SPARC
16944 if (!tg3_get_macaddr_sparc(tp))
16948 if (tg3_flag(tp, IS_SSB_CORE)) {
16949 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16950 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16955 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16956 tg3_flag(tp, 5780_CLASS)) {
16957 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16959 if (tg3_nvram_lock(tp))
16960 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16962 tg3_nvram_unlock(tp);
16963 } else if (tg3_flag(tp, 5717_PLUS)) {
16964 if (tp->pci_fn & 1)
16966 if (tp->pci_fn > 1)
16967 mac_offset += 0x18c;
16968 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16971 /* First try to get it from MAC address mailbox. */
16972 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16973 if ((hi >> 16) == 0x484b) {
16974 dev->dev_addr[0] = (hi >> 8) & 0xff;
16975 dev->dev_addr[1] = (hi >> 0) & 0xff;
16977 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16978 dev->dev_addr[2] = (lo >> 24) & 0xff;
16979 dev->dev_addr[3] = (lo >> 16) & 0xff;
16980 dev->dev_addr[4] = (lo >> 8) & 0xff;
16981 dev->dev_addr[5] = (lo >> 0) & 0xff;
16983 /* Some old bootcode may report a 0 MAC address in SRAM */
16984 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16987 /* Next, try NVRAM. */
16988 if (!tg3_flag(tp, NO_NVRAM) &&
16989 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16990 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16991 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16992 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16994 /* Finally just fetch it out of the MAC control regs. */
16996 hi = tr32(MAC_ADDR_0_HIGH);
16997 lo = tr32(MAC_ADDR_0_LOW);
16999 dev->dev_addr[5] = lo & 0xff;
17000 dev->dev_addr[4] = (lo >> 8) & 0xff;
17001 dev->dev_addr[3] = (lo >> 16) & 0xff;
17002 dev->dev_addr[2] = (lo >> 24) & 0xff;
17003 dev->dev_addr[1] = hi & 0xff;
17004 dev->dev_addr[0] = (hi >> 8) & 0xff;
17008 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17009 #ifdef CONFIG_SPARC
17010 if (!tg3_get_default_macaddr_sparc(tp))
17018 #define BOUNDARY_SINGLE_CACHELINE 1
17019 #define BOUNDARY_MULTI_CACHELINE 2
17021 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17023 int cacheline_size;
17027 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17029 cacheline_size = 1024;
17031 cacheline_size = (int) byte * 4;
17033 /* On 5703 and later chips, the boundary bits have no
17036 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17037 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17038 !tg3_flag(tp, PCI_EXPRESS))
17041 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17042 goal = BOUNDARY_MULTI_CACHELINE;
17044 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17045 goal = BOUNDARY_SINGLE_CACHELINE;
17051 if (tg3_flag(tp, 57765_PLUS)) {
17052 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17059 /* PCI controllers on most RISC systems tend to disconnect
17060 * when a device tries to burst across a cache-line boundary.
17061 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17063 * Unfortunately, for PCI-E there are only limited
17064 * write-side controls for this, and thus for reads
17065 * we will still get the disconnects. We'll also waste
17066 * these PCI cycles for both read and write for chips
17067 * other than 5700 and 5701 which do not implement the
17070 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17071 switch (cacheline_size) {
17076 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17077 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17078 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17080 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17081 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17086 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17087 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17091 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17092 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17095 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17096 switch (cacheline_size) {
17100 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17101 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17102 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17108 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17109 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17113 switch (cacheline_size) {
17115 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17116 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17117 DMA_RWCTRL_WRITE_BNDRY_16);
17122 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17123 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17124 DMA_RWCTRL_WRITE_BNDRY_32);
17129 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17130 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17131 DMA_RWCTRL_WRITE_BNDRY_64);
17136 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17137 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17138 DMA_RWCTRL_WRITE_BNDRY_128);
17143 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17144 DMA_RWCTRL_WRITE_BNDRY_256);
17147 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17148 DMA_RWCTRL_WRITE_BNDRY_512);
17152 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17153 DMA_RWCTRL_WRITE_BNDRY_1024);
17162 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17163 int size, bool to_device)
17165 struct tg3_internal_buffer_desc test_desc;
17166 u32 sram_dma_descs;
17169 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17171 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17172 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17173 tw32(RDMAC_STATUS, 0);
17174 tw32(WDMAC_STATUS, 0);
17176 tw32(BUFMGR_MODE, 0);
17177 tw32(FTQ_RESET, 0);
17179 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17180 test_desc.addr_lo = buf_dma & 0xffffffff;
17181 test_desc.nic_mbuf = 0x00002100;
17182 test_desc.len = size;
17185 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17186 * the *second* time the tg3 driver was getting loaded after an
17189 * Broadcom tells me:
17190 * ...the DMA engine is connected to the GRC block and a DMA
17191 * reset may affect the GRC block in some unpredictable way...
17192 * The behavior of resets to individual blocks has not been tested.
17194 * Broadcom noted the GRC reset will also reset all sub-components.
17197 test_desc.cqid_sqid = (13 << 8) | 2;
17199 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17202 test_desc.cqid_sqid = (16 << 8) | 7;
17204 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17207 test_desc.flags = 0x00000005;
17209 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17212 val = *(((u32 *)&test_desc) + i);
17213 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17214 sram_dma_descs + (i * sizeof(u32)));
17215 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17217 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17220 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17222 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17225 for (i = 0; i < 40; i++) {
17229 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17231 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17232 if ((val & 0xffff) == sram_dma_descs) {
17243 #define TEST_BUFFER_SIZE 0x2000
17245 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17246 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17250 static int tg3_test_dma(struct tg3 *tp)
17252 dma_addr_t buf_dma;
17253 u32 *buf, saved_dma_rwctrl;
17256 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17257 &buf_dma, GFP_KERNEL);
17263 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17264 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17266 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17268 if (tg3_flag(tp, 57765_PLUS))
17271 if (tg3_flag(tp, PCI_EXPRESS)) {
17272 /* DMA read watermark not used on PCIE */
17273 tp->dma_rwctrl |= 0x00180000;
17274 } else if (!tg3_flag(tp, PCIX_MODE)) {
17275 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17276 tg3_asic_rev(tp) == ASIC_REV_5750)
17277 tp->dma_rwctrl |= 0x003f0000;
17279 tp->dma_rwctrl |= 0x003f000f;
17281 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17282 tg3_asic_rev(tp) == ASIC_REV_5704) {
17283 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17284 u32 read_water = 0x7;
17286 /* If the 5704 is behind the EPB bridge, we can
17287 * do the less restrictive ONE_DMA workaround for
17288 * better performance.
17290 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17291 tg3_asic_rev(tp) == ASIC_REV_5704)
17292 tp->dma_rwctrl |= 0x8000;
17293 else if (ccval == 0x6 || ccval == 0x7)
17294 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17296 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17298 /* Set bit 23 to enable PCIX hw bug fix */
17300 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17301 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17303 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17304 /* 5780 always in PCIX mode */
17305 tp->dma_rwctrl |= 0x00144000;
17306 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17307 /* 5714 always in PCIX mode */
17308 tp->dma_rwctrl |= 0x00148000;
17310 tp->dma_rwctrl |= 0x001b000f;
17313 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17314 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17316 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17317 tg3_asic_rev(tp) == ASIC_REV_5704)
17318 tp->dma_rwctrl &= 0xfffffff0;
17320 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17321 tg3_asic_rev(tp) == ASIC_REV_5701) {
17322 /* Remove this if it causes problems for some boards. */
17323 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17325 /* On 5700/5701 chips, we need to set this bit.
17326 * Otherwise the chip will issue cacheline transactions
17327 * to streamable DMA memory with not all the byte
17328 * enables turned on. This is an error on several
17329 * RISC PCI controllers, in particular sparc64.
17331 * On 5703/5704 chips, this bit has been reassigned
17332 * a different meaning. In particular, it is used
17333 * on those chips to enable a PCI-X workaround.
17335 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17338 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17341 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17342 tg3_asic_rev(tp) != ASIC_REV_5701)
17345 /* It is best to perform DMA test with maximum write burst size
17346 * to expose the 5700/5701 write DMA bug.
17348 saved_dma_rwctrl = tp->dma_rwctrl;
17349 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17350 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17355 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17358 /* Send the buffer to the chip. */
17359 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17361 dev_err(&tp->pdev->dev,
17362 "%s: Buffer write failed. err = %d\n",
17367 /* Now read it back. */
17368 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17370 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17371 "err = %d\n", __func__, ret);
17376 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17380 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17381 DMA_RWCTRL_WRITE_BNDRY_16) {
17382 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17383 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17384 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17387 dev_err(&tp->pdev->dev,
17388 "%s: Buffer corrupted on read back! "
17389 "(%d != %d)\n", __func__, p[i], i);
17395 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17401 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17402 DMA_RWCTRL_WRITE_BNDRY_16) {
17403 /* DMA test passed without adjusting DMA boundary,
17404 * now look for chipsets that are known to expose the
17405 * DMA bug without failing the test.
17407 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17408 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17409 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17411 /* Safe to use the calculated DMA boundary. */
17412 tp->dma_rwctrl = saved_dma_rwctrl;
17415 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17419 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17424 static void tg3_init_bufmgr_config(struct tg3 *tp)
17426 if (tg3_flag(tp, 57765_PLUS)) {
17427 tp->bufmgr_config.mbuf_read_dma_low_water =
17428 DEFAULT_MB_RDMA_LOW_WATER_5705;
17429 tp->bufmgr_config.mbuf_mac_rx_low_water =
17430 DEFAULT_MB_MACRX_LOW_WATER_57765;
17431 tp->bufmgr_config.mbuf_high_water =
17432 DEFAULT_MB_HIGH_WATER_57765;
17434 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17435 DEFAULT_MB_RDMA_LOW_WATER_5705;
17436 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17437 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17438 tp->bufmgr_config.mbuf_high_water_jumbo =
17439 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17440 } else if (tg3_flag(tp, 5705_PLUS)) {
17441 tp->bufmgr_config.mbuf_read_dma_low_water =
17442 DEFAULT_MB_RDMA_LOW_WATER_5705;
17443 tp->bufmgr_config.mbuf_mac_rx_low_water =
17444 DEFAULT_MB_MACRX_LOW_WATER_5705;
17445 tp->bufmgr_config.mbuf_high_water =
17446 DEFAULT_MB_HIGH_WATER_5705;
17447 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17448 tp->bufmgr_config.mbuf_mac_rx_low_water =
17449 DEFAULT_MB_MACRX_LOW_WATER_5906;
17450 tp->bufmgr_config.mbuf_high_water =
17451 DEFAULT_MB_HIGH_WATER_5906;
17454 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17455 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17456 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17457 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17458 tp->bufmgr_config.mbuf_high_water_jumbo =
17459 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17461 tp->bufmgr_config.mbuf_read_dma_low_water =
17462 DEFAULT_MB_RDMA_LOW_WATER;
17463 tp->bufmgr_config.mbuf_mac_rx_low_water =
17464 DEFAULT_MB_MACRX_LOW_WATER;
17465 tp->bufmgr_config.mbuf_high_water =
17466 DEFAULT_MB_HIGH_WATER;
17468 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17469 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17470 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17471 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17472 tp->bufmgr_config.mbuf_high_water_jumbo =
17473 DEFAULT_MB_HIGH_WATER_JUMBO;
17476 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17477 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17480 static char *tg3_phy_string(struct tg3 *tp)
17482 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17483 case TG3_PHY_ID_BCM5400: return "5400";
17484 case TG3_PHY_ID_BCM5401: return "5401";
17485 case TG3_PHY_ID_BCM5411: return "5411";
17486 case TG3_PHY_ID_BCM5701: return "5701";
17487 case TG3_PHY_ID_BCM5703: return "5703";
17488 case TG3_PHY_ID_BCM5704: return "5704";
17489 case TG3_PHY_ID_BCM5705: return "5705";
17490 case TG3_PHY_ID_BCM5750: return "5750";
17491 case TG3_PHY_ID_BCM5752: return "5752";
17492 case TG3_PHY_ID_BCM5714: return "5714";
17493 case TG3_PHY_ID_BCM5780: return "5780";
17494 case TG3_PHY_ID_BCM5755: return "5755";
17495 case TG3_PHY_ID_BCM5787: return "5787";
17496 case TG3_PHY_ID_BCM5784: return "5784";
17497 case TG3_PHY_ID_BCM5756: return "5722/5756";
17498 case TG3_PHY_ID_BCM5906: return "5906";
17499 case TG3_PHY_ID_BCM5761: return "5761";
17500 case TG3_PHY_ID_BCM5718C: return "5718C";
17501 case TG3_PHY_ID_BCM5718S: return "5718S";
17502 case TG3_PHY_ID_BCM57765: return "57765";
17503 case TG3_PHY_ID_BCM5719C: return "5719C";
17504 case TG3_PHY_ID_BCM5720C: return "5720C";
17505 case TG3_PHY_ID_BCM5762: return "5762C";
17506 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17507 case 0: return "serdes";
17508 default: return "unknown";
17512 static char *tg3_bus_string(struct tg3 *tp, char *str)
17514 if (tg3_flag(tp, PCI_EXPRESS)) {
17515 strcpy(str, "PCI Express");
17517 } else if (tg3_flag(tp, PCIX_MODE)) {
17518 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17520 strcpy(str, "PCIX:");
17522 if ((clock_ctrl == 7) ||
17523 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17524 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17525 strcat(str, "133MHz");
17526 else if (clock_ctrl == 0)
17527 strcat(str, "33MHz");
17528 else if (clock_ctrl == 2)
17529 strcat(str, "50MHz");
17530 else if (clock_ctrl == 4)
17531 strcat(str, "66MHz");
17532 else if (clock_ctrl == 6)
17533 strcat(str, "100MHz");
17535 strcpy(str, "PCI:");
17536 if (tg3_flag(tp, PCI_HIGH_SPEED))
17537 strcat(str, "66MHz");
17539 strcat(str, "33MHz");
17541 if (tg3_flag(tp, PCI_32BIT))
17542 strcat(str, ":32-bit");
17544 strcat(str, ":64-bit");
17548 static void tg3_init_coal(struct tg3 *tp)
17550 struct ethtool_coalesce *ec = &tp->coal;
17552 memset(ec, 0, sizeof(*ec));
17553 ec->cmd = ETHTOOL_GCOALESCE;
17554 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17555 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17556 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17557 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17558 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17559 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17560 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17561 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17562 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17564 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17565 HOSTCC_MODE_CLRTICK_TXBD)) {
17566 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17567 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17568 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17569 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17572 if (tg3_flag(tp, 5705_PLUS)) {
17573 ec->rx_coalesce_usecs_irq = 0;
17574 ec->tx_coalesce_usecs_irq = 0;
17575 ec->stats_block_coalesce_usecs = 0;
17579 static int tg3_init_one(struct pci_dev *pdev,
17580 const struct pci_device_id *ent)
17582 struct net_device *dev;
17585 u32 sndmbx, rcvmbx, intmbx;
17587 u64 dma_mask, persist_dma_mask;
17588 netdev_features_t features = 0;
17590 printk_once(KERN_INFO "%s\n", version);
17592 err = pci_enable_device(pdev);
17594 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17598 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17600 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17601 goto err_out_disable_pdev;
17604 pci_set_master(pdev);
17606 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17609 goto err_out_free_res;
17612 SET_NETDEV_DEV(dev, &pdev->dev);
17614 tp = netdev_priv(dev);
17617 tp->rx_mode = TG3_DEF_RX_MODE;
17618 tp->tx_mode = TG3_DEF_TX_MODE;
17620 tp->pcierr_recovery = false;
17623 tp->msg_enable = tg3_debug;
17625 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17627 if (pdev_is_ssb_gige_core(pdev)) {
17628 tg3_flag_set(tp, IS_SSB_CORE);
17629 if (ssb_gige_must_flush_posted_writes(pdev))
17630 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17631 if (ssb_gige_one_dma_at_once(pdev))
17632 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17633 if (ssb_gige_have_roboswitch(pdev)) {
17634 tg3_flag_set(tp, USE_PHYLIB);
17635 tg3_flag_set(tp, ROBOSWITCH);
17637 if (ssb_gige_is_rgmii(pdev))
17638 tg3_flag_set(tp, RGMII_MODE);
17641 /* The word/byte swap controls here control register access byte
17642 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17645 tp->misc_host_ctrl =
17646 MISC_HOST_CTRL_MASK_PCI_INT |
17647 MISC_HOST_CTRL_WORD_SWAP |
17648 MISC_HOST_CTRL_INDIR_ACCESS |
17649 MISC_HOST_CTRL_PCISTATE_RW;
17651 /* The NONFRM (non-frame) byte/word swap controls take effect
17652 * on descriptor entries, anything which isn't packet data.
17654 * The StrongARM chips on the board (one for tx, one for rx)
17655 * are running in big-endian mode.
17657 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17658 GRC_MODE_WSWAP_NONFRM_DATA);
17659 #ifdef __BIG_ENDIAN
17660 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17662 spin_lock_init(&tp->lock);
17663 spin_lock_init(&tp->indirect_lock);
17664 INIT_WORK(&tp->reset_task, tg3_reset_task);
17666 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17668 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17670 goto err_out_free_dev;
17673 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17674 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17675 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17676 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17677 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17678 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17679 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17680 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17681 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17682 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17683 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17684 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17685 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17686 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17687 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17688 tg3_flag_set(tp, ENABLE_APE);
17689 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17690 if (!tp->aperegs) {
17691 dev_err(&pdev->dev,
17692 "Cannot map APE registers, aborting\n");
17694 goto err_out_iounmap;
17698 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17699 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17701 dev->ethtool_ops = &tg3_ethtool_ops;
17702 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17703 dev->netdev_ops = &tg3_netdev_ops;
17704 dev->irq = pdev->irq;
17706 err = tg3_get_invariants(tp, ent);
17708 dev_err(&pdev->dev,
17709 "Problem fetching invariants of chip, aborting\n");
17710 goto err_out_apeunmap;
17713 /* The EPB bridge inside 5714, 5715, and 5780 and any
17714 * device behind the EPB cannot support DMA addresses > 40-bit.
17715 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17716 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17717 * do DMA address check in tg3_start_xmit().
17719 if (tg3_flag(tp, IS_5788))
17720 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17721 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17722 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17723 #ifdef CONFIG_HIGHMEM
17724 dma_mask = DMA_BIT_MASK(64);
17727 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17729 /* Configure DMA attributes. */
17730 if (dma_mask > DMA_BIT_MASK(32)) {
17731 err = pci_set_dma_mask(pdev, dma_mask);
17733 features |= NETIF_F_HIGHDMA;
17734 err = pci_set_consistent_dma_mask(pdev,
17737 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17738 "DMA for consistent allocations\n");
17739 goto err_out_apeunmap;
17743 if (err || dma_mask == DMA_BIT_MASK(32)) {
17744 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17746 dev_err(&pdev->dev,
17747 "No usable DMA configuration, aborting\n");
17748 goto err_out_apeunmap;
17752 tg3_init_bufmgr_config(tp);
17754 /* 5700 B0 chips do not support checksumming correctly due
17755 * to hardware bugs.
17757 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17758 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17760 if (tg3_flag(tp, 5755_PLUS))
17761 features |= NETIF_F_IPV6_CSUM;
17764 /* TSO is on by default on chips that support hardware TSO.
17765 * Firmware TSO on older chips gives lower performance, so it
17766 * is off by default, but can be enabled using ethtool.
17768 if ((tg3_flag(tp, HW_TSO_1) ||
17769 tg3_flag(tp, HW_TSO_2) ||
17770 tg3_flag(tp, HW_TSO_3)) &&
17771 (features & NETIF_F_IP_CSUM))
17772 features |= NETIF_F_TSO;
17773 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17774 if (features & NETIF_F_IPV6_CSUM)
17775 features |= NETIF_F_TSO6;
17776 if (tg3_flag(tp, HW_TSO_3) ||
17777 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17778 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17779 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17780 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17781 tg3_asic_rev(tp) == ASIC_REV_57780)
17782 features |= NETIF_F_TSO_ECN;
17785 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17786 NETIF_F_HW_VLAN_CTAG_RX;
17787 dev->vlan_features |= features;
17790 * Add loopback capability only for a subset of devices that support
17791 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17792 * loopback for the remaining devices.
17794 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17795 !tg3_flag(tp, CPMU_PRESENT))
17796 /* Add the loopback capability */
17797 features |= NETIF_F_LOOPBACK;
17799 dev->hw_features |= features;
17800 dev->priv_flags |= IFF_UNICAST_FLT;
17802 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17803 !tg3_flag(tp, TSO_CAPABLE) &&
17804 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17805 tg3_flag_set(tp, MAX_RXPEND_64);
17806 tp->rx_pending = 63;
17809 err = tg3_get_device_address(tp);
17811 dev_err(&pdev->dev,
17812 "Could not obtain valid ethernet address, aborting\n");
17813 goto err_out_apeunmap;
17816 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17817 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17818 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17819 for (i = 0; i < tp->irq_max; i++) {
17820 struct tg3_napi *tnapi = &tp->napi[i];
17823 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17825 tnapi->int_mbox = intmbx;
17831 tnapi->consmbox = rcvmbx;
17832 tnapi->prodmbox = sndmbx;
17835 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17837 tnapi->coal_now = HOSTCC_MODE_NOW;
17839 if (!tg3_flag(tp, SUPPORT_MSIX))
17843 * If we support MSIX, we'll be using RSS. If we're using
17844 * RSS, the first vector only handles link interrupts and the
17845 * remaining vectors handle rx and tx interrupts. Reuse the
17846 * mailbox values for the next iteration. The values we setup
17847 * above are still useful for the single vectored mode.
17861 * Reset chip in case UNDI or EFI driver did not shutdown
17862 * DMA self test will enable WDMAC and we'll see (spurious)
17863 * pending DMA on the PCI bus at that point.
17865 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17866 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17867 tg3_full_lock(tp, 0);
17868 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17869 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17870 tg3_full_unlock(tp);
17873 err = tg3_test_dma(tp);
17875 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17876 goto err_out_apeunmap;
17881 pci_set_drvdata(pdev, dev);
17883 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17884 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17885 tg3_asic_rev(tp) == ASIC_REV_5762)
17886 tg3_flag_set(tp, PTP_CAPABLE);
17888 tg3_timer_init(tp);
17890 tg3_carrier_off(tp);
17892 err = register_netdev(dev);
17894 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17895 goto err_out_apeunmap;
17898 if (tg3_flag(tp, PTP_CAPABLE)) {
17900 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17902 if (IS_ERR(tp->ptp_clock))
17903 tp->ptp_clock = NULL;
17906 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17907 tp->board_part_number,
17908 tg3_chip_rev_id(tp),
17909 tg3_bus_string(tp, str),
17912 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17915 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17916 ethtype = "10/100Base-TX";
17917 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17918 ethtype = "1000Base-SX";
17920 ethtype = "10/100/1000Base-T";
17922 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17923 "(WireSpeed[%d], EEE[%d])\n",
17924 tg3_phy_string(tp), ethtype,
17925 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17926 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17929 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17930 (dev->features & NETIF_F_RXCSUM) != 0,
17931 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17932 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17933 tg3_flag(tp, ENABLE_ASF) != 0,
17934 tg3_flag(tp, TSO_CAPABLE) != 0);
17935 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17937 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17938 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17940 pci_save_state(pdev);
17946 iounmap(tp->aperegs);
17947 tp->aperegs = NULL;
17960 pci_release_regions(pdev);
17962 err_out_disable_pdev:
17963 if (pci_is_enabled(pdev))
17964 pci_disable_device(pdev);
17968 static void tg3_remove_one(struct pci_dev *pdev)
17970 struct net_device *dev = pci_get_drvdata(pdev);
17973 struct tg3 *tp = netdev_priv(dev);
17977 release_firmware(tp->fw);
17979 tg3_reset_task_cancel(tp);
17981 if (tg3_flag(tp, USE_PHYLIB)) {
17986 unregister_netdev(dev);
17988 iounmap(tp->aperegs);
17989 tp->aperegs = NULL;
17996 pci_release_regions(pdev);
17997 pci_disable_device(pdev);
18001 #ifdef CONFIG_PM_SLEEP
18002 static int tg3_suspend(struct device *device)
18004 struct pci_dev *pdev = to_pci_dev(device);
18005 struct net_device *dev = pci_get_drvdata(pdev);
18006 struct tg3 *tp = netdev_priv(dev);
18011 if (!netif_running(dev))
18014 tg3_reset_task_cancel(tp);
18016 tg3_netif_stop(tp);
18018 tg3_timer_stop(tp);
18020 tg3_full_lock(tp, 1);
18021 tg3_disable_ints(tp);
18022 tg3_full_unlock(tp);
18024 netif_device_detach(dev);
18026 tg3_full_lock(tp, 0);
18027 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18028 tg3_flag_clear(tp, INIT_COMPLETE);
18029 tg3_full_unlock(tp);
18031 err = tg3_power_down_prepare(tp);
18035 tg3_full_lock(tp, 0);
18037 tg3_flag_set(tp, INIT_COMPLETE);
18038 err2 = tg3_restart_hw(tp, true);
18042 tg3_timer_start(tp);
18044 netif_device_attach(dev);
18045 tg3_netif_start(tp);
18048 tg3_full_unlock(tp);
18059 static int tg3_resume(struct device *device)
18061 struct pci_dev *pdev = to_pci_dev(device);
18062 struct net_device *dev = pci_get_drvdata(pdev);
18063 struct tg3 *tp = netdev_priv(dev);
18068 if (!netif_running(dev))
18071 netif_device_attach(dev);
18073 tg3_full_lock(tp, 0);
18075 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18077 tg3_flag_set(tp, INIT_COMPLETE);
18078 err = tg3_restart_hw(tp,
18079 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18083 tg3_timer_start(tp);
18085 tg3_netif_start(tp);
18088 tg3_full_unlock(tp);
18097 #endif /* CONFIG_PM_SLEEP */
18099 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18101 static void tg3_shutdown(struct pci_dev *pdev)
18103 struct net_device *dev = pci_get_drvdata(pdev);
18104 struct tg3 *tp = netdev_priv(dev);
18107 netif_device_detach(dev);
18109 if (netif_running(dev))
18112 if (system_state == SYSTEM_POWER_OFF)
18113 tg3_power_down(tp);
18119 * tg3_io_error_detected - called when PCI error is detected
18120 * @pdev: Pointer to PCI device
18121 * @state: The current pci connection state
18123 * This function is called after a PCI bus error affecting
18124 * this device has been detected.
18126 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18127 pci_channel_state_t state)
18129 struct net_device *netdev = pci_get_drvdata(pdev);
18130 struct tg3 *tp = netdev_priv(netdev);
18131 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18133 netdev_info(netdev, "PCI I/O error detected\n");
18137 /* We needn't recover from permanent error */
18138 if (state == pci_channel_io_frozen)
18139 tp->pcierr_recovery = true;
18141 /* We probably don't have netdev yet */
18142 if (!netdev || !netif_running(netdev))
18147 tg3_netif_stop(tp);
18149 tg3_timer_stop(tp);
18151 /* Want to make sure that the reset task doesn't run */
18152 tg3_reset_task_cancel(tp);
18154 netif_device_detach(netdev);
18156 /* Clean up software state, even if MMIO is blocked */
18157 tg3_full_lock(tp, 0);
18158 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18159 tg3_full_unlock(tp);
18162 if (state == pci_channel_io_perm_failure) {
18164 tg3_napi_enable(tp);
18167 err = PCI_ERS_RESULT_DISCONNECT;
18169 pci_disable_device(pdev);
18178 * tg3_io_slot_reset - called after the pci bus has been reset.
18179 * @pdev: Pointer to PCI device
18181 * Restart the card from scratch, as if from a cold-boot.
18182 * At this point, the card has exprienced a hard reset,
18183 * followed by fixups by BIOS, and has its config space
18184 * set up identically to what it was at cold boot.
18186 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18188 struct net_device *netdev = pci_get_drvdata(pdev);
18189 struct tg3 *tp = netdev_priv(netdev);
18190 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18195 if (pci_enable_device(pdev)) {
18196 dev_err(&pdev->dev,
18197 "Cannot re-enable PCI device after reset.\n");
18201 pci_set_master(pdev);
18202 pci_restore_state(pdev);
18203 pci_save_state(pdev);
18205 if (!netdev || !netif_running(netdev)) {
18206 rc = PCI_ERS_RESULT_RECOVERED;
18210 err = tg3_power_up(tp);
18214 rc = PCI_ERS_RESULT_RECOVERED;
18217 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18218 tg3_napi_enable(tp);
18227 * tg3_io_resume - called when traffic can start flowing again.
18228 * @pdev: Pointer to PCI device
18230 * This callback is called when the error recovery driver tells
18231 * us that its OK to resume normal operation.
18233 static void tg3_io_resume(struct pci_dev *pdev)
18235 struct net_device *netdev = pci_get_drvdata(pdev);
18236 struct tg3 *tp = netdev_priv(netdev);
18241 if (!netif_running(netdev))
18244 tg3_full_lock(tp, 0);
18245 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18246 tg3_flag_set(tp, INIT_COMPLETE);
18247 err = tg3_restart_hw(tp, true);
18249 tg3_full_unlock(tp);
18250 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18254 netif_device_attach(netdev);
18256 tg3_timer_start(tp);
18258 tg3_netif_start(tp);
18260 tg3_full_unlock(tp);
18265 tp->pcierr_recovery = false;
18269 static const struct pci_error_handlers tg3_err_handler = {
18270 .error_detected = tg3_io_error_detected,
18271 .slot_reset = tg3_io_slot_reset,
18272 .resume = tg3_io_resume
18275 static struct pci_driver tg3_driver = {
18276 .name = DRV_MODULE_NAME,
18277 .id_table = tg3_pci_tbl,
18278 .probe = tg3_init_one,
18279 .remove = tg3_remove_one,
18280 .err_handler = &tg3_err_handler,
18281 .driver.pm = &tg3_pm_ops,
18282 .shutdown = tg3_shutdown,
18285 module_pci_driver(tg3_driver);