/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
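/* Example: tg3_flag(tp, ENABLE_APE) expands via token pasting to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the device flag bitmap, so every caller is type-checked
 * against enum TG3_FLAGS at compile time.
 */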
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			125
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"September 26, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
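/* Because TG3_TX_RING_SIZE is a power of two, the wrap in NEXT_TX()
 * reduces to the AND-mask above instead of a hardware divide; e.g.
 * NEXT_TX(511) == (512 & 511) == 0 wraps the producer index back to
 * the start of the ring.
 */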
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
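/* On architectures with cheap unaligned loads (or when NET_IP_ALIGN is
 * zero) the copy threshold collapses to the compile-time constant above;
 * elsewhere it is read from the per-device rx_copy_thresh so the 5701
 * PCIX alignment workaround described above can raise it at runtime.
 */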
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
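/* tw32() is a plain register write, tw32_f() flushes the posted PCI write
 * by reading the register back, and tw32_wait_f() additionally delays for
 * the caller-supplied number of microseconds (e.g.
 * TG3_GRC_LCLCTL_PWRSW_DELAY when toggling GPIO power switches), per the
 * _tw32_flush() comment above.
 */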
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
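/* Both SRAM accessors above funnel through the same memory window: the
 * target offset is latched in TG3PCI_MEM_WIN_BASE_ADDR, the data moves
 * through TG3PCI_MEM_WIN_DATA, and the window is parked back at zero
 * afterwards.  indirect_lock serializes the multi-step sequence against
 * other users of the window.
 */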
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
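/* The APE lock is a simple request/grant handshake: the driver sets its
 * bit in the per-lock REQ register and polls the matching GRANT register
 * for up to 1 ms.  If another agent (e.g. the APE firmware) holds the
 * lock, the request is revoked by writing the bit back to the GRANT
 * register and -EBUSY is returned.
 */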
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
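/* Interrupts are gated per-vector through the interrupt mailbox: writing
 * 0x00000001 masks a vector (see tg3_disable_ints() above), while writing
 * last_tag << 24 unmasks it and simultaneously acknowledges all status
 * block updates up to that tag.
 */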
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
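/* An MI_COM frame encodes the PHY address and register number in the
 * upper bits and the 16-bit data in the low bits; MI_COM_START kicks off
 * the transaction and MI_COM_BUSY stays set until the serial MII shift
 * completes, hence the polling loops in tg3_readphy() above and
 * tg3_writephy() below.
 */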
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
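/* Note: every successful TG3_PHY_AUXCTL_SMDSP_ENABLE() must be paired
 * with a TG3_PHY_AUXCTL_SMDSP_DISABLE(); see e.g. tg3_phy_apply_otp()
 * below for the expected usage pattern.
 */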
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
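/* The resolution above follows standard 802.3x pause negotiation:
 *
 *	local PAUSE	local ASYM	remote PAUSE	remote ASYM	result
 *	    1		    x		     1		     x		TX + RX
 *	    0		    1		     1		     1		TX only
 *	    1		    1		     0		     1		RX only
 *	anything else						no flow control
 */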
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthrough */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
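/* Each OTP field above is isolated with a mask/shift pair and then
 * programmed into the corresponding PHY DSP tap via tg3_phydsp_write(),
 * with the whole sequence bracketed by the SMDSP enable/disable pair.
 */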
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
2296 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2298 static const u32 test_pat[4][6] = {
2299 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2300 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2301 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2302 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
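/* Each row above is a six-word pattern written to one of four DSP test
 * channels (addressed at chan * 0x2000 | 0x0200) and then read back; a
 * readback mismatch flags the PHY for another reset pass via *resetp.
 */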
2306 for (chan = 0; chan < 4; chan++) {
2309 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2310 (chan * 0x2000) | 0x0200);
2311 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2313 for (i = 0; i < 6; i++)
2314 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2317 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2318 if (tg3_wait_macro_done(tp)) {
2323 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2324 (chan * 0x2000) | 0x0200);
2325 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2326 if (tg3_wait_macro_done(tp)) {
2331 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2332 if (tg3_wait_macro_done(tp)) {
2337 for (i = 0; i < 6; i += 2) {
2340 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2341 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2342 tg3_wait_macro_done(tp)) {
2348 if (low != test_pat[chan][i] ||
2349 high != test_pat[chan][i+1]) {
2350 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2351 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2352 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2362 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2366 for (chan = 0; chan < 4; chan++) {
2369 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2370 (chan * 0x2000) | 0x0200);
2371 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2372 for (i = 0; i < 6; i++)
2373 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2374 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2375 if (tg3_wait_macro_done(tp))
2382 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2384 u32 reg32, phy9_orig;
2385 int retries, do_phy_reset, err;
2391 err = tg3_bmcr_reset(tp);
2397 /* Disable transmitter and interrupt. */
2398 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2402 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2404 /* Set full-duplex, 1000 mbps. */
2405 tg3_writephy(tp, MII_BMCR,
2406 BMCR_FULLDPLX | BMCR_SPEED1000);
2408 /* Set to master mode. */
2409 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2412 tg3_writephy(tp, MII_CTRL1000,
2413 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2415 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2419 /* Block the PHY control access. */
2420 tg3_phydsp_write(tp, 0x8005, 0x0800);
2422 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2425 } while (--retries);
2427 err = tg3_phy_reset_chanpat(tp);
2431 tg3_phydsp_write(tp, 0x8005, 0x0000);
2433 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2434 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2436 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2438 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2440 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2442 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2449 /* Reset the tigon3 PHY. */
2452 static int tg3_phy_reset(struct tg3 *tp)
2457 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2458 val = tr32(GRC_MISC_CFG);
2459 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
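/* MII_BMSR latches link-down events until read, so read it twice; the
 * second read returns the current link state.
 */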
2462 err = tg3_readphy(tp, MII_BMSR, &val);
2463 err |= tg3_readphy(tp, MII_BMSR, &val);
2467 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2468 netif_carrier_off(tp->dev);
2469 tg3_link_report(tp);
2472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2475 err = tg3_phy_reset_5703_4_5(tp);
2482 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2483 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2484 cpmuctrl = tr32(TG3_CPMU_CTRL);
2485 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2487 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2490 err = tg3_bmcr_reset(tp);
2494 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2495 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2496 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2498 tw32(TG3_CPMU_CTRL, cpmuctrl);
2501 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2502 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2503 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2504 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2505 CPMU_LSPD_1000MB_MACCLK_12_5) {
2506 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2508 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2512 if (tg3_flag(tp, 5717_PLUS) &&
2513 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2516 tg3_phy_apply_otp(tp);
2518 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2519 tg3_phy_toggle_apd(tp, true);
2521 tg3_phy_toggle_apd(tp, false);
2524 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2525 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2526 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2527 tg3_phydsp_write(tp, 0x000a, 0x0323);
2528 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2531 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2532 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2533 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2536 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2537 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2538 tg3_phydsp_write(tp, 0x000a, 0x310b);
2539 tg3_phydsp_write(tp, 0x201f, 0x9506);
2540 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2541 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2543 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2544 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2545 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2546 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2547 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2548 tg3_writephy(tp, MII_TG3_TEST1,
2549 MII_TG3_TEST1_TRIM_EN | 0x4);
2551 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2553 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2557 /* Set Extended packet length bit (bit 14) on all chips that
2558 * support jumbo frames. */
2559 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2560 /* Cannot do read-modify-write on 5401 */
2561 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2562 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2563 /* Set bit 14 with read-modify-write to preserve other bits */
2564 err = tg3_phy_auxctl_read(tp,
2565 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2567 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2568 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2571 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2572 * jumbo frame transmission.
2574 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2575 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2576 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2577 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2580 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2581 /* adjust output voltage */
2582 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2585 tg3_phy_toggle_automdix(tp, 1);
2586 tg3_phy_set_wirespeed(tp);
2590 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2591 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2592 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2593 TG3_GPIO_MSG_NEED_VAUX)
2594 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2595 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2596 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2597 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2598 (TG3_GPIO_MSG_DRVR_PRES << 12))
2600 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2601 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2602 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2603 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2604 (TG3_GPIO_MSG_NEED_VAUX << 12))
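/* Each PCI function owns a 4-bit field in the shared status word, so
 * the masks above replicate the two message bits at shifts 0, 4, 8 and
 * 12 (see tg3_set_function_status() below).
 */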
2606 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2612 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2614 status = tr32(TG3_CPMU_DRV_STATUS);
2616 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2617 status &= ~(TG3_GPIO_MSG_MASK << shift);
2618 status |= (newstat << shift);
2620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2621 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2622 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2624 tw32(TG3_CPMU_DRV_STATUS, status);
2626 return status >> TG3_APE_GPIO_MSG_SHIFT;
2629 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2631 if (!tg3_flag(tp, IS_NIC))
2634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2637 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2640 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2642 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2643 TG3_GRC_LCLCTL_PWRSW_DELAY);
2645 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2647 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2648 TG3_GRC_LCLCTL_PWRSW_DELAY);
2654 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2658 if (!tg3_flag(tp, IS_NIC) ||
2659 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2663 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2665 tw32_wait_f(GRC_LOCAL_CTRL,
2666 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2667 TG3_GRC_LCLCTL_PWRSW_DELAY);
2669 tw32_wait_f(GRC_LOCAL_CTRL,
2671 TG3_GRC_LCLCTL_PWRSW_DELAY);
2673 tw32_wait_f(GRC_LOCAL_CTRL,
2674 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2675 TG3_GRC_LCLCTL_PWRSW_DELAY);
2678 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2680 if (!tg3_flag(tp, IS_NIC))
2683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2684 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2685 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2686 (GRC_LCLCTRL_GPIO_OE0 |
2687 GRC_LCLCTRL_GPIO_OE1 |
2688 GRC_LCLCTRL_GPIO_OE2 |
2689 GRC_LCLCTRL_GPIO_OUTPUT0 |
2690 GRC_LCLCTRL_GPIO_OUTPUT1),
2691 TG3_GRC_LCLCTL_PWRSW_DELAY);
2692 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2693 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2694 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2695 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2696 GRC_LCLCTRL_GPIO_OE1 |
2697 GRC_LCLCTRL_GPIO_OE2 |
2698 GRC_LCLCTRL_GPIO_OUTPUT0 |
2699 GRC_LCLCTRL_GPIO_OUTPUT1 |
2701 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2702 TG3_GRC_LCLCTL_PWRSW_DELAY);
2704 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2705 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2706 TG3_GRC_LCLCTL_PWRSW_DELAY);
2708 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2709 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2710 TG3_GRC_LCLCTL_PWRSW_DELAY);
2713 u32 grc_local_ctrl = 0;
2715 /* Workaround to prevent excessive current draw. */
2716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2717 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2718 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2720 TG3_GRC_LCLCTL_PWRSW_DELAY);
2723 /* On 5753 and variants, GPIO2 cannot be used. */
2724 no_gpio2 = tp->nic_sram_data_cfg &
2725 NIC_SRAM_DATA_CFG_NO_GPIO2;
2727 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2728 GRC_LCLCTRL_GPIO_OE1 |
2729 GRC_LCLCTRL_GPIO_OE2 |
2730 GRC_LCLCTRL_GPIO_OUTPUT1 |
2731 GRC_LCLCTRL_GPIO_OUTPUT2;
2733 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2734 GRC_LCLCTRL_GPIO_OUTPUT2);
2736 tw32_wait_f(GRC_LOCAL_CTRL,
2737 tp->grc_local_ctrl | grc_local_ctrl,
2738 TG3_GRC_LCLCTL_PWRSW_DELAY);
2740 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2742 tw32_wait_f(GRC_LOCAL_CTRL,
2743 tp->grc_local_ctrl | grc_local_ctrl,
2744 TG3_GRC_LCLCTL_PWRSW_DELAY);
2747 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2748 tw32_wait_f(GRC_LOCAL_CTRL,
2749 tp->grc_local_ctrl | grc_local_ctrl,
2750 TG3_GRC_LCLCTL_PWRSW_DELAY);
2755 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2759 /* Serialize power state transitions */
2760 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2764 msg = TG3_GPIO_MSG_NEED_VAUX;
2766 msg = tg3_set_function_status(tp, msg);
2768 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2771 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2772 tg3_pwrsrc_switch_to_vaux(tp);
2774 tg3_pwrsrc_die_with_vmain(tp);
2777 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2780 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2782 bool need_vaux = false;
2784 /* The GPIOs do something completely different on 57765. */
2785 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2789 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2790 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2791 tg3_frob_aux_power_5717(tp, include_wol ?
2792 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2796 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2797 struct net_device *dev_peer;
2799 dev_peer = pci_get_drvdata(tp->pdev_peer);
2801 /* remove_one() may have been run on the peer. */
2803 struct tg3 *tp_peer = netdev_priv(dev_peer);
2805 if (tg3_flag(tp_peer, INIT_COMPLETE))
2808 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2809 tg3_flag(tp_peer, ENABLE_ASF))
2814 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2815 tg3_flag(tp, ENABLE_ASF))
2819 tg3_pwrsrc_switch_to_vaux(tp);
2821 tg3_pwrsrc_die_with_vmain(tp);
2824 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2826 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2828 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2829 if (speed != SPEED_10)
2831 } else if (speed == SPEED_10)
2837 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2841 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2842 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2843 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2844 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2847 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2848 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2849 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2856 val = tr32(GRC_MISC_CFG);
2857 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2860 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2862 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2865 tg3_writephy(tp, MII_ADVERTISE, 0);
2866 tg3_writephy(tp, MII_BMCR,
2867 BMCR_ANENABLE | BMCR_ANRESTART);
2869 tg3_writephy(tp, MII_TG3_FET_TEST,
2870 phytest | MII_TG3_FET_SHADOW_EN);
2871 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2872 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2874 MII_TG3_FET_SHDW_AUXMODE4,
2877 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2880 } else if (do_low_power) {
2881 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2882 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2884 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2885 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2886 MII_TG3_AUXCTL_PCTL_VREG_11V;
2887 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2890 /* The PHY should not be powered down on some chips because of bugs. */
2893 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2894 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2895 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2896 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2897 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2901 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2902 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2903 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2904 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2905 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2906 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2909 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2912 /* tp->lock is held. */
2913 static int tg3_nvram_lock(struct tg3 *tp)
2915 if (tg3_flag(tp, NVRAM)) {
2918 if (tp->nvram_lock_cnt == 0) {
2919 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2920 for (i = 0; i < 8000; i++) {
2921 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2926 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2930 tp->nvram_lock_cnt++;
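/* Lock requests nest: the SWARB grant is held until the matching number
 * of tg3_nvram_unlock() calls drops nvram_lock_cnt back to zero.
 */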
2935 /* tp->lock is held. */
2936 static void tg3_nvram_unlock(struct tg3 *tp)
2938 if (tg3_flag(tp, NVRAM)) {
2939 if (tp->nvram_lock_cnt > 0)
2940 tp->nvram_lock_cnt--;
2941 if (tp->nvram_lock_cnt == 0)
2942 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2946 /* tp->lock is held. */
2947 static void tg3_enable_nvram_access(struct tg3 *tp)
2949 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2950 u32 nvaccess = tr32(NVRAM_ACCESS);
2952 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2956 /* tp->lock is held. */
2957 static void tg3_disable_nvram_access(struct tg3 *tp)
2959 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2960 u32 nvaccess = tr32(NVRAM_ACCESS);
2962 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2966 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2967 u32 offset, u32 *val)
2972 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2975 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2976 EEPROM_ADDR_DEVID_MASK |
2978 tw32(GRC_EEPROM_ADDR,
2980 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2981 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2982 EEPROM_ADDR_ADDR_MASK) |
2983 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2985 for (i = 0; i < 1000; i++) {
2986 tmp = tr32(GRC_EEPROM_ADDR);
2988 if (tmp & EEPROM_ADDR_COMPLETE)
2992 if (!(tmp & EEPROM_ADDR_COMPLETE))
2995 tmp = tr32(GRC_EEPROM_DATA);
2998 * The data will always be opposite the native endian
2999 * format. Perform a blind byteswap to compensate.
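 */
/* Sketch of the elided tail, assuming the stock driver's behavior:
 * blind-swap the freshly read word, per the note above.
 */
*val = swab32(tmp);
return 0;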
3006 #define NVRAM_CMD_TIMEOUT 10000
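/* Assuming the elided poll loop delays ~10 usec per iteration (as the
 * stock driver does), this caps a single NVRAM command wait at ~100 ms.
 */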
3008 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3012 tw32(NVRAM_CMD, nvram_cmd);
3013 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3015 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3021 if (i == NVRAM_CMD_TIMEOUT)
3027 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3029 if (tg3_flag(tp, NVRAM) &&
3030 tg3_flag(tp, NVRAM_BUFFERED) &&
3031 tg3_flag(tp, FLASH) &&
3032 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3033 (tp->nvram_jedecnum == JEDEC_ATMEL))
3035 addr = ((addr / tp->nvram_pagesize) <<
3036 ATMEL_AT45DB0X1B_PAGE_POS) +
3037 (addr % tp->nvram_pagesize);
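/* AT45DB0x1B parts use 264-byte (non-power-of-two) pages, so the page
 * index sits above the in-page offset bits at ATMEL_AT45DB0X1B_PAGE_POS
 * rather than being packed linearly; hence the divide/modulo above.
 */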
3042 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3044 if (tg3_flag(tp, NVRAM) &&
3045 tg3_flag(tp, NVRAM_BUFFERED) &&
3046 tg3_flag(tp, FLASH) &&
3047 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3048 (tp->nvram_jedecnum == JEDEC_ATMEL))
3050 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3051 tp->nvram_pagesize) +
3052 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
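/* The exact inverse of tg3_nvram_phys_addr(): recover the linear offset
 * from a page-indexed physical address.
 */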
3057 /* NOTE: Data read in from NVRAM is byteswapped according to
3058 * the byteswapping settings for all other register accesses.
3059 * tg3 devices are BE devices, so on a BE machine, the data
3060 * returned will be exactly as it is seen in NVRAM. On a LE
3061 * machine, the 32-bit value will be byteswapped.
3063 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3067 if (!tg3_flag(tp, NVRAM))
3068 return tg3_nvram_read_using_eeprom(tp, offset, val);
3070 offset = tg3_nvram_phys_addr(tp, offset);
3072 if (offset > NVRAM_ADDR_MSK)
3075 ret = tg3_nvram_lock(tp);
3079 tg3_enable_nvram_access(tp);
3081 tw32(NVRAM_ADDR, offset);
3082 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3083 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3086 *val = tr32(NVRAM_RDDATA);
3088 tg3_disable_nvram_access(tp);
3090 tg3_nvram_unlock(tp);
3095 /* Ensures NVRAM data is in bytestream format. */
3096 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3099 int res = tg3_nvram_read(tp, offset, &v);
3101 *val = cpu_to_be32(v);
3105 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3106 u32 offset, u32 len, u8 *buf)
3111 for (i = 0; i < len; i += 4) {
3117 memcpy(&data, buf + i, 4);
3120 * The SEEPROM interface expects the data to always be opposite
3121 * the native endian format. We accomplish this by reversing
3122 * all the operations that would have been performed on the
3123 * data from a call to tg3_nvram_read_be32().
3125 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3127 val = tr32(GRC_EEPROM_ADDR);
3128 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3130 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3132 tw32(GRC_EEPROM_ADDR, val |
3133 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3134 (addr & EEPROM_ADDR_ADDR_MASK) |
3138 for (j = 0; j < 1000; j++) {
3139 val = tr32(GRC_EEPROM_ADDR);
3141 if (val & EEPROM_ADDR_COMPLETE)
3145 if (!(val & EEPROM_ADDR_COMPLETE)) {
3154 /* offset and length are dword aligned */
3155 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3159 u32 pagesize = tp->nvram_pagesize;
3160 u32 pagemask = pagesize - 1;
3164 tmp = kmalloc(pagesize, GFP_KERNEL);
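/* Unbuffered flash erases whole pages, so partial-page writes go
 * read-modify-write: read the page into tmp, merge the caller's bytes,
 * erase the page, then program it back word by word.
 */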
3170 u32 phy_addr, page_off, size;
3172 phy_addr = offset & ~pagemask;
3174 for (j = 0; j < pagesize; j += 4) {
3175 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3176 (__be32 *) (tmp + j));
3183 page_off = offset & pagemask;
3190 memcpy(tmp + page_off, buf, size);
3192 offset = offset + (pagesize - page_off);
3194 tg3_enable_nvram_access(tp);
3197 * Before we can erase the flash page, we need
3198 * to issue a special "write enable" command.
3200 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3202 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3205 /* Erase the target page */
3206 tw32(NVRAM_ADDR, phy_addr);
3208 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3209 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3211 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3214 /* Issue another write enable to start the write. */
3215 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3217 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3220 for (j = 0; j < pagesize; j += 4) {
3223 data = *((__be32 *) (tmp + j));
3225 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3227 tw32(NVRAM_ADDR, phy_addr + j);
3229 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3233 nvram_cmd |= NVRAM_CMD_FIRST;
3234 else if (j == (pagesize - 4))
3235 nvram_cmd |= NVRAM_CMD_LAST;
3237 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3245 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3246 tg3_nvram_exec_cmd(tp, nvram_cmd);
3253 /* offset and length are dword aligned */
3254 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3259 for (i = 0; i < len; i += 4, offset += 4) {
3260 u32 page_off, phy_addr, nvram_cmd;
3263 memcpy(&data, buf + i, 4);
3264 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3266 page_off = offset % tp->nvram_pagesize;
3268 phy_addr = tg3_nvram_phys_addr(tp, offset);
3270 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3272 if (page_off == 0 || i == 0)
3273 nvram_cmd |= NVRAM_CMD_FIRST;
3274 if (page_off == (tp->nvram_pagesize - 4))
3275 nvram_cmd |= NVRAM_CMD_LAST;
3278 nvram_cmd |= NVRAM_CMD_LAST;
3280 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3281 !tg3_flag(tp, FLASH) ||
3282 !tg3_flag(tp, 57765_PLUS))
3283 tw32(NVRAM_ADDR, phy_addr);
3285 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3286 !tg3_flag(tp, 5755_PLUS) &&
3287 (tp->nvram_jedecnum == JEDEC_ST) &&
3288 (nvram_cmd & NVRAM_CMD_FIRST)) {
3291 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3292 ret = tg3_nvram_exec_cmd(tp, cmd);
3296 if (!tg3_flag(tp, FLASH)) {
3297 /* We always do complete word writes to eeprom. */
3298 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3301 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3308 /* offset and length are dword aligned */
3309 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3313 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3314 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3315 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3319 if (!tg3_flag(tp, NVRAM)) {
3320 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3324 ret = tg3_nvram_lock(tp);
3328 tg3_enable_nvram_access(tp);
3329 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3330 tw32(NVRAM_WRITE1, 0x406);
3332 grc_mode = tr32(GRC_MODE);
3333 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3335 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3336 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3339 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3343 grc_mode = tr32(GRC_MODE);
3344 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3346 tg3_disable_nvram_access(tp);
3347 tg3_nvram_unlock(tp);
3350 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3351 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3358 #define RX_CPU_SCRATCH_BASE 0x30000
3359 #define RX_CPU_SCRATCH_SIZE 0x04000
3360 #define TX_CPU_SCRATCH_BASE 0x34000
3361 #define TX_CPU_SCRATCH_SIZE 0x04000
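/* Each embedded CPU gets a 16 KB scratch window: RX CPU at 0x30000,
 * TX CPU immediately after it at 0x34000.
 */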
3363 /* tp->lock is held. */
3364 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3368 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3370 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3371 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3373 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3376 if (offset == RX_CPU_BASE) {
3377 for (i = 0; i < 10000; i++) {
3378 tw32(offset + CPU_STATE, 0xffffffff);
3379 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3380 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3384 tw32(offset + CPU_STATE, 0xffffffff);
3385 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3388 for (i = 0; i < 10000; i++) {
3389 tw32(offset + CPU_STATE, 0xffffffff);
3390 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3391 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3397 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3398 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3402 /* Clear firmware's nvram arbitration. */
3403 if (tg3_flag(tp, NVRAM))
3404 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3409 unsigned int fw_base;
3410 unsigned int fw_len;
3411 const __be32 *fw_data;
3414 /* tp->lock is held. */
3415 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3416 u32 cpu_scratch_base, int cpu_scratch_size,
3417 struct fw_info *info)
3419 int err, lock_err, i;
3420 void (*write_op)(struct tg3 *, u32, u32);
3422 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3424 "%s: Trying to load TX cpu firmware which is 5705\n",
3429 if (tg3_flag(tp, 5705_PLUS))
3430 write_op = tg3_write_mem;
3432 write_op = tg3_write_indirect_reg32;
3434 /* It is possible that bootcode is still loading at this point.
3435 * Take the NVRAM lock before halting the CPU.
3437 lock_err = tg3_nvram_lock(tp);
3438 err = tg3_halt_cpu(tp, cpu_base);
3440 tg3_nvram_unlock(tp);
3444 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3445 write_op(tp, cpu_scratch_base + i, 0);
3446 tw32(cpu_base + CPU_STATE, 0xffffffff);
3447 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3448 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3449 write_op(tp, (cpu_scratch_base +
3450 (info->fw_base & 0xffff) +
3452 be32_to_cpu(info->fw_data[i]));
3460 /* tp->lock is held. */
3461 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3463 struct fw_info info;
3464 const __be32 *fw_data;
3467 fw_data = (void *)tp->fw->data;
3469 /* Firmware blob starts with version numbers, followed by
3470 * start address and length. We use the complete length:
3471 * length = end_address_of_bss - start_address_of_text.
3472 * The remainder is the blob, loaded contiguously
3473 * from the start address. */
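/* Sketch of the assumed header layout, in __be32 words:
 *   fw_data[0]   = version
 *   fw_data[1]   = load (start) address
 *   fw_data[2]   = nominal length (the driver derives fw_len from the
 *                  file size instead)
 *   fw_data[3..] = the image itself
 */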
3475 info.fw_base = be32_to_cpu(fw_data[1]);
3476 info.fw_len = tp->fw->size - 12;
3477 info.fw_data = &fw_data[3];
3479 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3480 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3485 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3486 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3491 /* Now startup only the RX cpu. */
3492 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3493 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3495 for (i = 0; i < 5; i++) {
3496 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3498 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3499 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3500 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3504 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3505 "should be %08x\n", __func__,
3506 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3509 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3510 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3515 /* tp->lock is held. */
3516 static int tg3_load_tso_firmware(struct tg3 *tp)
3518 struct fw_info info;
3519 const __be32 *fw_data;
3520 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3523 if (tg3_flag(tp, HW_TSO_1) ||
3524 tg3_flag(tp, HW_TSO_2) ||
3525 tg3_flag(tp, HW_TSO_3))
3528 fw_data = (void *)tp->fw->data;
3530 /* Firmware blob starts with version numbers, followed by
3531 * start address and length. We use the complete length:
3532 * length = end_address_of_bss - start_address_of_text.
3533 * The remainder is the blob, loaded contiguously
3534 * from the start address. */
3536 info.fw_base = be32_to_cpu(fw_data[1]);
3537 cpu_scratch_size = tp->fw_len;
3538 info.fw_len = tp->fw->size - 12;
3539 info.fw_data = &fw_data[3];
3541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3542 cpu_base = RX_CPU_BASE;
3543 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3545 cpu_base = TX_CPU_BASE;
3546 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3547 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3550 err = tg3_load_firmware_cpu(tp, cpu_base,
3551 cpu_scratch_base, cpu_scratch_size,
3556 /* Now startup the cpu. */
3557 tw32(cpu_base + CPU_STATE, 0xffffffff);
3558 tw32_f(cpu_base + CPU_PC, info.fw_base);
3560 for (i = 0; i < 5; i++) {
3561 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3563 tw32(cpu_base + CPU_STATE, 0xffffffff);
3564 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3565 tw32_f(cpu_base + CPU_PC, info.fw_base);
3570 "%s fails to set CPU PC, is %08x should be %08x\n",
3571 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3574 tw32(cpu_base + CPU_STATE, 0xffffffff);
3575 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3580 /* tp->lock is held. */
3581 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3583 u32 addr_high, addr_low;
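/* Each hardware MAC address slot is split across two registers: a
 * 16-bit high word holding bytes 0-1 and a 32-bit low word holding
 * bytes 2-5.
 */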
3586 addr_high = ((tp->dev->dev_addr[0] << 8) |
3587 tp->dev->dev_addr[1]);
3588 addr_low = ((tp->dev->dev_addr[2] << 24) |
3589 (tp->dev->dev_addr[3] << 16) |
3590 (tp->dev->dev_addr[4] << 8) |
3591 (tp->dev->dev_addr[5] << 0));
3592 for (i = 0; i < 4; i++) {
3593 if (i == 1 && skip_mac_1)
3595 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3596 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3599 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3600 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3601 for (i = 0; i < 12; i++) {
3602 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3603 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3607 addr_high = (tp->dev->dev_addr[0] +
3608 tp->dev->dev_addr[1] +
3609 tp->dev->dev_addr[2] +
3610 tp->dev->dev_addr[3] +
3611 tp->dev->dev_addr[4] +
3612 tp->dev->dev_addr[5]) &
3613 TX_BACKOFF_SEED_MASK;
3614 tw32(MAC_TX_BACKOFF_SEED, addr_high);
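/* Seeding the transmit backoff generator with the byte sum of the MAC
 * address keeps half-duplex collision backoff from staying in lockstep
 * across otherwise identically-behaving NICs.
 */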
3617 static void tg3_enable_register_access(struct tg3 *tp)
3620 * Make sure register accesses (indirect or otherwise) will function correctly.
3623 pci_write_config_dword(tp->pdev,
3624 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3627 static int tg3_power_up(struct tg3 *tp)
3631 tg3_enable_register_access(tp);
3633 err = pci_set_power_state(tp->pdev, PCI_D0);
3635 /* Switch out of Vaux if it is a NIC */
3636 tg3_pwrsrc_switch_to_vmain(tp);
3638 netdev_err(tp->dev, "Transition to D0 failed\n");
3644 static int tg3_setup_phy(struct tg3 *, int);
3646 static int tg3_power_down_prepare(struct tg3 *tp)
3649 bool device_should_wake, do_low_power;
3651 tg3_enable_register_access(tp);
3653 /* Restore the CLKREQ setting. */
3654 if (tg3_flag(tp, CLKREQ_BUG))
3655 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3656 PCI_EXP_LNKCTL_CLKREQ_EN);
3658 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3659 tw32(TG3PCI_MISC_HOST_CTRL,
3660 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3662 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3663 tg3_flag(tp, WOL_ENABLE);
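/* WOL is armed only if both the PCI core allows this device to wake
 * the system and the driver's own WOL_ENABLE flag is set.
 */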
3665 if (tg3_flag(tp, USE_PHYLIB)) {
3666 do_low_power = false;
3667 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3668 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3669 struct phy_device *phydev;
3670 u32 phyid, advertising;
3672 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3674 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3676 tp->link_config.speed = phydev->speed;
3677 tp->link_config.duplex = phydev->duplex;
3678 tp->link_config.autoneg = phydev->autoneg;
3679 tp->link_config.advertising = phydev->advertising;
3681 advertising = ADVERTISED_TP |
3683 ADVERTISED_Autoneg |
3684 ADVERTISED_10baseT_Half;
3686 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3687 if (tg3_flag(tp, WOL_SPEED_100MB))
3689 ADVERTISED_100baseT_Half |
3690 ADVERTISED_100baseT_Full |
3691 ADVERTISED_10baseT_Full;
3693 advertising |= ADVERTISED_10baseT_Full;
3696 phydev->advertising = advertising;
3698 phy_start_aneg(phydev);
3700 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3701 if (phyid != PHY_ID_BCMAC131) {
3702 phyid &= PHY_BCM_OUI_MASK;
3703 if (phyid == PHY_BCM_OUI_1 ||
3704 phyid == PHY_BCM_OUI_2 ||
3705 phyid == PHY_BCM_OUI_3)
3706 do_low_power = true;
3710 do_low_power = true;
3712 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3713 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3715 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3716 tg3_setup_phy(tp, 0);
3719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3722 val = tr32(GRC_VCPU_EXT_CTRL);
3723 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3724 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3728 for (i = 0; i < 200; i++) {
3729 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3730 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3735 if (tg3_flag(tp, WOL_CAP))
3736 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3737 WOL_DRV_STATE_SHUTDOWN |
3741 if (device_should_wake) {
3744 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3746 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3747 tg3_phy_auxctl_write(tp,
3748 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3749 MII_TG3_AUXCTL_PCTL_WOL_EN |
3750 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3751 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3755 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3756 mac_mode = MAC_MODE_PORT_MODE_GMII;
3758 mac_mode = MAC_MODE_PORT_MODE_MII;
3760 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3761 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3763 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3764 SPEED_100 : SPEED_10;
3765 if (tg3_5700_link_polarity(tp, speed))
3766 mac_mode |= MAC_MODE_LINK_POLARITY;
3768 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3771 mac_mode = MAC_MODE_PORT_MODE_TBI;
3774 if (!tg3_flag(tp, 5750_PLUS))
3775 tw32(MAC_LED_CTRL, tp->led_ctrl);
3777 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3778 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3779 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3780 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3782 if (tg3_flag(tp, ENABLE_APE))
3783 mac_mode |= MAC_MODE_APE_TX_EN |
3784 MAC_MODE_APE_RX_EN |
3785 MAC_MODE_TDE_ENABLE;
3787 tw32_f(MAC_MODE, mac_mode);
3790 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3794 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3795 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3796 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3799 base_val = tp->pci_clock_ctrl;
3800 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3801 CLOCK_CTRL_TXCLK_DISABLE);
3803 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3804 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3805 } else if (tg3_flag(tp, 5780_CLASS) ||
3806 tg3_flag(tp, CPMU_PRESENT) ||
3807 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3809 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3810 u32 newbits1, newbits2;
3812 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3813 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3814 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3815 CLOCK_CTRL_TXCLK_DISABLE |
3817 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3818 } else if (tg3_flag(tp, 5705_PLUS)) {
3819 newbits1 = CLOCK_CTRL_625_CORE;
3820 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3822 newbits1 = CLOCK_CTRL_ALTCLK;
3823 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3826 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3829 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3832 if (!tg3_flag(tp, 5705_PLUS)) {
3835 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3836 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3837 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3838 CLOCK_CTRL_TXCLK_DISABLE |
3839 CLOCK_CTRL_44MHZ_CORE);
3841 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3844 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3845 tp->pci_clock_ctrl | newbits3, 40);
3849 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3850 tg3_power_down_phy(tp, do_low_power);
3852 tg3_frob_aux_power(tp, true);
3854 /* Workaround for unstable PLL clock */
3855 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3856 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3857 u32 val = tr32(0x7d00);
3859 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3861 if (!tg3_flag(tp, ENABLE_ASF)) {
3864 err = tg3_nvram_lock(tp);
3865 tg3_halt_cpu(tp, RX_CPU_BASE);
3867 tg3_nvram_unlock(tp);
3871 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3876 static void tg3_power_down(struct tg3 *tp)
3878 tg3_power_down_prepare(tp);
3880 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3881 pci_set_power_state(tp->pdev, PCI_D3hot);
3884 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3886 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3887 case MII_TG3_AUX_STAT_10HALF:
3889 *duplex = DUPLEX_HALF;
3892 case MII_TG3_AUX_STAT_10FULL:
3894 *duplex = DUPLEX_FULL;
3897 case MII_TG3_AUX_STAT_100HALF:
3899 *duplex = DUPLEX_HALF;
3902 case MII_TG3_AUX_STAT_100FULL:
3904 *duplex = DUPLEX_FULL;
3907 case MII_TG3_AUX_STAT_1000HALF:
3908 *speed = SPEED_1000;
3909 *duplex = DUPLEX_HALF;
3912 case MII_TG3_AUX_STAT_1000FULL:
3913 *speed = SPEED_1000;
3914 *duplex = DUPLEX_FULL;
3918 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3919 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3921 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3925 *speed = SPEED_UNKNOWN;
3926 *duplex = DUPLEX_UNKNOWN;
3931 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3936 new_adv = ADVERTISE_CSMA;
3937 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3938 new_adv |= mii_advertise_flowctrl(flowctrl);
3940 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3944 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3945 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3947 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3948 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3949 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3951 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3956 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3959 tw32(TG3_CPMU_EEE_MODE,
3960 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3962 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3967 /* Advertise 100-BaseTX EEE ability */
3968 if (advertise & ADVERTISED_100baseT_Full)
3969 val |= MDIO_AN_EEE_ADV_100TX;
3970 /* Advertise 1000-BaseT EEE ability */
3971 if (advertise & ADVERTISED_1000baseT_Full)
3972 val |= MDIO_AN_EEE_ADV_1000T;
3973 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3977 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3979 case ASIC_REV_57765:
3980 case ASIC_REV_57766:
3982 /* If we advertised any EEE abilities above... */
3984 val = MII_TG3_DSP_TAP26_ALNOKO |
3985 MII_TG3_DSP_TAP26_RMRXSTO |
3986 MII_TG3_DSP_TAP26_OPCSINPT;
3987 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3990 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3991 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3992 MII_TG3_DSP_CH34TP2_HIBW01);
3995 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4004 static void tg3_phy_copper_begin(struct tg3 *tp)
4006 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4007 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4010 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4011 adv = ADVERTISED_10baseT_Half |
4012 ADVERTISED_10baseT_Full;
4013 if (tg3_flag(tp, WOL_SPEED_100MB))
4014 adv |= ADVERTISED_100baseT_Half |
4015 ADVERTISED_100baseT_Full;
4017 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4019 adv = tp->link_config.advertising;
4020 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4021 adv &= ~(ADVERTISED_1000baseT_Half |
4022 ADVERTISED_1000baseT_Full);
4024 fc = tp->link_config.flowctrl;
4027 tg3_phy_autoneg_cfg(tp, adv, fc);
4029 tg3_writephy(tp, MII_BMCR,
4030 BMCR_ANENABLE | BMCR_ANRESTART);
4033 u32 bmcr, orig_bmcr;
4035 tp->link_config.active_speed = tp->link_config.speed;
4036 tp->link_config.active_duplex = tp->link_config.duplex;
4039 switch (tp->link_config.speed) {
4045 bmcr |= BMCR_SPEED100;
4049 bmcr |= BMCR_SPEED1000;
4053 if (tp->link_config.duplex == DUPLEX_FULL)
4054 bmcr |= BMCR_FULLDPLX;
4056 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4057 (bmcr != orig_bmcr)) {
4058 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4059 for (i = 0; i < 1500; i++) {
4063 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4064 tg3_readphy(tp, MII_BMSR, &tmp))
4066 if (!(tmp & BMSR_LSTATUS)) {
4071 tg3_writephy(tp, MII_BMCR, bmcr);
4077 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4081 /* Turn off tap power management. */
4082 /* Set Extended packet length bit */
4083 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4085 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4086 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4087 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4088 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4089 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4096 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4098 u32 advmsk, tgtadv, advertising;
4100 advertising = tp->link_config.advertising;
4101 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4103 advmsk = ADVERTISE_ALL;
4104 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4105 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4106 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4109 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4112 if ((*lcladv & advmsk) != tgtadv)
4115 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4118 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4120 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4124 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4125 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4126 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4127 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4128 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4130 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4133 if (tg3_ctrl != tgtadv)
4140 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4144 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4147 if (tg3_readphy(tp, MII_STAT1000, &val))
4150 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4153 if (tg3_readphy(tp, MII_LPA, rmtadv))
4156 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4157 tp->link_config.rmt_adv = lpeth;
4162 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4164 int current_link_up;
4166 u32 lcl_adv, rmt_adv;
4174 (MAC_STATUS_SYNC_CHANGED |
4175 MAC_STATUS_CFG_CHANGED |
4176 MAC_STATUS_MI_COMPLETION |
4177 MAC_STATUS_LNKSTATE_CHANGED));
4180 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4182 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4186 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4188 /* Some third-party PHYs need to be reset on link going down. */
4191 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4194 netif_carrier_ok(tp->dev)) {
4195 tg3_readphy(tp, MII_BMSR, &bmsr);
4196 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4197 !(bmsr & BMSR_LSTATUS))
4203 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4204 tg3_readphy(tp, MII_BMSR, &bmsr);
4205 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4206 !tg3_flag(tp, INIT_COMPLETE))
4209 if (!(bmsr & BMSR_LSTATUS)) {
4210 err = tg3_init_5401phy_dsp(tp);
4214 tg3_readphy(tp, MII_BMSR, &bmsr);
4215 for (i = 0; i < 1000; i++) {
4217 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4218 (bmsr & BMSR_LSTATUS)) {
4224 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4225 TG3_PHY_REV_BCM5401_B0 &&
4226 !(bmsr & BMSR_LSTATUS) &&
4227 tp->link_config.active_speed == SPEED_1000) {
4228 err = tg3_phy_reset(tp);
4230 err = tg3_init_5401phy_dsp(tp);
4235 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4236 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4237 /* 5701 {A0,B0} CRC bug workaround */
4238 tg3_writephy(tp, 0x15, 0x0a75);
4239 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4240 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4241 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4244 /* Clear pending interrupts... */
4245 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4246 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4248 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4249 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4250 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4251 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4253 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4254 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4255 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4256 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4257 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4259 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4262 current_link_up = 0;
4263 current_speed = SPEED_UNKNOWN;
4264 current_duplex = DUPLEX_UNKNOWN;
4265 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4266 tp->link_config.rmt_adv = 0;
4268 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4269 err = tg3_phy_auxctl_read(tp,
4270 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4272 if (!err && !(val & (1 << 10))) {
4273 tg3_phy_auxctl_write(tp,
4274 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4281 for (i = 0; i < 100; i++) {
4282 tg3_readphy(tp, MII_BMSR, &bmsr);
4283 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4284 (bmsr & BMSR_LSTATUS))
4289 if (bmsr & BMSR_LSTATUS) {
4292 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4293 for (i = 0; i < 2000; i++) {
4295 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4300 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4305 for (i = 0; i < 200; i++) {
4306 tg3_readphy(tp, MII_BMCR, &bmcr);
4307 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4309 if (bmcr && bmcr != 0x7fff)
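/* A BMCR of 0 or 0x7fff is treated as a bogus/transient read (PHY
 * absent or still mid-reset); keep polling until a plausible value
 * appears.
 */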
4317 tp->link_config.active_speed = current_speed;
4318 tp->link_config.active_duplex = current_duplex;
4320 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4321 if ((bmcr & BMCR_ANENABLE) &&
4322 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4323 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4324 current_link_up = 1;
4326 if (!(bmcr & BMCR_ANENABLE) &&
4327 tp->link_config.speed == current_speed &&
4328 tp->link_config.duplex == current_duplex &&
4329 tp->link_config.flowctrl ==
4330 tp->link_config.active_flowctrl) {
4331 current_link_up = 1;
4335 if (current_link_up == 1 &&
4336 tp->link_config.active_duplex == DUPLEX_FULL) {
4339 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4340 reg = MII_TG3_FET_GEN_STAT;
4341 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4343 reg = MII_TG3_EXT_STAT;
4344 bit = MII_TG3_EXT_STAT_MDIX;
4347 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4348 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4350 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4355 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4356 tg3_phy_copper_begin(tp);
4358 tg3_readphy(tp, MII_BMSR, &bmsr);
4359 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4360 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4361 current_link_up = 1;
4364 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4365 if (current_link_up == 1) {
4366 if (tp->link_config.active_speed == SPEED_100 ||
4367 tp->link_config.active_speed == SPEED_10)
4368 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4370 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4371 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4372 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4374 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4376 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4377 if (tp->link_config.active_duplex == DUPLEX_HALF)
4378 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4381 if (current_link_up == 1 &&
4382 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4383 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4385 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4388 /* ??? Without this setting Netgear GA302T PHY does not
4389 * ??? send/receive packets...
4391 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4392 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4393 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4394 tw32_f(MAC_MI_MODE, tp->mi_mode);
4398 tw32_f(MAC_MODE, tp->mac_mode);
4401 tg3_phy_eee_adjust(tp, current_link_up);
4403 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4404 /* Polled via timer. */
4405 tw32_f(MAC_EVENT, 0);
4407 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4412 current_link_up == 1 &&
4413 tp->link_config.active_speed == SPEED_1000 &&
4414 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4417 (MAC_STATUS_SYNC_CHANGED |
4418 MAC_STATUS_CFG_CHANGED));
4421 NIC_SRAM_FIRMWARE_MBOX,
4422 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4425 /* Prevent send BD corruption. */
4426 if (tg3_flag(tp, CLKREQ_BUG)) {
4427 if (tp->link_config.active_speed == SPEED_100 ||
4428 tp->link_config.active_speed == SPEED_10)
4429 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4430 PCI_EXP_LNKCTL_CLKREQ_EN);
4432 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4433 PCI_EXP_LNKCTL_CLKREQ_EN);
4436 if (current_link_up != netif_carrier_ok(tp->dev)) {
4437 if (current_link_up)
4438 netif_carrier_on(tp->dev);
4440 netif_carrier_off(tp->dev);
4441 tg3_link_report(tp);
4447 struct tg3_fiber_aneginfo {
4449 #define ANEG_STATE_UNKNOWN 0
4450 #define ANEG_STATE_AN_ENABLE 1
4451 #define ANEG_STATE_RESTART_INIT 2
4452 #define ANEG_STATE_RESTART 3
4453 #define ANEG_STATE_DISABLE_LINK_OK 4
4454 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4455 #define ANEG_STATE_ABILITY_DETECT 6
4456 #define ANEG_STATE_ACK_DETECT_INIT 7
4457 #define ANEG_STATE_ACK_DETECT 8
4458 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4459 #define ANEG_STATE_COMPLETE_ACK 10
4460 #define ANEG_STATE_IDLE_DETECT_INIT 11
4461 #define ANEG_STATE_IDLE_DETECT 12
4462 #define ANEG_STATE_LINK_OK 13
4463 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4464 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4467 #define MR_AN_ENABLE 0x00000001
4468 #define MR_RESTART_AN 0x00000002
4469 #define MR_AN_COMPLETE 0x00000004
4470 #define MR_PAGE_RX 0x00000008
4471 #define MR_NP_LOADED 0x00000010
4472 #define MR_TOGGLE_TX 0x00000020
4473 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4474 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4475 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4476 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4477 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4478 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4479 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4480 #define MR_TOGGLE_RX 0x00002000
4481 #define MR_NP_RX 0x00004000
4483 #define MR_LINK_OK 0x80000000
4485 unsigned long link_time, cur_time;
4487 u32 ability_match_cfg;
4488 int ability_match_count;
4490 char ability_match, idle_match, ack_match;
4492 u32 txconfig, rxconfig;
4493 #define ANEG_CFG_NP 0x00000080
4494 #define ANEG_CFG_ACK 0x00000040
4495 #define ANEG_CFG_RF2 0x00000020
4496 #define ANEG_CFG_RF1 0x00000010
4497 #define ANEG_CFG_PS2 0x00000001
4498 #define ANEG_CFG_PS1 0x00008000
4499 #define ANEG_CFG_HD 0x00004000
4500 #define ANEG_CFG_FD 0x00002000
4501 #define ANEG_CFG_INVAL 0x00001f06
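/* These mirror the IEEE 802.3 Clause 37 base-page ability bits as they
 * sit in the MAC_TX_AUTO_NEG/MAC_RX_AUTO_NEG registers, i.e. byte-swapped
 * relative to the wire word (Clause 37 bit n lands at bit (n + 8) mod 16);
 * ANEG_CFG_INVAL covers reserved positions that must read as zero.
 */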
4506 #define ANEG_TIMER_ENAB 2
4507 #define ANEG_FAILED -1
4509 #define ANEG_STATE_SETTLE_TIME 10000
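/* In usec ticks: 10 ms matches the Clause 37 link_timer interval,
 * assuming cur_time advances one tick per pass through fiber_autoneg()
 * below.
 */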
4511 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4512 struct tg3_fiber_aneginfo *ap)
4515 unsigned long delta;
4519 if (ap->state == ANEG_STATE_UNKNOWN) {
4523 ap->ability_match_cfg = 0;
4524 ap->ability_match_count = 0;
4525 ap->ability_match = 0;
4531 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4532 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4534 if (rx_cfg_reg != ap->ability_match_cfg) {
4535 ap->ability_match_cfg = rx_cfg_reg;
4536 ap->ability_match = 0;
4537 ap->ability_match_count = 0;
4539 if (++ap->ability_match_count > 1) {
4540 ap->ability_match = 1;
4541 ap->ability_match_cfg = rx_cfg_reg;
4544 if (rx_cfg_reg & ANEG_CFG_ACK)
4552 ap->ability_match_cfg = 0;
4553 ap->ability_match_count = 0;
4554 ap->ability_match = 0;
4560 ap->rxconfig = rx_cfg_reg;
4563 switch (ap->state) {
4564 case ANEG_STATE_UNKNOWN:
4565 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4566 ap->state = ANEG_STATE_AN_ENABLE;
4569 case ANEG_STATE_AN_ENABLE:
4570 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4571 if (ap->flags & MR_AN_ENABLE) {
4574 ap->ability_match_cfg = 0;
4575 ap->ability_match_count = 0;
4576 ap->ability_match = 0;
4580 ap->state = ANEG_STATE_RESTART_INIT;
4582 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4586 case ANEG_STATE_RESTART_INIT:
4587 ap->link_time = ap->cur_time;
4588 ap->flags &= ~(MR_NP_LOADED);
4590 tw32(MAC_TX_AUTO_NEG, 0);
4591 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4592 tw32_f(MAC_MODE, tp->mac_mode);
4595 ret = ANEG_TIMER_ENAB;
4596 ap->state = ANEG_STATE_RESTART;
4599 case ANEG_STATE_RESTART:
4600 delta = ap->cur_time - ap->link_time;
4601 if (delta > ANEG_STATE_SETTLE_TIME)
4602 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4604 ret = ANEG_TIMER_ENAB;
4607 case ANEG_STATE_DISABLE_LINK_OK:
4611 case ANEG_STATE_ABILITY_DETECT_INIT:
4612 ap->flags &= ~(MR_TOGGLE_TX);
4613 ap->txconfig = ANEG_CFG_FD;
4614 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4615 if (flowctrl & ADVERTISE_1000XPAUSE)
4616 ap->txconfig |= ANEG_CFG_PS1;
4617 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4618 ap->txconfig |= ANEG_CFG_PS2;
4619 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4620 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4621 tw32_f(MAC_MODE, tp->mac_mode);
4624 ap->state = ANEG_STATE_ABILITY_DETECT;
4627 case ANEG_STATE_ABILITY_DETECT:
4628 if (ap->ability_match != 0 && ap->rxconfig != 0)
4629 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4632 case ANEG_STATE_ACK_DETECT_INIT:
4633 ap->txconfig |= ANEG_CFG_ACK;
4634 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4635 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4636 tw32_f(MAC_MODE, tp->mac_mode);
4639 ap->state = ANEG_STATE_ACK_DETECT;
4642 case ANEG_STATE_ACK_DETECT:
4643 if (ap->ack_match != 0) {
4644 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4645 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4646 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4648 ap->state = ANEG_STATE_AN_ENABLE;
4650 } else if (ap->ability_match != 0 &&
4651 ap->rxconfig == 0) {
4652 ap->state = ANEG_STATE_AN_ENABLE;
4656 case ANEG_STATE_COMPLETE_ACK_INIT:
4657 if (ap->rxconfig & ANEG_CFG_INVAL) {
4661 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4662 MR_LP_ADV_HALF_DUPLEX |
4663 MR_LP_ADV_SYM_PAUSE |
4664 MR_LP_ADV_ASYM_PAUSE |
4665 MR_LP_ADV_REMOTE_FAULT1 |
4666 MR_LP_ADV_REMOTE_FAULT2 |
4667 MR_LP_ADV_NEXT_PAGE |
4670 if (ap->rxconfig & ANEG_CFG_FD)
4671 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4672 if (ap->rxconfig & ANEG_CFG_HD)
4673 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4674 if (ap->rxconfig & ANEG_CFG_PS1)
4675 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4676 if (ap->rxconfig & ANEG_CFG_PS2)
4677 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4678 if (ap->rxconfig & ANEG_CFG_RF1)
4679 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4680 if (ap->rxconfig & ANEG_CFG_RF2)
4681 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4682 if (ap->rxconfig & ANEG_CFG_NP)
4683 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4685 ap->link_time = ap->cur_time;
4687 ap->flags ^= (MR_TOGGLE_TX);
4688 if (ap->rxconfig & 0x0008)
4689 ap->flags |= MR_TOGGLE_RX;
4690 if (ap->rxconfig & ANEG_CFG_NP)
4691 ap->flags |= MR_NP_RX;
4692 ap->flags |= MR_PAGE_RX;
4694 ap->state = ANEG_STATE_COMPLETE_ACK;
4695 ret = ANEG_TIMER_ENAB;
4698 case ANEG_STATE_COMPLETE_ACK:
4699 if (ap->ability_match != 0 &&
4700 ap->rxconfig == 0) {
4701 ap->state = ANEG_STATE_AN_ENABLE;
4704 delta = ap->cur_time - ap->link_time;
4705 if (delta > ANEG_STATE_SETTLE_TIME) {
4706 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4707 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4709 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4710 !(ap->flags & MR_NP_RX)) {
4711 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4719 case ANEG_STATE_IDLE_DETECT_INIT:
4720 ap->link_time = ap->cur_time;
4721 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4722 tw32_f(MAC_MODE, tp->mac_mode);
4725 ap->state = ANEG_STATE_IDLE_DETECT;
4726 ret = ANEG_TIMER_ENAB;
4729 case ANEG_STATE_IDLE_DETECT:
4730 if (ap->ability_match != 0 &&
4731 ap->rxconfig == 0) {
4732 ap->state = ANEG_STATE_AN_ENABLE;
4735 delta = ap->cur_time - ap->link_time;
4736 if (delta > ANEG_STATE_SETTLE_TIME) {
4737 /* XXX another gem from the Broadcom driver :( */
4738 ap->state = ANEG_STATE_LINK_OK;
4742 case ANEG_STATE_LINK_OK:
4743 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4747 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4748 /* ??? unimplemented */
4751 case ANEG_STATE_NEXT_PAGE_WAIT:
4752 /* ??? unimplemented */
4763 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4766 struct tg3_fiber_aneginfo aninfo;
4767 int status = ANEG_FAILED;
4771 tw32_f(MAC_TX_AUTO_NEG, 0);
4773 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4774 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4777 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4780 memset(&aninfo, 0, sizeof(aninfo));
4781 aninfo.flags |= MR_AN_ENABLE;
4782 aninfo.state = ANEG_STATE_UNKNOWN;
4783 aninfo.cur_time = 0;
4785 while (++tick < 195000) {
4786 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4787 if (status == ANEG_DONE || status == ANEG_FAILED)
4793 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4794 tw32_f(MAC_MODE, tp->mac_mode);
4797 *txflags = aninfo.txconfig;
4798 *rxflags = aninfo.flags;
4800 if (status == ANEG_DONE &&
4801 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4802 MR_LP_ADV_FULL_DUPLEX)))
4808 static void tg3_init_bcm8002(struct tg3 *tp)
4810 u32 mac_status = tr32(MAC_STATUS);
4813 /* Reset when initializing the first time or when we have a link. */
4814 if (tg3_flag(tp, INIT_COMPLETE) &&
4815 !(mac_status & MAC_STATUS_PCS_SYNCED))
4818 /* Set PLL lock range. */
4819 tg3_writephy(tp, 0x16, 0x8007);
4822 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4824 /* Wait for reset to complete. */
4825 /* XXX schedule_timeout() ... */
4826 for (i = 0; i < 500; i++)
4829 /* Config mode; select PMA/Ch 1 regs. */
4830 tg3_writephy(tp, 0x10, 0x8411);
4832 /* Enable auto-lock and comdet, select txclk for tx. */
4833 tg3_writephy(tp, 0x11, 0x0a10);
4835 tg3_writephy(tp, 0x18, 0x00a0);
4836 tg3_writephy(tp, 0x16, 0x41ff);
4838 /* Assert and deassert POR. */
4839 tg3_writephy(tp, 0x13, 0x0400);
4841 tg3_writephy(tp, 0x13, 0x0000);
4843 tg3_writephy(tp, 0x11, 0x0a50);
4845 tg3_writephy(tp, 0x11, 0x0a10);
4847 /* Wait for signal to stabilize */
4848 /* XXX schedule_timeout() ... */
4849 for (i = 0; i < 15000; i++)
4852 /* Deselect the channel register so we can read the PHYID
4855 tg3_writephy(tp, 0x10, 0x8011);
4858 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4861 u32 sg_dig_ctrl, sg_dig_status;
4862 u32 serdes_cfg, expected_sg_dig_ctrl;
4863 int workaround, port_a;
4864 int current_link_up;
4867 expected_sg_dig_ctrl = 0;
4870 current_link_up = 0;
4872 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4873 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4875 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4878 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4879 /* preserve bits 20-23 for voltage regulator */
4880 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4883 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4885 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4886 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4888 u32 val = serdes_cfg;
4894 tw32_f(MAC_SERDES_CFG, val);
4897 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4899 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4900 tg3_setup_flow_control(tp, 0, 0);
4901 current_link_up = 1;
4906 /* Want auto-negotiation. */
4907 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4909 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4910 if (flowctrl & ADVERTISE_1000XPAUSE)
4911 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4912 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4913 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4915 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4916 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4917 tp->serdes_counter &&
4918 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4919 MAC_STATUS_RCVD_CFG)) ==
4920 MAC_STATUS_PCS_SYNCED)) {
4921 tp->serdes_counter--;
4922 current_link_up = 1;
4927 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4928 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4930 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4932 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4933 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4934 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4935 MAC_STATUS_SIGNAL_DET)) {
4936 sg_dig_status = tr32(SG_DIG_STATUS);
4937 mac_status = tr32(MAC_STATUS);
4939 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4940 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4941 u32 local_adv = 0, remote_adv = 0;
4943 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4944 local_adv |= ADVERTISE_1000XPAUSE;
4945 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4946 local_adv |= ADVERTISE_1000XPSE_ASYM;
4948 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4949 remote_adv |= LPA_1000XPAUSE;
4950 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4951 remote_adv |= LPA_1000XPAUSE_ASYM;
4953 tp->link_config.rmt_adv =
4954 mii_adv_to_ethtool_adv_x(remote_adv);
4956 tg3_setup_flow_control(tp, local_adv, remote_adv);
4957 current_link_up = 1;
4958 tp->serdes_counter = 0;
4959 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4960 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4961 if (tp->serdes_counter)
4962 tp->serdes_counter--;
4965 u32 val = serdes_cfg;
4972 tw32_f(MAC_SERDES_CFG, val);
4975 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4978 /* Link parallel detection - link is up */
4979 /* only if we have PCS_SYNC and not */
4980 /* receiving config code words */
4981 mac_status = tr32(MAC_STATUS);
4982 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4983 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4984 tg3_setup_flow_control(tp, 0, 0);
4985 current_link_up = 1;
4987 TG3_PHYFLG_PARALLEL_DETECT;
4988 tp->serdes_counter =
4989 SERDES_PARALLEL_DET_TIMEOUT;
4991 goto restart_autoneg;
4995 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4996 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5000 return current_link_up;
5003 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5005 int current_link_up = 0;
5007 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5010 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5011 u32 txflags, rxflags;
5014 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5015 u32 local_adv = 0, remote_adv = 0;
5017 if (txflags & ANEG_CFG_PS1)
5018 local_adv |= ADVERTISE_1000XPAUSE;
5019 if (txflags & ANEG_CFG_PS2)
5020 local_adv |= ADVERTISE_1000XPSE_ASYM;
5022 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5023 remote_adv |= LPA_1000XPAUSE;
5024 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5025 remote_adv |= LPA_1000XPAUSE_ASYM;
5027 tp->link_config.rmt_adv =
5028 mii_adv_to_ethtool_adv_x(remote_adv);
5030 tg3_setup_flow_control(tp, local_adv, remote_adv);
5032 current_link_up = 1;
5034 for (i = 0; i < 30; i++) {
5037 (MAC_STATUS_SYNC_CHANGED |
5038 MAC_STATUS_CFG_CHANGED));
5040 if ((tr32(MAC_STATUS) &
5041 (MAC_STATUS_SYNC_CHANGED |
5042 MAC_STATUS_CFG_CHANGED)) == 0)
5046 mac_status = tr32(MAC_STATUS);
5047 if (current_link_up == 0 &&
5048 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5049 !(mac_status & MAC_STATUS_RCVD_CFG))
5050 current_link_up = 1;
5052 tg3_setup_flow_control(tp, 0, 0);
5054 /* Forcing 1000FD link up. */
5055 current_link_up = 1;
5057 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5060 tw32_f(MAC_MODE, tp->mac_mode);
5065 return current_link_up;
5068 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5071 u16 orig_active_speed;
5072 u8 orig_active_duplex;
5074 int current_link_up;
5077 orig_pause_cfg = tp->link_config.active_flowctrl;
5078 orig_active_speed = tp->link_config.active_speed;
5079 orig_active_duplex = tp->link_config.active_duplex;
5081 if (!tg3_flag(tp, HW_AUTONEG) &&
5082 netif_carrier_ok(tp->dev) &&
5083 tg3_flag(tp, INIT_COMPLETE)) {
5084 mac_status = tr32(MAC_STATUS);
5085 mac_status &= (MAC_STATUS_PCS_SYNCED |
5086 MAC_STATUS_SIGNAL_DET |
5087 MAC_STATUS_CFG_CHANGED |
5088 MAC_STATUS_RCVD_CFG);
5089 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5090 MAC_STATUS_SIGNAL_DET)) {
5091 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5092 MAC_STATUS_CFG_CHANGED));
5097 tw32_f(MAC_TX_AUTO_NEG, 0);
5099 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5100 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5101 tw32_f(MAC_MODE, tp->mac_mode);
5104 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5105 tg3_init_bcm8002(tp);
5107 /* Enable link change event even when serdes polling. */
5108 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5111 current_link_up = 0;
5112 tp->link_config.rmt_adv = 0;
5113 mac_status = tr32(MAC_STATUS);
5115 if (tg3_flag(tp, HW_AUTONEG))
5116 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5118 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5120 tp->napi[0].hw_status->status =
5121 (SD_STATUS_UPDATED |
5122 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5124 for (i = 0; i < 100; i++) {
5125 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5126 MAC_STATUS_CFG_CHANGED));
5128 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5129 MAC_STATUS_CFG_CHANGED |
5130 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5134 mac_status = tr32(MAC_STATUS);
5135 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5136 current_link_up = 0;
5137 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5138 tp->serdes_counter == 0) {
5139 tw32_f(MAC_MODE, (tp->mac_mode |
5140 MAC_MODE_SEND_CONFIGS));
5142 tw32_f(MAC_MODE, tp->mac_mode);
5146 if (current_link_up == 1) {
5147 tp->link_config.active_speed = SPEED_1000;
5148 tp->link_config.active_duplex = DUPLEX_FULL;
5149 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5150 LED_CTRL_LNKLED_OVERRIDE |
5151 LED_CTRL_1000MBPS_ON));
5153 tp->link_config.active_speed = SPEED_UNKNOWN;
5154 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5155 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5156 LED_CTRL_LNKLED_OVERRIDE |
5157 LED_CTRL_TRAFFIC_OVERRIDE));
5160 if (current_link_up != netif_carrier_ok(tp->dev)) {
5161 if (current_link_up)
5162 netif_carrier_on(tp->dev);
5164 netif_carrier_off(tp->dev);
5165 tg3_link_report(tp);
5167 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5168 if (orig_pause_cfg != now_pause_cfg ||
5169 orig_active_speed != tp->link_config.active_speed ||
5170 orig_active_duplex != tp->link_config.active_duplex)
5171 tg3_link_report(tp);
5177 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5179 int current_link_up, err = 0;
5183 u32 local_adv, remote_adv;
5185 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5186 tw32_f(MAC_MODE, tp->mac_mode);
5192 (MAC_STATUS_SYNC_CHANGED |
5193 MAC_STATUS_CFG_CHANGED |
5194 MAC_STATUS_MI_COMPLETION |
5195 MAC_STATUS_LNKSTATE_CHANGED));
5201 current_link_up = 0;
5202 current_speed = SPEED_UNKNOWN;
5203 current_duplex = DUPLEX_UNKNOWN;
5204 tp->link_config.rmt_adv = 0;
5206 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5207 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5209 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5210 bmsr |= BMSR_LSTATUS;
5212 bmsr &= ~BMSR_LSTATUS;
5215 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5217 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5218 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5219 /* do nothing, just check for link up at the end */
5220 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5223 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5224 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5225 ADVERTISE_1000XPAUSE |
5226 ADVERTISE_1000XPSE_ASYM |
5229 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5230 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5232 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5233 tg3_writephy(tp, MII_ADVERTISE, newadv);
5234 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5235 tg3_writephy(tp, MII_BMCR, bmcr);
5237 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5238 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5239 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5246 bmcr &= ~BMCR_SPEED1000;
5247 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5249 if (tp->link_config.duplex == DUPLEX_FULL)
5250 new_bmcr |= BMCR_FULLDPLX;
5252 if (new_bmcr != bmcr) {
5253 /* BMCR_SPEED1000 is a reserved bit that needs
5254 * to be set on write.
5256 new_bmcr |= BMCR_SPEED1000;
5258 /* Force a linkdown */
5259 if (netif_carrier_ok(tp->dev)) {
5262 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5263 adv &= ~(ADVERTISE_1000XFULL |
5264 ADVERTISE_1000XHALF |
5266 tg3_writephy(tp, MII_ADVERTISE, adv);
5267 tg3_writephy(tp, MII_BMCR, bmcr |
5271 netif_carrier_off(tp->dev);
5273 tg3_writephy(tp, MII_BMCR, new_bmcr);
5275 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5276 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5277 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5279 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5280 bmsr |= BMSR_LSTATUS;
5282 bmsr &= ~BMSR_LSTATUS;
5284 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5288 if (bmsr & BMSR_LSTATUS) {
5289 current_speed = SPEED_1000;
5290 current_link_up = 1;
5291 if (bmcr & BMCR_FULLDPLX)
5292 current_duplex = DUPLEX_FULL;
5294 current_duplex = DUPLEX_HALF;
5299 if (bmcr & BMCR_ANENABLE) {
5302 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5303 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5304 common = local_adv & remote_adv;
5305 if (common & (ADVERTISE_1000XHALF |
5306 ADVERTISE_1000XFULL)) {
5307 if (common & ADVERTISE_1000XFULL)
5308 current_duplex = DUPLEX_FULL;
5310 current_duplex = DUPLEX_HALF;
5312 tp->link_config.rmt_adv =
5313 mii_adv_to_ethtool_adv_x(remote_adv);
5314 } else if (!tg3_flag(tp, 5780_CLASS)) {
5315 /* Link is up via parallel detect */
5317 current_link_up = 0;
5322 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5323 tg3_setup_flow_control(tp, local_adv, remote_adv);
5325 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5326 if (tp->link_config.active_duplex == DUPLEX_HALF)
5327 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5329 tw32_f(MAC_MODE, tp->mac_mode);
5332 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5334 tp->link_config.active_speed = current_speed;
5335 tp->link_config.active_duplex = current_duplex;
5337 if (current_link_up != netif_carrier_ok(tp->dev)) {
5338 if (current_link_up)
5339 netif_carrier_on(tp->dev);
5341 netif_carrier_off(tp->dev);
5342 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5344 tg3_link_report(tp);
5349 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5351 if (tp->serdes_counter) {
5352 /* Give autoneg time to complete. */
5353 tp->serdes_counter--;
5357 if (!netif_carrier_ok(tp->dev) &&
5358 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5361 tg3_readphy(tp, MII_BMCR, &bmcr);
5362 if (bmcr & BMCR_ANENABLE) {
5365 /* Select shadow register 0x1f */
5366 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5367 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5369 /* Select expansion interrupt status register */
5370 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5371 MII_TG3_DSP_EXP1_INT_STAT);
5372 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5373 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5375 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5376 /* We have signal detect and not receiving
5377 * config code words, link is up by parallel
5381 bmcr &= ~BMCR_ANENABLE;
5382 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5383 tg3_writephy(tp, MII_BMCR, bmcr);
5384 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5387 } else if (netif_carrier_ok(tp->dev) &&
5388 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5389 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5392 /* Select expansion interrupt status register */
5393 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5394 MII_TG3_DSP_EXP1_INT_STAT);
5395 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5399 /* Config code words received, turn on autoneg. */
5400 tg3_readphy(tp, MII_BMCR, &bmcr);
5401 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5403 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5409 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5414 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5415 err = tg3_setup_fiber_phy(tp, force_reset);
5416 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5417 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5419 err = tg3_setup_copper_phy(tp, force_reset);
5421 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5424 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5425 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5427 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5432 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5433 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5434 tw32(GRC_MISC_CFG, val);
5437 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5438 (6 << TX_LENGTHS_IPG_SHIFT);
5439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5440 val |= tr32(MAC_TX_LENGTHS) &
5441 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5442 TX_LENGTHS_CNT_DWN_VAL_MSK);
5444 if (tp->link_config.active_speed == SPEED_1000 &&
5445 tp->link_config.active_duplex == DUPLEX_HALF)
5446 tw32(MAC_TX_LENGTHS, val |
5447 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5449 tw32(MAC_TX_LENGTHS, val |
5450 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5452 if (!tg3_flag(tp, 5705_PLUS)) {
5453 if (netif_carrier_ok(tp->dev)) {
5454 tw32(HOSTCC_STAT_COAL_TICKS,
5455 tp->coal.stats_block_coalesce_usecs);
5457 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5461 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5462 val = tr32(PCIE_PWR_MGMT_THRESH);
5463 if (!netif_carrier_ok(tp->dev))
5464 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5467 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5468 tw32(PCIE_PWR_MGMT_THRESH, val);
5474 static inline int tg3_irq_sync(struct tg3 *tp)
5476 return tp->irq_sync;
5479 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5483 dst = (u32 *)((u8 *)dst + off);
5484 for (i = 0; i < len; i += sizeof(u32))
5485 *dst++ = tr32(off + i);
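/* Note on the helper above: dst is first advanced by off so that each
 * register value lands at its own offset within the caller's dump
 * buffer; every block dumped below can therefore pass the same base
 * pointer and the buffer layout still mirrors the register map.
 */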
5488 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5490 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5491 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5492 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5493 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5494 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5495 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5496 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5497 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5498 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5499 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5500 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5501 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5502 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5503 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5504 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5505 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5506 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5507 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5508 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5510 if (tg3_flag(tp, SUPPORT_MSIX))
5511 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5513 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5514 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5515 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5516 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5517 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5518 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5519 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5520 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5522 if (!tg3_flag(tp, 5705_PLUS)) {
5523 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5524 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5525 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5528 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5529 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5530 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5531 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5532 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5534 if (tg3_flag(tp, NVRAM))
5535 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5538 static void tg3_dump_state(struct tg3 *tp)
5543 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5545 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5549 if (tg3_flag(tp, PCI_EXPRESS)) {
5550 /* Read up to but not including private PCI registers */
5551 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5552 regs[i / sizeof(u32)] = tr32(i);
5554 tg3_dump_legacy_regs(tp, regs);
5556 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5557 if (!regs[i + 0] && !regs[i + 1] &&
5558 !regs[i + 2] && !regs[i + 3])
5561 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5563 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5568 for (i = 0; i < tp->irq_cnt; i++) {
5569 struct tg3_napi *tnapi = &tp->napi[i];
5571 /* SW status block */
5573 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5575 tnapi->hw_status->status,
5576 tnapi->hw_status->status_tag,
5577 tnapi->hw_status->rx_jumbo_consumer,
5578 tnapi->hw_status->rx_consumer,
5579 tnapi->hw_status->rx_mini_consumer,
5580 tnapi->hw_status->idx[0].rx_producer,
5581 tnapi->hw_status->idx[0].tx_consumer);
5584 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5586 tnapi->last_tag, tnapi->last_irq_tag,
5587 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5589 tnapi->prodring.rx_std_prod_idx,
5590 tnapi->prodring.rx_std_cons_idx,
5591 tnapi->prodring.rx_jmb_prod_idx,
5592 tnapi->prodring.rx_jmb_cons_idx);
5596 /* This is called whenever we suspect that the system chipset is re-
5597 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5598 * is bogus tx completions. We try to recover by setting the
5599 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5602 static void tg3_tx_recover(struct tg3 *tp)
5604 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5605 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5607 netdev_warn(tp->dev,
5608 "The system may be re-ordering memory-mapped I/O "
5609 "cycles to the network device, attempting to recover. "
5610 "Please report the problem to the driver maintainer "
5611 "and include system chipset information.\n");
5613 spin_lock(&tp->lock);
5614 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5615 spin_unlock(&tp->lock);
5618 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5620 /* Tell compiler to fetch tx indices from memory. */
5622 return tnapi->tx_pending -
5623 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
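/* Worked example of the arithmetic above: with TG3_TX_RING_SIZE = 512,
 * tx_prod = 3 and tx_cons = 509, (3 - 509) & 511 = 6 descriptors are
 * still in flight even though tx_prod has wrapped, leaving
 * tx_pending - 6 slots free.  The mask trick relies on the ring size
 * being a power of two.
 */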
5626 /* Tigon3 never reports partial packet sends. So we do not
5627 * need special logic to handle SKBs that have not had all
5628 * of their frags sent yet, like SunGEM does.
5630 static void tg3_tx(struct tg3_napi *tnapi)
5632 struct tg3 *tp = tnapi->tp;
5633 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5634 u32 sw_idx = tnapi->tx_cons;
5635 struct netdev_queue *txq;
5636 int index = tnapi - tp->napi;
5637 unsigned int pkts_compl = 0, bytes_compl = 0;
5639 if (tg3_flag(tp, ENABLE_TSS))
5642 txq = netdev_get_tx_queue(tp->dev, index);
5644 while (sw_idx != hw_idx) {
5645 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5646 struct sk_buff *skb = ri->skb;
5649 if (unlikely(skb == NULL)) {
5654 pci_unmap_single(tp->pdev,
5655 dma_unmap_addr(ri, mapping),
5661 while (ri->fragmented) {
5662 ri->fragmented = false;
5663 sw_idx = NEXT_TX(sw_idx);
5664 ri = &tnapi->tx_buffers[sw_idx];
5667 sw_idx = NEXT_TX(sw_idx);
5669 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5670 ri = &tnapi->tx_buffers[sw_idx];
5671 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5674 pci_unmap_page(tp->pdev,
5675 dma_unmap_addr(ri, mapping),
5676 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5679 while (ri->fragmented) {
5680 ri->fragmented = false;
5681 sw_idx = NEXT_TX(sw_idx);
5682 ri = &tnapi->tx_buffers[sw_idx];
5685 sw_idx = NEXT_TX(sw_idx);
5689 bytes_compl += skb->len;
5693 if (unlikely(tx_bug)) {
5699 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5701 tnapi->tx_cons = sw_idx;
5703 /* Need to make the tx_cons update visible to tg3_start_xmit()
5704 * before checking for netif_queue_stopped(). Without the
5705 * memory barrier, there is a small possibility that tg3_start_xmit()
5706 * will miss it and cause the queue to be stopped forever.
5710 if (unlikely(netif_tx_queue_stopped(txq) &&
5711 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5712 __netif_tx_lock(txq, smp_processor_id());
5713 if (netif_tx_queue_stopped(txq) &&
5714 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5715 netif_tx_wake_queue(txq);
5716 __netif_tx_unlock(txq);
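/* A sketch of the barrier pairing relied on above: tg3_tx() stores
 * tnapi->tx_cons and then issues a full barrier before testing
 * netif_tx_queue_stopped(), while tg3_start_xmit() stops the queue
 * and issues its own barrier before re-reading tg3_tx_avail().
 * Whichever side runs second is guaranteed to observe the other's
 * update, so the queue cannot remain stopped with room available.
 */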
5720 static void tg3_frag_free(bool is_frag, void *data)
5723 put_page(virt_to_head_page(data));
5728 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5730 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5731 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5736 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5737 map_sz, PCI_DMA_FROMDEVICE);
5738 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5743 /* Returns size of skb allocated or < 0 on error.
5745 * We only need to fill in the address because the other members
5746 * of the RX descriptor are invariant, see tg3_init_rings.
5748 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5749 * posting buffers we only dirty the first cache line of the RX
5750 * descriptor (containing the address). Whereas for the RX status
5751 * buffers the cpu only reads the last cacheline of the RX descriptor
5752 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5754 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5755 u32 opaque_key, u32 dest_idx_unmasked,
5756 unsigned int *frag_size)
5758 struct tg3_rx_buffer_desc *desc;
5759 struct ring_info *map;
5762 int skb_size, data_size, dest_idx;
5764 switch (opaque_key) {
5765 case RXD_OPAQUE_RING_STD:
5766 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5767 desc = &tpr->rx_std[dest_idx];
5768 map = &tpr->rx_std_buffers[dest_idx];
5769 data_size = tp->rx_pkt_map_sz;
5772 case RXD_OPAQUE_RING_JUMBO:
5773 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5774 desc = &tpr->rx_jmb[dest_idx].std;
5775 map = &tpr->rx_jmb_buffers[dest_idx];
5776 data_size = TG3_RX_JMB_MAP_SZ;
5783 /* Do not overwrite any of the map or rp information
5784 * until we are sure we can commit to a new buffer.
5786 * Callers depend upon this behavior and assume that
5787 * we leave everything unchanged if we fail.
5789 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5790 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5791 if (skb_size <= PAGE_SIZE) {
5792 data = netdev_alloc_frag(skb_size);
5793 *frag_size = skb_size;
5795 data = kmalloc(skb_size, GFP_ATOMIC);
5801 mapping = pci_map_single(tp->pdev,
5802 data + TG3_RX_OFFSET(tp),
5804 PCI_DMA_FROMDEVICE);
5805 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5806 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5811 dma_unmap_addr_set(map, mapping, mapping);
5813 desc->addr_hi = ((u64)mapping >> 32);
5814 desc->addr_lo = ((u64)mapping & 0xffffffff);
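/* For example, a dma_addr_t of 0x0000000123456780 is split into
 * addr_hi = 0x00000001 and addr_lo = 0x23456780; on platforms with a
 * 32-bit dma_addr_t the shift simply yields zero for the high word.
 */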
5819 /* We only need to move the address over because the other
5820 * members of the RX descriptor are invariant. See notes above
5821 * tg3_alloc_rx_data for full details.
5823 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5824 struct tg3_rx_prodring_set *dpr,
5825 u32 opaque_key, int src_idx,
5826 u32 dest_idx_unmasked)
5828 struct tg3 *tp = tnapi->tp;
5829 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5830 struct ring_info *src_map, *dest_map;
5831 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5834 switch (opaque_key) {
5835 case RXD_OPAQUE_RING_STD:
5836 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5837 dest_desc = &dpr->rx_std[dest_idx];
5838 dest_map = &dpr->rx_std_buffers[dest_idx];
5839 src_desc = &spr->rx_std[src_idx];
5840 src_map = &spr->rx_std_buffers[src_idx];
5843 case RXD_OPAQUE_RING_JUMBO:
5844 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5845 dest_desc = &dpr->rx_jmb[dest_idx].std;
5846 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5847 src_desc = &spr->rx_jmb[src_idx].std;
5848 src_map = &spr->rx_jmb_buffers[src_idx];
5855 dest_map->data = src_map->data;
5856 dma_unmap_addr_set(dest_map, mapping,
5857 dma_unmap_addr(src_map, mapping));
5858 dest_desc->addr_hi = src_desc->addr_hi;
5859 dest_desc->addr_lo = src_desc->addr_lo;
5861 /* Ensure that the update to the skb happens after the physical
5862 * addresses have been transferred to the new BD location.
5866 src_map->data = NULL;
5869 /* The RX ring scheme is composed of multiple rings which post fresh
5870 * buffers to the chip, and one special ring the chip uses to report
5871 * status back to the host.
5873 * The special ring reports the status of received packets to the
5874 * host. The chip does not write into the original descriptor the
5875 * RX buffer was obtained from. The chip simply takes the original
5876 * descriptor as provided by the host, updates the status and length
5877 * field, then writes this into the next status ring entry.
5879 * Each ring the host uses to post buffers to the chip is described
5880 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5881 * it is first placed into the on-chip ram. When the packet's length
5882 * is known, it walks down the TG3_BDINFO entries to select the ring.
5883 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5884 * which is within the range of the new packet's length is chosen.
5886 * The "separate ring for rx status" scheme may sound queer, but it makes
5887 * sense from a cache coherency perspective. If only the host writes
5888 * to the buffer post rings, and only the chip writes to the rx status
5889 * rings, then cache lines never move beyond shared-modified state.
5890 * If both the host and chip were to write into the same ring, cache line
5891 * eviction could occur since both entities want it in an exclusive state.
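/* To make the scheme above concrete: the host posts a buffer by
 * writing its DMA address into, say, standard ring entry 40 and
 * bumping the standard producer mailbox.  The chip DMAs a received
 * frame into that buffer, then writes a completion descriptor (length,
 * flags, and an opaque cookie naming ring STD / entry 40) into the
 * next return-ring slot and advances the return-ring producer in the
 * status block.  tg3_rx() below walks the return ring and uses the
 * cookie to locate the original buffer.
 */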
5893 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5895 struct tg3 *tp = tnapi->tp;
5896 u32 work_mask, rx_std_posted = 0;
5897 u32 std_prod_idx, jmb_prod_idx;
5898 u32 sw_idx = tnapi->rx_rcb_ptr;
5901 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5903 hw_idx = *(tnapi->rx_rcb_prod_idx);
5905 * We need to order the read of hw_idx and the read of
5906 * the opaque cookie.
5911 std_prod_idx = tpr->rx_std_prod_idx;
5912 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5913 while (sw_idx != hw_idx && budget > 0) {
5914 struct ring_info *ri;
5915 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5917 struct sk_buff *skb;
5918 dma_addr_t dma_addr;
5919 u32 opaque_key, desc_idx, *post_ptr;
5922 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5923 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5924 if (opaque_key == RXD_OPAQUE_RING_STD) {
5925 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5926 dma_addr = dma_unmap_addr(ri, mapping);
5928 post_ptr = &std_prod_idx;
5930 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5931 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5932 dma_addr = dma_unmap_addr(ri, mapping);
5934 post_ptr = &jmb_prod_idx;
5936 goto next_pkt_nopost;
5938 work_mask |= opaque_key;
5940 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5941 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5943 tg3_recycle_rx(tnapi, tpr, opaque_key,
5944 desc_idx, *post_ptr);
5946 /* Other statistics are tracked by the card. */
5951 prefetch(data + TG3_RX_OFFSET(tp));
5952 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5955 if (len > TG3_RX_COPY_THRESH(tp)) {
5957 unsigned int frag_size;
5959 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5960 *post_ptr, &frag_size);
5964 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5965 PCI_DMA_FROMDEVICE);
5967 skb = build_skb(data, frag_size);
5969 tg3_frag_free(frag_size != 0, data);
5970 goto drop_it_no_recycle;
5972 skb_reserve(skb, TG3_RX_OFFSET(tp));
5973 /* Ensure that the update to the data happens
5974 * after the usage of the old DMA mapping.
5981 tg3_recycle_rx(tnapi, tpr, opaque_key,
5982 desc_idx, *post_ptr);
5984 skb = netdev_alloc_skb(tp->dev,
5985 len + TG3_RAW_IP_ALIGN);
5987 goto drop_it_no_recycle;
5989 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5990 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5992 data + TG3_RX_OFFSET(tp),
5994 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5998 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5999 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6000 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6001 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6002 skb->ip_summed = CHECKSUM_UNNECESSARY;
6004 skb_checksum_none_assert(skb);
6006 skb->protocol = eth_type_trans(skb, tp->dev);
6008 if (len > (tp->dev->mtu + ETH_HLEN) &&
6009 skb->protocol != htons(ETH_P_8021Q)) {
6011 goto drop_it_no_recycle;
6014 if (desc->type_flags & RXD_FLAG_VLAN &&
6015 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6016 __vlan_hwaccel_put_tag(skb,
6017 desc->err_vlan & RXD_VLAN_MASK);
6019 napi_gro_receive(&tnapi->napi, skb);
6027 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6028 tpr->rx_std_prod_idx = std_prod_idx &
6029 tp->rx_std_ring_mask;
6030 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6031 tpr->rx_std_prod_idx);
6032 work_mask &= ~RXD_OPAQUE_RING_STD;
6037 sw_idx &= tp->rx_ret_ring_mask;
6039 /* Refresh hw_idx to see if there is new work */
6040 if (sw_idx == hw_idx) {
6041 hw_idx = *(tnapi->rx_rcb_prod_idx);
6046 /* ACK the status ring. */
6047 tnapi->rx_rcb_ptr = sw_idx;
6048 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6050 /* Refill RX ring(s). */
6051 if (!tg3_flag(tp, ENABLE_RSS)) {
6052 /* Sync BD data before updating mailbox */
6055 if (work_mask & RXD_OPAQUE_RING_STD) {
6056 tpr->rx_std_prod_idx = std_prod_idx &
6057 tp->rx_std_ring_mask;
6058 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6059 tpr->rx_std_prod_idx);
6061 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6062 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6063 tp->rx_jmb_ring_mask;
6064 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6065 tpr->rx_jmb_prod_idx);
6068 } else if (work_mask) {
6069 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6070 * updated before the producer indices can be updated.
6074 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6075 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6077 if (tnapi != &tp->napi[1]) {
6078 tp->rx_refill = true;
6079 napi_schedule(&tp->napi[1].napi);
6086 static void tg3_poll_link(struct tg3 *tp)
6088 /* handle link change and other phy events */
6089 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6090 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6092 if (sblk->status & SD_STATUS_LINK_CHG) {
6093 sblk->status = SD_STATUS_UPDATED |
6094 (sblk->status & ~SD_STATUS_LINK_CHG);
6095 spin_lock(&tp->lock);
6096 if (tg3_flag(tp, USE_PHYLIB)) {
6098 (MAC_STATUS_SYNC_CHANGED |
6099 MAC_STATUS_CFG_CHANGED |
6100 MAC_STATUS_MI_COMPLETION |
6101 MAC_STATUS_LNKSTATE_CHANGED));
6104 tg3_setup_phy(tp, 0);
6105 spin_unlock(&tp->lock);
6110 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6111 struct tg3_rx_prodring_set *dpr,
6112 struct tg3_rx_prodring_set *spr)
6114 u32 si, di, cpycnt, src_prod_idx;
6118 src_prod_idx = spr->rx_std_prod_idx;
6120 /* Make sure updates to the rx_std_buffers[] entries and the
6121 * standard producer index are seen in the correct order.
6125 if (spr->rx_std_cons_idx == src_prod_idx)
6128 if (spr->rx_std_cons_idx < src_prod_idx)
6129 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6131 cpycnt = tp->rx_std_ring_mask + 1 -
6132 spr->rx_std_cons_idx;
6134 cpycnt = min(cpycnt,
6135 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6137 si = spr->rx_std_cons_idx;
6138 di = dpr->rx_std_prod_idx;
6140 for (i = di; i < di + cpycnt; i++) {
6141 if (dpr->rx_std_buffers[i].data) {
6151 /* Ensure that updates to the rx_std_buffers ring and the
6152 * shadowed hardware producer ring from tg3_recycle_rx() are
6153 * ordered correctly WRT the skb check above.
6157 memcpy(&dpr->rx_std_buffers[di],
6158 &spr->rx_std_buffers[si],
6159 cpycnt * sizeof(struct ring_info));
6161 for (i = 0; i < cpycnt; i++, di++, si++) {
6162 struct tg3_rx_buffer_desc *sbd, *dbd;
6163 sbd = &spr->rx_std[si];
6164 dbd = &dpr->rx_std[di];
6165 dbd->addr_hi = sbd->addr_hi;
6166 dbd->addr_lo = sbd->addr_lo;
6169 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6170 tp->rx_std_ring_mask;
6171 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6172 tp->rx_std_ring_mask;
6176 src_prod_idx = spr->rx_jmb_prod_idx;
6178 /* Make sure updates to the rx_jmb_buffers[] entries and
6179 * the jumbo producer index are seen in the correct order.
6183 if (spr->rx_jmb_cons_idx == src_prod_idx)
6186 if (spr->rx_jmb_cons_idx < src_prod_idx)
6187 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6189 cpycnt = tp->rx_jmb_ring_mask + 1 -
6190 spr->rx_jmb_cons_idx;
6192 cpycnt = min(cpycnt,
6193 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6195 si = spr->rx_jmb_cons_idx;
6196 di = dpr->rx_jmb_prod_idx;
6198 for (i = di; i < di + cpycnt; i++) {
6199 if (dpr->rx_jmb_buffers[i].data) {
6209 /* Ensure that updates to the rx_jmb_buffers ring and the
6210 * shadowed hardware producer ring from tg3_recycle_rx() are
6211 * ordered correctly WRT the skb check above.
6215 memcpy(&dpr->rx_jmb_buffers[di],
6216 &spr->rx_jmb_buffers[si],
6217 cpycnt * sizeof(struct ring_info));
6219 for (i = 0; i < cpycnt; i++, di++, si++) {
6220 struct tg3_rx_buffer_desc *sbd, *dbd;
6221 sbd = &spr->rx_jmb[si].std;
6222 dbd = &dpr->rx_jmb[di].std;
6223 dbd->addr_hi = sbd->addr_hi;
6224 dbd->addr_lo = sbd->addr_lo;
6227 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6228 tp->rx_jmb_ring_mask;
6229 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6230 tp->rx_jmb_ring_mask;
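/* Worked example of the cpycnt logic above: on a 512-entry standard
 * ring (mask 511), a source consumer of 500 and source producer of 10
 * means 22 buffers are ready, but only the 12 entries up to the end
 * of the ring are copied this pass; the consumer then wraps to 0 and
 * the next iteration picks up the remaining 10.  cpycnt is also
 * clamped by the space ahead of the destination producer.
 */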
6236 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6238 struct tg3 *tp = tnapi->tp;
6240 /* run TX completion thread */
6241 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6243 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6247 if (!tnapi->rx_rcb_prod_idx)
6250 /* run RX thread, within the bounds set by NAPI.
6251 * All RX "locking" is done by ensuring outside
6252 * code synchronizes with tg3->napi.poll()
6254 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6255 work_done += tg3_rx(tnapi, budget - work_done);
6257 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6258 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6260 u32 std_prod_idx = dpr->rx_std_prod_idx;
6261 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6263 tp->rx_refill = false;
6264 for (i = 1; i <= tp->rxq_cnt; i++)
6265 err |= tg3_rx_prodring_xfer(tp, dpr,
6266 &tp->napi[i].prodring);
6270 if (std_prod_idx != dpr->rx_std_prod_idx)
6271 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6272 dpr->rx_std_prod_idx);
6274 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6275 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6276 dpr->rx_jmb_prod_idx);
6281 tw32_f(HOSTCC_MODE, tp->coal_now);
6287 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6289 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6290 schedule_work(&tp->reset_task);
6293 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6295 cancel_work_sync(&tp->reset_task);
6296 tg3_flag_clear(tp, RESET_TASK_PENDING);
6297 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6300 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6302 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6303 struct tg3 *tp = tnapi->tp;
6305 struct tg3_hw_status *sblk = tnapi->hw_status;
6308 work_done = tg3_poll_work(tnapi, work_done, budget);
6310 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6313 if (unlikely(work_done >= budget))
6316 /* tp->last_tag is used in tg3_int_reenable() below
6317 * to tell the hw how much work has been processed,
6318 * so we must read it before checking for more work.
6320 tnapi->last_tag = sblk->status_tag;
6321 tnapi->last_irq_tag = tnapi->last_tag;
6324 /* check for RX/TX work to do */
6325 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6326 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6328 /* This test here is not race-free, but will reduce
6329 * the number of interrupts by looping again.
6331 if (tnapi == &tp->napi[1] && tp->rx_refill)
6334 napi_complete(napi);
6335 /* Reenable interrupts. */
6336 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6338 /* This test here is synchronized by napi_schedule()
6339 * and napi_complete() to close the race condition.
6341 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6342 tw32(HOSTCC_MODE, tp->coalesce_mode |
6343 HOSTCC_MODE_ENABLE |
6354 /* work_done is guaranteed to be less than budget. */
6355 napi_complete(napi);
6356 tg3_reset_task_schedule(tp);
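/* The tp->rx_refill recheck above closes a narrow window: another
 * vector's tg3_rx() may set the flag after napi[1] sampled it but
 * before napi_complete().  The HOSTCC_MODE write then forces an
 * immediate coalescence event, so this vector fires again and the
 * shared standard ring still gets replenished.
 */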
6360 static void tg3_process_error(struct tg3 *tp)
6363 bool real_error = false;
6365 if (tg3_flag(tp, ERROR_PROCESSED))
6368 /* Check Flow Attention register */
6369 val = tr32(HOSTCC_FLOW_ATTN);
6370 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6371 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6375 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6376 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6380 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6381 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6390 tg3_flag_set(tp, ERROR_PROCESSED);
6391 tg3_reset_task_schedule(tp);
6394 static int tg3_poll(struct napi_struct *napi, int budget)
6396 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6397 struct tg3 *tp = tnapi->tp;
6399 struct tg3_hw_status *sblk = tnapi->hw_status;
6402 if (sblk->status & SD_STATUS_ERROR)
6403 tg3_process_error(tp);
6407 work_done = tg3_poll_work(tnapi, work_done, budget);
6409 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6412 if (unlikely(work_done >= budget))
6415 if (tg3_flag(tp, TAGGED_STATUS)) {
6416 /* tp->last_tag is used in tg3_int_reenable() below
6417 * to tell the hw how much work has been processed,
6418 * so we must read it before checking for more work.
6420 tnapi->last_tag = sblk->status_tag;
6421 tnapi->last_irq_tag = tnapi->last_tag;
6424 sblk->status &= ~SD_STATUS_UPDATED;
6426 if (likely(!tg3_has_work(tnapi))) {
6427 napi_complete(napi);
6428 tg3_int_reenable(tnapi);
6436 /* work_done is guaranteed to be less than budget. */
6437 napi_complete(napi);
6438 tg3_reset_task_schedule(tp);
6442 static void tg3_napi_disable(struct tg3 *tp)
6446 for (i = tp->irq_cnt - 1; i >= 0; i--)
6447 napi_disable(&tp->napi[i].napi);
6450 static void tg3_napi_enable(struct tg3 *tp)
6454 for (i = 0; i < tp->irq_cnt; i++)
6455 napi_enable(&tp->napi[i].napi);
6458 static void tg3_napi_init(struct tg3 *tp)
6462 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6463 for (i = 1; i < tp->irq_cnt; i++)
6464 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6467 static void tg3_napi_fini(struct tg3 *tp)
6471 for (i = 0; i < tp->irq_cnt; i++)
6472 netif_napi_del(&tp->napi[i].napi);
6475 static inline void tg3_netif_stop(struct tg3 *tp)
6477 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6478 tg3_napi_disable(tp);
6479 netif_tx_disable(tp->dev);
6482 static inline void tg3_netif_start(struct tg3 *tp)
6484 /* NOTE: unconditional netif_tx_wake_all_queues is only
6485 * appropriate so long as all callers are assured to
6486 * have free tx slots (such as after tg3_init_hw)
6488 netif_tx_wake_all_queues(tp->dev);
6490 tg3_napi_enable(tp);
6491 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6492 tg3_enable_ints(tp);
6495 static void tg3_irq_quiesce(struct tg3 *tp)
6499 BUG_ON(tp->irq_sync);
6504 for (i = 0; i < tp->irq_cnt; i++)
6505 synchronize_irq(tp->napi[i].irq_vec);
6508 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6509 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6510 * with as well. Most of the time, this is not necessary except when
6511 * shutting down the device.
6513 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6515 spin_lock_bh(&tp->lock);
6517 tg3_irq_quiesce(tp);
6520 static inline void tg3_full_unlock(struct tg3 *tp)
6522 spin_unlock_bh(&tp->lock);
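/* Illustrative call pattern for the helpers above (a sketch, not a
 * new code path):
 *
 *	tg3_full_lock(tp, 1);	   (also waits out the IRQ handlers)
 *	...reprogram the device...
 *	tg3_full_unlock(tp);
 *
 * irq_sync = 1 is only needed when the caller must additionally be
 * serialized against the interrupt handlers, e.g. during shutdown;
 * ordinary paths take tg3_full_lock(tp, 0).
 */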
6525 /* One-shot MSI handler - Chip automatically disables interrupt
6526 * after sending MSI so driver doesn't have to do it.
6528 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6530 struct tg3_napi *tnapi = dev_id;
6531 struct tg3 *tp = tnapi->tp;
6533 prefetch(tnapi->hw_status);
6535 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6537 if (likely(!tg3_irq_sync(tp)))
6538 napi_schedule(&tnapi->napi);
6543 /* MSI ISR - No need to check for interrupt sharing and no need to
6544 * flush status block and interrupt mailbox. PCI ordering rules
6545 * guarantee that MSI will arrive after the status block.
6547 static irqreturn_t tg3_msi(int irq, void *dev_id)
6549 struct tg3_napi *tnapi = dev_id;
6550 struct tg3 *tp = tnapi->tp;
6552 prefetch(tnapi->hw_status);
6554 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6556 * Writing any value to intr-mbox-0 clears PCI INTA# and
6557 * chip-internal interrupt pending events.
6558 * Writing non-zero to intr-mbox-0 additionally tells the
6559 * NIC to stop sending us irqs, engaging "in-intr-handler"
6562 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6563 if (likely(!tg3_irq_sync(tp)))
6564 napi_schedule(&tnapi->napi);
6566 return IRQ_RETVAL(1);
6569 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6571 struct tg3_napi *tnapi = dev_id;
6572 struct tg3 *tp = tnapi->tp;
6573 struct tg3_hw_status *sblk = tnapi->hw_status;
6574 unsigned int handled = 1;
6576 /* In INTx mode, it is possible for the interrupt to arrive at
6577 * the CPU before the status block that was posted just prior to it.
6578 * Reading the PCI State register will confirm whether the
6579 * interrupt is ours and will flush the status block.
6581 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6582 if (tg3_flag(tp, CHIP_RESETTING) ||
6583 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6590 * Writing any value to intr-mbox-0 clears PCI INTA# and
6591 * chip-internal interrupt pending events.
6592 * Writing non-zero to intr-mbox-0 additionally tells the
6593 * NIC to stop sending us irqs, engaging "in-intr-handler"
6596 * Flush the mailbox to de-assert the IRQ immediately to prevent
6597 * spurious interrupts. The flush impacts performance but
6598 * excessive spurious interrupts can be worse in some cases.
6600 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6601 if (tg3_irq_sync(tp))
6603 sblk->status &= ~SD_STATUS_UPDATED;
6604 if (likely(tg3_has_work(tnapi))) {
6605 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6606 napi_schedule(&tnapi->napi);
6608 /* No work, shared interrupt perhaps? re-enable
6609 * interrupts, and flush that PCI write
6611 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6615 return IRQ_RETVAL(handled);
6618 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6620 struct tg3_napi *tnapi = dev_id;
6621 struct tg3 *tp = tnapi->tp;
6622 struct tg3_hw_status *sblk = tnapi->hw_status;
6623 unsigned int handled = 1;
6625 /* In INTx mode, it is possible for the interrupt to arrive at
6626 * the CPU before the status block that was posted just prior to it.
6627 * Reading the PCI State register will confirm whether the
6628 * interrupt is ours and will flush the status block.
6630 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6631 if (tg3_flag(tp, CHIP_RESETTING) ||
6632 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6639 * writing any value to intr-mbox-0 clears PCI INTA# and
6640 * chip-internal interrupt pending events.
6641 * writing non-zero to intr-mbox-0 additionally tells the
6642 * NIC to stop sending us irqs, engaging "in-intr-handler"
6645 * Flush the mailbox to de-assert the IRQ immediately to prevent
6646 * spurious interrupts. The flush impacts performance but
6647 * excessive spurious interrupts can be worse in some cases.
6649 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6652 * In a shared interrupt configuration, sometimes other devices'
6653 * interrupts will scream. We record the current status tag here
6654 * so that the above check can report that the screaming interrupts
6655 * are unhandled. Eventually they will be silenced.
6657 tnapi->last_irq_tag = sblk->status_tag;
6659 if (tg3_irq_sync(tp))
6662 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6664 napi_schedule(&tnapi->napi);
6667 return IRQ_RETVAL(handled);
6670 /* ISR for interrupt test */
6671 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6673 struct tg3_napi *tnapi = dev_id;
6674 struct tg3 *tp = tnapi->tp;
6675 struct tg3_hw_status *sblk = tnapi->hw_status;
6677 if ((sblk->status & SD_STATUS_UPDATED) ||
6678 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6679 tg3_disable_ints(tp);
6680 return IRQ_RETVAL(1);
6682 return IRQ_RETVAL(0);
6685 #ifdef CONFIG_NET_POLL_CONTROLLER
6686 static void tg3_poll_controller(struct net_device *dev)
6689 struct tg3 *tp = netdev_priv(dev);
6691 for (i = 0; i < tp->irq_cnt; i++)
6692 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6696 static void tg3_tx_timeout(struct net_device *dev)
6698 struct tg3 *tp = netdev_priv(dev);
6700 if (netif_msg_tx_err(tp)) {
6701 netdev_err(dev, "transmit timed out, resetting\n");
6705 tg3_reset_task_schedule(tp);
6708 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6709 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6711 u32 base = (u32) mapping & 0xffffffff;
6713 return (base > 0xffffdcc0) && (base + len + 8 < base);
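/* Worked example: base = 0xffffff00 with len = 0x200 gives
 * base + len + 8 = 0x100000108, which truncates to 0x108 in 32 bits,
 * so 0x108 < base flags a buffer that would straddle a 4GB boundary
 * (the extra 8 bytes are a small guard margin at the boundary).
 */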
6716 /* Test for DMA addresses > 40-bit */
6717 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6720 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6721 if (tg3_flag(tp, 40BIT_DMA_BUG))
6722 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6729 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6730 dma_addr_t mapping, u32 len, u32 flags,
6733 txbd->addr_hi = ((u64) mapping >> 32);
6734 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6735 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6736 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
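/* Each BD is four 32-bit words: the 64-bit DMA address split across
 * addr_hi/addr_lo, the length packed alongside the low 16 flag bits,
 * and the MSS and VLAN tag sharing the final word.  All transmit
 * paths go through this helper so the layout lives in one place.
 */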
6739 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6740 dma_addr_t map, u32 len, u32 flags,
6743 struct tg3 *tp = tnapi->tp;
6746 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6749 if (tg3_4g_overflow_test(map, len))
6752 if (tg3_40bit_overflow_test(tp, map, len))
6755 if (tp->dma_limit) {
6756 u32 prvidx = *entry;
6757 u32 tmp_flag = flags & ~TXD_FLAG_END;
6758 while (len > tp->dma_limit && *budget) {
6759 u32 frag_len = tp->dma_limit;
6760 len -= tp->dma_limit;
6762 /* Avoid the 8-byte DMA problem */
6764 len += tp->dma_limit / 2;
6765 frag_len = tp->dma_limit / 2;
6768 tnapi->tx_buffers[*entry].fragmented = true;
6770 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6771 frag_len, tmp_flag, mss, vlan);
6774 *entry = NEXT_TX(*entry);
6781 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6782 len, flags, mss, vlan);
6784 *entry = NEXT_TX(*entry);
6787 tnapi->tx_buffers[prvidx].fragmented = false;
6791 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6792 len, flags, mss, vlan);
6793 *entry = NEXT_TX(*entry);
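/* Worked example of the splitting above: with tp->dma_limit = 4096, a
 * 4100-byte mapping would naively leave a 4-byte trailing BD, so the
 * chunk before it is halved instead: 2048 bytes are emitted and the
 * remaining 2052 go out as the final BD.  No BD of 8 bytes or less is
 * ever produced, which is the DMA problem being avoided.
 */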
6799 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6802 struct sk_buff *skb;
6803 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6808 pci_unmap_single(tnapi->tp->pdev,
6809 dma_unmap_addr(txb, mapping),
6813 while (txb->fragmented) {
6814 txb->fragmented = false;
6815 entry = NEXT_TX(entry);
6816 txb = &tnapi->tx_buffers[entry];
6819 for (i = 0; i <= last; i++) {
6820 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6822 entry = NEXT_TX(entry);
6823 txb = &tnapi->tx_buffers[entry];
6825 pci_unmap_page(tnapi->tp->pdev,
6826 dma_unmap_addr(txb, mapping),
6827 skb_frag_size(frag), PCI_DMA_TODEVICE);
6829 while (txb->fragmented) {
6830 txb->fragmented = false;
6831 entry = NEXT_TX(entry);
6832 txb = &tnapi->tx_buffers[entry];
6837 /* Work around 4GB and 40-bit hardware DMA bugs. */
6838 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6839 struct sk_buff **pskb,
6840 u32 *entry, u32 *budget,
6841 u32 base_flags, u32 mss, u32 vlan)
6843 struct tg3 *tp = tnapi->tp;
6844 struct sk_buff *new_skb, *skb = *pskb;
6845 dma_addr_t new_addr = 0;
6848 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6849 new_skb = skb_copy(skb, GFP_ATOMIC);
6851 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6853 new_skb = skb_copy_expand(skb,
6854 skb_headroom(skb) + more_headroom,
6855 skb_tailroom(skb), GFP_ATOMIC);
6861 /* New SKB is guaranteed to be linear. */
6862 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6864 /* Make sure the mapping succeeded */
6865 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6866 dev_kfree_skb(new_skb);
6869 u32 save_entry = *entry;
6871 base_flags |= TXD_FLAG_END;
6873 tnapi->tx_buffers[*entry].skb = new_skb;
6874 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6877 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6878 new_skb->len, base_flags,
6880 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6881 dev_kfree_skb(new_skb);
6892 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6894 /* Use GSO to work around a rare TSO bug that may be triggered when the
6895 * TSO header is greater than 80 bytes.
6897 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6899 struct sk_buff *segs, *nskb;
6900 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6902 /* Estimate the number of fragments in the worst case */
6903 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6904 netif_stop_queue(tp->dev);
6906 /* netif_tx_stop_queue() must be done before checking
6907 * the tx index in tg3_tx_avail() below, because in
6908 * tg3_tx(), we update tx index before checking for
6909 * netif_tx_queue_stopped().
6912 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6913 return NETDEV_TX_BUSY;
6915 netif_wake_queue(tp->dev);
6918 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6920 goto tg3_tso_bug_end;
6926 tg3_start_xmit(nskb, tp->dev);
6932 return NETDEV_TX_OK;
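/* The factor of 3 above is a worst-case allowance of three descriptors
 * per resulting segment: a GSO packet with gso_segs = 10 must find 30
 * free descriptors before segmentation is attempted, otherwise the
 * queue is stopped and NETDEV_TX_BUSY may be returned.
 */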
6935 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6936 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP, 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
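/* For illustration only: the stop/wake logic above leans on free-slot
 * accounting in the style of tg3_tx_avail().  A minimal sketch for a
 * power-of-2 ring follows; the names are illustrative, not the driver's.
 */
#if 0
static u32 example_tx_avail(u32 pending, u32 prod, u32 cons, u32 ring_mask)
{
	/* configured queue depth minus the BDs still in flight */
	return pending - ((prod - cons) & ring_mask);
}
#endif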
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
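/* A small aside: the cons-to-prod walks above assume each ring mask is
 * one less than a power-of-2 ring size, so indexes wrap with an AND
 * instead of a modulo.  Sketch with illustrative names only:
 */
#if 0
static void example_ring_walk(u32 cons, u32 prod, u32 mask)
{
	u32 i;

	for (i = cons; i != prod; i = (i + 1) & mask)
		;	/* visit slot i */
}
#endif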
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupts
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below has worked.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}
	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	if (err)
		return err;

	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
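/* Worked example, assuming BDINFO_FLAGS_MAXLEN_SHIFT is 16 as in tg3.h:
 * a 512-entry TX ring gives maxlen_flags = 512 << 16 = 0x02000000, so the
 * ring length occupies the upper 16 bits of the word and the attribute
 * flags the lower 16.
 */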
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
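/* Worked example with assumed numbers: if the BD cache holds 50 entries
 * and rx_pending is 200, then nic_rep_thresh = min(50 / 2, rx_std_max_post)
 * and host_rep_thresh = max(200 / 8, 1) = 25, and RCVBDI_STD_THRESH is
 * programmed with the smaller of the two replenish thresholds.
 */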
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	u32 tmp;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;
			reg >>= 1;
			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
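/* Reference point: this is the standard reflected CRC-32 (polynomial
 * 0xedb88320) in its bitwise form, so calc_crc("123456789", 9) yields the
 * well-known check value 0xcbf43926.
 */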
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
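/* For illustration: how one multicast address selects a bit in the four
 * hash registers programmed above.  The low 7 bits of the inverted CRC
 * pick one of 128 filter bits; bits 6:5 choose the register and bits 4:0
 * the bit within it.  A self-contained sketch with illustrative names:
 */
#if 0
static void example_hash_position(const u8 *addr, u32 *regidx, u32 *mask)
{
	u32 crc = calc_crc((unsigned char *)addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;		/* 0..127 */

	*regidx = (bit & 0x60) >> 5;	/* MAC_HASH_REG_0..3 */
	*mask = 1 << (bit & 0x1f);	/* bit within that register */
}
#endif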
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->irq_cnt <= 2) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
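/* A minimal sketch of the packing done above, with illustrative names:
 * eight 4-bit indirection entries fold into each 32-bit register, the
 * first entry landing in the most significant nibble.
 */
#if 0
static u32 example_pack_indir(const u8 *tbl)	/* eight entries, each < 16 */
{
	u32 val = 0;
	int n;

	for (n = 0; n < 8; n++)
		val = (val << 4) | (tbl[n] & 0xf);

	return val;
}
#endif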
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;
	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}
	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}
	if (tg3_flag(tp, 57765_CLASS)) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}
	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
	/* Setup the timer prescaler register.  Clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}
	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);
	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);
	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS))
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}
		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode |
	       MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs.  The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
9270 if (tg3_flag(tp, USING_MSIX)) {
9271 val = tr32(MSGINT_MODE);
9272 val |= MSGINT_MODE_ENABLE;
9273 if (tp->irq_cnt > 1)
9274 val |= MSGINT_MODE_MULTIVEC_EN;
9275 if (!tg3_flag(tp, 1SHOT_MSI))
9276 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9277 tw32(MSGINT_MODE, val);
9280 if (!tg3_flag(tp, 5705_PLUS)) {
9281 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9282 udelay(40);
9283 }
9285 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9286 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9287 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9288 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9289 WDMAC_MODE_LNGREAD_ENAB);
9291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9292 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9293 if (tg3_flag(tp, TSO_CAPABLE) &&
9294 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9295 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9296 /* nothing */
9297 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9298 !tg3_flag(tp, IS_5788)) {
9299 val |= WDMAC_MODE_RX_ACCEL;
9303 /* Enable host coalescing bug fix */
9304 if (tg3_flag(tp, 5755_PLUS))
9305 val |= WDMAC_MODE_STATUS_TAG_FIX;
9307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9308 val |= WDMAC_MODE_BURST_ALL_DATA;
9310 tw32_f(WDMAC_MODE, val);
9313 if (tg3_flag(tp, PCIX_MODE)) {
9316 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9319 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9320 pcix_cmd |= PCI_X_CMD_READ_2K;
9321 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9322 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9323 pcix_cmd |= PCI_X_CMD_READ_2K;
9325 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9329 tw32_f(RDMAC_MODE, rdmac_mode);
9332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9333 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9334 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9335 break;
9336 }
9337 if (i < TG3_NUM_RDMA_CHANNELS) {
9338 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9339 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9340 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9341 tg3_flag_set(tp, 5719_RDMA_BUG);
9342 }
9343 }
9345 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9346 if (!tg3_flag(tp, 5705_PLUS))
9347 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9350 tw32(SNDDATAC_MODE,
9351 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9352 else
9353 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9355 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9356 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9357 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9358 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9359 val |= RCVDBDI_MODE_LRG_RING_SZ;
9360 tw32(RCVDBDI_MODE, val);
9361 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9362 if (tg3_flag(tp, HW_TSO_1) ||
9363 tg3_flag(tp, HW_TSO_2) ||
9364 tg3_flag(tp, HW_TSO_3))
9365 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9366 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9367 if (tg3_flag(tp, ENABLE_TSS))
9368 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9369 tw32(SNDBDI_MODE, val);
9370 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9372 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9373 err = tg3_load_5701_a0_firmware_fix(tp);
9374 if (err)
9375 return err;
9376 }
9378 if (tg3_flag(tp, TSO_CAPABLE)) {
9379 err = tg3_load_tso_firmware(tp);
9380 if (err)
9381 return err;
9382 }
9384 tp->tx_mode = TX_MODE_ENABLE;
9386 if (tg3_flag(tp, 5755_PLUS) ||
9387 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9388 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9391 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9392 tp->tx_mode &= ~val;
9393 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9394 }
9396 tw32_f(MAC_TX_MODE, tp->tx_mode);
9399 if (tg3_flag(tp, ENABLE_RSS)) {
9400 tg3_rss_write_indir_tbl(tp);
9402 /* Setup the "secret" hash key. */
9403 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9404 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9405 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9406 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9407 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9408 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9409 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9410 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9411 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9412 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
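/* The ten writes above load the 40-byte RSS hash key, four bytes per
 * register; presumably a Toeplitz-style secret, as is conventional for
 * RSS. The values themselves are arbitrary, they just need to stay
 * stable so a given flow keeps hashing to the same ring.
 */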
9415 tp->rx_mode = RX_MODE_ENABLE;
9416 if (tg3_flag(tp, 5755_PLUS))
9417 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9419 if (tg3_flag(tp, ENABLE_RSS))
9420 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9421 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9422 RX_MODE_RSS_IPV6_HASH_EN |
9423 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9424 RX_MODE_RSS_IPV4_HASH_EN |
9425 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9427 tw32_f(MAC_RX_MODE, tp->rx_mode);
9430 tw32(MAC_LED_CTRL, tp->led_ctrl);
9432 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9433 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9434 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9437 tw32_f(MAC_RX_MODE, tp->rx_mode);
9440 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9441 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9442 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9443 /* Set drive transmission level to 1.2V */
9444 /* only if the signal pre-emphasis bit is not set */
9445 val = tr32(MAC_SERDES_CFG);
9448 tw32(MAC_SERDES_CFG, val);
9450 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9451 tw32(MAC_SERDES_CFG, 0x616000);
9454 /* Prevent chip from dropping frames when flow control
9455 * is enabled.
9456 */
9457 if (tg3_flag(tp, 57765_CLASS))
9458 val = 1;
9459 else
9460 val = 2;
9461 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9463 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9464 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9465 /* Use hardware link auto-negotiation */
9466 tg3_flag_set(tp, HW_AUTONEG);
9469 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9470 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9473 tmp = tr32(SERDES_RX_CTRL);
9474 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9475 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9476 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9477 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9480 if (!tg3_flag(tp, USE_PHYLIB)) {
9481 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9482 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9484 err = tg3_setup_phy(tp, 0);
9488 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9489 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9490 u32 tmp;
9492 /* Clear CRC stats. */
9493 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9494 tg3_writephy(tp, MII_TG3_TEST1,
9495 tmp | MII_TG3_TEST1_CRC_EN);
9496 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9501 __tg3_set_rx_mode(tp->dev);
9503 /* Initialize receive rules. */
9504 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9505 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9506 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9507 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9509 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9513 if (tg3_flag(tp, ENABLE_ASF))
9517 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9519 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9521 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9523 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9525 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9527 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9529 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9531 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9533 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9535 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9537 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9539 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9541 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9543 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9551 if (tg3_flag(tp, ENABLE_APE))
9552 /* Write our heartbeat update interval to APE. */
9553 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9554 APE_HOST_HEARTBEAT_INT_DISABLE);
9556 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9561 /* Called at device open time to get the chip ready for
9562 * packet processing. Invoked with tp->lock held.
9563 */
9564 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9566 tg3_switch_clocks(tp);
9568 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9570 return tg3_reset_hw(tp, reset_phy);
9573 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9577 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9578 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9580 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9583 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9584 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9585 memset(ocir, 0, TG3_OCIR_LEN);
9589 /* sysfs attributes for hwmon */
9590 static ssize_t tg3_show_temp(struct device *dev,
9591 struct device_attribute *devattr, char *buf)
9593 struct pci_dev *pdev = to_pci_dev(dev);
9594 struct net_device *netdev = pci_get_drvdata(pdev);
9595 struct tg3 *tp = netdev_priv(netdev);
9596 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9597 u32 temperature;
9599 spin_lock_bh(&tp->lock);
9600 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9601 sizeof(temperature));
9602 spin_unlock_bh(&tp->lock);
9603 return sprintf(buf, "%u\n", temperature * 1000); /* hwmon expects millidegrees C */
9607 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9608 TG3_TEMP_SENSOR_OFFSET);
9609 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9610 TG3_TEMP_CAUTION_OFFSET);
9611 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9612 TG3_TEMP_MAX_OFFSET);
9614 static struct attribute *tg3_attributes[] = {
9615 &sensor_dev_attr_temp1_input.dev_attr.attr,
9616 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9617 &sensor_dev_attr_temp1_max.dev_attr.attr,
9621 static const struct attribute_group tg3_group = {
9622 .attrs = tg3_attributes,
9625 static void tg3_hwmon_close(struct tg3 *tp)
9627 if (tp->hwmon_dev) {
9628 hwmon_device_unregister(tp->hwmon_dev);
9629 tp->hwmon_dev = NULL;
9630 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9634 static void tg3_hwmon_open(struct tg3 *tp)
9638 struct pci_dev *pdev = tp->pdev;
9639 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9641 tg3_sd_scan_scratchpad(tp, ocirs);
9643 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9644 if (!ocirs[i].src_data_length)
9647 size += ocirs[i].src_hdr_length;
9648 size += ocirs[i].src_data_length;
9654 /* Register hwmon sysfs hooks */
9655 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9657 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9661 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9662 if (IS_ERR(tp->hwmon_dev)) {
9663 tp->hwmon_dev = NULL;
9664 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9665 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9670 #define TG3_STAT_ADD32(PSTAT, REG) \
9671 do { u32 __val = tr32(REG); \
9672 (PSTAT)->low += __val; \
9673 if ((PSTAT)->low < __val) \
9674 (PSTAT)->high += 1; \
9675 } while (0)
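/* Worked example of the carry detection above: with (PSTAT)->low at
 * 0xfffffff0 and a register reading of 0x20, the 32-bit sum wraps to
 * 0x00000010, which is smaller than the value just added, so the wrap
 * is detected and folded into ->high, extending the hardware's 32-bit
 * counters into monotonic 64-bit ones.
 */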
9677 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9679 struct tg3_hw_stats *sp = tp->hw_stats;
9681 if (!netif_carrier_ok(tp->dev))
9684 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9685 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9686 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9687 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9688 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9689 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9690 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9691 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9692 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9693 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9694 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9695 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9696 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9697 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9698 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9699 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9702 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9703 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9704 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9705 tg3_flag_clear(tp, 5719_RDMA_BUG);
9708 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9709 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9710 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9711 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9712 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9713 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9714 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9715 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9716 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9717 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9718 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9719 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9720 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9721 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9723 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9724 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9725 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9726 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9727 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9729 u32 val = tr32(HOSTCC_FLOW_ATTN);
9730 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9732 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9733 sp->rx_discards.low += val;
9734 if (sp->rx_discards.low < val)
9735 sp->rx_discards.high += 1;
9737 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9739 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9742 static void tg3_chk_missed_msi(struct tg3 *tp)
9746 for (i = 0; i < tp->irq_cnt; i++) {
9747 struct tg3_napi *tnapi = &tp->napi[i];
9749 if (tg3_has_work(tnapi)) {
9750 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9751 tnapi->last_tx_cons == tnapi->tx_cons) {
9752 if (tnapi->chk_msi_cnt < 1) {
9753 tnapi->chk_msi_cnt++;
9759 tnapi->chk_msi_cnt = 0;
9760 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9761 tnapi->last_tx_cons = tnapi->tx_cons;
9765 static void tg3_timer(unsigned long __opaque)
9767 struct tg3 *tp = (struct tg3 *) __opaque;
9769 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9772 spin_lock(&tp->lock);
9774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9775 tg3_flag(tp, 57765_CLASS))
9776 tg3_chk_missed_msi(tp);
9778 if (!tg3_flag(tp, TAGGED_STATUS)) {
9779 /* All of this garbage is because, when using non-tagged
9780 * IRQ status, the mailbox/status_block protocol the chip
9781 * uses with the cpu is race prone.
9782 */
9783 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9784 tw32(GRC_LOCAL_CTRL,
9785 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9787 tw32(HOSTCC_MODE, tp->coalesce_mode |
9788 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9791 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9792 spin_unlock(&tp->lock);
9793 tg3_reset_task_schedule(tp);
9798 /* This part only runs once per second. */
9799 if (!--tp->timer_counter) {
9800 if (tg3_flag(tp, 5705_PLUS))
9801 tg3_periodic_fetch_stats(tp);
9803 if (tp->setlpicnt && !--tp->setlpicnt)
9804 tg3_phy_eee_enable(tp);
9806 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9810 mac_stat = tr32(MAC_STATUS);
9813 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9814 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9816 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9820 tg3_setup_phy(tp, 0);
9821 } else if (tg3_flag(tp, POLL_SERDES)) {
9822 u32 mac_stat = tr32(MAC_STATUS);
9825 if (netif_carrier_ok(tp->dev) &&
9826 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9829 if (!netif_carrier_ok(tp->dev) &&
9830 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9831 MAC_STATUS_SIGNAL_DET))) {
9835 if (!tp->serdes_counter) {
9838 ~MAC_MODE_PORT_MODE_MASK));
9840 tw32_f(MAC_MODE, tp->mac_mode);
9843 tg3_setup_phy(tp, 0);
9845 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9846 tg3_flag(tp, 5780_CLASS)) {
9847 tg3_serdes_parallel_detect(tp);
9850 tp->timer_counter = tp->timer_multiplier;
9853 /* Heartbeat is only sent once every 2 seconds.
9855 * The heartbeat is to tell the ASF firmware that the host
9856 * driver is still alive. In the event that the OS crashes,
9857 * ASF needs to reset the hardware to free up the FIFO space
9858 * that may be filled with rx packets destined for the host.
9859 * If the FIFO is full, ASF will no longer function properly.
9861 * Unintended resets have been reported on real time kernels
9862 * where the timer doesn't run on time. Netpoll will also have
9863 * the same problem.
9864 *
9865 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9866 * to check the ring condition when the heartbeat is expiring
9867 * before doing the reset. This will prevent most unintended
9868 * resets.
9869 */
9870 if (!--tp->asf_counter) {
9871 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9872 tg3_wait_for_event_ack(tp);
9874 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9875 FWCMD_NICDRV_ALIVE3);
9876 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9877 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9878 TG3_FW_UPDATE_TIMEOUT_SEC);
9880 tg3_generate_fw_event(tp);
9882 tp->asf_counter = tp->asf_multiplier;
9885 spin_unlock(&tp->lock);
9888 tp->timer.expires = jiffies + tp->timer_offset;
9889 add_timer(&tp->timer);
9892 static void __devinit tg3_timer_init(struct tg3 *tp)
9894 if (tg3_flag(tp, TAGGED_STATUS) &&
9895 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9896 !tg3_flag(tp, 57765_CLASS))
9897 tp->timer_offset = HZ;
9898 else
9899 tp->timer_offset = HZ / 10;
9901 BUG_ON(tp->timer_offset > HZ);
9903 tp->timer_multiplier = (HZ / tp->timer_offset);
9904 tp->asf_multiplier = (HZ / tp->timer_offset) *
9905 TG3_FW_UPDATE_FREQ_SEC;
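/* Example of the arithmetic above, assuming HZ=1000: with tagged
 * status, timer_offset is HZ (the timer fires once per second) and
 * timer_multiplier is 1; otherwise timer_offset is HZ/10 (100 ms) and
 * timer_multiplier is 10, so the once-per-second work in tg3_timer()
 * still runs at 1 Hz. asf_multiplier stretches the same tick out to
 * TG3_FW_UPDATE_FREQ_SEC seconds between firmware heartbeats.
 */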
9907 init_timer(&tp->timer);
9908 tp->timer.data = (unsigned long) tp;
9909 tp->timer.function = tg3_timer;
9912 static void tg3_timer_start(struct tg3 *tp)
9914 tp->asf_counter = tp->asf_multiplier;
9915 tp->timer_counter = tp->timer_multiplier;
9917 tp->timer.expires = jiffies + tp->timer_offset;
9918 add_timer(&tp->timer);
9921 static void tg3_timer_stop(struct tg3 *tp)
9923 del_timer_sync(&tp->timer);
9926 /* Restart hardware after configuration changes, self-test, etc.
9927 * Invoked with tp->lock held.
9928 */
9929 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9930 __releases(tp->lock)
9931 __acquires(tp->lock)
9935 err = tg3_init_hw(tp, reset_phy);
9938 "Failed to re-initialize device, aborting\n");
9939 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9940 tg3_full_unlock(tp);
9943 tg3_napi_enable(tp);
9945 tg3_full_lock(tp, 0);
9950 static void tg3_reset_task(struct work_struct *work)
9952 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9955 tg3_full_lock(tp, 0);
9957 if (!netif_running(tp->dev)) {
9958 tg3_flag_clear(tp, RESET_TASK_PENDING);
9959 tg3_full_unlock(tp);
9963 tg3_full_unlock(tp);
9969 tg3_full_lock(tp, 1);
9971 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9972 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9973 tp->write32_rx_mbox = tg3_write_flush_reg32;
9974 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9975 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9978 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9979 err = tg3_init_hw(tp, 1);
9983 tg3_netif_start(tp);
9986 tg3_full_unlock(tp);
9991 tg3_flag_clear(tp, RESET_TASK_PENDING);
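/* A summary of the handler selection below: MSI and MSI-X vectors get
 * a dedicated, non-shared handler (the one-shot variant when 1SHOT_MSI
 * is set), while legacy INTx registers tg3_interrupt, or its
 * tagged-status variant, with IRQF_SHARED since the line may be shared
 * with other devices.
 */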
9994 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9996 irq_handler_t fn;
9997 unsigned long flags;
9998 char *name;
9999 struct tg3_napi *tnapi = &tp->napi[irq_num];
10001 if (tp->irq_cnt == 1)
10002 name = tp->dev->name;
10003 else {
10004 name = &tnapi->irq_lbl[0];
10005 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10006 name[IFNAMSIZ-1] = 0;
10007 }
10009 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10010 fn = tg3_msi;
10011 if (tg3_flag(tp, 1SHOT_MSI))
10012 fn = tg3_msi_1shot;
10013 flags = 0;
10014 } else {
10015 fn = tg3_interrupt;
10016 if (tg3_flag(tp, TAGGED_STATUS))
10017 fn = tg3_interrupt_tagged;
10018 flags = IRQF_SHARED;
10021 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10024 static int tg3_test_interrupt(struct tg3 *tp)
10026 struct tg3_napi *tnapi = &tp->napi[0];
10027 struct net_device *dev = tp->dev;
10028 int err, i, intr_ok = 0;
10031 if (!netif_running(dev))
10034 tg3_disable_ints(tp);
10036 free_irq(tnapi->irq_vec, tnapi);
10039 * Turn off MSI one shot mode. Otherwise this test has no
10040 * observable way to know whether the interrupt was delivered.
10041 */
10042 if (tg3_flag(tp, 57765_PLUS)) {
10043 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10044 tw32(MSGINT_MODE, val);
10047 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10048 IRQF_SHARED, dev->name, tnapi);
10052 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10053 tg3_enable_ints(tp);
10055 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10058 for (i = 0; i < 5; i++) {
10059 u32 int_mbox, misc_host_ctrl;
10061 int_mbox = tr32_mailbox(tnapi->int_mbox);
10062 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10064 if ((int_mbox != 0) ||
10065 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10070 if (tg3_flag(tp, 57765_PLUS) &&
10071 tnapi->hw_status->status_tag != tnapi->last_tag)
10072 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10077 tg3_disable_ints(tp);
10079 free_irq(tnapi->irq_vec, tnapi);
10081 err = tg3_request_irq(tp, 0);
10087 /* Reenable MSI one shot mode. */
10088 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10089 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10090 tw32(MSGINT_MODE, val);
10098 /* Returns 0 if MSI test succeeds, or if the MSI test fails
10099 * and INTx mode is successfully restored.
10100 */
10101 static int tg3_test_msi(struct tg3 *tp)
10106 if (!tg3_flag(tp, USING_MSI))
10109 /* Turn off SERR reporting in case MSI terminates with Master
10110 * Abort.
10111 */
10112 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10113 pci_write_config_word(tp->pdev, PCI_COMMAND,
10114 pci_cmd & ~PCI_COMMAND_SERR);
10116 err = tg3_test_interrupt(tp);
10118 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10123 /* other failures */
10127 /* MSI test failed, go back to INTx mode */
10128 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10129 "to INTx mode. Please report this failure to the PCI "
10130 "maintainer and include system chipset information\n");
10132 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10134 pci_disable_msi(tp->pdev);
10136 tg3_flag_clear(tp, USING_MSI);
10137 tp->napi[0].irq_vec = tp->pdev->irq;
10139 err = tg3_request_irq(tp, 0);
10143 /* Need to reset the chip because the MSI cycle may have terminated
10144 * with Master Abort.
10146 tg3_full_lock(tp, 1);
10148 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10149 err = tg3_init_hw(tp, 1);
10151 tg3_full_unlock(tp);
10154 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10159 static int tg3_request_firmware(struct tg3 *tp)
10161 const __be32 *fw_data;
10163 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10164 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10169 fw_data = (void *)tp->fw->data;
10171 /* Firmware blob starts with version numbers, followed by
10172 * start address and _full_ length including BSS sections
10173 * (which must be longer than the actual data, of course).
10174 */
10176 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10177 if (tp->fw_len < (tp->fw->size - 12)) {
10178 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10179 tp->fw_len, tp->fw_needed);
10180 release_firmware(tp->fw);
10185 /* We no longer need firmware; we have it. */
10186 tp->fw_needed = NULL;
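/* A sketch of the blob layout implied by the comment above: three
 * big-endian 32-bit words precede the image proper,
 *
 *	fw_data[0]	firmware version
 *	fw_data[1]	load/start address
 *	fw_data[2]	full length, including BSS
 *
 * which is why the sanity check rejects a length word smaller than
 * tp->fw->size minus the 12-byte header.
 */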
10190 static u32 tg3_irq_count(struct tg3 *tp)
10192 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10195 /* We want as many rx rings enabled as there are cpus.
10196 * In multiqueue MSI-X mode, the first MSI-X vector
10197 * only deals with link interrupts, etc, so we add
10198 * one to the number of vectors we are requesting.
10199 */
10200 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
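/* Example: with four rx queues and one tx queue, irq_cnt starts at 4;
 * since it is greater than one, a vector is added for the link/misc
 * vector and min_t() clamps the request (5 here) to tp->irq_max.
 */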
10206 static bool tg3_enable_msix(struct tg3 *tp)
10209 struct msix_entry msix_ent[tp->irq_max];
10211 tp->txq_cnt = tp->txq_req;
10212 tp->rxq_cnt = tp->rxq_req;
10213 if (!tp->rxq_cnt)
10214 tp->rxq_cnt = netif_get_num_default_rss_queues();
10215 if (tp->rxq_cnt > tp->rxq_max)
10216 tp->rxq_cnt = tp->rxq_max;
10218 /* Disable multiple TX rings by default. Simple round-robin hardware
10219 * scheduling of the TX rings can cause starvation of rings with
10220 * small packets when other rings have TSO or jumbo packets.
10221 */
10222 if (!tp->txq_req)
10223 tp->txq_cnt = 1;
10225 tp->irq_cnt = tg3_irq_count(tp);
10227 for (i = 0; i < tp->irq_max; i++) {
10228 msix_ent[i].entry = i;
10229 msix_ent[i].vector = 0;
10232 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10235 } else if (rc != 0) {
10236 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10238 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10241 tp->rxq_cnt = max(rc - 1, 1);
10243 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10246 for (i = 0; i < tp->irq_max; i++)
10247 tp->napi[i].irq_vec = msix_ent[i].vector;
10249 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10250 pci_disable_msix(tp->pdev);
10254 if (tp->irq_cnt == 1)
10257 tg3_flag_set(tp, ENABLE_RSS);
10259 if (tp->txq_cnt > 1)
10260 tg3_flag_set(tp, ENABLE_TSS);
10262 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10267 static void tg3_ints_init(struct tg3 *tp)
10269 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10270 !tg3_flag(tp, TAGGED_STATUS)) {
10271 /* All MSI supporting chips should support tagged
10272 * status. Assert that this is the case.
10274 netdev_warn(tp->dev,
10275 "MSI without TAGGED_STATUS? Not using MSI\n");
10279 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10280 tg3_flag_set(tp, USING_MSIX);
10281 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10282 tg3_flag_set(tp, USING_MSI);
10284 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10285 u32 msi_mode = tr32(MSGINT_MODE);
10286 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10287 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10288 if (!tg3_flag(tp, 1SHOT_MSI))
10289 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10290 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10293 if (!tg3_flag(tp, USING_MSIX)) {
10295 tp->napi[0].irq_vec = tp->pdev->irq;
10298 if (tp->irq_cnt == 1) {
10301 netif_set_real_num_tx_queues(tp->dev, 1);
10302 netif_set_real_num_rx_queues(tp->dev, 1);
10306 static void tg3_ints_fini(struct tg3 *tp)
10308 if (tg3_flag(tp, USING_MSIX))
10309 pci_disable_msix(tp->pdev);
10310 else if (tg3_flag(tp, USING_MSI))
10311 pci_disable_msi(tp->pdev);
10312 tg3_flag_clear(tp, USING_MSI);
10313 tg3_flag_clear(tp, USING_MSIX);
10314 tg3_flag_clear(tp, ENABLE_RSS);
10315 tg3_flag_clear(tp, ENABLE_TSS);
10318 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10320 struct net_device *dev = tp->dev;
10324 * Setup interrupts first so we know how
10325 * many NAPI resources to allocate
10329 tg3_rss_check_indir_tbl(tp);
10331 /* The placement of this call is tied
10332 * to the setup and use of Host TX descriptors.
10333 */
10334 err = tg3_alloc_consistent(tp);
10340 tg3_napi_enable(tp);
10342 for (i = 0; i < tp->irq_cnt; i++) {
10343 struct tg3_napi *tnapi = &tp->napi[i];
10344 err = tg3_request_irq(tp, i);
10346 for (i--; i >= 0; i--) {
10347 tnapi = &tp->napi[i];
10348 free_irq(tnapi->irq_vec, tnapi);
10354 tg3_full_lock(tp, 0);
10356 err = tg3_init_hw(tp, reset_phy);
10358 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10359 tg3_free_rings(tp);
10362 tg3_full_unlock(tp);
10367 if (test_irq && tg3_flag(tp, USING_MSI)) {
10368 err = tg3_test_msi(tp);
10371 tg3_full_lock(tp, 0);
10372 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10373 tg3_free_rings(tp);
10374 tg3_full_unlock(tp);
10379 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10380 u32 val = tr32(PCIE_TRANSACTION_CFG);
10382 tw32(PCIE_TRANSACTION_CFG,
10383 val | PCIE_TRANS_CFG_1SHOT_MSI);
10389 tg3_hwmon_open(tp);
10391 tg3_full_lock(tp, 0);
10393 tg3_timer_start(tp);
10394 tg3_flag_set(tp, INIT_COMPLETE);
10395 tg3_enable_ints(tp);
10397 tg3_full_unlock(tp);
10399 netif_tx_start_all_queues(dev);
10402 * Reset the loopback feature if it was turned on while the device
10403 * was down; make sure that it is installed properly now.
10404 */
10405 if (dev->features & NETIF_F_LOOPBACK)
10406 tg3_set_loopback(dev, dev->features);
10411 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10412 struct tg3_napi *tnapi = &tp->napi[i];
10413 free_irq(tnapi->irq_vec, tnapi);
10417 tg3_napi_disable(tp);
10419 tg3_free_consistent(tp);
10427 static void tg3_stop(struct tg3 *tp)
10431 tg3_napi_disable(tp);
10432 tg3_reset_task_cancel(tp);
10434 netif_tx_disable(tp->dev);
10436 tg3_timer_stop(tp);
10438 tg3_hwmon_close(tp);
10442 tg3_full_lock(tp, 1);
10444 tg3_disable_ints(tp);
10446 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10447 tg3_free_rings(tp);
10448 tg3_flag_clear(tp, INIT_COMPLETE);
10450 tg3_full_unlock(tp);
10452 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10453 struct tg3_napi *tnapi = &tp->napi[i];
10454 free_irq(tnapi->irq_vec, tnapi);
10461 tg3_free_consistent(tp);
10464 static int tg3_open(struct net_device *dev)
10466 struct tg3 *tp = netdev_priv(dev);
10469 if (tp->fw_needed) {
10470 err = tg3_request_firmware(tp);
10471 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10475 netdev_warn(tp->dev, "TSO capability disabled\n");
10476 tg3_flag_clear(tp, TSO_CAPABLE);
10477 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10478 netdev_notice(tp->dev, "TSO capability restored\n");
10479 tg3_flag_set(tp, TSO_CAPABLE);
10483 netif_carrier_off(tp->dev);
10485 err = tg3_power_up(tp);
10489 tg3_full_lock(tp, 0);
10491 tg3_disable_ints(tp);
10492 tg3_flag_clear(tp, INIT_COMPLETE);
10494 tg3_full_unlock(tp);
10496 err = tg3_start(tp, true, true);
10498 tg3_frob_aux_power(tp, false);
10499 pci_set_power_state(tp->pdev, PCI_D3hot);
10504 static int tg3_close(struct net_device *dev)
10506 struct tg3 *tp = netdev_priv(dev);
10510 /* Clear stats across close / open calls */
10511 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10512 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10514 tg3_power_down(tp);
10516 netif_carrier_off(tp->dev);
10521 static inline u64 get_stat64(tg3_stat64_t *val)
10523 return ((u64)val->high << 32) | ((u64)val->low);
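/* e.g. high = 0x1, low = 0x5 yields 0x100000005; the two 32-bit halves
 * maintained by the stats code are glued into one u64.
 */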
10526 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10528 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10530 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10531 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10535 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10536 tg3_writephy(tp, MII_TG3_TEST1,
10537 val | MII_TG3_TEST1_CRC_EN);
10538 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10542 tp->phy_crc_errors += val;
10544 return tp->phy_crc_errors;
10547 return get_stat64(&hw_stats->rx_fcs_errors);
10550 #define ESTAT_ADD(member) \
10551 estats->member = old_estats->member + \
10552 get_stat64(&hw_stats->member)
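/* old_estats holds the running totals saved away before the last chip
 * reset (the hardware counters restart from zero on reinit), so adding
 * the two keeps ethtool -S monotonic across resets; tg3_close()
 * deliberately clears the snapshot so counters restart across a
 * close/open cycle, per the comment there.
 */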
10554 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10556 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10557 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10559 ESTAT_ADD(rx_octets);
10560 ESTAT_ADD(rx_fragments);
10561 ESTAT_ADD(rx_ucast_packets);
10562 ESTAT_ADD(rx_mcast_packets);
10563 ESTAT_ADD(rx_bcast_packets);
10564 ESTAT_ADD(rx_fcs_errors);
10565 ESTAT_ADD(rx_align_errors);
10566 ESTAT_ADD(rx_xon_pause_rcvd);
10567 ESTAT_ADD(rx_xoff_pause_rcvd);
10568 ESTAT_ADD(rx_mac_ctrl_rcvd);
10569 ESTAT_ADD(rx_xoff_entered);
10570 ESTAT_ADD(rx_frame_too_long_errors);
10571 ESTAT_ADD(rx_jabbers);
10572 ESTAT_ADD(rx_undersize_packets);
10573 ESTAT_ADD(rx_in_length_errors);
10574 ESTAT_ADD(rx_out_length_errors);
10575 ESTAT_ADD(rx_64_or_less_octet_packets);
10576 ESTAT_ADD(rx_65_to_127_octet_packets);
10577 ESTAT_ADD(rx_128_to_255_octet_packets);
10578 ESTAT_ADD(rx_256_to_511_octet_packets);
10579 ESTAT_ADD(rx_512_to_1023_octet_packets);
10580 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10581 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10582 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10583 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10584 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10586 ESTAT_ADD(tx_octets);
10587 ESTAT_ADD(tx_collisions);
10588 ESTAT_ADD(tx_xon_sent);
10589 ESTAT_ADD(tx_xoff_sent);
10590 ESTAT_ADD(tx_flow_control);
10591 ESTAT_ADD(tx_mac_errors);
10592 ESTAT_ADD(tx_single_collisions);
10593 ESTAT_ADD(tx_mult_collisions);
10594 ESTAT_ADD(tx_deferred);
10595 ESTAT_ADD(tx_excessive_collisions);
10596 ESTAT_ADD(tx_late_collisions);
10597 ESTAT_ADD(tx_collide_2times);
10598 ESTAT_ADD(tx_collide_3times);
10599 ESTAT_ADD(tx_collide_4times);
10600 ESTAT_ADD(tx_collide_5times);
10601 ESTAT_ADD(tx_collide_6times);
10602 ESTAT_ADD(tx_collide_7times);
10603 ESTAT_ADD(tx_collide_8times);
10604 ESTAT_ADD(tx_collide_9times);
10605 ESTAT_ADD(tx_collide_10times);
10606 ESTAT_ADD(tx_collide_11times);
10607 ESTAT_ADD(tx_collide_12times);
10608 ESTAT_ADD(tx_collide_13times);
10609 ESTAT_ADD(tx_collide_14times);
10610 ESTAT_ADD(tx_collide_15times);
10611 ESTAT_ADD(tx_ucast_packets);
10612 ESTAT_ADD(tx_mcast_packets);
10613 ESTAT_ADD(tx_bcast_packets);
10614 ESTAT_ADD(tx_carrier_sense_errors);
10615 ESTAT_ADD(tx_discards);
10616 ESTAT_ADD(tx_errors);
10618 ESTAT_ADD(dma_writeq_full);
10619 ESTAT_ADD(dma_write_prioq_full);
10620 ESTAT_ADD(rxbds_empty);
10621 ESTAT_ADD(rx_discards);
10622 ESTAT_ADD(rx_errors);
10623 ESTAT_ADD(rx_threshold_hit);
10625 ESTAT_ADD(dma_readq_full);
10626 ESTAT_ADD(dma_read_prioq_full);
10627 ESTAT_ADD(tx_comp_queue_full);
10629 ESTAT_ADD(ring_set_send_prod_index);
10630 ESTAT_ADD(ring_status_update);
10631 ESTAT_ADD(nic_irqs);
10632 ESTAT_ADD(nic_avoided_irqs);
10633 ESTAT_ADD(nic_tx_threshold_hit);
10635 ESTAT_ADD(mbuf_lwm_thresh_hit);
10638 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10640 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10641 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10643 stats->rx_packets = old_stats->rx_packets +
10644 get_stat64(&hw_stats->rx_ucast_packets) +
10645 get_stat64(&hw_stats->rx_mcast_packets) +
10646 get_stat64(&hw_stats->rx_bcast_packets);
10648 stats->tx_packets = old_stats->tx_packets +
10649 get_stat64(&hw_stats->tx_ucast_packets) +
10650 get_stat64(&hw_stats->tx_mcast_packets) +
10651 get_stat64(&hw_stats->tx_bcast_packets);
10653 stats->rx_bytes = old_stats->rx_bytes +
10654 get_stat64(&hw_stats->rx_octets);
10655 stats->tx_bytes = old_stats->tx_bytes +
10656 get_stat64(&hw_stats->tx_octets);
10658 stats->rx_errors = old_stats->rx_errors +
10659 get_stat64(&hw_stats->rx_errors);
10660 stats->tx_errors = old_stats->tx_errors +
10661 get_stat64(&hw_stats->tx_errors) +
10662 get_stat64(&hw_stats->tx_mac_errors) +
10663 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10664 get_stat64(&hw_stats->tx_discards);
10666 stats->multicast = old_stats->multicast +
10667 get_stat64(&hw_stats->rx_mcast_packets);
10668 stats->collisions = old_stats->collisions +
10669 get_stat64(&hw_stats->tx_collisions);
10671 stats->rx_length_errors = old_stats->rx_length_errors +
10672 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10673 get_stat64(&hw_stats->rx_undersize_packets);
10675 stats->rx_over_errors = old_stats->rx_over_errors +
10676 get_stat64(&hw_stats->rxbds_empty);
10677 stats->rx_frame_errors = old_stats->rx_frame_errors +
10678 get_stat64(&hw_stats->rx_align_errors);
10679 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10680 get_stat64(&hw_stats->tx_discards);
10681 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10682 get_stat64(&hw_stats->tx_carrier_sense_errors);
10684 stats->rx_crc_errors = old_stats->rx_crc_errors +
10685 tg3_calc_crc_errors(tp);
10687 stats->rx_missed_errors = old_stats->rx_missed_errors +
10688 get_stat64(&hw_stats->rx_discards);
10690 stats->rx_dropped = tp->rx_dropped;
10691 stats->tx_dropped = tp->tx_dropped;
10694 static int tg3_get_regs_len(struct net_device *dev)
10696 return TG3_REG_BLK_SIZE;
10699 static void tg3_get_regs(struct net_device *dev,
10700 struct ethtool_regs *regs, void *_p)
10702 struct tg3 *tp = netdev_priv(dev);
10706 memset(_p, 0, TG3_REG_BLK_SIZE);
10708 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10711 tg3_full_lock(tp, 0);
10713 tg3_dump_legacy_regs(tp, (u32 *)_p);
10715 tg3_full_unlock(tp);
10718 static int tg3_get_eeprom_len(struct net_device *dev)
10720 struct tg3 *tp = netdev_priv(dev);
10722 return tp->nvram_size;
10725 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10727 struct tg3 *tp = netdev_priv(dev);
10730 u32 i, offset, len, b_offset, b_count;
10733 if (tg3_flag(tp, NO_NVRAM))
10736 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10739 offset = eeprom->offset;
10743 eeprom->magic = TG3_EEPROM_MAGIC;
10746 /* adjustments to start on required 4 byte boundary */
10747 b_offset = offset & 3;
10748 b_count = 4 - b_offset;
10749 if (b_count > len) {
10750 /* i.e. offset=1 len=2 */
10753 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10756 memcpy(data, ((char *)&val) + b_offset, b_count);
10759 eeprom->len += b_count;
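/* Worked example of the fixup above, using the offset=1 len=2 case
 * from the comment: b_offset = 1 and b_count = 4 - 1 = 3, clipped to
 * len = 2; the whole word at NVRAM offset 0 is read and its bytes 1..2
 * are copied out, after which len is 0 and the aligned loop below is
 * skipped.
 */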
10762 /* read bytes up to the last 4 byte boundary */
10763 pd = &data[eeprom->len];
10764 for (i = 0; i < (len - (len & 3)); i += 4) {
10765 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10770 memcpy(pd + i, &val, 4);
10775 /* read last bytes not ending on 4 byte boundary */
10776 pd = &data[eeprom->len];
10778 b_offset = offset + len - b_count;
10779 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10782 memcpy(pd, &val, b_count);
10783 eeprom->len += b_count;
10788 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10790 struct tg3 *tp = netdev_priv(dev);
10792 u32 offset, len, b_offset, odd_len;
10796 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10799 if (tg3_flag(tp, NO_NVRAM) ||
10800 eeprom->magic != TG3_EEPROM_MAGIC)
10803 offset = eeprom->offset;
10806 if ((b_offset = (offset & 3))) {
10807 /* adjustments to start on required 4 byte boundary */
10808 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10819 /* adjustments to end on required 4 byte boundary */
10820 odd_len = 1;
10821 len = (len + 3) & ~3;
10822 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10828 if (b_offset || odd_len) {
10829 buf = kmalloc(len, GFP_KERNEL);
10832 if (b_offset)
10833 memcpy(buf, &start, 4);
10834 if (odd_len)
10835 memcpy(buf+len-4, &end, 4);
10836 memcpy(buf + b_offset, data, eeprom->len);
10837 }
10839 ret = tg3_nvram_write_block(tp, offset, len, buf);
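/* The read-modify-write dance above exists because NVRAM is written in
 * 4-byte units: for an unaligned request, the word covering the head
 * (start) and the word covering the tail (end) are read first, the
 * caller's bytes are spliced in at b_offset, and the padded,
 * word-aligned buffer is written back in a single block.
 */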
10847 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10849 struct tg3 *tp = netdev_priv(dev);
10851 if (tg3_flag(tp, USE_PHYLIB)) {
10852 struct phy_device *phydev;
10853 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10855 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10856 return phy_ethtool_gset(phydev, cmd);
10859 cmd->supported = (SUPPORTED_Autoneg);
10861 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10862 cmd->supported |= (SUPPORTED_1000baseT_Half |
10863 SUPPORTED_1000baseT_Full);
10865 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10866 cmd->supported |= (SUPPORTED_100baseT_Half |
10867 SUPPORTED_100baseT_Full |
10868 SUPPORTED_10baseT_Half |
10869 SUPPORTED_10baseT_Full |
10871 cmd->port = PORT_TP;
10873 cmd->supported |= SUPPORTED_FIBRE;
10874 cmd->port = PORT_FIBRE;
10877 cmd->advertising = tp->link_config.advertising;
10878 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10879 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10880 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10881 cmd->advertising |= ADVERTISED_Pause;
10883 cmd->advertising |= ADVERTISED_Pause |
10884 ADVERTISED_Asym_Pause;
10886 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10887 cmd->advertising |= ADVERTISED_Asym_Pause;
10890 if (netif_running(dev) && netif_carrier_ok(dev)) {
10891 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10892 cmd->duplex = tp->link_config.active_duplex;
10893 cmd->lp_advertising = tp->link_config.rmt_adv;
10894 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10895 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10896 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10898 cmd->eth_tp_mdix = ETH_TP_MDI;
10901 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10902 cmd->duplex = DUPLEX_UNKNOWN;
10903 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10905 cmd->phy_address = tp->phy_addr;
10906 cmd->transceiver = XCVR_INTERNAL;
10907 cmd->autoneg = tp->link_config.autoneg;
10913 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10915 struct tg3 *tp = netdev_priv(dev);
10916 u32 speed = ethtool_cmd_speed(cmd);
10918 if (tg3_flag(tp, USE_PHYLIB)) {
10919 struct phy_device *phydev;
10920 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10922 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10923 return phy_ethtool_sset(phydev, cmd);
10926 if (cmd->autoneg != AUTONEG_ENABLE &&
10927 cmd->autoneg != AUTONEG_DISABLE)
10930 if (cmd->autoneg == AUTONEG_DISABLE &&
10931 cmd->duplex != DUPLEX_FULL &&
10932 cmd->duplex != DUPLEX_HALF)
10935 if (cmd->autoneg == AUTONEG_ENABLE) {
10936 u32 mask = ADVERTISED_Autoneg |
10938 ADVERTISED_Asym_Pause;
10940 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10941 mask |= ADVERTISED_1000baseT_Half |
10942 ADVERTISED_1000baseT_Full;
10944 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10945 mask |= ADVERTISED_100baseT_Half |
10946 ADVERTISED_100baseT_Full |
10947 ADVERTISED_10baseT_Half |
10948 ADVERTISED_10baseT_Full |
10951 mask |= ADVERTISED_FIBRE;
10953 if (cmd->advertising & ~mask)
10956 mask &= (ADVERTISED_1000baseT_Half |
10957 ADVERTISED_1000baseT_Full |
10958 ADVERTISED_100baseT_Half |
10959 ADVERTISED_100baseT_Full |
10960 ADVERTISED_10baseT_Half |
10961 ADVERTISED_10baseT_Full);
10963 cmd->advertising &= mask;
10965 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10966 if (speed != SPEED_1000)
10969 if (cmd->duplex != DUPLEX_FULL)
10972 if (speed != SPEED_100 &&
10978 tg3_full_lock(tp, 0);
10980 tp->link_config.autoneg = cmd->autoneg;
10981 if (cmd->autoneg == AUTONEG_ENABLE) {
10982 tp->link_config.advertising = (cmd->advertising |
10983 ADVERTISED_Autoneg);
10984 tp->link_config.speed = SPEED_UNKNOWN;
10985 tp->link_config.duplex = DUPLEX_UNKNOWN;
10987 tp->link_config.advertising = 0;
10988 tp->link_config.speed = speed;
10989 tp->link_config.duplex = cmd->duplex;
10992 if (netif_running(dev))
10993 tg3_setup_phy(tp, 1);
10995 tg3_full_unlock(tp);
11000 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11002 struct tg3 *tp = netdev_priv(dev);
11004 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11005 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11006 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11007 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11010 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11012 struct tg3 *tp = netdev_priv(dev);
11014 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11015 wol->supported = WAKE_MAGIC;
11017 wol->supported = 0;
11019 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11020 wol->wolopts = WAKE_MAGIC;
11021 memset(&wol->sopass, 0, sizeof(wol->sopass));
11024 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11026 struct tg3 *tp = netdev_priv(dev);
11027 struct device *dp = &tp->pdev->dev;
11029 if (wol->wolopts & ~WAKE_MAGIC)
11031 if ((wol->wolopts & WAKE_MAGIC) &&
11032 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11035 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11037 spin_lock_bh(&tp->lock);
11038 if (device_may_wakeup(dp))
11039 tg3_flag_set(tp, WOL_ENABLE);
11041 tg3_flag_clear(tp, WOL_ENABLE);
11042 spin_unlock_bh(&tp->lock);
11047 static u32 tg3_get_msglevel(struct net_device *dev)
11049 struct tg3 *tp = netdev_priv(dev);
11050 return tp->msg_enable;
11053 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11055 struct tg3 *tp = netdev_priv(dev);
11056 tp->msg_enable = value;
11059 static int tg3_nway_reset(struct net_device *dev)
11061 struct tg3 *tp = netdev_priv(dev);
11064 if (!netif_running(dev))
11067 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11070 if (tg3_flag(tp, USE_PHYLIB)) {
11071 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11073 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11077 spin_lock_bh(&tp->lock);
11079 tg3_readphy(tp, MII_BMCR, &bmcr);
11080 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11081 ((bmcr & BMCR_ANENABLE) ||
11082 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11083 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11087 spin_unlock_bh(&tp->lock);
11093 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11095 struct tg3 *tp = netdev_priv(dev);
11097 ering->rx_max_pending = tp->rx_std_ring_mask;
11098 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11099 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11101 ering->rx_jumbo_max_pending = 0;
11103 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11105 ering->rx_pending = tp->rx_pending;
11106 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11107 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11109 ering->rx_jumbo_pending = 0;
11111 ering->tx_pending = tp->napi[0].tx_pending;
11114 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11116 struct tg3 *tp = netdev_priv(dev);
11117 int i, irq_sync = 0, err = 0;
11119 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11120 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11121 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11122 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11123 (tg3_flag(tp, TSO_BUG) &&
11124 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11125 return -EINVAL;
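/* The tx_pending floor above exists because one skb can consume up to
 * MAX_SKB_FRAGS + 1 descriptors; a smaller ring could never accept
 * even a single maximally fragmented packet, and the TSO_BUG
 * workaround can need roughly three times that in the worst case.
 */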
11127 if (netif_running(dev)) {
11129 tg3_netif_stop(tp);
11133 tg3_full_lock(tp, irq_sync);
11135 tp->rx_pending = ering->rx_pending;
11137 if (tg3_flag(tp, MAX_RXPEND_64) &&
11138 tp->rx_pending > 63)
11139 tp->rx_pending = 63;
11140 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11142 for (i = 0; i < tp->irq_max; i++)
11143 tp->napi[i].tx_pending = ering->tx_pending;
11145 if (netif_running(dev)) {
11146 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11147 err = tg3_restart_hw(tp, 1);
11149 tg3_netif_start(tp);
11152 tg3_full_unlock(tp);
11154 if (irq_sync && !err)
11160 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11162 struct tg3 *tp = netdev_priv(dev);
11164 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11166 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11167 epause->rx_pause = 1;
11169 epause->rx_pause = 0;
11171 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11172 epause->tx_pause = 1;
11174 epause->tx_pause = 0;
11177 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11179 struct tg3 *tp = netdev_priv(dev);
11182 if (tg3_flag(tp, USE_PHYLIB)) {
11184 struct phy_device *phydev;
11186 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11188 if (!(phydev->supported & SUPPORTED_Pause) ||
11189 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11190 (epause->rx_pause != epause->tx_pause)))
11193 tp->link_config.flowctrl = 0;
11194 if (epause->rx_pause) {
11195 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11197 if (epause->tx_pause) {
11198 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11199 newadv = ADVERTISED_Pause;
11201 newadv = ADVERTISED_Pause |
11202 ADVERTISED_Asym_Pause;
11203 } else if (epause->tx_pause) {
11204 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11205 newadv = ADVERTISED_Asym_Pause;
11209 if (epause->autoneg)
11210 tg3_flag_set(tp, PAUSE_AUTONEG);
11212 tg3_flag_clear(tp, PAUSE_AUTONEG);
11214 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11215 u32 oldadv = phydev->advertising &
11216 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11217 if (oldadv != newadv) {
11218 phydev->advertising &=
11219 ~(ADVERTISED_Pause |
11220 ADVERTISED_Asym_Pause);
11221 phydev->advertising |= newadv;
11222 if (phydev->autoneg) {
11224 * Always renegotiate the link to
11225 * inform our link partner of our
11226 * flow control settings, even if the
11227 * flow control is forced. Let
11228 * tg3_adjust_link() do the final
11229 * flow control setup.
11231 return phy_start_aneg(phydev);
11235 if (!epause->autoneg)
11236 tg3_setup_flow_control(tp, 0, 0);
11238 tp->link_config.advertising &=
11239 ~(ADVERTISED_Pause |
11240 ADVERTISED_Asym_Pause);
11241 tp->link_config.advertising |= newadv;
11246 if (netif_running(dev)) {
11247 tg3_netif_stop(tp);
11251 tg3_full_lock(tp, irq_sync);
11253 if (epause->autoneg)
11254 tg3_flag_set(tp, PAUSE_AUTONEG);
11256 tg3_flag_clear(tp, PAUSE_AUTONEG);
11257 if (epause->rx_pause)
11258 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11260 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11261 if (epause->tx_pause)
11262 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11264 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11266 if (netif_running(dev)) {
11267 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11268 err = tg3_restart_hw(tp, 1);
11270 tg3_netif_start(tp);
11273 tg3_full_unlock(tp);
11279 static int tg3_get_sset_count(struct net_device *dev, int sset)
11283 return TG3_NUM_TEST;
11285 return TG3_NUM_STATS;
11287 return -EOPNOTSUPP;
11291 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11292 u32 *rules __always_unused)
11294 struct tg3 *tp = netdev_priv(dev);
11296 if (!tg3_flag(tp, SUPPORT_MSIX))
11297 return -EOPNOTSUPP;
11299 switch (info->cmd) {
11300 case ETHTOOL_GRXRINGS:
11301 if (netif_running(tp->dev))
11302 info->data = tp->rxq_cnt;
11304 info->data = num_online_cpus();
11305 if (info->data > TG3_RSS_MAX_NUM_QS)
11306 info->data = TG3_RSS_MAX_NUM_QS;
11309 /* The first interrupt vector only
11310 * handles link interrupts.
11311 */
11312 info->data += 1;
11313 return 0;
11315 default:
11316 return -EOPNOTSUPP;
11320 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11323 struct tg3 *tp = netdev_priv(dev);
11325 if (tg3_flag(tp, SUPPORT_MSIX))
11326 size = TG3_RSS_INDIR_TBL_SIZE;
11331 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11333 struct tg3 *tp = netdev_priv(dev);
11336 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11337 indir[i] = tp->rss_ind_tbl[i];
11342 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11344 struct tg3 *tp = netdev_priv(dev);
11347 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11348 tp->rss_ind_tbl[i] = indir[i];
11350 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11353 /* It is legal to write the indirection
11354 * table while the device is running.
11355 */
11356 tg3_full_lock(tp, 0);
11357 tg3_rss_write_indir_tbl(tp);
11358 tg3_full_unlock(tp);
11363 static void tg3_get_channels(struct net_device *dev,
11364 struct ethtool_channels *channel)
11366 struct tg3 *tp = netdev_priv(dev);
11367 u32 deflt_qs = netif_get_num_default_rss_queues();
11369 channel->max_rx = tp->rxq_max;
11370 channel->max_tx = tp->txq_max;
11372 if (netif_running(dev)) {
11373 channel->rx_count = tp->rxq_cnt;
11374 channel->tx_count = tp->txq_cnt;
11377 channel->rx_count = tp->rxq_req;
11379 channel->rx_count = min(deflt_qs, tp->rxq_max);
11382 channel->tx_count = tp->txq_req;
11384 channel->tx_count = min(deflt_qs, tp->txq_max);
11388 static int tg3_set_channels(struct net_device *dev,
11389 struct ethtool_channels *channel)
11391 struct tg3 *tp = netdev_priv(dev);
11393 if (!tg3_flag(tp, SUPPORT_MSIX))
11394 return -EOPNOTSUPP;
11396 if (channel->rx_count > tp->rxq_max ||
11397 channel->tx_count > tp->txq_max)
11400 tp->rxq_req = channel->rx_count;
11401 tp->txq_req = channel->tx_count;
11403 if (!netif_running(dev))
11408 netif_carrier_off(dev);
11410 tg3_start(tp, true, false);
11415 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11417 switch (stringset) {
11419 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
11422 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
11425 WARN_ON(1); /* we need a WARN() */
11430 static int tg3_set_phys_id(struct net_device *dev,
11431 enum ethtool_phys_id_state state)
11433 struct tg3 *tp = netdev_priv(dev);
11435 if (!netif_running(tp->dev))
11439 case ETHTOOL_ID_ACTIVE:
11440 return 1; /* cycle on/off once per second */
11442 case ETHTOOL_ID_ON:
11443 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11444 LED_CTRL_1000MBPS_ON |
11445 LED_CTRL_100MBPS_ON |
11446 LED_CTRL_10MBPS_ON |
11447 LED_CTRL_TRAFFIC_OVERRIDE |
11448 LED_CTRL_TRAFFIC_BLINK |
11449 LED_CTRL_TRAFFIC_LED);
11452 case ETHTOOL_ID_OFF:
11453 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11454 LED_CTRL_TRAFFIC_OVERRIDE);
11457 case ETHTOOL_ID_INACTIVE:
11458 tw32(MAC_LED_CTRL, tp->led_ctrl);
11465 static void tg3_get_ethtool_stats(struct net_device *dev,
11466 struct ethtool_stats *estats, u64 *tmp_stats)
11468 struct tg3 *tp = netdev_priv(dev);
11471 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11473 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11476 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11480 u32 offset = 0, len = 0;
11483 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11486 if (magic == TG3_EEPROM_MAGIC) {
11487 for (offset = TG3_NVM_DIR_START;
11488 offset < TG3_NVM_DIR_END;
11489 offset += TG3_NVM_DIRENT_SIZE) {
11490 if (tg3_nvram_read(tp, offset, &val))
11493 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11494 TG3_NVM_DIRTYPE_EXTVPD)
11498 if (offset != TG3_NVM_DIR_END) {
11499 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11500 if (tg3_nvram_read(tp, offset + 4, &offset))
11503 offset = tg3_nvram_logical_addr(tp, offset);
11507 if (!offset || !len) {
11508 offset = TG3_NVM_VPD_OFF;
11509 len = TG3_NVM_VPD_LEN;
11512 buf = kmalloc(len, GFP_KERNEL);
11516 if (magic == TG3_EEPROM_MAGIC) {
11517 for (i = 0; i < len; i += 4) {
11518 /* The data is in little-endian format in NVRAM.
11519 * Use the big-endian read routines to preserve
11520 * the byte order as it exists in NVRAM.
11521 */
11522 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11528 unsigned int pos = 0;
11530 ptr = (u8 *)&buf[0];
11531 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11532 cnt = pci_read_vpd(tp->pdev, pos,
11534 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11552 #define NVRAM_TEST_SIZE 0x100
11553 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11554 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11555 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11556 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11557 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11558 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11559 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11560 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11562 static int tg3_test_nvram(struct tg3 *tp)
11564 u32 csum, magic, len;
11566 int i, j, k, err = 0, size;
11568 if (tg3_flag(tp, NO_NVRAM))
11571 if (tg3_nvram_read(tp, 0, &magic) != 0)
11574 if (magic == TG3_EEPROM_MAGIC)
11575 size = NVRAM_TEST_SIZE;
11576 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11577 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11578 TG3_EEPROM_SB_FORMAT_1) {
11579 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11580 case TG3_EEPROM_SB_REVISION_0:
11581 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11583 case TG3_EEPROM_SB_REVISION_2:
11584 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11586 case TG3_EEPROM_SB_REVISION_3:
11587 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11589 case TG3_EEPROM_SB_REVISION_4:
11590 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11592 case TG3_EEPROM_SB_REVISION_5:
11593 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11595 case TG3_EEPROM_SB_REVISION_6:
11596 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11603 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11604 size = NVRAM_SELFBOOT_HW_SIZE;
11608 buf = kmalloc(size, GFP_KERNEL);
11613 for (i = 0, j = 0; i < size; i += 4, j++) {
11614 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11621 /* Selfboot format */
11622 magic = be32_to_cpu(buf[0]);
11623 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11624 TG3_EEPROM_MAGIC_FW) {
11625 u8 *buf8 = (u8 *) buf, csum8 = 0;
11627 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11628 TG3_EEPROM_SB_REVISION_2) {
11629 /* For rev 2, the csum doesn't include the MBA. */
11630 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11632 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11635 for (i = 0; i < size; i++)
11648 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11649 TG3_EEPROM_MAGIC_HW) {
11650 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11651 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11652 u8 *buf8 = (u8 *) buf;
11654 /* Separate the parity bits and the data bytes. */
11655 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11656 if ((i == 0) || (i == 8)) {
11660 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11661 parity[k++] = buf8[i] & msk;
11663 } else if (i == 16) {
11667 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11668 parity[k++] = buf8[i] & msk;
11671 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11672 parity[k++] = buf8[i] & msk;
11675 data[j++] = buf8[i];
11679 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11680 u8 hw8 = hweight8(data[i]);
11682 if ((hw8 & 0x1) && parity[i])
11683 goto out;
11684 else if (!(hw8 & 0x1) && !parity[i])
11685 goto out;
11686 }
11688 err = 0;
11689 goto out;
11690 }
11692 err = -EIO;
11693 /* Bootstrap checksum at offset 0x10 */
11694 csum = calc_crc((unsigned char *) buf, 0x10);
11695 if (csum != le32_to_cpu(buf[0x10/4]))
11696 goto out;
11698 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11699 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11700 if (csum != le32_to_cpu(buf[0xfc/4]))
11701 goto out;
11703 kfree(buf);
11705 buf = tg3_vpd_readblock(tp, &len);
11706 if (!buf)
11707 return -ENOMEM;
11709 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11710 if (i > 0) {
11711 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11712 if (j < 0)
11713 goto out;
11715 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11716 goto out;
11718 i += PCI_VPD_LRDT_TAG_SIZE;
11719 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11720 PCI_VPD_RO_KEYWORD_CHKSUM);
11722 if (j > 0) {
11723 u8 csum8 = 0;
11724 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11726 for (i = 0; i <= j; i++)
11727 csum8 += ((u8 *)buf)[i];
11729 if (csum8)
11730 goto out;
11731 }
11732 }
11734 err = 0;
11736 out:
11737 kfree(buf);
11738 return err;
11739 }
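/* Illustrative note: the VPD CHKSUM keyword stores a byte chosen so
 * that all bytes from the start of the VPD block through the checksum
 * field sum to zero mod 256; a nonzero csum8 here therefore means the
 * block is corrupt.  E.g. if the preceding bytes sum to 0x73, a valid
 * image stores 0x8d, since (0x73 + 0x8d) & 0xff == 0.
 */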
11741 #define TG3_SERDES_TIMEOUT_SEC 2
11742 #define TG3_COPPER_TIMEOUT_SEC 6
11744 static int tg3_test_link(struct tg3 *tp)
11746 int i, max;
11748 if (!netif_running(tp->dev))
11749 return -ENODEV;
11751 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11752 max = TG3_SERDES_TIMEOUT_SEC;
11753 else
11754 max = TG3_COPPER_TIMEOUT_SEC;
11756 for (i = 0; i < max; i++) {
11757 if (netif_carrier_ok(tp->dev))
11758 return 0;
11760 if (msleep_interruptible(1000))
11761 break;
11762 }
11764 return -EIO;
11765 }
11767 /* Only test the commonly used registers */
11768 static int tg3_test_registers(struct tg3 *tp)
11770 int i, is_5705, is_5750;
11771 u32 offset, read_mask, write_mask, val, save_val, read_val;
11772 static struct {
11773 u16 offset;
11774 u16 flags;
11775 #define TG3_FL_5705 0x1
11776 #define TG3_FL_NOT_5705 0x2
11777 #define TG3_FL_NOT_5788 0x4
11778 #define TG3_FL_NOT_5750 0x8
11779 u32 read_mask;
11780 u32 write_mask;
11781 } reg_tbl[] = {
11782 /* MAC Control Registers */
11783 { MAC_MODE, TG3_FL_NOT_5705,
11784 0x00000000, 0x00ef6f8c },
11785 { MAC_MODE, TG3_FL_5705,
11786 0x00000000, 0x01ef6b8c },
11787 { MAC_STATUS, TG3_FL_NOT_5705,
11788 0x03800107, 0x00000000 },
11789 { MAC_STATUS, TG3_FL_5705,
11790 0x03800100, 0x00000000 },
11791 { MAC_ADDR_0_HIGH, 0x0000,
11792 0x00000000, 0x0000ffff },
11793 { MAC_ADDR_0_LOW, 0x0000,
11794 0x00000000, 0xffffffff },
11795 { MAC_RX_MTU_SIZE, 0x0000,
11796 0x00000000, 0x0000ffff },
11797 { MAC_TX_MODE, 0x0000,
11798 0x00000000, 0x00000070 },
11799 { MAC_TX_LENGTHS, 0x0000,
11800 0x00000000, 0x00003fff },
11801 { MAC_RX_MODE, TG3_FL_NOT_5705,
11802 0x00000000, 0x000007fc },
11803 { MAC_RX_MODE, TG3_FL_5705,
11804 0x00000000, 0x000007dc },
11805 { MAC_HASH_REG_0, 0x0000,
11806 0x00000000, 0xffffffff },
11807 { MAC_HASH_REG_1, 0x0000,
11808 0x00000000, 0xffffffff },
11809 { MAC_HASH_REG_2, 0x0000,
11810 0x00000000, 0xffffffff },
11811 { MAC_HASH_REG_3, 0x0000,
11812 0x00000000, 0xffffffff },
11814 /* Receive Data and Receive BD Initiator Control Registers. */
11815 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11816 0x00000000, 0xffffffff },
11817 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11818 0x00000000, 0xffffffff },
11819 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11820 0x00000000, 0x00000003 },
11821 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11822 0x00000000, 0xffffffff },
11823 { RCVDBDI_STD_BD+0, 0x0000,
11824 0x00000000, 0xffffffff },
11825 { RCVDBDI_STD_BD+4, 0x0000,
11826 0x00000000, 0xffffffff },
11827 { RCVDBDI_STD_BD+8, 0x0000,
11828 0x00000000, 0xffff0002 },
11829 { RCVDBDI_STD_BD+0xc, 0x0000,
11830 0x00000000, 0xffffffff },
11832 /* Receive BD Initiator Control Registers. */
11833 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11834 0x00000000, 0xffffffff },
11835 { RCVBDI_STD_THRESH, TG3_FL_5705,
11836 0x00000000, 0x000003ff },
11837 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11838 0x00000000, 0xffffffff },
11840 /* Host Coalescing Control Registers. */
11841 { HOSTCC_MODE, TG3_FL_NOT_5705,
11842 0x00000000, 0x00000004 },
11843 { HOSTCC_MODE, TG3_FL_5705,
11844 0x00000000, 0x000000f6 },
11845 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11846 0x00000000, 0xffffffff },
11847 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11848 0x00000000, 0x000003ff },
11849 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11850 0x00000000, 0xffffffff },
11851 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11852 0x00000000, 0x000003ff },
11853 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11854 0x00000000, 0xffffffff },
11855 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11856 0x00000000, 0x000000ff },
11857 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11858 0x00000000, 0xffffffff },
11859 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11860 0x00000000, 0x000000ff },
11861 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11862 0x00000000, 0xffffffff },
11863 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11864 0x00000000, 0xffffffff },
11865 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11866 0x00000000, 0xffffffff },
11867 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11868 0x00000000, 0x000000ff },
11869 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11870 0x00000000, 0xffffffff },
11871 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11872 0x00000000, 0x000000ff },
11873 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11874 0x00000000, 0xffffffff },
11875 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11876 0x00000000, 0xffffffff },
11877 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11878 0x00000000, 0xffffffff },
11879 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11880 0x00000000, 0xffffffff },
11881 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11882 0x00000000, 0xffffffff },
11883 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11884 0xffffffff, 0x00000000 },
11885 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11886 0xffffffff, 0x00000000 },
11888 /* Buffer Manager Control Registers. */
11889 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11890 0x00000000, 0x007fff80 },
11891 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11892 0x00000000, 0x007fffff },
11893 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11894 0x00000000, 0x0000003f },
11895 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11896 0x00000000, 0x000001ff },
11897 { BUFMGR_MB_HIGH_WATER, 0x0000,
11898 0x00000000, 0x000001ff },
11899 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11900 0xffffffff, 0x00000000 },
11901 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11902 0xffffffff, 0x00000000 },
11904 /* Mailbox Registers */
11905 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11906 0x00000000, 0x000001ff },
11907 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11908 0x00000000, 0x000001ff },
11909 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11910 0x00000000, 0x000007ff },
11911 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11912 0x00000000, 0x000001ff },
11914 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11917 is_5705 = is_5750 = 0;
11918 if (tg3_flag(tp, 5705_PLUS)) {
11919 is_5705 = 1;
11920 if (tg3_flag(tp, 5750_PLUS))
11921 is_5750 = 1;
11922 }
11924 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11925 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11926 continue;
11928 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11929 continue;
11931 if (tg3_flag(tp, IS_5788) &&
11932 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11933 continue;
11935 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11936 continue;
11938 offset = (u32) reg_tbl[i].offset;
11939 read_mask = reg_tbl[i].read_mask;
11940 write_mask = reg_tbl[i].write_mask;
11942 /* Save the original register content */
11943 save_val = tr32(offset);
11945 /* Determine the read-only value. */
11946 read_val = save_val & read_mask;
11948 /* Write zero to the register, then make sure the read-only bits
11949 * are not changed and the read/write bits are all zeros.
11950 */
11951 tw32(offset, 0);
11953 val = tr32(offset);
11955 /* Test the read-only and read/write bits. */
11956 if (((val & read_mask) != read_val) || (val & write_mask))
11957 goto out;
11959 /* Write ones to all the bits defined by RdMask and WrMask, then
11960 * make sure the read-only bits are not changed and the
11961 * read/write bits are all ones.
11962 */
11963 tw32(offset, read_mask | write_mask);
11965 val = tr32(offset);
11967 /* Test the read-only bits. */
11968 if ((val & read_mask) != read_val)
11969 goto out;
11971 /* Test the read/write bits. */
11972 if ((val & write_mask) != write_mask)
11973 goto out;
11975 tw32(offset, save_val);
11976 }
11978 return 0;
11980 out:
11981 if (netif_msg_hw(tp))
11982 netdev_err(tp->dev,
11983 "Register test failed at offset %x\n", offset);
11984 tw32(offset, save_val);
11985 return -EIO;
11986 }
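/* Worked example with the entry { MAC_ADDR_0_HIGH, 0x0000,
 * 0x00000000, 0x0000ffff }: read_mask 0 means no read-only bits are
 * checked; writing 0 must read back 0 in bits 15:0, and writing the
 * write_mask must read back 0x0000ffff there.  Bits 31:16 fall outside
 * both masks and are ignored by the test.
 */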
11988 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11990 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11991 int i;
11992 u32 j;
11994 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11995 for (j = 0; j < len; j += 4) {
11996 u32 val;
11998 tg3_write_mem(tp, offset + j, test_pattern[i]);
11999 tg3_read_mem(tp, offset + j, &val);
12000 if (val != test_pattern[i])
12001 return -EIO;
12002 }
12003 }
12005 return 0;
12006 }
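/* Illustrative note: the three patterns are complementary --
 * 0x00000000 and 0xffffffff expose bits stuck at 1 or 0, while
 * 0xaa55a55a alternates neighboring bits and nibbles to expose shorts
 * or coupling between adjacent memory cells.
 */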
12007 static int tg3_test_memory(struct tg3 *tp)
12009 static struct mem_entry {
12010 u32 offset;
12011 u32 len;
12012 } mem_tbl_570x[] = {
12013 { 0x00000000, 0x00b50},
12014 { 0x00002000, 0x1c000},
12015 { 0xffffffff, 0x00000}
12016 }, mem_tbl_5705[] = {
12017 { 0x00000100, 0x0000c},
12018 { 0x00000200, 0x00008},
12019 { 0x00004000, 0x00800},
12020 { 0x00006000, 0x01000},
12021 { 0x00008000, 0x02000},
12022 { 0x00010000, 0x0e000},
12023 { 0xffffffff, 0x00000}
12024 }, mem_tbl_5755[] = {
12025 { 0x00000200, 0x00008},
12026 { 0x00004000, 0x00800},
12027 { 0x00006000, 0x00800},
12028 { 0x00008000, 0x02000},
12029 { 0x00010000, 0x0c000},
12030 { 0xffffffff, 0x00000}
12031 }, mem_tbl_5906[] = {
12032 { 0x00000200, 0x00008},
12033 { 0x00004000, 0x00400},
12034 { 0x00006000, 0x00400},
12035 { 0x00008000, 0x01000},
12036 { 0x00010000, 0x01000},
12037 { 0xffffffff, 0x00000}
12038 }, mem_tbl_5717[] = {
12039 { 0x00000200, 0x00008},
12040 { 0x00010000, 0x0a000},
12041 { 0x00020000, 0x13c00},
12042 { 0xffffffff, 0x00000}
12043 }, mem_tbl_57765[] = {
12044 { 0x00000200, 0x00008},
12045 { 0x00004000, 0x00800},
12046 { 0x00006000, 0x09800},
12047 { 0x00010000, 0x0a000},
12048 { 0xffffffff, 0x00000}
12050 struct mem_entry *mem_tbl;
12051 int err = 0;
12052 int i;
12054 if (tg3_flag(tp, 5717_PLUS))
12055 mem_tbl = mem_tbl_5717;
12056 else if (tg3_flag(tp, 57765_CLASS))
12057 mem_tbl = mem_tbl_57765;
12058 else if (tg3_flag(tp, 5755_PLUS))
12059 mem_tbl = mem_tbl_5755;
12060 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12061 mem_tbl = mem_tbl_5906;
12062 else if (tg3_flag(tp, 5705_PLUS))
12063 mem_tbl = mem_tbl_5705;
12064 else
12065 mem_tbl = mem_tbl_570x;
12067 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12068 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12069 if (err)
12070 break;
12071 }
12073 return err;
12074 }
12076 #define TG3_TSO_MSS 500
12078 #define TG3_TSO_IP_HDR_LEN 20
12079 #define TG3_TSO_TCP_HDR_LEN 20
12080 #define TG3_TSO_TCP_OPT_LEN 12
12082 static const u8 tg3_tso_header[] = {
12084 0x45, 0x00, 0x00, 0x00,
12085 0x00, 0x00, 0x40, 0x00,
12086 0x40, 0x06, 0x00, 0x00,
12087 0x0a, 0x00, 0x00, 0x01,
12088 0x0a, 0x00, 0x00, 0x02,
12089 0x0d, 0x00, 0xe0, 0x00,
12090 0x00, 0x00, 0x01, 0x00,
12091 0x00, 0x00, 0x02, 0x00,
12092 0x80, 0x10, 0x10, 0x00,
12093 0x14, 0x09, 0x00, 0x00,
12094 0x01, 0x01, 0x08, 0x0a,
12095 0x11, 0x11, 0x11, 0x11,
12096 0x11, 0x11, 0x11, 0x11,
12097 };
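/* Illustrative decode of the canned header above (a reading of the
 * byte values, not an authoritative map): bytes 0-19 form an IPv4
 * header (version 4, IHL 5, TTL 0x40, protocol 6/TCP, 10.0.0.1 ->
 * 10.0.0.2) and bytes 20-51 a TCP header with data offset 8, i.e. 20
 * bytes of fixed header plus 12 bytes of options (NOP, NOP,
 * timestamp), matching TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
 * TG3_TSO_TCP_OPT_LEN.
 */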
12099 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12101 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12102 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12103 u32 budget;
12104 struct sk_buff *skb;
12105 u8 *tx_data, *rx_data;
12106 dma_addr_t map;
12107 int num_pkts, tx_len, rx_len, i, err;
12108 struct tg3_rx_buffer_desc *desc;
12109 struct tg3_napi *tnapi, *rnapi;
12110 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12112 tnapi = &tp->napi[0];
12113 rnapi = &tp->napi[0];
12114 if (tp->irq_cnt > 1) {
12115 if (tg3_flag(tp, ENABLE_RSS))
12116 rnapi = &tp->napi[1];
12117 if (tg3_flag(tp, ENABLE_TSS))
12118 tnapi = &tp->napi[1];
12120 coal_now = tnapi->coal_now | rnapi->coal_now;
12122 err = -EIO;
12124 tx_len = pktsz;
12125 skb = netdev_alloc_skb(tp->dev, tx_len);
12126 if (!skb)
12127 return -ENOMEM;
12129 tx_data = skb_put(skb, tx_len);
12130 memcpy(tx_data, tp->dev->dev_addr, 6);
12131 memset(tx_data + 6, 0x0, 8);
12133 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12135 if (tso_loopback) {
12136 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12138 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12139 TG3_TSO_TCP_OPT_LEN;
12141 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12142 sizeof(tg3_tso_header));
12143 mss = TG3_TSO_MSS;
12145 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12146 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12148 /* Set the total length field in the IP header */
12149 iph->tot_len = htons((u16)(mss + hdr_len));
12151 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12152 TXD_FLAG_CPU_POST_DMA);
12154 if (tg3_flag(tp, HW_TSO_1) ||
12155 tg3_flag(tp, HW_TSO_2) ||
12156 tg3_flag(tp, HW_TSO_3)) {
12157 struct tcphdr *th;
12158 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12159 th = (struct tcphdr *)&tx_data[val];
12160 th->check = 0;
12161 } else
12162 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12164 if (tg3_flag(tp, HW_TSO_3)) {
12165 mss |= (hdr_len & 0xc) << 12;
12166 if (hdr_len & 0x10)
12167 base_flags |= 0x00000010;
12168 base_flags |= (hdr_len & 0x3e0) << 5;
12169 } else if (tg3_flag(tp, HW_TSO_2))
12170 mss |= hdr_len << 9;
12171 else if (tg3_flag(tp, HW_TSO_1) ||
12172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12173 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12174 } else {
12175 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12176 }
12178 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12179 } else {
12180 num_pkts = 1;
12181 data_off = ETH_HLEN;
12182 }
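/* Worked example of the HW_TSO_3 packing above, with hdr_len = 52
 * (0x34): bits 3:2 of hdr_len (0x4) land in mss bits 15:14 via
 * "(hdr_len & 0xc) << 12"; bit 4 (0x10) sets flag 0x00000010; and
 * bits 9:5 (0x20) are shifted into base_flags via
 * "(hdr_len & 0x3e0) << 5", i.e. 0x400.  The three fields together
 * reassemble hdr_len for the hardware without a contiguous bitfield.
 */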
12183 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12184 tx_len > VLAN_ETH_FRAME_LEN)
12185 base_flags |= TXD_FLAG_JMB_PKT;
12188 for (i = data_off; i < tx_len; i++)
12189 tx_data[i] = (u8) (i & 0xff);
12191 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12192 if (pci_dma_mapping_error(tp->pdev, map)) {
12193 dev_kfree_skb(skb);
12194 return -ENOMEM;
12195 }
12197 val = tnapi->tx_prod;
12198 tnapi->tx_buffers[val].skb = skb;
12199 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12201 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12202 rnapi->coal_now);
12204 udelay(10);
12206 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12208 budget = tg3_tx_avail(tnapi);
12209 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12210 base_flags | TXD_FLAG_END, mss, 0)) {
12211 tnapi->tx_buffers[val].skb = NULL;
12212 dev_kfree_skb(skb);
12213 return -EIO;
12214 }
12218 /* Sync BD data before updating mailbox */
12219 wmb();
12221 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12222 tr32_mailbox(tnapi->prodmbox);
12226 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12227 for (i = 0; i < 35; i++) {
12228 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12229 coal_now);
12231 udelay(10);
12233 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12234 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12235 if ((tx_idx == tnapi->tx_prod) &&
12236 (rx_idx == (rx_start_idx + num_pkts)))
12237 break;
12238 }
12240 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12241 dev_kfree_skb(skb);
12243 if (tx_idx != tnapi->tx_prod)
12244 goto out;
12246 if (rx_idx != rx_start_idx + num_pkts)
12247 goto out;
12250 while (rx_idx != rx_start_idx) {
12251 desc = &rnapi->rx_rcb[rx_start_idx++];
12252 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12253 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12255 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12256 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12257 goto out;
12259 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12260 - ETH_FCS_LEN;
12262 if (!tso_loopback) {
12263 if (rx_len != tx_len)
12264 goto out;
12266 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12267 if (opaque_key != RXD_OPAQUE_RING_STD)
12268 goto out;
12269 } else {
12270 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12271 goto out;
12272 }
12273 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12274 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12275 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12276 goto out;
12277 }
12279 if (opaque_key == RXD_OPAQUE_RING_STD) {
12280 rx_data = tpr->rx_std_buffers[desc_idx].data;
12281 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12282 mapping);
12283 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12284 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12285 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12286 mapping);
12287 } else
12288 goto out;
12290 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12291 PCI_DMA_FROMDEVICE);
12293 rx_data += TG3_RX_OFFSET(tp);
12294 for (i = data_off; i < rx_len; i++, val++) {
12295 if (*(rx_data + i) != (u8) (val & 0xff))
12296 goto out;
12297 }
12298 }
12300 err = 0;
12302 /* tg3_free_rings will unmap and free the rx_data */
12303 out:
12304 return err;
12305 }
12307 #define TG3_STD_LOOPBACK_FAILED 1
12308 #define TG3_JMB_LOOPBACK_FAILED 2
12309 #define TG3_TSO_LOOPBACK_FAILED 4
12310 #define TG3_LOOPBACK_FAILED \
12311 (TG3_STD_LOOPBACK_FAILED | \
12312 TG3_JMB_LOOPBACK_FAILED | \
12313 TG3_TSO_LOOPBACK_FAILED)
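/* Example decode: a per-mode result of 5 (TG3_STD_LOOPBACK_FAILED |
 * TG3_TSO_LOOPBACK_FAILED) means the standard and TSO packets failed
 * while the jumbo packet passed.
 */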
12315 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12317 int err = -EIO;
12318 u32 eee_cap;
12319 u32 jmb_pkt_sz = 9000;
12321 if (tp->dma_limit)
12322 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12324 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12325 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12327 if (!netif_running(tp->dev)) {
12328 data[0] = TG3_LOOPBACK_FAILED;
12329 data[1] = TG3_LOOPBACK_FAILED;
12330 if (do_extlpbk)
12331 data[2] = TG3_LOOPBACK_FAILED;
12332 goto done;
12333 }
12335 err = tg3_reset_hw(tp, 1);
12336 if (err) {
12337 data[0] = TG3_LOOPBACK_FAILED;
12338 data[1] = TG3_LOOPBACK_FAILED;
12339 if (do_extlpbk)
12340 data[2] = TG3_LOOPBACK_FAILED;
12341 goto done;
12342 }
12344 if (tg3_flag(tp, ENABLE_RSS)) {
12347 /* Reroute all rx packets to the 1st queue */
12348 for (i = MAC_RSS_INDIR_TBL_0;
12349 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12350 tw32(i, 0x0);
12351 }
12353 /* HW errata - mac loopback fails in some cases on 5780.
12354 * Normal traffic and PHY loopback are not affected by
12355 * errata. Also, the MAC loopback test is deprecated for
12356 * all newer ASIC revisions.
12358 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12359 !tg3_flag(tp, CPMU_PRESENT)) {
12360 tg3_mac_loopback(tp, true);
12362 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12363 data[0] |= TG3_STD_LOOPBACK_FAILED;
12365 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12366 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12367 data[0] |= TG3_JMB_LOOPBACK_FAILED;
12369 tg3_mac_loopback(tp, false);
12372 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12373 !tg3_flag(tp, USE_PHYLIB)) {
12376 tg3_phy_lpbk_set(tp, 0, false);
12378 /* Wait for link */
12379 for (i = 0; i < 100; i++) {
12380 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12381 break;
12382 mdelay(1);
12383 }
12385 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12386 data[1] |= TG3_STD_LOOPBACK_FAILED;
12387 if (tg3_flag(tp, TSO_CAPABLE) &&
12388 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12389 data[1] |= TG3_TSO_LOOPBACK_FAILED;
12390 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12391 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12392 data[1] |= TG3_JMB_LOOPBACK_FAILED;
12394 if (do_extlpbk) {
12395 tg3_phy_lpbk_set(tp, 0, true);
12397 /* All link indications report up, but the hardware
12398 * isn't really ready for about 20 msec. Double it
12399 * to be sure.
12400 */
12401 msleep(40);
12403 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12404 data[2] |= TG3_STD_LOOPBACK_FAILED;
12405 if (tg3_flag(tp, TSO_CAPABLE) &&
12406 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12407 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12408 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12409 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12410 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12413 /* Re-enable gphy autopowerdown. */
12414 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12415 tg3_phy_toggle_apd(tp, true);
12418 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12420 done:
12421 tp->phy_flags |= eee_cap;
12423 return err;
12424 }
12426 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12429 struct tg3 *tp = netdev_priv(dev);
12430 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12432 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12433 tg3_power_up(tp)) {
12434 etest->flags |= ETH_TEST_FL_FAILED;
12435 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12436 return;
12437 }
12439 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12441 if (tg3_test_nvram(tp) != 0) {
12442 etest->flags |= ETH_TEST_FL_FAILED;
12443 data[0] = 1;
12444 }
12445 if (!doextlpbk && tg3_test_link(tp)) {
12446 etest->flags |= ETH_TEST_FL_FAILED;
12447 data[1] = 1;
12448 }
12449 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12450 int err, err2 = 0, irq_sync = 0;
12452 if (netif_running(dev)) {
12453 tg3_phy_stop(tp);
12454 tg3_netif_stop(tp);
12455 irq_sync = 1;
12456 }
12458 tg3_full_lock(tp, irq_sync);
12460 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12461 err = tg3_nvram_lock(tp);
12462 tg3_halt_cpu(tp, RX_CPU_BASE);
12463 if (!tg3_flag(tp, 5705_PLUS))
12464 tg3_halt_cpu(tp, TX_CPU_BASE);
12465 if (!err)
12466 tg3_nvram_unlock(tp);
12468 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12469 tg3_phy_reset(tp);
12471 if (tg3_test_registers(tp) != 0) {
12472 etest->flags |= ETH_TEST_FL_FAILED;
12473 data[2] = 1;
12474 }
12476 if (tg3_test_memory(tp) != 0) {
12477 etest->flags |= ETH_TEST_FL_FAILED;
12478 data[3] = 1;
12479 }
12481 if (doextlpbk)
12482 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12484 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12485 etest->flags |= ETH_TEST_FL_FAILED;
12487 tg3_full_unlock(tp);
12489 if (tg3_test_interrupt(tp) != 0) {
12490 etest->flags |= ETH_TEST_FL_FAILED;
12494 tg3_full_lock(tp, 0);
12496 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12497 if (netif_running(dev)) {
12498 tg3_flag_set(tp, INIT_COMPLETE);
12499 err2 = tg3_restart_hw(tp, 1);
12500 if (!err2)
12501 tg3_netif_start(tp);
12502 }
12504 tg3_full_unlock(tp);
12506 if (irq_sync && !err2)
12507 tg3_phy_start(tp);
12508 }
12509 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12510 tg3_power_down(tp);
12514 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12516 struct mii_ioctl_data *data = if_mii(ifr);
12517 struct tg3 *tp = netdev_priv(dev);
12520 if (tg3_flag(tp, USE_PHYLIB)) {
12521 struct phy_device *phydev;
12522 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12524 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12525 return phy_mii_ioctl(phydev, ifr, cmd);
12526 }
12528 switch (cmd) {
12529 case SIOCGMIIPHY:
12530 data->phy_id = tp->phy_addr;
12531 /* fallthru */
12533 case SIOCGMIIREG: {
12536 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12537 break; /* We have no PHY */
12539 if (!netif_running(dev))
12542 spin_lock_bh(&tp->lock);
12543 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12544 spin_unlock_bh(&tp->lock);
12546 data->val_out = mii_regval;
12548 return err;
12549 }
12551 case SIOCSMIIREG:
12552 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12553 break; /* We have no PHY */
12555 if (!netif_running(dev))
12558 spin_lock_bh(&tp->lock);
12559 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12560 spin_unlock_bh(&tp->lock);
12562 return err;
12564 default:
12565 /* do nothing */
12566 break;
12567 }
12568 return -EOPNOTSUPP;
12571 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12573 struct tg3 *tp = netdev_priv(dev);
12575 memcpy(ec, &tp->coal, sizeof(*ec));
12576 return 0;
12577 }
12579 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12581 struct tg3 *tp = netdev_priv(dev);
12582 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12583 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12585 if (!tg3_flag(tp, 5705_PLUS)) {
12586 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12587 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12588 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12589 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12592 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12593 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12594 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12595 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12596 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12597 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12598 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12599 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12600 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12601 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12602 return -EINVAL;
12604 /* No rx interrupts will be generated if both are zero */
12605 if ((ec->rx_coalesce_usecs == 0) &&
12606 (ec->rx_max_coalesced_frames == 0))
12607 return -EINVAL;
12609 /* No tx interrupts will be generated if both are zero */
12610 if ((ec->tx_coalesce_usecs == 0) &&
12611 (ec->tx_max_coalesced_frames == 0))
12612 return -EINVAL;
12614 /* Only copy relevant parameters, ignore all others. */
12615 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12616 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12617 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12618 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12619 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12620 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12621 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12622 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12623 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12625 if (netif_running(dev)) {
12626 tg3_full_lock(tp, 0);
12627 __tg3_set_coalesce(tp, &tp->coal);
12628 tg3_full_unlock(tp);
12629 }
12631 return 0;
12632 }
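/* These two handlers back "ethtool -c" / "ethtool -C".  For example
 * (illustrative invocation):
 *
 *     ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * arrives here as ec->rx_coalesce_usecs = 20 and
 * ec->rx_max_coalesced_frames = 5, is range-checked above, and is
 * then programmed into the chip via __tg3_set_coalesce().
 */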
12633 static const struct ethtool_ops tg3_ethtool_ops = {
12634 .get_settings = tg3_get_settings,
12635 .set_settings = tg3_set_settings,
12636 .get_drvinfo = tg3_get_drvinfo,
12637 .get_regs_len = tg3_get_regs_len,
12638 .get_regs = tg3_get_regs,
12639 .get_wol = tg3_get_wol,
12640 .set_wol = tg3_set_wol,
12641 .get_msglevel = tg3_get_msglevel,
12642 .set_msglevel = tg3_set_msglevel,
12643 .nway_reset = tg3_nway_reset,
12644 .get_link = ethtool_op_get_link,
12645 .get_eeprom_len = tg3_get_eeprom_len,
12646 .get_eeprom = tg3_get_eeprom,
12647 .set_eeprom = tg3_set_eeprom,
12648 .get_ringparam = tg3_get_ringparam,
12649 .set_ringparam = tg3_set_ringparam,
12650 .get_pauseparam = tg3_get_pauseparam,
12651 .set_pauseparam = tg3_set_pauseparam,
12652 .self_test = tg3_self_test,
12653 .get_strings = tg3_get_strings,
12654 .set_phys_id = tg3_set_phys_id,
12655 .get_ethtool_stats = tg3_get_ethtool_stats,
12656 .get_coalesce = tg3_get_coalesce,
12657 .set_coalesce = tg3_set_coalesce,
12658 .get_sset_count = tg3_get_sset_count,
12659 .get_rxnfc = tg3_get_rxnfc,
12660 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12661 .get_rxfh_indir = tg3_get_rxfh_indir,
12662 .set_rxfh_indir = tg3_set_rxfh_indir,
12663 .get_channels = tg3_get_channels,
12664 .set_channels = tg3_set_channels,
12665 .get_ts_info = ethtool_op_get_ts_info,
12666 };
12668 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12669 struct rtnl_link_stats64 *stats)
12671 struct tg3 *tp = netdev_priv(dev);
12673 spin_lock_bh(&tp->lock);
12674 if (!tp->hw_stats) {
12675 spin_unlock_bh(&tp->lock);
12676 return &tp->net_stats_prev;
12679 tg3_get_nstats(tp, stats);
12680 spin_unlock_bh(&tp->lock);
12682 return stats;
12683 }
12685 static void tg3_set_rx_mode(struct net_device *dev)
12687 struct tg3 *tp = netdev_priv(dev);
12689 if (!netif_running(dev))
12690 return;
12692 tg3_full_lock(tp, 0);
12693 __tg3_set_rx_mode(dev);
12694 tg3_full_unlock(tp);
12697 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12700 dev->mtu = new_mtu;
12702 if (new_mtu > ETH_DATA_LEN) {
12703 if (tg3_flag(tp, 5780_CLASS)) {
12704 netdev_update_features(dev);
12705 tg3_flag_clear(tp, TSO_CAPABLE);
12706 } else {
12707 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12708 }
12709 } else {
12710 if (tg3_flag(tp, 5780_CLASS)) {
12711 tg3_flag_set(tp, TSO_CAPABLE);
12712 netdev_update_features(dev);
12713 }
12714 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12715 }
12716 }
12718 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12720 struct tg3 *tp = netdev_priv(dev);
12721 int err, reset_phy = 0;
12723 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12724 return -EINVAL;
12726 if (!netif_running(dev)) {
12727 /* We'll just catch it later when the
12728 * device is up'd.
12729 */
12730 tg3_set_mtu(dev, tp, new_mtu);
12731 return 0;
12732 }
12734 tg3_phy_stop(tp);
12736 tg3_netif_stop(tp);
12738 tg3_full_lock(tp, 1);
12740 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12742 tg3_set_mtu(dev, tp, new_mtu);
12744 /* Reset PHY, otherwise the read DMA engine will be in a mode that
12745 * breaks all requests to 256 bytes.
12747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12748 reset_phy = 1;
12750 err = tg3_restart_hw(tp, reset_phy);
12752 if (!err)
12753 tg3_netif_start(tp);
12755 tg3_full_unlock(tp);
12757 if (!err)
12758 tg3_phy_start(tp);
12760 return err;
12761 }
12763 static const struct net_device_ops tg3_netdev_ops = {
12764 .ndo_open = tg3_open,
12765 .ndo_stop = tg3_close,
12766 .ndo_start_xmit = tg3_start_xmit,
12767 .ndo_get_stats64 = tg3_get_stats64,
12768 .ndo_validate_addr = eth_validate_addr,
12769 .ndo_set_rx_mode = tg3_set_rx_mode,
12770 .ndo_set_mac_address = tg3_set_mac_addr,
12771 .ndo_do_ioctl = tg3_ioctl,
12772 .ndo_tx_timeout = tg3_tx_timeout,
12773 .ndo_change_mtu = tg3_change_mtu,
12774 .ndo_fix_features = tg3_fix_features,
12775 .ndo_set_features = tg3_set_features,
12776 #ifdef CONFIG_NET_POLL_CONTROLLER
12777 .ndo_poll_controller = tg3_poll_controller,
12778 #endif
12779 };
12781 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12783 u32 cursize, val, magic;
12785 tp->nvram_size = EEPROM_CHIP_SIZE;
12787 if (tg3_nvram_read(tp, 0, &magic) != 0)
12788 return;
12790 if ((magic != TG3_EEPROM_MAGIC) &&
12791 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12792 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12793 return;
12796 * Size the chip by reading offsets at increasing powers of two.
12797 * When we encounter our validation signature, we know the addressing
12798 * has wrapped around, and thus have our chip size.
12799 */
12800 cursize = 0x10;
12802 while (cursize < tp->nvram_size) {
12803 if (tg3_nvram_read(tp, cursize, &val) != 0)
12804 return;
12806 if (val == magic)
12807 break;
12809 cursize <<= 1;
12810 }
12812 tp->nvram_size = cursize;
12813 }
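/* Worked example (assumed 4 KB part): with the magic word at offset 0,
 * reads at 0x10, 0x20, ... return other data until the address wraps
 * at 0x1000 and the magic word reappears, so the loop breaks with
 * cursize = 0x1000 and that becomes nvram_size.
 */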
12815 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12819 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12820 return;
12822 /* Selfboot format */
12823 if (val != TG3_EEPROM_MAGIC) {
12824 tg3_get_eeprom_size(tp);
12825 return;
12826 }
12828 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12829 if (val != 0) {
12830 /* This is confusing. We want to operate on the
12831 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12832 * call will read from NVRAM and byteswap the data
12833 * according to the byteswapping settings for all
12834 * other register accesses. This ensures the data we
12835 * want will always reside in the lower 16-bits.
12836 * However, the data in NVRAM is in LE format, which
12837 * means the data from the NVRAM read will always be
12838 * opposite the endianness of the CPU. The 16-bit
12839 * byteswap then brings the data to CPU endianness.
12841 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12842 return;
12843 }
12844 }
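/* Worked example (assumed 512 KB part): NVRAM stores the LE field
 * 0x0200 at 0xf2.  Per the comment above, the low 16 bits of the read
 * arrive byte-reversed relative to the CPU, so swab16(0x0002) ==
 * 0x0200, and 0x0200 * 1024 == 512 KB.
 */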
12845 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12848 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12852 nvcfg1 = tr32(NVRAM_CFG1);
12853 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12854 tg3_flag_set(tp, FLASH);
12856 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12857 tw32(NVRAM_CFG1, nvcfg1);
12860 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12861 tg3_flag(tp, 5780_CLASS)) {
12862 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12863 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12864 tp->nvram_jedecnum = JEDEC_ATMEL;
12865 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12866 tg3_flag_set(tp, NVRAM_BUFFERED);
12867 break;
12868 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12869 tp->nvram_jedecnum = JEDEC_ATMEL;
12870 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12871 break;
12872 case FLASH_VENDOR_ATMEL_EEPROM:
12873 tp->nvram_jedecnum = JEDEC_ATMEL;
12874 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12875 tg3_flag_set(tp, NVRAM_BUFFERED);
12876 break;
12877 case FLASH_VENDOR_ST:
12878 tp->nvram_jedecnum = JEDEC_ST;
12879 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12880 tg3_flag_set(tp, NVRAM_BUFFERED);
12881 break;
12882 case FLASH_VENDOR_SAIFUN:
12883 tp->nvram_jedecnum = JEDEC_SAIFUN;
12884 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12885 break;
12886 case FLASH_VENDOR_SST_SMALL:
12887 case FLASH_VENDOR_SST_LARGE:
12888 tp->nvram_jedecnum = JEDEC_SST;
12889 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12890 break;
12891 }
12892 } else {
12893 tp->nvram_jedecnum = JEDEC_ATMEL;
12894 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12895 tg3_flag_set(tp, NVRAM_BUFFERED);
12896 }
12897 }
12899 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12901 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12902 case FLASH_5752PAGE_SIZE_256:
12903 tp->nvram_pagesize = 256;
12904 break;
12905 case FLASH_5752PAGE_SIZE_512:
12906 tp->nvram_pagesize = 512;
12907 break;
12908 case FLASH_5752PAGE_SIZE_1K:
12909 tp->nvram_pagesize = 1024;
12910 break;
12911 case FLASH_5752PAGE_SIZE_2K:
12912 tp->nvram_pagesize = 2048;
12913 break;
12914 case FLASH_5752PAGE_SIZE_4K:
12915 tp->nvram_pagesize = 4096;
12916 break;
12917 case FLASH_5752PAGE_SIZE_264:
12918 tp->nvram_pagesize = 264;
12919 break;
12920 case FLASH_5752PAGE_SIZE_528:
12921 tp->nvram_pagesize = 528;
12922 break;
12923 }
12924 }
12926 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12930 nvcfg1 = tr32(NVRAM_CFG1);
12932 /* NVRAM protection for TPM */
12933 if (nvcfg1 & (1 << 27))
12934 tg3_flag_set(tp, PROTECTED_NVRAM);
12936 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12937 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12938 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12939 tp->nvram_jedecnum = JEDEC_ATMEL;
12940 tg3_flag_set(tp, NVRAM_BUFFERED);
12941 break;
12942 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12943 tp->nvram_jedecnum = JEDEC_ATMEL;
12944 tg3_flag_set(tp, NVRAM_BUFFERED);
12945 tg3_flag_set(tp, FLASH);
12946 break;
12947 case FLASH_5752VENDOR_ST_M45PE10:
12948 case FLASH_5752VENDOR_ST_M45PE20:
12949 case FLASH_5752VENDOR_ST_M45PE40:
12950 tp->nvram_jedecnum = JEDEC_ST;
12951 tg3_flag_set(tp, NVRAM_BUFFERED);
12952 tg3_flag_set(tp, FLASH);
12953 break;
12954 }
12956 if (tg3_flag(tp, FLASH)) {
12957 tg3_nvram_get_pagesize(tp, nvcfg1);
12959 /* For eeprom, set pagesize to maximum eeprom size */
12960 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12962 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12963 tw32(NVRAM_CFG1, nvcfg1);
12967 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12969 u32 nvcfg1, protect = 0;
12971 nvcfg1 = tr32(NVRAM_CFG1);
12973 /* NVRAM protection for TPM */
12974 if (nvcfg1 & (1 << 27)) {
12975 tg3_flag_set(tp, PROTECTED_NVRAM);
12976 protect = 1;
12977 }
12979 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12980 switch (nvcfg1) {
12981 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12982 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12983 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12984 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12985 tp->nvram_jedecnum = JEDEC_ATMEL;
12986 tg3_flag_set(tp, NVRAM_BUFFERED);
12987 tg3_flag_set(tp, FLASH);
12988 tp->nvram_pagesize = 264;
12989 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12990 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12991 tp->nvram_size = (protect ? 0x3e200 :
12992 TG3_NVRAM_SIZE_512KB);
12993 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12994 tp->nvram_size = (protect ? 0x1f200 :
12995 TG3_NVRAM_SIZE_256KB);
12997 tp->nvram_size = (protect ? 0x1f200 :
12998 TG3_NVRAM_SIZE_128KB);
12999 break;
13000 case FLASH_5752VENDOR_ST_M45PE10:
13001 case FLASH_5752VENDOR_ST_M45PE20:
13002 case FLASH_5752VENDOR_ST_M45PE40:
13003 tp->nvram_jedecnum = JEDEC_ST;
13004 tg3_flag_set(tp, NVRAM_BUFFERED);
13005 tg3_flag_set(tp, FLASH);
13006 tp->nvram_pagesize = 256;
13007 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13008 tp->nvram_size = (protect ?
13009 TG3_NVRAM_SIZE_64KB :
13010 TG3_NVRAM_SIZE_128KB);
13011 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13012 tp->nvram_size = (protect ?
13013 TG3_NVRAM_SIZE_64KB :
13014 TG3_NVRAM_SIZE_256KB);
13016 tp->nvram_size = (protect ?
13017 TG3_NVRAM_SIZE_128KB :
13018 TG3_NVRAM_SIZE_512KB);
13019 break;
13020 }
13021 }
13023 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
13027 nvcfg1 = tr32(NVRAM_CFG1);
13029 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13030 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13031 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13032 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13033 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13034 tp->nvram_jedecnum = JEDEC_ATMEL;
13035 tg3_flag_set(tp, NVRAM_BUFFERED);
13036 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13038 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13039 tw32(NVRAM_CFG1, nvcfg1);
13040 break;
13041 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13042 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13043 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13044 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13045 tp->nvram_jedecnum = JEDEC_ATMEL;
13046 tg3_flag_set(tp, NVRAM_BUFFERED);
13047 tg3_flag_set(tp, FLASH);
13048 tp->nvram_pagesize = 264;
13049 break;
13050 case FLASH_5752VENDOR_ST_M45PE10:
13051 case FLASH_5752VENDOR_ST_M45PE20:
13052 case FLASH_5752VENDOR_ST_M45PE40:
13053 tp->nvram_jedecnum = JEDEC_ST;
13054 tg3_flag_set(tp, NVRAM_BUFFERED);
13055 tg3_flag_set(tp, FLASH);
13056 tp->nvram_pagesize = 256;
13057 break;
13058 }
13059 }
13061 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
13063 u32 nvcfg1, protect = 0;
13065 nvcfg1 = tr32(NVRAM_CFG1);
13067 /* NVRAM protection for TPM */
13068 if (nvcfg1 & (1 << 27)) {
13069 tg3_flag_set(tp, PROTECTED_NVRAM);
13070 protect = 1;
13071 }
13073 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13074 switch (nvcfg1) {
13075 case FLASH_5761VENDOR_ATMEL_ADB021D:
13076 case FLASH_5761VENDOR_ATMEL_ADB041D:
13077 case FLASH_5761VENDOR_ATMEL_ADB081D:
13078 case FLASH_5761VENDOR_ATMEL_ADB161D:
13079 case FLASH_5761VENDOR_ATMEL_MDB021D:
13080 case FLASH_5761VENDOR_ATMEL_MDB041D:
13081 case FLASH_5761VENDOR_ATMEL_MDB081D:
13082 case FLASH_5761VENDOR_ATMEL_MDB161D:
13083 tp->nvram_jedecnum = JEDEC_ATMEL;
13084 tg3_flag_set(tp, NVRAM_BUFFERED);
13085 tg3_flag_set(tp, FLASH);
13086 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13087 tp->nvram_pagesize = 256;
13088 break;
13089 case FLASH_5761VENDOR_ST_A_M45PE20:
13090 case FLASH_5761VENDOR_ST_A_M45PE40:
13091 case FLASH_5761VENDOR_ST_A_M45PE80:
13092 case FLASH_5761VENDOR_ST_A_M45PE16:
13093 case FLASH_5761VENDOR_ST_M_M45PE20:
13094 case FLASH_5761VENDOR_ST_M_M45PE40:
13095 case FLASH_5761VENDOR_ST_M_M45PE80:
13096 case FLASH_5761VENDOR_ST_M_M45PE16:
13097 tp->nvram_jedecnum = JEDEC_ST;
13098 tg3_flag_set(tp, NVRAM_BUFFERED);
13099 tg3_flag_set(tp, FLASH);
13100 tp->nvram_pagesize = 256;
13101 break;
13102 }
13104 if (protect) {
13105 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13106 } else {
13107 switch (nvcfg1) {
13108 case FLASH_5761VENDOR_ATMEL_ADB161D:
13109 case FLASH_5761VENDOR_ATMEL_MDB161D:
13110 case FLASH_5761VENDOR_ST_A_M45PE16:
13111 case FLASH_5761VENDOR_ST_M_M45PE16:
13112 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13113 break;
13114 case FLASH_5761VENDOR_ATMEL_ADB081D:
13115 case FLASH_5761VENDOR_ATMEL_MDB081D:
13116 case FLASH_5761VENDOR_ST_A_M45PE80:
13117 case FLASH_5761VENDOR_ST_M_M45PE80:
13118 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13119 break;
13120 case FLASH_5761VENDOR_ATMEL_ADB041D:
13121 case FLASH_5761VENDOR_ATMEL_MDB041D:
13122 case FLASH_5761VENDOR_ST_A_M45PE40:
13123 case FLASH_5761VENDOR_ST_M_M45PE40:
13124 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13125 break;
13126 case FLASH_5761VENDOR_ATMEL_ADB021D:
13127 case FLASH_5761VENDOR_ATMEL_MDB021D:
13128 case FLASH_5761VENDOR_ST_A_M45PE20:
13129 case FLASH_5761VENDOR_ST_M_M45PE20:
13130 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13131 break;
13132 }
13133 }
13134 }
13136 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13138 tp->nvram_jedecnum = JEDEC_ATMEL;
13139 tg3_flag_set(tp, NVRAM_BUFFERED);
13140 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13143 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
13147 nvcfg1 = tr32(NVRAM_CFG1);
13149 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13150 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13151 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13152 tp->nvram_jedecnum = JEDEC_ATMEL;
13153 tg3_flag_set(tp, NVRAM_BUFFERED);
13154 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13156 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13157 tw32(NVRAM_CFG1, nvcfg1);
13158 break;
13159 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13160 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13161 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13162 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13163 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13164 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13165 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13166 tp->nvram_jedecnum = JEDEC_ATMEL;
13167 tg3_flag_set(tp, NVRAM_BUFFERED);
13168 tg3_flag_set(tp, FLASH);
13170 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13171 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13172 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13173 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13174 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13175 break;
13176 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13177 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13178 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13179 break;
13180 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13181 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13182 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13183 break;
13184 }
13185 break;
13186 case FLASH_5752VENDOR_ST_M45PE10:
13187 case FLASH_5752VENDOR_ST_M45PE20:
13188 case FLASH_5752VENDOR_ST_M45PE40:
13189 tp->nvram_jedecnum = JEDEC_ST;
13190 tg3_flag_set(tp, NVRAM_BUFFERED);
13191 tg3_flag_set(tp, FLASH);
13193 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13194 case FLASH_5752VENDOR_ST_M45PE10:
13195 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13196 break;
13197 case FLASH_5752VENDOR_ST_M45PE20:
13198 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13199 break;
13200 case FLASH_5752VENDOR_ST_M45PE40:
13201 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13202 break;
13203 }
13204 break;
13205 default:
13206 tg3_flag_set(tp, NO_NVRAM);
13207 return;
13208 }
13210 tg3_nvram_get_pagesize(tp, nvcfg1);
13211 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13212 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13216 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13220 nvcfg1 = tr32(NVRAM_CFG1);
13222 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13223 case FLASH_5717VENDOR_ATMEL_EEPROM:
13224 case FLASH_5717VENDOR_MICRO_EEPROM:
13225 tp->nvram_jedecnum = JEDEC_ATMEL;
13226 tg3_flag_set(tp, NVRAM_BUFFERED);
13227 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13229 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13230 tw32(NVRAM_CFG1, nvcfg1);
13231 break;
13232 case FLASH_5717VENDOR_ATMEL_MDB011D:
13233 case FLASH_5717VENDOR_ATMEL_ADB011B:
13234 case FLASH_5717VENDOR_ATMEL_ADB011D:
13235 case FLASH_5717VENDOR_ATMEL_MDB021D:
13236 case FLASH_5717VENDOR_ATMEL_ADB021B:
13237 case FLASH_5717VENDOR_ATMEL_ADB021D:
13238 case FLASH_5717VENDOR_ATMEL_45USPT:
13239 tp->nvram_jedecnum = JEDEC_ATMEL;
13240 tg3_flag_set(tp, NVRAM_BUFFERED);
13241 tg3_flag_set(tp, FLASH);
13243 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13244 case FLASH_5717VENDOR_ATMEL_MDB021D:
13245 /* Detect size with tg3_nvram_get_size() */
13246 break;
13247 case FLASH_5717VENDOR_ATMEL_ADB021B:
13248 case FLASH_5717VENDOR_ATMEL_ADB021D:
13249 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13250 break;
13251 default:
13252 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13253 break;
13254 }
13255 break;
13256 case FLASH_5717VENDOR_ST_M_M25PE10:
13257 case FLASH_5717VENDOR_ST_A_M25PE10:
13258 case FLASH_5717VENDOR_ST_M_M45PE10:
13259 case FLASH_5717VENDOR_ST_A_M45PE10:
13260 case FLASH_5717VENDOR_ST_M_M25PE20:
13261 case FLASH_5717VENDOR_ST_A_M25PE20:
13262 case FLASH_5717VENDOR_ST_M_M45PE20:
13263 case FLASH_5717VENDOR_ST_A_M45PE20:
13264 case FLASH_5717VENDOR_ST_25USPT:
13265 case FLASH_5717VENDOR_ST_45USPT:
13266 tp->nvram_jedecnum = JEDEC_ST;
13267 tg3_flag_set(tp, NVRAM_BUFFERED);
13268 tg3_flag_set(tp, FLASH);
13270 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13271 case FLASH_5717VENDOR_ST_M_M25PE20:
13272 case FLASH_5717VENDOR_ST_M_M45PE20:
13273 /* Detect size with tg3_nvram_get_size() */
13274 break;
13275 case FLASH_5717VENDOR_ST_A_M25PE20:
13276 case FLASH_5717VENDOR_ST_A_M45PE20:
13277 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13278 break;
13279 default:
13280 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13281 break;
13282 }
13283 break;
13284 default:
13285 tg3_flag_set(tp, NO_NVRAM);
13286 return;
13287 }
13289 tg3_nvram_get_pagesize(tp, nvcfg1);
13290 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13291 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13294 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13296 u32 nvcfg1, nvmpinstrp;
13298 nvcfg1 = tr32(NVRAM_CFG1);
13299 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13301 switch (nvmpinstrp) {
13302 case FLASH_5720_EEPROM_HD:
13303 case FLASH_5720_EEPROM_LD:
13304 tp->nvram_jedecnum = JEDEC_ATMEL;
13305 tg3_flag_set(tp, NVRAM_BUFFERED);
13307 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13308 tw32(NVRAM_CFG1, nvcfg1);
13309 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13310 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13312 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13313 break;
13314 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13315 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13316 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13317 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13318 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13319 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13320 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13321 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13322 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13323 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13324 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13325 case FLASH_5720VENDOR_ATMEL_45USPT:
13326 tp->nvram_jedecnum = JEDEC_ATMEL;
13327 tg3_flag_set(tp, NVRAM_BUFFERED);
13328 tg3_flag_set(tp, FLASH);
13330 switch (nvmpinstrp) {
13331 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13332 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13333 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13334 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13335 break;
13336 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13337 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13338 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13339 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13340 break;
13341 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13342 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13343 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13344 break;
13345 default:
13346 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13347 break;
13348 }
13349 break;
13350 case FLASH_5720VENDOR_M_ST_M25PE10:
13351 case FLASH_5720VENDOR_M_ST_M45PE10:
13352 case FLASH_5720VENDOR_A_ST_M25PE10:
13353 case FLASH_5720VENDOR_A_ST_M45PE10:
13354 case FLASH_5720VENDOR_M_ST_M25PE20:
13355 case FLASH_5720VENDOR_M_ST_M45PE20:
13356 case FLASH_5720VENDOR_A_ST_M25PE20:
13357 case FLASH_5720VENDOR_A_ST_M45PE20:
13358 case FLASH_5720VENDOR_M_ST_M25PE40:
13359 case FLASH_5720VENDOR_M_ST_M45PE40:
13360 case FLASH_5720VENDOR_A_ST_M25PE40:
13361 case FLASH_5720VENDOR_A_ST_M45PE40:
13362 case FLASH_5720VENDOR_M_ST_M25PE80:
13363 case FLASH_5720VENDOR_M_ST_M45PE80:
13364 case FLASH_5720VENDOR_A_ST_M25PE80:
13365 case FLASH_5720VENDOR_A_ST_M45PE80:
13366 case FLASH_5720VENDOR_ST_25USPT:
13367 case FLASH_5720VENDOR_ST_45USPT:
13368 tp->nvram_jedecnum = JEDEC_ST;
13369 tg3_flag_set(tp, NVRAM_BUFFERED);
13370 tg3_flag_set(tp, FLASH);
13372 switch (nvmpinstrp) {
13373 case FLASH_5720VENDOR_M_ST_M25PE20:
13374 case FLASH_5720VENDOR_M_ST_M45PE20:
13375 case FLASH_5720VENDOR_A_ST_M25PE20:
13376 case FLASH_5720VENDOR_A_ST_M45PE20:
13377 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13378 break;
13379 case FLASH_5720VENDOR_M_ST_M25PE40:
13380 case FLASH_5720VENDOR_M_ST_M45PE40:
13381 case FLASH_5720VENDOR_A_ST_M25PE40:
13382 case FLASH_5720VENDOR_A_ST_M45PE40:
13383 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13384 break;
13385 case FLASH_5720VENDOR_M_ST_M25PE80:
13386 case FLASH_5720VENDOR_M_ST_M45PE80:
13387 case FLASH_5720VENDOR_A_ST_M25PE80:
13388 case FLASH_5720VENDOR_A_ST_M45PE80:
13389 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13390 break;
13391 default:
13392 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13393 break;
13394 }
13395 break;
13396 default:
13397 tg3_flag_set(tp, NO_NVRAM);
13398 return;
13399 }
13401 tg3_nvram_get_pagesize(tp, nvcfg1);
13402 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13403 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13406 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13407 static void __devinit tg3_nvram_init(struct tg3 *tp)
13409 tw32_f(GRC_EEPROM_ADDR,
13410 (EEPROM_ADDR_FSM_RESET |
13411 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13412 EEPROM_ADDR_CLKPERD_SHIFT)));
13414 msleep(1);
13416 /* Enable seeprom accesses. */
13417 tw32_f(GRC_LOCAL_CTRL,
13418 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13421 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13422 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13423 tg3_flag_set(tp, NVRAM);
13425 if (tg3_nvram_lock(tp)) {
13426 netdev_warn(tp->dev,
13427 "Cannot get nvram lock, %s failed\n",
13431 tg3_enable_nvram_access(tp);
13433 tp->nvram_size = 0;
13435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13436 tg3_get_5752_nvram_info(tp);
13437 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13438 tg3_get_5755_nvram_info(tp);
13439 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13440 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13442 tg3_get_5787_nvram_info(tp);
13443 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13444 tg3_get_5761_nvram_info(tp);
13445 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13446 tg3_get_5906_nvram_info(tp);
13447 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13448 tg3_flag(tp, 57765_CLASS))
13449 tg3_get_57780_nvram_info(tp);
13450 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13452 tg3_get_5717_nvram_info(tp);
13453 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13454 tg3_get_5720_nvram_info(tp);
13455 else
13456 tg3_get_nvram_info(tp);
13458 if (tp->nvram_size == 0)
13459 tg3_get_nvram_size(tp);
13461 tg3_disable_nvram_access(tp);
13462 tg3_nvram_unlock(tp);
13463 } else {
13465 tg3_flag_clear(tp, NVRAM);
13466 tg3_flag_clear(tp, NVRAM_BUFFERED);
13468 tg3_get_eeprom_size(tp);
13469 }
13470 }
13472 struct subsys_tbl_ent {
13473 u16 subsys_vendor, subsys_devid;
13477 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13478 /* Broadcom boards. */
13479 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13480 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13481 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13482 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13483 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13484 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13485 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13486 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13487 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13488 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13489 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13490 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13491 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13492 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13493 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13494 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13495 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13496 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13497 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13498 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13499 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13500 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13502 /* 3com boards. */
13503 { TG3PCI_SUBVENDOR_ID_3COM,
13504 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13505 { TG3PCI_SUBVENDOR_ID_3COM,
13506 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13507 { TG3PCI_SUBVENDOR_ID_3COM,
13508 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13509 { TG3PCI_SUBVENDOR_ID_3COM,
13510 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13511 { TG3PCI_SUBVENDOR_ID_3COM,
13512 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13514 /* DELL boards. */
13515 { TG3PCI_SUBVENDOR_ID_DELL,
13516 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13517 { TG3PCI_SUBVENDOR_ID_DELL,
13518 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13519 { TG3PCI_SUBVENDOR_ID_DELL,
13520 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13521 { TG3PCI_SUBVENDOR_ID_DELL,
13522 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13524 /* Compaq boards. */
13525 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13526 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13527 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13528 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13529 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13530 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13531 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13532 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13533 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13534 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13536 /* IBM boards. */
13537 { TG3PCI_SUBVENDOR_ID_IBM,
13538 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13541 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13543 int i;
13545 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13546 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13547 tp->pdev->subsystem_vendor) &&
13548 (subsys_id_to_phy_id[i].subsys_devid ==
13549 tp->pdev->subsystem_device))
13550 return &subsys_id_to_phy_id[i];
13551 }
13553 return NULL;
13554 }
13555 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13559 tp->phy_id = TG3_PHY_ID_INVALID;
13560 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13562 /* Assume an onboard device and WOL capable by default. */
13563 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13564 tg3_flag_set(tp, WOL_CAP);
13566 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13567 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13568 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13569 tg3_flag_set(tp, IS_NIC);
13570 }
13571 val = tr32(VCPU_CFGSHDW);
13572 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13573 tg3_flag_set(tp, ASPM_WORKAROUND);
13574 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13575 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13576 tg3_flag_set(tp, WOL_ENABLE);
13577 device_set_wakeup_enable(&tp->pdev->dev, true);
13578 }
13579 goto done;
13580 }
13582 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13583 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13584 u32 nic_cfg, led_cfg;
13585 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13586 int eeprom_phy_serdes = 0;
13588 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13589 tp->nic_sram_data_cfg = nic_cfg;
13591 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13592 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13593 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13594 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13595 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13596 (ver > 0) && (ver < 0x100))
13597 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13599 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13600 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13602 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13603 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13604 eeprom_phy_serdes = 1;
13606 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13607 if (nic_phy_id != 0) {
13608 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13609 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13611 eeprom_phy_id = (id1 >> 16) << 10;
13612 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13613 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13614 } else
13615 eeprom_phy_id = 0;
13617 tp->phy_id = eeprom_phy_id;
13618 if (eeprom_phy_serdes) {
13619 if (!tg3_flag(tp, 5705_PLUS))
13620 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13621 else
13622 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13623 }
13625 if (tg3_flag(tp, 5750_PLUS))
13626 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13627 SHASTA_EXT_LED_MODE_MASK);
13629 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13631 switch (led_cfg) {
13632 default:
13633 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13634 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13635 break;
13637 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13638 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13639 break;
13641 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13642 tp->led_ctrl = LED_CTRL_MODE_MAC;
13644 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13645 * read on some older 5700/5701 bootcode.
13647 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13648 ASIC_REV_5700 ||
13649 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13650 ASIC_REV_5701)
13651 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13652 break;
13655 case SHASTA_EXT_LED_SHARED:
13656 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13657 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13658 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13659 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13660 LED_CTRL_MODE_PHY_2);
13661 break;
13663 case SHASTA_EXT_LED_MAC:
13664 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13665 break;
13667 case SHASTA_EXT_LED_COMBO:
13668 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13669 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13670 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13671 LED_CTRL_MODE_PHY_2);
13672 break;
13673 }
13676 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13677 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13678 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13679 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13681 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13682 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13684 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13685 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13686 if ((tp->pdev->subsystem_vendor ==
13687 PCI_VENDOR_ID_ARIMA) &&
13688 (tp->pdev->subsystem_device == 0x205a ||
13689 tp->pdev->subsystem_device == 0x2063))
13690 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13692 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13693 tg3_flag_set(tp, IS_NIC);
13696 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13697 tg3_flag_set(tp, ENABLE_ASF);
13698 if (tg3_flag(tp, 5750_PLUS))
13699 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13702 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13703 tg3_flag(tp, 5750_PLUS))
13704 tg3_flag_set(tp, ENABLE_APE);
13706 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13707 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13708 tg3_flag_clear(tp, WOL_CAP);
13710 if (tg3_flag(tp, WOL_CAP) &&
13711 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13712 tg3_flag_set(tp, WOL_ENABLE);
13713 device_set_wakeup_enable(&tp->pdev->dev, true);
13716 if (cfg2 & (1 << 17))
13717 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13719 /* serdes signal pre-emphasis in register 0x590 set by */
13720 /* bootcode if bit 18 is set */
13721 if (cfg2 & (1 << 18))
13722 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13724 if ((tg3_flag(tp, 57765_PLUS) ||
13725 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13726 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13727 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13728 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13730 if (tg3_flag(tp, PCI_EXPRESS) &&
13731 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13732 !tg3_flag(tp, 57765_PLUS)) {
13733 u32 cfg3;
13735 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13736 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13737 tg3_flag_set(tp, ASPM_WORKAROUND);
13740 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13741 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13742 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13743 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13744 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13745 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13746 }
13747 done:
13748 if (tg3_flag(tp, WOL_CAP))
13749 device_set_wakeup_enable(&tp->pdev->dev,
13750 tg3_flag(tp, WOL_ENABLE));
13751 else
13752 device_set_wakeup_capable(&tp->pdev->dev, false);
13753 }
13755 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13757 u32 val;
13758 int i;
13760 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13761 tw32(OTP_CTRL, cmd);
13763 /* Wait for up to 1 ms for command to execute. */
13764 for (i = 0; i < 100; i++) {
13765 val = tr32(OTP_STATUS);
13766 if (val & OTP_STATUS_CMD_DONE)
13767 break;
13768 udelay(10);
13769 }
13771 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13772 }
13774 /* Read the gphy configuration from the OTP region of the chip. The gphy
13775 * configuration is a 32-bit value that straddles the alignment boundary.
13776 * We do two 32-bit reads and then shift and merge the results.
13777 */
13778 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13779 {
13780 u32 bhalf_otp, thalf_otp;
13782 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13784 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13785 return 0;
13787 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13789 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13790 return 0;
13792 thalf_otp = tr32(OTP_READ_DATA);
13794 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13796 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13797 return 0;
13799 bhalf_otp = tr32(OTP_READ_DATA);
13801 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13802 }
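/* Worked example with made-up OTP words: if the MAGIC1 read returns
 * thalf_otp = 0x1234abcd and the MAGIC2 read returns bhalf_otp =
 * 0x5678ef01, the merged value is ((0x1234abcd & 0x0000ffff) << 16) |
 * (0x5678ef01 >> 16) = 0xabcd5678.
 */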
13804 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13805 {
13806 u32 adv = ADVERTISED_Autoneg;
13808 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13809 adv |= ADVERTISED_1000baseT_Half |
13810 ADVERTISED_1000baseT_Full;
13812 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13813 adv |= ADVERTISED_100baseT_Half |
13814 ADVERTISED_100baseT_Full |
13815 ADVERTISED_10baseT_Half |
13816 ADVERTISED_10baseT_Full |
13817 ADVERTISED_TP;
13818 else
13819 adv |= ADVERTISED_FIBRE;
13821 tp->link_config.advertising = adv;
13822 tp->link_config.speed = SPEED_UNKNOWN;
13823 tp->link_config.duplex = DUPLEX_UNKNOWN;
13824 tp->link_config.autoneg = AUTONEG_ENABLE;
13825 tp->link_config.active_speed = SPEED_UNKNOWN;
13826 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13827 }
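/* For example (assuming a plain copper PHY with neither
 * TG3_PHYFLG_10_100_ONLY nor TG3_PHYFLG_ANY_SERDES set), the advertising
 * mask built above is Autoneg + 1000baseT Half/Full + 100/10baseT
 * Half/Full + TP; a serdes PHY instead gets Autoneg + 1000baseT + FIBRE.
 */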
13831 static int __devinit tg3_phy_probe(struct tg3 *tp)
13832 {
13833 u32 hw_phy_id_1, hw_phy_id_2;
13834 u32 hw_phy_id, hw_phy_id_masked;
13835 int err;
13837 /* flow control autonegotiation is default behavior */
13838 tg3_flag_set(tp, PAUSE_AUTONEG);
13839 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13841 if (tg3_flag(tp, ENABLE_APE)) {
13842 switch (tp->pci_fn) {
13843 case 0:
13844 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13845 break;
13846 case 1:
13847 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13848 break;
13849 case 2:
13850 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13851 break;
13852 case 3:
13853 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13854 break;
13855 }
13856 }
13858 if (tg3_flag(tp, USE_PHYLIB))
13859 return tg3_phy_init(tp);
13861 /* Reading the PHY ID register can conflict with ASF
13862 * firmware access to the PHY hardware.
13863 */
13864 err = 0;
13865 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13866 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13867 } else {
13868 /* Now read the physical PHY_ID from the chip and verify
13869 * that it is sane. If it doesn't look good, we fall back
13870 * to either the hard-coded table based PHY_ID and failing
13871 * that the value found in the eeprom area.
13872 */
13873 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13874 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13876 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13877 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13878 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13880 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
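/* The shifts above fold the two 16-bit MII ID registers into tg3's
 * internal 32-bit PHY id: bits 31:26 come from PHYSID2[15:10], bits
 * 25:10 from PHYSID1[15:0] and bits 9:0 from PHYSID2[9:0].
 * TG3_PHY_ID_MASK then drops the low revision bits for the table
 * comparisons below.
 */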
13883 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13884 tp->phy_id = hw_phy_id;
13885 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13886 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13887 else
13888 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13889 } else {
13890 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13891 /* Do nothing, phy ID already set up in
13892 * tg3_get_eeprom_hw_cfg().
13893 */
13894 } else {
13895 struct subsys_tbl_ent *p;
13897 /* No eeprom signature? Try the hardcoded
13898 * subsys device table.
13899 */
13900 p = tg3_lookup_by_subsys(tp);
13901 if (!p)
13902 return -ENODEV;
13904 tp->phy_id = p->phy_id;
13905 if (!tp->phy_id ||
13906 tp->phy_id == TG3_PHY_ID_BCM8002)
13907 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13908 }
13909 }
13911 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13912 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13913 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13914 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13915 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13916 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13917 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13918 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13920 tg3_phy_init_link_config(tp);
13922 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13923 !tg3_flag(tp, ENABLE_APE) &&
13924 !tg3_flag(tp, ENABLE_ASF)) {
13925 u32 bmsr, dummy;
13927 tg3_readphy(tp, MII_BMSR, &bmsr);
13928 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13929 (bmsr & BMSR_LSTATUS))
13930 goto skip_phy_reset;
13932 err = tg3_phy_reset(tp);
13933 if (err)
13934 return err;
13936 tg3_phy_set_wirespeed(tp);
13938 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13939 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13940 tp->link_config.flowctrl);
13942 tg3_writephy(tp, MII_BMCR,
13943 BMCR_ANENABLE | BMCR_ANRESTART);
13944 }
13945 }
13947 skip_phy_reset:
13948 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13949 err = tg3_init_5401phy_dsp(tp);
13950 if (err)
13951 return err;
13953 err = tg3_init_5401phy_dsp(tp);
13954 }
13956 return err;
13957 }
13959 static void __devinit tg3_read_vpd(struct tg3 *tp)
13960 {
13961 u8 *vpd_data;
13962 unsigned int block_end, rosize, len;
13963 u32 vpdlen;
13964 int j, i;
13966 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13967 if (!vpd_data)
13968 goto out_no_vpd;
13970 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13971 if (i < 0)
13972 goto out_not_found;
13974 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13975 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13976 i += PCI_VPD_LRDT_TAG_SIZE;
13978 if (block_end > vpdlen)
13979 goto out_not_found;
13981 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13982 PCI_VPD_RO_KEYWORD_MFR_ID);
13983 if (j > 0) {
13984 len = pci_vpd_info_field_size(&vpd_data[j]);
13986 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13987 if (j + len > block_end || len != 4 ||
13988 memcmp(&vpd_data[j], "1028", 4))
13989 goto partno;
13991 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13992 PCI_VPD_RO_KEYWORD_VENDOR0);
13993 if (j < 0)
13994 goto partno;
13996 len = pci_vpd_info_field_size(&vpd_data[j]);
13998 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13999 if (j + len > block_end)
14000 goto partno;
14002 memcpy(tp->fw_ver, &vpd_data[j], len);
14003 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14004 }
14006 partno:
14007 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14008 PCI_VPD_RO_KEYWORD_PARTNO);
14009 if (i < 0)
14010 goto out_not_found;
14012 len = pci_vpd_info_field_size(&vpd_data[i]);
14014 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14015 if (len > TG3_BPN_SIZE ||
14016 (len + i) > vpdlen)
14017 goto out_not_found;
14019 memcpy(tp->board_part_number, &vpd_data[i], len);
14021 out_not_found:
14022 kfree(vpd_data);
14023 if (tp->board_part_number[0])
14024 return;
14026 out_no_vpd:
14027 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14028 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
14029 strcpy(tp->board_part_number, "BCM5717");
14030 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14031 strcpy(tp->board_part_number, "BCM5718");
14034 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14035 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14036 strcpy(tp->board_part_number, "BCM57780");
14037 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14038 strcpy(tp->board_part_number, "BCM57760");
14039 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14040 strcpy(tp->board_part_number, "BCM57790");
14041 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14042 strcpy(tp->board_part_number, "BCM57788");
14045 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14046 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14047 strcpy(tp->board_part_number, "BCM57761");
14048 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14049 strcpy(tp->board_part_number, "BCM57765");
14050 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14051 strcpy(tp->board_part_number, "BCM57781");
14052 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14053 strcpy(tp->board_part_number, "BCM57785");
14054 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14055 strcpy(tp->board_part_number, "BCM57791");
14056 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14057 strcpy(tp->board_part_number, "BCM57795");
14060 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14061 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14062 strcpy(tp->board_part_number, "BCM57762");
14063 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14064 strcpy(tp->board_part_number, "BCM57766");
14065 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14066 strcpy(tp->board_part_number, "BCM57782");
14067 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14068 strcpy(tp->board_part_number, "BCM57786");
14071 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14072 strcpy(tp->board_part_number, "BCM95906");
14075 strcpy(tp->board_part_number, "none");
14079 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14080 {
14081 u32 val;
14083 if (tg3_nvram_read(tp, offset, &val) ||
14084 (val & 0xfc000000) != 0x0c000000 ||
14085 tg3_nvram_read(tp, offset + 4, &val) ||
14086 (val & 0xe0000000) != 0)
14087 return 0;
14089 return 1;
14090 }
14092 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
14093 {
14094 u32 val, offset, start, ver_offset;
14095 int i, dst_off;
14096 bool newver = false;
14098 if (tg3_nvram_read(tp, 0xc, &offset) ||
14099 tg3_nvram_read(tp, 0x4, &start))
14100 return;
14102 offset = tg3_nvram_logical_addr(tp, offset);
14104 if (tg3_nvram_read(tp, offset, &val))
14105 return;
14107 if ((val & 0xfc000000) == 0x0c000000) {
14108 if (tg3_nvram_read(tp, offset + 4, &val))
14109 return;
14111 if (val == 0)
14112 newver = true;
14113 }
14115 dst_off = strlen(tp->fw_ver);
14117 if (newver) {
14118 if (TG3_VER_SIZE - dst_off < 16 ||
14119 tg3_nvram_read(tp, offset + 8, &ver_offset))
14120 return;
14122 offset = offset + ver_offset - start;
14123 for (i = 0; i < 16; i += 4) {
14124 __be32 v;
14125 if (tg3_nvram_read_be32(tp, offset + i, &v))
14126 return;
14128 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14129 }
14130 } else {
14131 u32 major, minor;
14133 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14134 return;
14136 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14137 TG3_NVM_BCVER_MAJSFT;
14138 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14139 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14140 "v%d.%02d", major, minor);
14144 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14145 {
14146 u32 val, major, minor;
14148 /* Use native endian representation */
14149 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14150 return;
14152 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14153 TG3_NVM_HWSB_CFG1_MAJSFT;
14154 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14155 TG3_NVM_HWSB_CFG1_MINSFT;
14157 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14158 }
14160 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14161 {
14162 u32 offset, major, minor, build;
14164 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14166 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14167 return;
14169 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14170 case TG3_EEPROM_SB_REVISION_0:
14171 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14172 break;
14173 case TG3_EEPROM_SB_REVISION_2:
14174 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14175 break;
14176 case TG3_EEPROM_SB_REVISION_3:
14177 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14178 break;
14179 case TG3_EEPROM_SB_REVISION_4:
14180 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14181 break;
14182 case TG3_EEPROM_SB_REVISION_5:
14183 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14184 break;
14185 case TG3_EEPROM_SB_REVISION_6:
14186 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14187 break;
14188 default:
14189 return;
14190 }
14192 if (tg3_nvram_read(tp, offset, &val))
14193 return;
14195 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14196 TG3_EEPROM_SB_EDH_BLD_SHFT;
14197 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14198 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14199 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14201 if (minor > 99 || build > 26)
14202 return;
14204 offset = strlen(tp->fw_ver);
14205 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14206 " v%d.%02d", major, minor);
14208 if (build > 0) {
14209 offset = strlen(tp->fw_ver);
14210 if (offset < TG3_VER_SIZE - 1)
14211 tp->fw_ver[offset] = 'a' + build - 1;
14212 }
14213 }
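/* Example: an EDH word decoding to major = 1, minor = 2, build = 1
 * first yields " v1.02" and then appends 'a' ('a' + build - 1, so
 * build 26 would map to 'z'), giving "sb v1.02a" overall.
 */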
14215 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14216 {
14217 u32 val, offset, start;
14218 int vlen;
14220 for (offset = TG3_NVM_DIR_START;
14221 offset < TG3_NVM_DIR_END;
14222 offset += TG3_NVM_DIRENT_SIZE) {
14223 if (tg3_nvram_read(tp, offset, &val))
14224 return;
14226 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14227 break;
14228 }
14230 if (offset == TG3_NVM_DIR_END)
14231 return;
14233 if (!tg3_flag(tp, 5705_PLUS))
14234 start = 0x08000000;
14235 else if (tg3_nvram_read(tp, offset - 4, &start))
14236 return;
14238 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14239 !tg3_fw_img_is_valid(tp, offset) ||
14240 tg3_nvram_read(tp, offset + 8, &val))
14241 return;
14243 offset += val - start;
14245 vlen = strlen(tp->fw_ver);
14247 tp->fw_ver[vlen++] = ',';
14248 tp->fw_ver[vlen++] = ' ';
14250 for (i = 0; i < 4; i++) {
14251 __be32 v;
14252 if (tg3_nvram_read_be32(tp, offset, &v))
14253 return;
14255 offset += sizeof(v);
14257 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14258 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14259 break;
14260 }
14262 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14263 vlen += sizeof(v);
14264 }
14265 }
14267 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14268 {
14269 u32 apedata;
14271 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14272 if (apedata != APE_SEG_SIG_MAGIC)
14273 return;
14275 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14276 if (!(apedata & APE_FW_STATUS_READY))
14277 return;
14279 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14280 tg3_flag_set(tp, APE_HAS_NCSI);
14281 }
14283 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14284 {
14285 int vlen;
14286 char *fwtype;
14287 u32 apedata;
14289 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14291 if (tg3_flag(tp, APE_HAS_NCSI))
14292 fwtype = "NCSI";
14293 else
14294 fwtype = "DASH";
14296 vlen = strlen(tp->fw_ver);
14298 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14299 fwtype,
14300 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14301 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14302 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14303 (apedata & APE_FW_VERSION_BLDMSK));
14304 }
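/* Illustrative result: APE firmware 1.2.3 build 4 on an NCSI-capable
 * part appends " NCSI v1.2.3.4" to tp->fw_ver; DASH firmware appends
 * " DASH v..." instead.
 */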
14306 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14307 {
14308 u32 val;
14309 bool vpd_vers = false;
14311 if (tp->fw_ver[0] != 0)
14312 vpd_vers = true;
14314 if (tg3_flag(tp, NO_NVRAM)) {
14315 strcat(tp->fw_ver, "sb");
14319 if (tg3_nvram_read(tp, 0, &val))
14322 if (val == TG3_EEPROM_MAGIC)
14323 tg3_read_bc_ver(tp);
14324 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14325 tg3_read_sb_ver(tp, val);
14326 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14327 tg3_read_hwsb_ver(tp);
14329 if (tg3_flag(tp, ENABLE_ASF)) {
14330 if (tg3_flag(tp, ENABLE_APE)) {
14331 tg3_probe_ncsi(tp);
14332 if (!vpd_vers)
14333 tg3_read_dash_ver(tp);
14334 } else if (!vpd_vers) {
14335 tg3_read_mgmtfw_ver(tp);
14336 }
14337 }
14339 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14340 }
14342 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14343 {
14344 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14345 return TG3_RX_RET_MAX_SIZE_5717;
14346 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14347 return TG3_RX_RET_MAX_SIZE_5700;
14348 else
14349 return TG3_RX_RET_MAX_SIZE_5705;
14350 }
14352 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14353 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14354 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14355 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14356 { },
14357 };
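/* pci_dev_present() is used against this table from tg3_get_invariants()
 * below: if any of these AMD/VIA host bridges is in the system, mailbox
 * writes must be flushed with a read back (MBOX_WRITE_REORDER).
 */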
14359 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14360 {
14361 struct pci_dev *peer;
14362 unsigned int func, devnr = tp->pdev->devfn & ~7;
14364 for (func = 0; func < 8; func++) {
14365 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14366 if (peer && peer != tp->pdev)
14367 break;
14368 pci_dev_put(peer);
14369 }
14370 /* 5704 can be configured in single-port mode, set peer to
14371 * tp->pdev in that case.
14372 */
14373 if (!peer) {
14374 peer = tp->pdev;
14375 return peer;
14376 }
14378 /*
14379 * We don't need to keep the refcount elevated; there's no way
14380 * to remove one half of this device without removing the other
14381 */
14382 pci_dev_put(peer);
14384 return peer;
14385 }
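/* devfn & ~7 masks off the PCI function number, so the loop above probes
 * all eight functions of the NIC's own slot; e.g. for devfn 0x21
 * (device 4, function 1) it scans devfn 0x20..0x27 for the twin port.
 */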
14387 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14388 {
14389 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14391 u32 reg;
14393 /* All devices that use the alternate
14394 * ASIC REV location have a CPMU.
14395 */
14396 tg3_flag_set(tp, CPMU_PRESENT);
14398 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14399 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14400 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14401 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14402 reg = TG3PCI_GEN2_PRODID_ASICREV;
14403 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14404 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14407 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14408 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14409 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14410 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14411 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14412 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14413 reg = TG3PCI_GEN15_PRODID_ASICREV;
14414 else
14415 reg = TG3PCI_PRODID_ASICREV;
14417 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14418 }
14420 /* Wrong chip ID in 5752 A0. This code can be removed later
14421 * as A0 is not in production.
14422 */
14423 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14424 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14426 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14427 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14428 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14429 tg3_flag_set(tp, 5717_PLUS);
14431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14432 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14433 tg3_flag_set(tp, 57765_CLASS);
14435 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14436 tg3_flag_set(tp, 57765_PLUS);
14438 /* Intentionally exclude ASIC_REV_5906 */
14439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14440 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14445 tg3_flag(tp, 57765_PLUS))
14446 tg3_flag_set(tp, 5755_PLUS);
14448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14449 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14450 tg3_flag_set(tp, 5780_CLASS);
14452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14453 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14455 tg3_flag(tp, 5755_PLUS) ||
14456 tg3_flag(tp, 5780_CLASS))
14457 tg3_flag_set(tp, 5750_PLUS);
14459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14460 tg3_flag(tp, 5750_PLUS))
14461 tg3_flag_set(tp, 5705_PLUS);
14462 }
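/* The flag ladder above is deliberately cumulative: 5717_PLUS and
 * 57765_CLASS imply 57765_PLUS, which implies 5755_PLUS, which in turn
 * implies 5750_PLUS and 5705_PLUS, so later code can test the broadest
 * applicable family with a single tg3_flag() check.
 */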
14464 static int __devinit tg3_get_invariants(struct tg3 *tp)
14465 {
14466 u32 misc_ctrl_reg;
14467 u32 pci_state_reg, grc_misc_cfg;
14468 u32 val;
14469 u16 pci_cmd;
14470 int err;
14472 /* Force memory write invalidate off. If we leave it on,
14473 * then on 5700_BX chips we have to enable a workaround.
14474 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14475 * to match the cacheline size. The Broadcom driver has this
14476 * workaround but turns MWI off all the time so it never uses
14477 * it. This seems to suggest that the workaround is insufficient.
14478 */
14479 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14480 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14481 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14483 /* Important! -- Make sure register accesses are byteswapped
14484 * correctly. Also, for those chips that require it, make
14485 * sure that indirect register accesses are enabled before
14486 * the first operation.
14487 */
14488 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14489 &misc_ctrl_reg);
14490 tp->misc_host_ctrl |= (misc_ctrl_reg &
14491 MISC_HOST_CTRL_CHIPREV);
14492 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14493 tp->misc_host_ctrl);
14495 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14497 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14498 * we need to disable memory and use config. cycles
14499 * only to access all registers. The 5702/03 chips
14500 * can mistakenly decode the special cycles from the
14501 * ICH chipsets as memory write cycles, causing corruption
14502 * of register and memory space. Only certain ICH bridges
14503 * will drive special cycles with non-zero data during the
14504 * address phase which can fall within the 5703's address
14505 * range. This is not an ICH bug as the PCI spec allows
14506 * non-zero address during special cycles. However, only
14507 * these ICH bridges are known to drive non-zero addresses
14508 * during special cycles.
14510 * Since special cycles do not cross PCI bridges, we only
14511 * enable this workaround if the 5703 is on the secondary
14512 * bus of these ICH bridges.
14513 */
14514 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14515 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14516 static struct tg3_dev_id {
14517 u32 vendor;
14518 u32 device;
14519 u32 rev;
14520 } ich_chipsets[] = {
14521 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14522 PCI_ANY_ID },
14523 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14524 PCI_ANY_ID },
14525 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14526 0xa },
14527 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14528 PCI_ANY_ID },
14529 { },
14530 };
14531 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14532 struct pci_dev *bridge = NULL;
14534 while (pci_id->vendor != 0) {
14535 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14536 bridge);
14537 if (!bridge) {
14538 pci_id++;
14539 continue;
14540 }
14541 if (pci_id->rev != PCI_ANY_ID) {
14542 if (bridge->revision > pci_id->rev)
14543 continue;
14544 }
14545 if (bridge->subordinate &&
14546 (bridge->subordinate->number ==
14547 tp->pdev->bus->number)) {
14548 tg3_flag_set(tp, ICH_WORKAROUND);
14549 pci_dev_put(bridge);
14550 break;
14551 }
14552 }
14553 }
14555 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14556 static struct tg3_dev_id {
14557 u32 vendor;
14558 u32 device;
14559 } bridge_chipsets[] = {
14560 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14561 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14562 { },
14563 };
14564 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14565 struct pci_dev *bridge = NULL;
14567 while (pci_id->vendor != 0) {
14568 bridge = pci_get_device(pci_id->vendor,
14569 pci_id->device,
14570 bridge);
14571 if (!bridge) {
14572 pci_id++;
14573 continue;
14574 }
14575 if (bridge->subordinate &&
14576 (bridge->subordinate->number <=
14577 tp->pdev->bus->number) &&
14578 (bridge->subordinate->busn_res.end >=
14579 tp->pdev->bus->number)) {
14580 tg3_flag_set(tp, 5701_DMA_BUG);
14581 pci_dev_put(bridge);
14582 break;
14583 }
14584 }
14585 }
14587 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14588 * DMA addresses > 40-bit. This bridge may have other additional
14589 * 57xx devices behind it in some 4-port NIC designs for example.
14590 * Any tg3 device found behind the bridge will also need the 40-bit
14591 * DMA workaround.
14592 */
14593 if (tg3_flag(tp, 5780_CLASS)) {
14594 tg3_flag_set(tp, 40BIT_DMA_BUG);
14595 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14596 } else {
14597 struct pci_dev *bridge = NULL;
14599 do {
14600 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14601 PCI_DEVICE_ID_SERVERWORKS_EPB,
14602 bridge);
14603 if (bridge && bridge->subordinate &&
14604 (bridge->subordinate->number <=
14605 tp->pdev->bus->number) &&
14606 (bridge->subordinate->busn_res.end >=
14607 tp->pdev->bus->number)) {
14608 tg3_flag_set(tp, 40BIT_DMA_BUG);
14609 pci_dev_put(bridge);
14610 break;
14611 }
14612 } while (bridge);
14613 }
14615 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14616 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14617 tp->pdev_peer = tg3_find_peer(tp);
14619 /* Determine TSO capabilities */
14620 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14621 ; /* Do nothing. HW bug. */
14622 else if (tg3_flag(tp, 57765_PLUS))
14623 tg3_flag_set(tp, HW_TSO_3);
14624 else if (tg3_flag(tp, 5755_PLUS) ||
14625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14626 tg3_flag_set(tp, HW_TSO_2);
14627 else if (tg3_flag(tp, 5750_PLUS)) {
14628 tg3_flag_set(tp, HW_TSO_1);
14629 tg3_flag_set(tp, TSO_BUG);
14630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14631 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14632 tg3_flag_clear(tp, TSO_BUG);
14633 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14634 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14635 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14636 tg3_flag_set(tp, TSO_BUG);
14637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14638 tp->fw_needed = FIRMWARE_TG3TSO5;
14639 else
14640 tp->fw_needed = FIRMWARE_TG3TSO;
14641 }
14643 /* Selectively allow TSO based on operating conditions */
14644 if (tg3_flag(tp, HW_TSO_1) ||
14645 tg3_flag(tp, HW_TSO_2) ||
14646 tg3_flag(tp, HW_TSO_3) ||
14647 tp->fw_needed) {
14648 /* For firmware TSO, assume ASF is disabled.
14649 * We'll disable TSO later if we discover ASF
14650 * is enabled in tg3_get_eeprom_hw_cfg().
14651 */
14652 tg3_flag_set(tp, TSO_CAPABLE);
14653 } else {
14654 tg3_flag_clear(tp, TSO_CAPABLE);
14655 tg3_flag_clear(tp, TSO_BUG);
14656 tp->fw_needed = NULL;
14657 }
14659 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14660 tp->fw_needed = FIRMWARE_TG3;
14662 tp->irq_max = 1;
14664 if (tg3_flag(tp, 5750_PLUS)) {
14665 tg3_flag_set(tp, SUPPORT_MSI);
14666 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14667 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14668 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14669 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14670 tp->pdev_peer == tp->pdev))
14671 tg3_flag_clear(tp, SUPPORT_MSI);
14673 if (tg3_flag(tp, 5755_PLUS) ||
14674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14675 tg3_flag_set(tp, 1SHOT_MSI);
14676 }
14678 if (tg3_flag(tp, 57765_PLUS)) {
14679 tg3_flag_set(tp, SUPPORT_MSIX);
14680 tp->irq_max = TG3_IRQ_MAX_VECS;
14681 }
14682 }
14684 tp->txq_max = 1;
14685 tp->rxq_max = 1;
14686 if (tp->irq_max > 1) {
14687 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14688 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14692 tp->txq_max = tp->irq_max - 1;
14693 }
14695 if (tg3_flag(tp, 5755_PLUS) ||
14696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14697 tg3_flag_set(tp, SHORT_DMA_BUG);
14699 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14700 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14705 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14707 if (tg3_flag(tp, 57765_PLUS) &&
14708 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14709 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14711 if (!tg3_flag(tp, 5705_PLUS) ||
14712 tg3_flag(tp, 5780_CLASS) ||
14713 tg3_flag(tp, USE_JUMBO_BDFLAG))
14714 tg3_flag_set(tp, JUMBO_CAPABLE);
14716 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14717 &pci_state_reg);
14719 if (pci_is_pcie(tp->pdev)) {
14720 u16 lnkctl;
14722 tg3_flag_set(tp, PCI_EXPRESS);
14724 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
14725 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14726 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14727 ASIC_REV_5906) {
14728 tg3_flag_clear(tp, HW_TSO_2);
14729 tg3_flag_clear(tp, TSO_CAPABLE);
14730 }
14731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14732 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14733 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14734 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14735 tg3_flag_set(tp, CLKREQ_BUG);
14736 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14737 tg3_flag_set(tp, L1PLLPD_EN);
14738 }
14739 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14740 /* BCM5785 devices are effectively PCIe devices, and should
14741 * follow PCIe codepaths, but do not have a PCIe capabilities
14742 * section.
14743 */
14744 tg3_flag_set(tp, PCI_EXPRESS);
14745 } else if (!tg3_flag(tp, 5705_PLUS) ||
14746 tg3_flag(tp, 5780_CLASS)) {
14747 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14748 if (!tp->pcix_cap) {
14749 dev_err(&tp->pdev->dev,
14750 "Cannot find PCI-X capability, aborting\n");
14754 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14755 tg3_flag_set(tp, PCIX_MODE);
14758 /* If we have an AMD 762 or VIA K8T800 chipset, write
14759 * reordering to the mailbox registers done by the host
14760 * controller can cause major troubles. We read back from
14761 * every mailbox register write to force the writes to be
14762 * posted to the chip in order.
14763 */
14764 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14765 !tg3_flag(tp, PCI_EXPRESS))
14766 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14768 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14769 &tp->pci_cacheline_sz);
14770 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14771 &tp->pci_lat_timer);
14772 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14773 tp->pci_lat_timer < 64) {
14774 tp->pci_lat_timer = 64;
14775 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14776 tp->pci_lat_timer);
14777 }
14779 /* Important! -- It is critical that the PCI-X hw workaround
14780 * situation is decided before the first MMIO register access.
14781 */
14782 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14783 /* 5700 BX chips need to have their TX producer index
14784 * mailboxes written twice to workaround a bug.
14785 */
14786 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14788 /* If we are in PCI-X mode, enable register write workaround.
14790 * The workaround is to use indirect register accesses
14791 * for all chip writes not to mailbox registers.
14792 */
14793 if (tg3_flag(tp, PCIX_MODE)) {
14794 u32 pm_reg;
14796 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14798 /* The chip can have its power management PCI config
14799 * space registers clobbered due to this bug.
14800 * So explicitly force the chip into D0 here.
14801 */
14802 pci_read_config_dword(tp->pdev,
14803 tp->pm_cap + PCI_PM_CTRL,
14804 &pm_reg);
14805 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14806 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14807 pci_write_config_dword(tp->pdev,
14808 tp->pm_cap + PCI_PM_CTRL,
14809 pm_reg);
14811 /* Also, force SERR#/PERR# in PCI command. */
14812 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14813 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14814 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14815 }
14816 }
14818 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14819 tg3_flag_set(tp, PCI_HIGH_SPEED);
14820 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14821 tg3_flag_set(tp, PCI_32BIT);
14823 /* Chip-specific fixup from Broadcom driver */
14824 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14825 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14826 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14827 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14828 }
14830 /* Default fast path register access methods */
14831 tp->read32 = tg3_read32;
14832 tp->write32 = tg3_write32;
14833 tp->read32_mbox = tg3_read32;
14834 tp->write32_mbox = tg3_write32;
14835 tp->write32_tx_mbox = tg3_write32;
14836 tp->write32_rx_mbox = tg3_write32;
14838 /* Various workaround register access methods */
14839 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14840 tp->write32 = tg3_write_indirect_reg32;
14841 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14842 (tg3_flag(tp, PCI_EXPRESS) &&
14843 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14844 /*
14845 * Back to back register writes can cause problems on these
14846 * chips, the workaround is to read back all reg writes
14847 * except those to mailbox regs.
14848 *
14849 * See tg3_write_indirect_reg32().
14850 */
14851 tp->write32 = tg3_write_flush_reg32;
14852 }
14854 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14855 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14856 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14857 tp->write32_rx_mbox = tg3_write_flush_reg32;
14858 }
14860 if (tg3_flag(tp, ICH_WORKAROUND)) {
14861 tp->read32 = tg3_read_indirect_reg32;
14862 tp->write32 = tg3_write_indirect_reg32;
14863 tp->read32_mbox = tg3_read_indirect_mbox;
14864 tp->write32_mbox = tg3_write_indirect_mbox;
14865 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14866 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14868 iounmap(tp->regs);
14869 tp->regs = NULL;
14871 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14872 pci_cmd &= ~PCI_COMMAND_MEMORY;
14873 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14874 }
14875 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14876 tp->read32_mbox = tg3_read32_mbox_5906;
14877 tp->write32_mbox = tg3_write32_mbox_5906;
14878 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14879 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14880 }
14882 if (tp->write32 == tg3_write_indirect_reg32 ||
14883 (tg3_flag(tp, PCIX_MODE) &&
14884 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14885 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14886 tg3_flag_set(tp, SRAM_USE_CONFIG);
14888 /* The memory arbiter has to be enabled in order for SRAM accesses
14889 * to succeed. Normally on powerup the tg3 chip firmware will make
14890 * sure it is enabled, but other entities such as system netboot
14891 * code might disable it.
14892 */
14893 val = tr32(MEMARB_MODE);
14894 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14896 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14897 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14898 tg3_flag(tp, 5780_CLASS)) {
14899 if (tg3_flag(tp, PCIX_MODE)) {
14900 pci_read_config_dword(tp->pdev,
14901 tp->pcix_cap + PCI_X_STATUS,
14902 &val);
14903 tp->pci_fn = val & 0x7;
14904 }
14905 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14906 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14907 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14908 NIC_SRAM_CPMUSTAT_SIG) {
14909 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14910 tp->pci_fn = tp->pci_fn ? 1 : 0;
14911 }
14912 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14913 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14914 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14915 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14916 NIC_SRAM_CPMUSTAT_SIG) {
14917 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14918 TG3_CPMU_STATUS_FSHFT_5719;
14919 }
14920 }
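/* On 5717/5719/5720 parts the host-visible PCI function number need not
 * match the NIC's internal port index, so when the bootcode signature in
 * NIC_SRAM_CPMU_STATUS checks out, the function id stored there is
 * trusted over PCI_FUNC() alone.
 */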
14922 /* Get eeprom hw config before calling tg3_set_power_state().
14923 * In particular, the TG3_FLAG_IS_NIC flag must be
14924 * determined before calling tg3_set_power_state() so that
14925 * we know whether or not to switch out of Vaux power.
14926 * When the flag is set, it means that GPIO1 is used for eeprom
14927 * write protect and also implies that it is a LOM where GPIOs
14928 * are not used to switch power.
14929 */
14930 tg3_get_eeprom_hw_cfg(tp);
14932 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14933 tg3_flag_clear(tp, TSO_CAPABLE);
14934 tg3_flag_clear(tp, TSO_BUG);
14935 tp->fw_needed = NULL;
14936 }
14938 if (tg3_flag(tp, ENABLE_APE)) {
14939 /* Allow reads and writes to the
14940 * APE register and memory space.
14941 */
14942 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14943 PCISTATE_ALLOW_APE_SHMEM_WR |
14944 PCISTATE_ALLOW_APE_PSPACE_WR;
14945 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14946 pci_state_reg);
14948 tg3_ape_lock_init(tp);
14949 }
14951 /* Set up tp->grc_local_ctrl before calling
14952 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14953 * will bring 5700's external PHY out of reset.
14954 * It is also used as eeprom write protect on LOMs.
14955 */
14956 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14957 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14958 tg3_flag(tp, EEPROM_WRITE_PROT))
14959 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14960 GRC_LCLCTRL_GPIO_OUTPUT1);
14961 /* Unused GPIO3 must be driven as output on 5752 because there
14962 * are no pull-up resistors on unused GPIO pins.
14963 */
14964 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14965 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14969 tg3_flag(tp, 57765_CLASS))
14970 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14972 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14973 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14974 /* Turn off the debug UART. */
14975 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14976 if (tg3_flag(tp, IS_NIC))
14977 /* Keep VMain power. */
14978 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14979 GRC_LCLCTRL_GPIO_OUTPUT0;
14980 }
14982 /* Switch out of Vaux if it is a NIC */
14983 tg3_pwrsrc_switch_to_vmain(tp);
14985 /* Derive initial jumbo mode from MTU assigned in
14986 * ether_setup() via the alloc_etherdev() call
14987 */
14988 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14989 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14991 /* Determine WakeOnLan speed to use. */
14992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14993 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14994 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14995 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14996 tg3_flag_clear(tp, WOL_SPEED_100MB);
14997 } else {
14998 tg3_flag_set(tp, WOL_SPEED_100MB);
14999 }
15001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15002 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15004 /* A few boards don't want Ethernet@WireSpeed phy feature */
15005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15006 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15007 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15008 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15009 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15010 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15011 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15013 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15014 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15015 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15016 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15017 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15019 if (tg3_flag(tp, 5705_PLUS) &&
15020 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15021 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15022 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15023 !tg3_flag(tp, 57765_PLUS)) {
15024 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15025 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15026 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15027 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15028 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15029 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15030 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15031 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15032 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15033 } else
15034 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15035 }
15037 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15038 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15039 tp->phy_otp = tg3_read_otp_phycfg(tp);
15040 if (tp->phy_otp == 0)
15041 tp->phy_otp = TG3_OTP_DEFAULT;
15042 }
15044 if (tg3_flag(tp, CPMU_PRESENT))
15045 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15046 else
15047 tp->mi_mode = MAC_MI_MODE_BASE;
15049 tp->coalesce_mode = 0;
15050 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15051 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15052 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15054 /* Set these bits to enable statistics workaround. */
15055 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15056 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15057 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15058 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15059 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15060 }
15062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15064 tg3_flag_set(tp, USE_PHYLIB);
15066 err = tg3_mdio_init(tp);
15067 if (err)
15068 return err;
15070 /* Initialize data/descriptor byte/word swapping. */
15071 val = tr32(GRC_MODE);
15072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15073 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15074 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15075 GRC_MODE_B2HRX_ENABLE |
15076 GRC_MODE_HTX2B_ENABLE |
15077 GRC_MODE_HOST_STACKUP);
15078 else
15079 val &= GRC_MODE_HOST_STACKUP;
15081 tw32(GRC_MODE, val | tp->grc_mode);
15083 tg3_switch_clocks(tp);
15085 /* Clear this out for sanity. */
15086 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15088 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15089 &pci_state_reg);
15090 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15091 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15092 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15094 if (chiprevid == CHIPREV_ID_5701_A0 ||
15095 chiprevid == CHIPREV_ID_5701_B0 ||
15096 chiprevid == CHIPREV_ID_5701_B2 ||
15097 chiprevid == CHIPREV_ID_5701_B5) {
15098 void __iomem *sram_base;
15100 /* Write some dummy words into the SRAM status block
15101 * area, see if it reads back correctly. If the return
15102 * value is bad, force enable the PCIX workaround.
15103 */
15104 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15106 writel(0x00000000, sram_base);
15107 writel(0x00000000, sram_base + 4);
15108 writel(0xffffffff, sram_base + 4);
15109 if (readl(sram_base) != 0x00000000)
15110 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15111 }
15112 }
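/* The probe above writes 0 to two adjacent status-block words, rewrites
 * the second with 0xffffffff, and reads the first back; a non-zero
 * readback means the later write bled into its neighbour, which is
 * precisely the PCI-X target bug being tested for.
 */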
15115 tg3_nvram_init(tp);
15117 grc_misc_cfg = tr32(GRC_MISC_CFG);
15118 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15120 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15121 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15122 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15123 tg3_flag_set(tp, IS_5788);
15125 if (!tg3_flag(tp, IS_5788) &&
15126 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15127 tg3_flag_set(tp, TAGGED_STATUS);
15128 if (tg3_flag(tp, TAGGED_STATUS)) {
15129 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15130 HOSTCC_MODE_CLRTICK_TXBD);
15132 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15133 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15134 tp->misc_host_ctrl);
15135 }
15137 /* Preserve the APE MAC_MODE bits */
15138 if (tg3_flag(tp, ENABLE_APE))
15139 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15140 else
15141 tp->mac_mode = 0;
15143 /* these are limited to 10/100 only */
15144 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15145 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15146 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15147 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15148 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
15149 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
15150 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
15151 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15152 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
15153 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
15154 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
15155 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
15156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15157 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15158 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15159 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15161 err = tg3_phy_probe(tp);
15162 if (err) {
15163 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15164 /* ... but do not return immediately ... */
15165 tg3_mdio_fini(tp);
15166 }
15168 tg3_read_vpd(tp);
15169 tg3_read_fw_ver(tp);
15171 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15172 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15173 } else {
15174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15175 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15176 else
15177 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15178 }
15180 /* 5700 {AX,BX} chips have a broken status block link
15181 * change bit implementation, so we must use the
15182 * status register in those cases.
15183 */
15184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15185 tg3_flag_set(tp, USE_LINKCHG_REG);
15186 else
15187 tg3_flag_clear(tp, USE_LINKCHG_REG);
15189 /* The led_ctrl is set during tg3_phy_probe, here we might
15190 * have to force the link status polling mechanism based
15191 * upon subsystem IDs.
15192 */
15193 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15194 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15195 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15196 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15197 tg3_flag_set(tp, USE_LINKCHG_REG);
15198 }
15200 /* For all SERDES we poll the MAC status register. */
15201 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15202 tg3_flag_set(tp, POLL_SERDES);
15203 else
15204 tg3_flag_clear(tp, POLL_SERDES);
15206 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15207 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15209 tg3_flag(tp, PCIX_MODE)) {
15210 tp->rx_offset = NET_SKB_PAD;
15211 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15212 tp->rx_copy_thresh = ~(u16)0;
15213 #endif
15214 }
15216 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15217 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15218 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15220 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15222 /* Increment the rx prod index on the rx std ring by at most
15223 * 8 for these chips to workaround hw errata.
15224 */
15225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15228 tp->rx_std_max_post = 8;
15230 if (tg3_flag(tp, ASPM_WORKAROUND))
15231 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15232 PCIE_PWR_MGMT_L1_THRESH_MSK;
15234 return err;
15235 }
15237 #ifdef CONFIG_SPARC
15238 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15239 {
15240 struct net_device *dev = tp->dev;
15241 struct pci_dev *pdev = tp->pdev;
15242 struct device_node *dp = pci_device_to_OF_node(pdev);
15243 const unsigned char *addr;
15244 int len;
15246 addr = of_get_property(dp, "local-mac-address", &len);
15247 if (addr && len == 6) {
15248 memcpy(dev->dev_addr, addr, 6);
15249 memcpy(dev->perm_addr, dev->dev_addr, 6);
15250 return 0;
15251 }
15252 return -ENODEV;
15253 }
15255 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15256 {
15257 struct net_device *dev = tp->dev;
15259 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15260 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15261 return 0;
15262 }
15263 #endif
15265 static int __devinit tg3_get_device_address(struct tg3 *tp)
15266 {
15267 struct net_device *dev = tp->dev;
15268 u32 hi, lo, mac_offset;
15269 int addr_ok = 0;
15271 #ifdef CONFIG_SPARC
15272 if (!tg3_get_macaddr_sparc(tp))
15273 return 0;
15274 #endif
15276 mac_offset = 0x7c;
15277 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15278 tg3_flag(tp, 5780_CLASS)) {
15279 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15280 mac_offset = 0xcc;
15281 if (tg3_nvram_lock(tp))
15282 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15283 else
15284 tg3_nvram_unlock(tp);
15285 } else if (tg3_flag(tp, 5717_PLUS)) {
15286 if (tp->pci_fn & 1)
15287 mac_offset = 0xcc;
15288 if (tp->pci_fn > 1)
15289 mac_offset += 0x18c;
15290 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15291 mac_offset = 0x10;
15293 /* First try to get it from MAC address mailbox. */
15294 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15295 if ((hi >> 16) == 0x484b) {
15296 dev->dev_addr[0] = (hi >> 8) & 0xff;
15297 dev->dev_addr[1] = (hi >> 0) & 0xff;
15299 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15300 dev->dev_addr[2] = (lo >> 24) & 0xff;
15301 dev->dev_addr[3] = (lo >> 16) & 0xff;
15302 dev->dev_addr[4] = (lo >> 8) & 0xff;
15303 dev->dev_addr[5] = (lo >> 0) & 0xff;
15305 /* Some old bootcode may report a 0 MAC address in SRAM */
15306 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15307 }
15308 if (!addr_ok) {
15309 /* Next, try NVRAM. */
15310 if (!tg3_flag(tp, NO_NVRAM) &&
15311 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15312 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15313 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15314 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15315 }
15316 /* Finally just fetch it out of the MAC control regs. */
15317 else {
15318 hi = tr32(MAC_ADDR_0_HIGH);
15319 lo = tr32(MAC_ADDR_0_LOW);
15321 dev->dev_addr[5] = lo & 0xff;
15322 dev->dev_addr[4] = (lo >> 8) & 0xff;
15323 dev->dev_addr[3] = (lo >> 16) & 0xff;
15324 dev->dev_addr[2] = (lo >> 24) & 0xff;
15325 dev->dev_addr[1] = hi & 0xff;
15326 dev->dev_addr[0] = (hi >> 8) & 0xff;
15327 }
15328 }
15330 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15331 #ifdef CONFIG_SPARC
15332 if (!tg3_get_default_macaddr_sparc(tp))
15333 return 0;
15334 #endif
15335 return -EINVAL;
15336 }
15337 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15338 return 0;
15339 }
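/* Summary of the selection order implemented above: OpenFirmware
 * property (sparc), then the bootcode mailbox (0x484b is ASCII "HK"),
 * then NVRAM at mac_offset, then the MAC_ADDR_0 registers, and finally
 * the sparc IDPROM fallback before giving up with -EINVAL.
 */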
15341 #define BOUNDARY_SINGLE_CACHELINE 1
15342 #define BOUNDARY_MULTI_CACHELINE 2
15344 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15345 {
15346 int cacheline_size;
15347 u8 byte;
15348 int goal;
15350 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15351 if (byte == 0)
15352 cacheline_size = 1024;
15353 else
15354 cacheline_size = (int) byte * 4;
15356 /* On 5703 and later chips, the boundary bits have no
15357 * effect.
15358 */
15359 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15360 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15361 !tg3_flag(tp, PCI_EXPRESS))
15362 goto out;
15364 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15365 goal = BOUNDARY_MULTI_CACHELINE;
15366 #else
15367 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15368 goal = BOUNDARY_SINGLE_CACHELINE;
15369 #else
15370 goal = 0;
15371 #endif
15372 #endif
15374 if (tg3_flag(tp, 57765_PLUS)) {
15375 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15376 goto out;
15377 }
15379 if (!goal)
15380 goto out;
15382 /* PCI controllers on most RISC systems tend to disconnect
15383 * when a device tries to burst across a cache-line boundary.
15384 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15386 * Unfortunately, for PCI-E there are only limited
15387 * write-side controls for this, and thus for reads
15388 * we will still get the disconnects. We'll also waste
15389 * these PCI cycles for both read and write for chips
15390 * other than 5700 and 5701 which do not implement the
15391 * boundary bits.
15392 */
15393 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15394 switch (cacheline_size) {
15395 case 16:
15396 case 32:
15397 case 64:
15398 case 128:
15399 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15400 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15401 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15402 } else {
15403 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15404 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15405 }
15406 break;
15408 case 256:
15409 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15410 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15411 break;
15413 default:
15414 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15415 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15416 break;
15417 }
15418 } else if (tg3_flag(tp, PCI_EXPRESS)) {
15419 switch (cacheline_size) {
15420 case 16:
15421 case 32:
15422 case 64:
15423 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15424 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15425 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15426 break;
15427 }
15428 /* fallthrough */
15429 case 128:
15430 default:
15431 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15432 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15433 break;
15434 }
15435 } else {
15436 switch (cacheline_size) {
15437 case 16:
15438 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15439 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15440 DMA_RWCTRL_WRITE_BNDRY_16);
15441 break;
15442 }
15443 /* fallthrough */
15444 case 32:
15445 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15446 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15447 DMA_RWCTRL_WRITE_BNDRY_32);
15448 break;
15449 }
15450 /* fallthrough */
15451 case 64:
15452 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15453 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15454 DMA_RWCTRL_WRITE_BNDRY_64);
15455 break;
15456 }
15457 /* fallthrough */
15458 case 128:
15459 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15460 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15461 DMA_RWCTRL_WRITE_BNDRY_128);
15462 break;
15463 }
15464 /* fallthrough */
15465 case 256:
15466 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15467 DMA_RWCTRL_WRITE_BNDRY_256);
15468 break;
15469 case 512:
15470 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15471 DMA_RWCTRL_WRITE_BNDRY_512);
15472 break;
15473 case 1024:
15474 default:
15475 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15476 DMA_RWCTRL_WRITE_BNDRY_1024);
15477 break;
15478 }
15479 }
15481 out:
15482 return val;
15483 }
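/* Example: a PCI-X board reporting 64-byte cache lines with a
 * BOUNDARY_SINGLE_CACHELINE goal falls into the shared 16/32/64/128
 * case above and ends up with the 128-byte PCI-X read/write boundary,
 * the smallest setting that still covers a whole cache line there.
 */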
15485 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15486 {
15487 struct tg3_internal_buffer_desc test_desc;
15488 u32 sram_dma_descs;
15489 int i, ret;
15491 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15493 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15494 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15495 tw32(RDMAC_STATUS, 0);
15496 tw32(WDMAC_STATUS, 0);
15498 tw32(BUFMGR_MODE, 0);
15499 tw32(FTQ_RESET, 0);
15501 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15502 test_desc.addr_lo = buf_dma & 0xffffffff;
15503 test_desc.nic_mbuf = 0x00002100;
15504 test_desc.len = size;
15506 /*
15507 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15508 * the *second* time the tg3 driver was getting loaded after an
15509 * initial scan.
15510 *
15511 * Broadcom tells me:
15512 * ...the DMA engine is connected to the GRC block and a DMA
15513 * reset may affect the GRC block in some unpredictable way...
15514 * The behavior of resets to individual blocks has not been tested.
15515 *
15516 * Broadcom noted the GRC reset will also reset all sub-components.
15517 */
15518 if (to_device) {
15519 test_desc.cqid_sqid = (13 << 8) | 2;
15521 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15522 udelay(40);
15523 } else {
15524 test_desc.cqid_sqid = (16 << 8) | 7;
15526 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15527 udelay(40);
15528 }
15529 test_desc.flags = 0x00000005;
15531 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15532 u32 val;
15534 val = *(((u32 *)&test_desc) + i);
15535 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15536 sram_dma_descs + (i * sizeof(u32)));
15537 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15538 }
15539 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15541 if (to_device)
15542 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15543 else
15544 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15546 ret = -ENODEV;
15547 for (i = 0; i < 40; i++) {
15548 u32 val;
15550 if (to_device)
15551 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15552 else
15553 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15554 if ((val & 0xffff) == sram_dma_descs) {
15555 ret = 0;
15556 break;
15557 }
15559 udelay(100);
15560 }
15562 return ret;
15563 }
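/* The polling loop above gives the DMA engine up to 40 * 100us = 4ms to
 * echo the descriptor address back through the completion FIFO before
 * the function gives up and returns -ENODEV.
 */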
15565 #define TEST_BUFFER_SIZE 0x2000
15567 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15568 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15569 { },
15570 };
15572 static int __devinit tg3_test_dma(struct tg3 *tp)
15573 {
15574 dma_addr_t buf_dma;
15575 u32 *buf, saved_dma_rwctrl;
15576 int ret = 0;
15578 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15579 &buf_dma, GFP_KERNEL);
15580 if (!buf) {
15581 ret = -ENOMEM;
15582 goto out_nofree;
15583 }
15585 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15586 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15588 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15590 if (tg3_flag(tp, 57765_PLUS))
15591 goto out;
15593 if (tg3_flag(tp, PCI_EXPRESS)) {
15594 /* DMA read watermark not used on PCIE */
15595 tp->dma_rwctrl |= 0x00180000;
15596 } else if (!tg3_flag(tp, PCIX_MODE)) {
15597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15599 tp->dma_rwctrl |= 0x003f0000;
15601 tp->dma_rwctrl |= 0x003f000f;
15603 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15604 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15605 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15606 u32 read_water = 0x7;
15608 /* If the 5704 is behind the EPB bridge, we can
15609 * do the less restrictive ONE_DMA workaround for
15610 * better performance.
15611 */
15612 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15614 tp->dma_rwctrl |= 0x8000;
15615 else if (ccval == 0x6 || ccval == 0x7)
15616 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15619 read_water = 4;
15620 /* Set bit 23 to enable PCIX hw bug fix */
15621 tp->dma_rwctrl |=
15622 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15623 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15624 0x00800000;
15625 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15626 /* 5780 always in PCIX mode */
15627 tp->dma_rwctrl |= 0x00144000;
15628 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15629 /* 5714 always in PCIX mode */
15630 tp->dma_rwctrl |= 0x00148000;
15631 } else {
15632 tp->dma_rwctrl |= 0x001b000f;
15633 }
15636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15638 tp->dma_rwctrl &= 0xfffffff0;
15640 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15642 /* Remove this if it causes problems for some boards. */
15643 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15645 /* On 5700/5701 chips, we need to set this bit.
15646 * Otherwise the chip will issue cacheline transactions
15647 * to streamable DMA memory with not all the byte
15648 * enables turned on. This is an error on several
15649 * RISC PCI controllers, in particular sparc64.
15650 *
15651 * On 5703/5704 chips, this bit has been reassigned
15652 * a different meaning. In particular, it is used
15653 * on those chips to enable a PCI-X workaround.
15654 */
15655 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15656 }
15658 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15660 #if 0
15661 /* Unneeded, already done by tg3_get_invariants. */
15662 tg3_switch_clocks(tp);
15663 #endif
15665 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15666 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15667 goto out;
15669 /* It is best to perform DMA test with maximum write burst size
15670 * to expose the 5700/5701 write DMA bug.
15671 */
15672 saved_dma_rwctrl = tp->dma_rwctrl;
15673 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15674 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15676 while (1) {
15677 u32 *p = buf, i;
15679 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15680 p[i] = i;
15682 /* Send the buffer to the chip. */
15683 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15684 if (ret) {
15685 dev_err(&tp->pdev->dev,
15686 "%s: Buffer write failed. err = %d\n",
15687 __func__, ret);
15688 break;
15689 }
15692 /* validate data reached card RAM correctly. */
15693 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15694 u32 val;
15695 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15696 if (le32_to_cpu(val) != p[i]) {
15697 dev_err(&tp->pdev->dev,
15698 "%s: Buffer corrupted on device! "
15699 "(%d != %d)\n", __func__, val, i);
15700 /* ret = -ENODEV here? */
15701 }
15702 p[i] = 0;
15703 }
15705 /* Now read it back. */
15706 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15707 if (ret) {
15708 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15709 "err = %d\n", __func__, ret);
15710 break;
15711 }
15713 /* Verify it. */
15714 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15715 if (p[i] == i)
15716 continue;
15718 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15719 DMA_RWCTRL_WRITE_BNDRY_16) {
15720 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15721 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15722 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15723 break;
15724 } else {
15725 dev_err(&tp->pdev->dev,
15726 "%s: Buffer corrupted on read back! "
15727 "(%d != %d)\n", __func__, p[i], i);
15728 ret = -ENODEV;
15729 goto out;
15730 }
15731 }
15733 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15734 /* Success. */
15735 ret = 0;
15736 break;
15737 }
15738 }
15739 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15740 DMA_RWCTRL_WRITE_BNDRY_16) {
15741 /* DMA test passed without adjusting DMA boundary,
15742 * now look for chipsets that are known to expose the
15743 * DMA bug without failing the test.
15744 */
15745 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15746 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15747 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15748 } else {
15749 /* Safe to use the calculated DMA boundary. */
15750 tp->dma_rwctrl = saved_dma_rwctrl;
15751 }
15753 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15754 }
15756 out:
15757 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15758 out_nofree:
15759 return ret;
15760 }
15762 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15763 {
15764 if (tg3_flag(tp, 57765_PLUS)) {
15765 tp->bufmgr_config.mbuf_read_dma_low_water =
15766 DEFAULT_MB_RDMA_LOW_WATER_5705;
15767 tp->bufmgr_config.mbuf_mac_rx_low_water =
15768 DEFAULT_MB_MACRX_LOW_WATER_57765;
15769 tp->bufmgr_config.mbuf_high_water =
15770 DEFAULT_MB_HIGH_WATER_57765;
15772 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15773 DEFAULT_MB_RDMA_LOW_WATER_5705;
15774 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15775 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15776 tp->bufmgr_config.mbuf_high_water_jumbo =
15777 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15778 } else if (tg3_flag(tp, 5705_PLUS)) {
15779 tp->bufmgr_config.mbuf_read_dma_low_water =
15780 DEFAULT_MB_RDMA_LOW_WATER_5705;
15781 tp->bufmgr_config.mbuf_mac_rx_low_water =
15782 DEFAULT_MB_MACRX_LOW_WATER_5705;
15783 tp->bufmgr_config.mbuf_high_water =
15784 DEFAULT_MB_HIGH_WATER_5705;
15785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15786 tp->bufmgr_config.mbuf_mac_rx_low_water =
15787 DEFAULT_MB_MACRX_LOW_WATER_5906;
15788 tp->bufmgr_config.mbuf_high_water =
15789 DEFAULT_MB_HIGH_WATER_5906;
15792 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15793 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15794 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15795 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15796 tp->bufmgr_config.mbuf_high_water_jumbo =
15797 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15799 tp->bufmgr_config.mbuf_read_dma_low_water =
15800 DEFAULT_MB_RDMA_LOW_WATER;
15801 tp->bufmgr_config.mbuf_mac_rx_low_water =
15802 DEFAULT_MB_MACRX_LOW_WATER;
15803 tp->bufmgr_config.mbuf_high_water =
15804 DEFAULT_MB_HIGH_WATER;
15806 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15807 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15808 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15809 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15810 tp->bufmgr_config.mbuf_high_water_jumbo =
15811 DEFAULT_MB_HIGH_WATER_JUMBO;
15814 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15815 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
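
/*
 * Summary of the selection above, for reference (the values are the
 * DEFAULT_MB_* constants from tg3.h):
 *
 *	57765_PLUS chips  ->  57765 MAC RX / high watermarks, 5705 RDMA level
 *	5705_PLUS chips   ->  5705 levels, with 5906 overriding the MAC RX
 *	                      and high watermarks
 *	everything else   ->  original 5700-class levels
 *
 * The jumbo watermarks follow the same split, and the DMA low/high
 * watermarks are common to all chips.
 */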
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}
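
/*
 * Usage sketch (illustrative): the returned string is only ever used for
 * log output, e.g. the probe-time banner below does roughly
 *
 *	netdev_info(dev, "attached PHY is %s\n", tg3_phy_string(tp));
 *
 * tp->phy_id is masked with TG3_PHY_ID_MASK first, so PHY revision bits
 * do not affect the lookup.
 */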
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
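
/*
 * Usage sketch (illustrative): the caller owns the buffer and must size it
 * for the longest string composed here ("PCIX:133MHz:64-bit" and friends;
 * "PCI Express" returns early with no suffix).  tg3_init_one() below
 * passes a char str[40]:
 *
 *	char str[40];
 *	netdev_info(dev, "bus: %s\n", tg3_bus_string(tp, str));
 */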
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
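
/*
 * Note (illustrative): these are the defaults reported by
 * "ethtool -c <iface>" before the user changes anything; the driver's
 * ETHTOOL_GCOALESCE handler copies tp->coal back to userspace verbatim.
 */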
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here govern register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
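
	/*
	 * Note (illustrative): the *_DATA swap bits above govern packet-data
	 * DMA byte ordering, while the *_NONFRM bits cover everything else
	 * the chip DMAs (descriptors, status block).  BSWAP_NONFRM_DATA is
	 * added only on big-endian hosts so that host-endian descriptor
	 * fields and the big-endian on-board CPUs agree without per-field
	 * byte-swapping in the fast path.
	 */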
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
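
	/*
	 * Illustrative note: the dma_mask/persist_dma_mask split matters on
	 * 40BIT_DMA_BUG parts with CONFIG_HIGHMEM.  Streaming (per-packet)
	 * mappings may use the full 64-bit mask because tg3_start_xmit()
	 * re-checks and bounces out-of-range buffers, while coherent
	 * allocations (rings, status block) must stay below 40 bits:
	 *
	 *	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	 *	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	 */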
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
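
	/*
	 * Reference (illustrative): the offloads chosen above can be
	 * inspected and toggled from userspace, e.g.
	 *
	 *	ethtool -k eth0		# show offload state
	 *	ethtool -K eth0 tso on	# enable (firmware) TSO if supported
	 */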
	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly.  The DMA self test will enable the WDMAC, and we
	 * would otherwise see (spurious) pending DMA on the PCI bus at
	 * that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
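
	/*
	 * Note (illustrative): vector 0 keeps the mailbox values computed on
	 * its first pass so the same setup still works in single-interrupt
	 * mode; vectors 1..n then step rcvmbx/sndmbx forward so each RSS
	 * ring gets its own producer/consumer mailboxes.  The stride
	 * constants mirror the MAILBOX_* register layout in tg3.h.
	 */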
	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);
		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);
	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);
		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);
		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);
	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
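
/*
 * Note (illustrative): SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops
 * with tg3_suspend/tg3_resume wired up as the suspend/resume, freeze/thaw
 * and poweroff/restore callbacks; the pointer is plugged into
 * tg3_driver.driver.pm below.  With CONFIG_PM_SLEEP disabled, TG3_PM_OPS
 * is NULL and the PM core leaves the device alone.
 */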
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
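
/*
 * Note (illustrative): the PCI error-recovery core drives these callbacks
 * as a state machine: error_detected() -> slot_reset() -> resume().
 * Returning PCI_ERS_RESULT_NEED_RESET above asks the core to reset the
 * slot and then call tg3_io_slot_reset(); PCI_ERS_RESULT_DISCONNECT tells
 * it the device cannot be recovered.
 */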
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);
	tg3_timer_start(tp);
	tg3_netif_start(tp);
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
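
/*
 * Usage note (illustrative): as a standard PCI driver, tg3 normally binds
 * automatically through tg3_pci_tbl, but it can also be loaded by hand,
 * e.g.
 *
 *	modprobe tg3 tg3_debug=0x1
 *
 * where tg3_debug is the driver's existing bitmapped message-enable module
 * parameter (see its use in tg3_init_one() above).
 */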