/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
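/*
 * Illustrative usage (a sketch, not additional driver logic): the
 * wrappers above turn the TG3_FLAG_ enumeration from tg3.h into atomic
 * bitmap operations, e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_ape_lock(tp, TG3_APE_LOCK_MEM);
 *	tg3_flag_set(tp, INIT_COMPLETE);
 *	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 *
 * The token-pasting in the macros means callers never spell out the
 * TG3_FLAG_ prefix themselves.
 */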
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			125
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"September 26, 2012"
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
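/*
 * Worked example of the shift-and-mask point above: because
 * TG3_TX_RING_SIZE is 512, a power of two, ((N) + 1) % 512 and
 * ((N) + 1) & 511 are equivalent, but the latter compiles to a single
 * AND instruction. For N = 511, (511 + 1) & 0x1ff == 0, so NEXT_TX()
 * wraps back to the start of the ring with no divide.
 */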
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
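/*
 * Illustrative sketch (not part of the driver proper) of how the
 * threshold above is typically consumed in an rx path; tg3_rx() makes
 * the equivalent decision with its own buffer bookkeeping:
 *
 *	if (len > TG3_RX_COPY_THRESH(tp)) {
 *		// large frame: hand the DMA-mapped buffer up the stack
 *		// and replenish the producer ring with a fresh buffer
 *	} else {
 *		// small frame: memcpy into a freshly allocated skb and
 *		// recycle the original ring buffer in place
 *	}
 */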
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
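/*
 * Illustrative usage of the accessors above (a sketch, not new driver
 * logic): plain registers go through tw32()/tr32(), writes that must
 * not stay posted use tw32_f(), and writes that additionally need a
 * settle time use tw32_wait_f(), e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * which matches how GPIO power switching is handled elsewhere in this
 * file.
 */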
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
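/*
 * Typical pairing (sketch, not new driver logic): callers bracket
 * access to APE-shared resources with the lock/unlock pair, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;		// -EBUSY: APE firmware kept the grant
 *	// ... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */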
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
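/*
 * Worked example of the budget arithmetic above (comment only): for
 * timeout_us = 25, successive passes subtract 10, 10, then the
 * remaining 5, so an APE that never clears the pending bit leaves
 * timeout_us == 0 and the function returns -EBUSY; any early break
 * leaves timeout_us non-zero and the function returns 0 while still
 * holding TG3_APE_LOCK_MEM.
 */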
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
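/*
 * Usage sketch for the pair above: SMDSP access is bracketed, e.g.
 *
 *	if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
 *		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *	}
 *
 * ENABLE returns 0 on success, so the DISABLE call must only run when
 * the enable actually succeeded; this pattern appears in
 * tg3_phy_eee_adjust() and friends below.
 */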
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
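/*
 * Worked example (comment only): with TG3_FW_EVENT_TIMEOUT_USEC = 2500
 * and the full wait needed, delay_cnt = (2500 >> 3) + 1 = 313 polls of
 * about 8 usec each, i.e. roughly 2.5 msec worst case before giving up
 * on the firmware's ACK of the previous event.
 */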
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2363 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2367 for (chan = 0; chan < 4; chan++) {
2370 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2371 (chan * 0x2000) | 0x0200);
2372 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2373 for (i = 0; i < 6; i++)
2374 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2375 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2376 if (tg3_wait_macro_done(tp))
2383 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2385 u32 reg32, phy9_orig;
2386 int retries, do_phy_reset, err;
2392 err = tg3_bmcr_reset(tp);
2398 /* Disable transmitter and interrupt. */
2399 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2403 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2405 /* Set full-duplex, 1000 mbps. */
2406 tg3_writephy(tp, MII_BMCR,
2407 BMCR_FULLDPLX | BMCR_SPEED1000);
2409 /* Set to master mode. */
2410 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2413 tg3_writephy(tp, MII_CTRL1000,
2414 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2416 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2420 /* Block the PHY control access. */
2421 tg3_phydsp_write(tp, 0x8005, 0x0800);
2423 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2426 } while (--retries);
2428 err = tg3_phy_reset_chanpat(tp);
2432 tg3_phydsp_write(tp, 0x8005, 0x0000);
2434 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2435 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2437 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2441 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2442 reg32 &= ~0x3000;
2443 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2450 /* This will unconditionally reset the tigon3 PHY and reapply the
2451 * chip-specific workarounds. */
2453 static int tg3_phy_reset(struct tg3 *tp)
2458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2459 val = tr32(GRC_MISC_CFG);
2460 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2463 err = tg3_readphy(tp, MII_BMSR, &val);
2464 err |= tg3_readphy(tp, MII_BMSR, &val);
2468 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2469 netif_carrier_off(tp->dev);
2470 tg3_link_report(tp);
2473 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2476 err = tg3_phy_reset_5703_4_5(tp);
2483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2484 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2485 cpmuctrl = tr32(TG3_CPMU_CTRL);
2486 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2488 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2491 err = tg3_bmcr_reset(tp);
2495 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2496 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2497 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2499 tw32(TG3_CPMU_CTRL, cpmuctrl);
2502 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2503 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2504 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2505 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2506 CPMU_LSPD_1000MB_MACCLK_12_5) {
2507 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2509 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2513 if (tg3_flag(tp, 5717_PLUS) &&
2514 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2517 tg3_phy_apply_otp(tp);
2519 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2520 tg3_phy_toggle_apd(tp, true);
2522 tg3_phy_toggle_apd(tp, false);
2525 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2526 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2527 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2528 tg3_phydsp_write(tp, 0x000a, 0x0323);
2529 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2532 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2533 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2534 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2537 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2538 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2539 tg3_phydsp_write(tp, 0x000a, 0x310b);
2540 tg3_phydsp_write(tp, 0x201f, 0x9506);
2541 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2542 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2544 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2545 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2546 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2547 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2548 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2549 tg3_writephy(tp, MII_TG3_TEST1,
2550 MII_TG3_TEST1_TRIM_EN | 0x4);
2552 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2554 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2558 /* Set Extended packet length bit (bit 14) on all chips that
2559 * support jumbo frames. */
2560 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2561 /* Cannot do read-modify-write on 5401 */
2562 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2563 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2564 /* Set bit 14 with read-modify-write to preserve other bits */
2565 err = tg3_phy_auxctl_read(tp,
2566 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2568 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2569 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2572 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2573 * jumbo frames transmission.
2575 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2576 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2577 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2578 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2582 /* adjust output voltage */
2583 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2586 tg3_phy_toggle_automdix(tp, 1);
2587 tg3_phy_set_wirespeed(tp);
2591 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2592 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2593 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2594 TG3_GPIO_MSG_NEED_VAUX)
2595 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2596 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2597 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2598 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2599 (TG3_GPIO_MSG_DRVR_PRES << 12))
2601 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2602 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2603 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2604 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2605 (TG3_GPIO_MSG_NEED_VAUX << 12))
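/* Layout sketch (illustrative, not from the original source): the GPIO
 * message word carries one 4-bit field per PCI function, shifted by
 * TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn (see tg3_set_function_status()
 * below), so function 0 owns the low nibble of the shifted region,
 * function 1 the next nibble, and so on:
 *
 *	fn3        fn2        fn1        fn0
 *	[..V D] | [..V D] | [..V D] | [..V D]
 *
 * where D = TG3_GPIO_MSG_DRVR_PRES and V = TG3_GPIO_MSG_NEED_VAUX.
 * The ALL_*_MASK macros above simply OR the same bit across all four
 * nibbles.
 */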
2607 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2613 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2615 status = tr32(TG3_CPMU_DRV_STATUS);
2617 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2618 status &= ~(TG3_GPIO_MSG_MASK << shift);
2619 status |= (newstat << shift);
2621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2623 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2625 tw32(TG3_CPMU_DRV_STATUS, status);
2627 return status >> TG3_APE_GPIO_MSG_SHIFT;
2630 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2632 if (!tg3_flag(tp, IS_NIC))
2635 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2638 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2641 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2643 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2644 TG3_GRC_LCLCTL_PWRSW_DELAY);
2646 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2648 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2649 TG3_GRC_LCLCTL_PWRSW_DELAY);
2655 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2659 if (!tg3_flag(tp, IS_NIC) ||
2660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2664 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2666 tw32_wait_f(GRC_LOCAL_CTRL,
2667 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2668 TG3_GRC_LCLCTL_PWRSW_DELAY);
2670 tw32_wait_f(GRC_LOCAL_CTRL,
2672 TG3_GRC_LCLCTL_PWRSW_DELAY);
2674 tw32_wait_f(GRC_LOCAL_CTRL,
2675 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2676 TG3_GRC_LCLCTL_PWRSW_DELAY);
2679 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2681 if (!tg3_flag(tp, IS_NIC))
2684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2686 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2687 (GRC_LCLCTRL_GPIO_OE0 |
2688 GRC_LCLCTRL_GPIO_OE1 |
2689 GRC_LCLCTRL_GPIO_OE2 |
2690 GRC_LCLCTRL_GPIO_OUTPUT0 |
2691 GRC_LCLCTRL_GPIO_OUTPUT1),
2692 TG3_GRC_LCLCTL_PWRSW_DELAY);
2693 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2694 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2695 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2696 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2697 GRC_LCLCTRL_GPIO_OE1 |
2698 GRC_LCLCTRL_GPIO_OE2 |
2699 GRC_LCLCTRL_GPIO_OUTPUT0 |
2700 GRC_LCLCTRL_GPIO_OUTPUT1 |
2702 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2703 TG3_GRC_LCLCTL_PWRSW_DELAY);
2705 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2706 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2707 TG3_GRC_LCLCTL_PWRSW_DELAY);
2709 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2710 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2711 TG3_GRC_LCLCTL_PWRSW_DELAY);
2714 u32 grc_local_ctrl = 0;
2716 /* Workaround to prevent overdrawing Amps. */
2717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2718 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2719 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2721 TG3_GRC_LCLCTL_PWRSW_DELAY);
2724 /* On 5753 and variants, GPIO2 cannot be used. */
2725 no_gpio2 = tp->nic_sram_data_cfg &
2726 NIC_SRAM_DATA_CFG_NO_GPIO2;
2728 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2729 GRC_LCLCTRL_GPIO_OE1 |
2730 GRC_LCLCTRL_GPIO_OE2 |
2731 GRC_LCLCTRL_GPIO_OUTPUT1 |
2732 GRC_LCLCTRL_GPIO_OUTPUT2;
2734 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2735 GRC_LCLCTRL_GPIO_OUTPUT2);
2737 tw32_wait_f(GRC_LOCAL_CTRL,
2738 tp->grc_local_ctrl | grc_local_ctrl,
2739 TG3_GRC_LCLCTL_PWRSW_DELAY);
2741 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2743 tw32_wait_f(GRC_LOCAL_CTRL,
2744 tp->grc_local_ctrl | grc_local_ctrl,
2745 TG3_GRC_LCLCTL_PWRSW_DELAY);
2748 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2749 tw32_wait_f(GRC_LOCAL_CTRL,
2750 tp->grc_local_ctrl | grc_local_ctrl,
2751 TG3_GRC_LCLCTL_PWRSW_DELAY);
2756 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2760 /* Serialize power state transitions */
2761 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2764 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2765 msg = TG3_GPIO_MSG_NEED_VAUX;
2767 msg = tg3_set_function_status(tp, msg);
2769 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2772 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2773 tg3_pwrsrc_switch_to_vaux(tp);
2775 tg3_pwrsrc_die_with_vmain(tp);
2778 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2781 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2783 bool need_vaux = false;
2785 /* The GPIOs do something completely different on 57765. */
2786 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2790 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2792 tg3_frob_aux_power_5717(tp, include_wol ?
2793 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2797 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2798 struct net_device *dev_peer;
2800 dev_peer = pci_get_drvdata(tp->pdev_peer);
2802 /* remove_one() may have been run on the peer. */
2804 struct tg3 *tp_peer = netdev_priv(dev_peer);
2806 if (tg3_flag(tp_peer, INIT_COMPLETE))
2809 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2810 tg3_flag(tp_peer, ENABLE_ASF))
2815 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2816 tg3_flag(tp, ENABLE_ASF))
2820 tg3_pwrsrc_switch_to_vaux(tp);
2822 tg3_pwrsrc_die_with_vmain(tp);
2825 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2827 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2828 return 1;
2829 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2830 if (speed != SPEED_10)
2831 return 1;
2832 } else if (speed == SPEED_10)
2833 return 1;
2835 return 0;
2838 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2842 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2843 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2844 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2845 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2848 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2849 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2850 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2855 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2857 val = tr32(GRC_MISC_CFG);
2858 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2861 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2863 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2866 tg3_writephy(tp, MII_ADVERTISE, 0);
2867 tg3_writephy(tp, MII_BMCR,
2868 BMCR_ANENABLE | BMCR_ANRESTART);
2870 tg3_writephy(tp, MII_TG3_FET_TEST,
2871 phytest | MII_TG3_FET_SHADOW_EN);
2872 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2873 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2875 MII_TG3_FET_SHDW_AUXMODE4,
2878 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2881 } else if (do_low_power) {
2882 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2883 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2885 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2886 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2887 MII_TG3_AUXCTL_PCTL_VREG_11V;
2888 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2891 /* The PHY should not be powered down on some chips because
2892 * of bugs. */
2894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2896 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2897 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2898 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2902 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2903 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2904 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2905 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2906 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2907 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2910 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2913 /* tp->lock is held. */
2914 static int tg3_nvram_lock(struct tg3 *tp)
2916 if (tg3_flag(tp, NVRAM)) {
2919 if (tp->nvram_lock_cnt == 0) {
2920 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2921 for (i = 0; i < 8000; i++) {
2922 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2927 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2931 tp->nvram_lock_cnt++;
2936 /* tp->lock is held. */
2937 static void tg3_nvram_unlock(struct tg3 *tp)
2939 if (tg3_flag(tp, NVRAM)) {
2940 if (tp->nvram_lock_cnt > 0)
2941 tp->nvram_lock_cnt--;
2942 if (tp->nvram_lock_cnt == 0)
2943 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
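/* Usage sketch (illustrative, not from the original source): the lock
 * is a counting wrapper around the NVRAM_SWARB hardware semaphore, so
 * nested sections are fine:
 *
 *	tg3_nvram_lock(tp);	 (takes SWARB_REQ_SET1, cnt = 1)
 *	tg3_nvram_lock(tp);	 (cnt = 2, no hardware access)
 *	tg3_nvram_unlock(tp);	 (cnt = 1)
 *	tg3_nvram_unlock(tp);	 (cnt = 0, released via SWARB_REQ_CLR1)
 */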
2947 /* tp->lock is held. */
2948 static void tg3_enable_nvram_access(struct tg3 *tp)
2950 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2951 u32 nvaccess = tr32(NVRAM_ACCESS);
2953 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2957 /* tp->lock is held. */
2958 static void tg3_disable_nvram_access(struct tg3 *tp)
2960 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2961 u32 nvaccess = tr32(NVRAM_ACCESS);
2963 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2967 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2968 u32 offset, u32 *val)
2973 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2974 return -EINVAL;
2976 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2977 EEPROM_ADDR_DEVID_MASK |
2979 tw32(GRC_EEPROM_ADDR,
2981 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2982 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2983 EEPROM_ADDR_ADDR_MASK) |
2984 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2986 for (i = 0; i < 1000; i++) {
2987 tmp = tr32(GRC_EEPROM_ADDR);
2989 if (tmp & EEPROM_ADDR_COMPLETE)
2993 if (!(tmp & EEPROM_ADDR_COMPLETE))
2994 return -EBUSY;
2996 tmp = tr32(GRC_EEPROM_DATA);
2999 * The data will always be opposite the native endian
3000 * format. Perform a blind byteswap to compensate.
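/* A minimal sketch of the byteswap mentioned above (assumed
 * reconstruction; the tail of the function is elided here): the EEPROM
 * data register is read raw and blindly swapped so that a later
 * cpu_to_be32() in tg3_nvram_read_be32() recreates the on-chip byte
 * stream on both little- and big-endian hosts:
 *
 *	*val = swab32(tmp);
 *	return 0;
 */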
3007 #define NVRAM_CMD_TIMEOUT 10000
3009 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3013 tw32(NVRAM_CMD, nvram_cmd);
3014 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3016 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3022 if (i == NVRAM_CMD_TIMEOUT)
3023 return -EBUSY;
3028 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3030 if (tg3_flag(tp, NVRAM) &&
3031 tg3_flag(tp, NVRAM_BUFFERED) &&
3032 tg3_flag(tp, FLASH) &&
3033 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3034 (tp->nvram_jedecnum == JEDEC_ATMEL))
3036 addr = ((addr / tp->nvram_pagesize) <<
3037 ATMEL_AT45DB0X1B_PAGE_POS) +
3038 (addr % tp->nvram_pagesize);
3043 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3045 if (tg3_flag(tp, NVRAM) &&
3046 tg3_flag(tp, NVRAM_BUFFERED) &&
3047 tg3_flag(tp, FLASH) &&
3048 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3049 (tp->nvram_jedecnum == JEDEC_ATMEL))
3051 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3052 tp->nvram_pagesize) +
3053 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
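/* Worked example (illustrative, not from the original source; assumes
 * the typical Atmel AT45DB011B geometry of nvram_pagesize == 264 and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9): a linear offset of 1000 maps to
 *
 *	page   = 1000 / 264 = 3
 *	offset = 1000 % 264 = 208
 *	phys   = (3 << 9) + 208 = 0x6d0
 *
 * and tg3_nvram_logical_addr() inverts it:
 * (0x6d0 >> 9) * 264 + (0x6d0 & 511) == 3 * 264 + 208 == 1000.
 */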
3058 /* NOTE: Data read in from NVRAM is byteswapped according to
3059 * the byteswapping settings for all other register accesses.
3060 * tg3 devices are BE devices, so on a BE machine, the data
3061 * returned will be exactly as it is seen in NVRAM. On a LE
3062 * machine, the 32-bit value will be byteswapped.
3064 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3068 if (!tg3_flag(tp, NVRAM))
3069 return tg3_nvram_read_using_eeprom(tp, offset, val);
3071 offset = tg3_nvram_phys_addr(tp, offset);
3073 if (offset > NVRAM_ADDR_MSK)
3074 return -EINVAL;
3076 ret = tg3_nvram_lock(tp);
3080 tg3_enable_nvram_access(tp);
3082 tw32(NVRAM_ADDR, offset);
3083 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3084 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3087 *val = tr32(NVRAM_RDDATA);
3089 tg3_disable_nvram_access(tp);
3091 tg3_nvram_unlock(tp);
3096 /* Ensures NVRAM data is in bytestream format. */
3097 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3100 int res = tg3_nvram_read(tp, offset, &v);
3102 *val = cpu_to_be32(v);
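/* Usage sketch (illustrative, not from the original source): readers
 * that want raw NVRAM bytes, such as the VPD/part-number paths
 * elsewhere in the driver, lean on the bytestream guarantee above:
 *
 *	__be32 buf[4];
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		if (tg3_nvram_read_be32(tp, offset + 4 * i, &buf[i]))
 *			break;
 *
 * after which memcpy()ing from buf yields the bytes in NVRAM order
 * regardless of host endianness.
 */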
3106 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3107 u32 offset, u32 len, u8 *buf)
3112 for (i = 0; i < len; i += 4) {
3118 memcpy(&data, buf + i, 4);
3121 * The SEEPROM interface expects the data to always be opposite
3122 * the native endian format. We accomplish this by reversing
3123 * all the operations that would have been performed on the
3124 * data from a call to tg3_nvram_read_be32().
3126 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3128 val = tr32(GRC_EEPROM_ADDR);
3129 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3131 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3133 tw32(GRC_EEPROM_ADDR, val |
3134 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3135 (addr & EEPROM_ADDR_ADDR_MASK) |
3139 for (j = 0; j < 1000; j++) {
3140 val = tr32(GRC_EEPROM_ADDR);
3142 if (val & EEPROM_ADDR_COMPLETE)
3146 if (!(val & EEPROM_ADDR_COMPLETE)) {
3155 /* offset and length are dword aligned */
3156 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3160 u32 pagesize = tp->nvram_pagesize;
3161 u32 pagemask = pagesize - 1;
3165 tmp = kmalloc(pagesize, GFP_KERNEL);
3171 u32 phy_addr, page_off, size;
3173 phy_addr = offset & ~pagemask;
3175 for (j = 0; j < pagesize; j += 4) {
3176 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3177 (__be32 *) (tmp + j));
3184 page_off = offset & pagemask;
3191 memcpy(tmp + page_off, buf, size);
3193 offset = offset + (pagesize - page_off);
3195 tg3_enable_nvram_access(tp);
3198 * Before we can erase the flash page, we need
3199 * to issue a special "write enable" command.
3201 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3203 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3206 /* Erase the target page */
3207 tw32(NVRAM_ADDR, phy_addr);
3209 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3210 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3212 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3215 /* Issue another write enable to start the write. */
3216 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3218 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3221 for (j = 0; j < pagesize; j += 4) {
3224 data = *((__be32 *) (tmp + j));
3226 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3228 tw32(NVRAM_ADDR, phy_addr + j);
3230 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3234 nvram_cmd |= NVRAM_CMD_FIRST;
3235 else if (j == (pagesize - 4))
3236 nvram_cmd |= NVRAM_CMD_LAST;
3238 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3246 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3247 tg3_nvram_exec_cmd(tp, nvram_cmd);
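/* In short, the unbuffered path above performs a read-modify-write of
 * one full flash page per iteration (summary sketch, assuming the
 * elided error paths behave as the surrounding snippets suggest):
 *
 *	1. read the whole page containing 'offset' into 'tmp'
 *	2. merge the caller's bytes in at page_off
 *	3. issue WREN, then an ERASE command for the page
 *	4. issue WREN again, then program the page back four bytes at a
 *	   time, tagging the first word NVRAM_CMD_FIRST and the last
 *	   NVRAM_CMD_LAST
 *	5. finish with NVRAM_CMD_WRDI to drop write enable
 */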
3254 /* offset and length are dword aligned */
3255 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3260 for (i = 0; i < len; i += 4, offset += 4) {
3261 u32 page_off, phy_addr, nvram_cmd;
3264 memcpy(&data, buf + i, 4);
3265 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3267 page_off = offset % tp->nvram_pagesize;
3269 phy_addr = tg3_nvram_phys_addr(tp, offset);
3271 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3273 if (page_off == 0 || i == 0)
3274 nvram_cmd |= NVRAM_CMD_FIRST;
3275 if (page_off == (tp->nvram_pagesize - 4))
3276 nvram_cmd |= NVRAM_CMD_LAST;
3279 nvram_cmd |= NVRAM_CMD_LAST;
3281 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3282 !tg3_flag(tp, FLASH) ||
3283 !tg3_flag(tp, 57765_PLUS))
3284 tw32(NVRAM_ADDR, phy_addr);
3286 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3287 !tg3_flag(tp, 5755_PLUS) &&
3288 (tp->nvram_jedecnum == JEDEC_ST) &&
3289 (nvram_cmd & NVRAM_CMD_FIRST)) {
3292 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3293 ret = tg3_nvram_exec_cmd(tp, cmd);
3297 if (!tg3_flag(tp, FLASH)) {
3298 /* We always do complete word writes to eeprom. */
3299 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3302 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3309 /* offset and length are dword aligned */
3310 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3314 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3315 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3316 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3320 if (!tg3_flag(tp, NVRAM)) {
3321 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3325 ret = tg3_nvram_lock(tp);
3329 tg3_enable_nvram_access(tp);
3330 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3331 tw32(NVRAM_WRITE1, 0x406);
3333 grc_mode = tr32(GRC_MODE);
3334 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3336 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3337 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3340 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3344 grc_mode = tr32(GRC_MODE);
3345 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3347 tg3_disable_nvram_access(tp);
3348 tg3_nvram_unlock(tp);
3351 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3352 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
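/* Usage sketch (illustrative, not from the original source): callers
 * hand tg3_nvram_write_block() a dword-aligned offset/length pair and
 * it picks the buffered or unbuffered strategy itself, e.g.:
 *
 *	u8 blob[256];
 *
 *	err = tg3_nvram_write_block(tp, 0x1000, sizeof(blob), blob);
 *	if (err)
 *		netdev_warn(tp->dev, "NVRAM write failed: %d\n", err);
 *
 * (0x1000 is an arbitrary example offset, not a defined region.)
 */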
3359 #define RX_CPU_SCRATCH_BASE 0x30000
3360 #define RX_CPU_SCRATCH_SIZE 0x04000
3361 #define TX_CPU_SCRATCH_BASE 0x34000
3362 #define TX_CPU_SCRATCH_SIZE 0x04000
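/* Sketch of the on-chip SRAM carve-out implied by the defines above
 * (illustrative): the RX CPU scratch window spans 0x30000-0x33fff and
 * the TX CPU window 0x34000-0x37fff, 16 KiB each.
 */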
3364 /* tp->lock is held. */
3365 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3369 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3371 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3372 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3374 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3377 if (offset == RX_CPU_BASE) {
3378 for (i = 0; i < 10000; i++) {
3379 tw32(offset + CPU_STATE, 0xffffffff);
3380 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3381 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3385 tw32(offset + CPU_STATE, 0xffffffff);
3386 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3389 for (i = 0; i < 10000; i++) {
3390 tw32(offset + CPU_STATE, 0xffffffff);
3391 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3392 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3398 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3399 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3403 /* Clear firmware's nvram arbitration. */
3404 if (tg3_flag(tp, NVRAM))
3405 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3410 unsigned int fw_base;
3411 unsigned int fw_len;
3412 const __be32 *fw_data;
3415 /* tp->lock is held. */
3416 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3417 u32 cpu_scratch_base, int cpu_scratch_size,
3418 struct fw_info *info)
3420 int err, lock_err, i;
3421 void (*write_op)(struct tg3 *, u32, u32);
3423 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3425 "%s: Trying to load TX cpu firmware which is 5705\n",
3430 if (tg3_flag(tp, 5705_PLUS))
3431 write_op = tg3_write_mem;
3433 write_op = tg3_write_indirect_reg32;
3435 /* It is possible that bootcode is still loading at this point.
3436 * Get the nvram lock first before halting the cpu.
3438 lock_err = tg3_nvram_lock(tp);
3439 err = tg3_halt_cpu(tp, cpu_base);
3441 tg3_nvram_unlock(tp);
3445 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3446 write_op(tp, cpu_scratch_base + i, 0);
3447 tw32(cpu_base + CPU_STATE, 0xffffffff);
3448 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3449 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3450 write_op(tp, (cpu_scratch_base +
3451 (info->fw_base & 0xffff) +
3453 be32_to_cpu(info->fw_data[i]));
3461 /* tp->lock is held. */
3462 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3464 struct fw_info info;
3465 const __be32 *fw_data;
3468 fw_data = (void *)tp->fw->data;
3470 /* Firmware blob starts with version numbers, followed by
3471 * start address and length. We are setting complete length.
3472 * length = end_address_of_bss - start_address_of_text.
3473 * Remainder is the blob to be loaded contiguously
3474 * from start address. */
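/* Concretely (sketch of the layout the comment above describes):
 * fw_data[0] holds the version, fw_data[1] the load address,
 * fw_data[2] the blob's own length field, and the payload starts at
 * fw_data[3]. Those three header words are why fw_len is
 * tp->fw->size - 12 below; the driver trusts the file size rather
 * than fw_data[2].
 */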
3476 info.fw_base = be32_to_cpu(fw_data[1]);
3477 info.fw_len = tp->fw->size - 12;
3478 info.fw_data = &fw_data[3];
3480 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3481 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3486 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3487 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3492 /* Now startup only the RX cpu. */
3493 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3494 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3496 for (i = 0; i < 5; i++) {
3497 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3499 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3500 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3501 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3505 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3506 "should be %08x\n", __func__,
3507 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3510 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3511 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3516 /* tp->lock is held. */
3517 static int tg3_load_tso_firmware(struct tg3 *tp)
3519 struct fw_info info;
3520 const __be32 *fw_data;
3521 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3524 if (tg3_flag(tp, HW_TSO_1) ||
3525 tg3_flag(tp, HW_TSO_2) ||
3526 tg3_flag(tp, HW_TSO_3))
3529 fw_data = (void *)tp->fw->data;
3531 /* Firmware blob starts with version numbers, followed by
3532 * start address and length. We are setting complete length.
3533 * length = end_address_of_bss - start_address_of_text.
3534 * Remainder is the blob to be loaded contiguously
3535 * from start address. */
3537 info.fw_base = be32_to_cpu(fw_data[1]);
3538 cpu_scratch_size = tp->fw_len;
3539 info.fw_len = tp->fw->size - 12;
3540 info.fw_data = &fw_data[3];
3542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3543 cpu_base = RX_CPU_BASE;
3544 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3546 cpu_base = TX_CPU_BASE;
3547 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3548 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3551 err = tg3_load_firmware_cpu(tp, cpu_base,
3552 cpu_scratch_base, cpu_scratch_size,
3557 /* Now startup the cpu. */
3558 tw32(cpu_base + CPU_STATE, 0xffffffff);
3559 tw32_f(cpu_base + CPU_PC, info.fw_base);
3561 for (i = 0; i < 5; i++) {
3562 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3564 tw32(cpu_base + CPU_STATE, 0xffffffff);
3565 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3566 tw32_f(cpu_base + CPU_PC, info.fw_base);
3571 "%s fails to set CPU PC, is %08x should be %08x\n",
3572 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3575 tw32(cpu_base + CPU_STATE, 0xffffffff);
3576 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3581 /* tp->lock is held. */
3582 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3584 u32 addr_high, addr_low;
3587 addr_high = ((tp->dev->dev_addr[0] << 8) |
3588 tp->dev->dev_addr[1]);
3589 addr_low = ((tp->dev->dev_addr[2] << 24) |
3590 (tp->dev->dev_addr[3] << 16) |
3591 (tp->dev->dev_addr[4] << 8) |
3592 (tp->dev->dev_addr[5] << 0));
3593 for (i = 0; i < 4; i++) {
3594 if (i == 1 && skip_mac_1)
3596 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3597 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3600 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3601 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3602 for (i = 0; i < 12; i++) {
3603 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3604 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3608 addr_high = (tp->dev->dev_addr[0] +
3609 tp->dev->dev_addr[1] +
3610 tp->dev->dev_addr[2] +
3611 tp->dev->dev_addr[3] +
3612 tp->dev->dev_addr[4] +
3613 tp->dev->dev_addr[5]) &
3614 TX_BACKOFF_SEED_MASK;
3615 tw32(MAC_TX_BACKOFF_SEED, addr_high);
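/* Worked example (illustrative, not from the original source): for
 * dev_addr 00:10:18:aa:bb:cc the loop above programs
 *
 *	MAC_ADDR_x_HIGH = 0x00000010   (bytes 0-1)
 *	MAC_ADDR_x_LOW  = 0x18aabbcc   (bytes 2-5)
 *
 * and the backoff seed becomes
 * (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK.
 */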
3618 static void tg3_enable_register_access(struct tg3 *tp)
3621 * Make sure register accesses (indirect or otherwise) will function
3624 pci_write_config_dword(tp->pdev,
3625 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3628 static int tg3_power_up(struct tg3 *tp)
3632 tg3_enable_register_access(tp);
3634 err = pci_set_power_state(tp->pdev, PCI_D0);
3636 /* Switch out of Vaux if it is a NIC */
3637 tg3_pwrsrc_switch_to_vmain(tp);
3639 netdev_err(tp->dev, "Transition to D0 failed\n");
3645 static int tg3_setup_phy(struct tg3 *, int);
3647 static int tg3_power_down_prepare(struct tg3 *tp)
3650 bool device_should_wake, do_low_power;
3652 tg3_enable_register_access(tp);
3654 /* Restore the CLKREQ setting. */
3655 if (tg3_flag(tp, CLKREQ_BUG))
3656 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3657 PCI_EXP_LNKCTL_CLKREQ_EN);
3659 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3660 tw32(TG3PCI_MISC_HOST_CTRL,
3661 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3663 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3664 tg3_flag(tp, WOL_ENABLE);
3666 if (tg3_flag(tp, USE_PHYLIB)) {
3667 do_low_power = false;
3668 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3669 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3670 struct phy_device *phydev;
3671 u32 phyid, advertising;
3673 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3675 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3677 tp->link_config.speed = phydev->speed;
3678 tp->link_config.duplex = phydev->duplex;
3679 tp->link_config.autoneg = phydev->autoneg;
3680 tp->link_config.advertising = phydev->advertising;
3682 advertising = ADVERTISED_TP |
3684 ADVERTISED_Autoneg |
3685 ADVERTISED_10baseT_Half;
3687 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3688 if (tg3_flag(tp, WOL_SPEED_100MB))
3690 ADVERTISED_100baseT_Half |
3691 ADVERTISED_100baseT_Full |
3692 ADVERTISED_10baseT_Full;
3694 advertising |= ADVERTISED_10baseT_Full;
3697 phydev->advertising = advertising;
3699 phy_start_aneg(phydev);
3701 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3702 if (phyid != PHY_ID_BCMAC131) {
3703 phyid &= PHY_BCM_OUI_MASK;
3704 if (phyid == PHY_BCM_OUI_1 ||
3705 phyid == PHY_BCM_OUI_2 ||
3706 phyid == PHY_BCM_OUI_3)
3707 do_low_power = true;
3711 do_low_power = true;
3713 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3714 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3716 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3717 tg3_setup_phy(tp, 0);
3720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3723 val = tr32(GRC_VCPU_EXT_CTRL);
3724 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3725 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3729 for (i = 0; i < 200; i++) {
3730 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3731 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3736 if (tg3_flag(tp, WOL_CAP))
3737 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3738 WOL_DRV_STATE_SHUTDOWN |
3742 if (device_should_wake) {
3745 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3747 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3748 tg3_phy_auxctl_write(tp,
3749 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3750 MII_TG3_AUXCTL_PCTL_WOL_EN |
3751 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3752 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3756 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3757 mac_mode = MAC_MODE_PORT_MODE_GMII;
3759 mac_mode = MAC_MODE_PORT_MODE_MII;
3761 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3762 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3764 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3765 SPEED_100 : SPEED_10;
3766 if (tg3_5700_link_polarity(tp, speed))
3767 mac_mode |= MAC_MODE_LINK_POLARITY;
3769 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3772 mac_mode = MAC_MODE_PORT_MODE_TBI;
3775 if (!tg3_flag(tp, 5750_PLUS))
3776 tw32(MAC_LED_CTRL, tp->led_ctrl);
3778 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3779 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3780 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3781 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3783 if (tg3_flag(tp, ENABLE_APE))
3784 mac_mode |= MAC_MODE_APE_TX_EN |
3785 MAC_MODE_APE_RX_EN |
3786 MAC_MODE_TDE_ENABLE;
3788 tw32_f(MAC_MODE, mac_mode);
3791 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3795 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3796 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3797 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3800 base_val = tp->pci_clock_ctrl;
3801 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3802 CLOCK_CTRL_TXCLK_DISABLE);
3804 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3805 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3806 } else if (tg3_flag(tp, 5780_CLASS) ||
3807 tg3_flag(tp, CPMU_PRESENT) ||
3808 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3810 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3811 u32 newbits1, newbits2;
3813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3814 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3815 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3816 CLOCK_CTRL_TXCLK_DISABLE |
3818 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3819 } else if (tg3_flag(tp, 5705_PLUS)) {
3820 newbits1 = CLOCK_CTRL_625_CORE;
3821 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3823 newbits1 = CLOCK_CTRL_ALTCLK;
3824 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3827 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3830 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3833 if (!tg3_flag(tp, 5705_PLUS)) {
3836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3837 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3838 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3839 CLOCK_CTRL_TXCLK_DISABLE |
3840 CLOCK_CTRL_44MHZ_CORE);
3842 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3845 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3846 tp->pci_clock_ctrl | newbits3, 40);
3850 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3851 tg3_power_down_phy(tp, do_low_power);
3853 tg3_frob_aux_power(tp, true);
3855 /* Workaround for unstable PLL clock */
3856 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3857 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3858 u32 val = tr32(0x7d00);
3860 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3862 if (!tg3_flag(tp, ENABLE_ASF)) {
3865 err = tg3_nvram_lock(tp);
3866 tg3_halt_cpu(tp, RX_CPU_BASE);
3868 tg3_nvram_unlock(tp);
3872 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3877 static void tg3_power_down(struct tg3 *tp)
3879 tg3_power_down_prepare(tp);
3881 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3882 pci_set_power_state(tp->pdev, PCI_D3hot);
3885 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3887 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3888 case MII_TG3_AUX_STAT_10HALF:
3889 *speed = SPEED_10;
3890 *duplex = DUPLEX_HALF;
3891 break;
3893 case MII_TG3_AUX_STAT_10FULL:
3894 *speed = SPEED_10;
3895 *duplex = DUPLEX_FULL;
3896 break;
3898 case MII_TG3_AUX_STAT_100HALF:
3899 *speed = SPEED_100;
3900 *duplex = DUPLEX_HALF;
3901 break;
3903 case MII_TG3_AUX_STAT_100FULL:
3904 *speed = SPEED_100;
3905 *duplex = DUPLEX_FULL;
3906 break;
3908 case MII_TG3_AUX_STAT_1000HALF:
3909 *speed = SPEED_1000;
3910 *duplex = DUPLEX_HALF;
3911 break;
3913 case MII_TG3_AUX_STAT_1000FULL:
3914 *speed = SPEED_1000;
3915 *duplex = DUPLEX_FULL;
3916 break;
3918 default:
3919 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3920 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3921 SPEED_10;
3922 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3923 DUPLEX_HALF;
3924 return;
3926 *speed = SPEED_UNKNOWN;
3927 *duplex = DUPLEX_UNKNOWN;
3932 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3937 new_adv = ADVERTISE_CSMA;
3938 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3939 new_adv |= mii_advertise_flowctrl(flowctrl);
3941 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3945 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3946 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3948 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3949 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3950 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3952 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3957 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3960 tw32(TG3_CPMU_EEE_MODE,
3961 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3963 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3968 /* Advertise 100-BaseTX EEE ability */
3969 if (advertise & ADVERTISED_100baseT_Full)
3970 val |= MDIO_AN_EEE_ADV_100TX;
3971 /* Advertise 1000-BaseT EEE ability */
3972 if (advertise & ADVERTISED_1000baseT_Full)
3973 val |= MDIO_AN_EEE_ADV_1000T;
3974 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3978 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3980 case ASIC_REV_57765:
3981 case ASIC_REV_57766:
3983 /* If we advertised any EEE modes above... */
3985 val = MII_TG3_DSP_TAP26_ALNOKO |
3986 MII_TG3_DSP_TAP26_RMRXSTO |
3987 MII_TG3_DSP_TAP26_OPCSINPT;
3988 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3991 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3992 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3993 MII_TG3_DSP_CH34TP2_HIBW01);
3996 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
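/* Worked example (illustrative, not from the original source): for
 * advertise = ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full
 * and flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX, the helpers above yield
 *
 *	MII_ADVERTISE = ADVERTISE_CSMA | ADVERTISE_100FULL |
 *			ADVERTISE_PAUSE_CAP
 *	MII_CTRL1000  = ADVERTISE_1000FULL
 *
 * since ethtool_adv_to_mii_adv_t() is masked with ADVERTISE_ALL (only
 * the 10/100 bits survive) and mii_advertise_flowctrl() maps TX+RX
 * pause to PAUSE_CAP alone.
 */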
4005 static void tg3_phy_copper_begin(struct tg3 *tp)
4007 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4008 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4011 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4012 adv = ADVERTISED_10baseT_Half |
4013 ADVERTISED_10baseT_Full;
4014 if (tg3_flag(tp, WOL_SPEED_100MB))
4015 adv |= ADVERTISED_100baseT_Half |
4016 ADVERTISED_100baseT_Full;
4018 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4020 adv = tp->link_config.advertising;
4021 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4022 adv &= ~(ADVERTISED_1000baseT_Half |
4023 ADVERTISED_1000baseT_Full);
4025 fc = tp->link_config.flowctrl;
4028 tg3_phy_autoneg_cfg(tp, adv, fc);
4030 tg3_writephy(tp, MII_BMCR,
4031 BMCR_ANENABLE | BMCR_ANRESTART);
4034 u32 bmcr, orig_bmcr;
4036 tp->link_config.active_speed = tp->link_config.speed;
4037 tp->link_config.active_duplex = tp->link_config.duplex;
4040 switch (tp->link_config.speed) {
4046 bmcr |= BMCR_SPEED100;
4050 bmcr |= BMCR_SPEED1000;
4054 if (tp->link_config.duplex == DUPLEX_FULL)
4055 bmcr |= BMCR_FULLDPLX;
4057 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4058 (bmcr != orig_bmcr)) {
4059 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4060 for (i = 0; i < 1500; i++) {
4064 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4065 tg3_readphy(tp, MII_BMSR, &tmp))
4067 if (!(tmp & BMSR_LSTATUS)) {
4072 tg3_writephy(tp, MII_BMCR, bmcr);
4078 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4082 /* Turn off tap power management. */
4083 /* Set Extended packet length bit */
4084 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4086 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4087 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4088 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4089 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4090 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4097 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4099 u32 advmsk, tgtadv, advertising;
4101 advertising = tp->link_config.advertising;
4102 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4104 advmsk = ADVERTISE_ALL;
4105 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4106 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4107 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4110 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4113 if ((*lcladv & advmsk) != tgtadv)
4116 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4119 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4121 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4125 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4126 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4127 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4128 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4129 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4131 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4134 if (tg3_ctrl != tgtadv)
4141 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4145 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4148 if (tg3_readphy(tp, MII_STAT1000, &val))
4151 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4154 if (tg3_readphy(tp, MII_LPA, rmtadv))
4157 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4158 tp->link_config.rmt_adv = lpeth;
4163 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4165 int current_link_up;
4167 u32 lcl_adv, rmt_adv;
4175 (MAC_STATUS_SYNC_CHANGED |
4176 MAC_STATUS_CFG_CHANGED |
4177 MAC_STATUS_MI_COMPLETION |
4178 MAC_STATUS_LNKSTATE_CHANGED));
4181 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4183 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4187 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4189 /* Some third-party PHYs need to be reset on link going
4190 * down. */
4192 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4194 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4195 netif_carrier_ok(tp->dev)) {
4196 tg3_readphy(tp, MII_BMSR, &bmsr);
4197 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4198 !(bmsr & BMSR_LSTATUS))
4204 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4205 tg3_readphy(tp, MII_BMSR, &bmsr);
4206 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4207 !tg3_flag(tp, INIT_COMPLETE))
4210 if (!(bmsr & BMSR_LSTATUS)) {
4211 err = tg3_init_5401phy_dsp(tp);
4215 tg3_readphy(tp, MII_BMSR, &bmsr);
4216 for (i = 0; i < 1000; i++) {
4218 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4219 (bmsr & BMSR_LSTATUS)) {
4225 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4226 TG3_PHY_REV_BCM5401_B0 &&
4227 !(bmsr & BMSR_LSTATUS) &&
4228 tp->link_config.active_speed == SPEED_1000) {
4229 err = tg3_phy_reset(tp);
4231 err = tg3_init_5401phy_dsp(tp);
4236 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4237 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4238 /* 5701 {A0,B0} CRC bug workaround */
4239 tg3_writephy(tp, 0x15, 0x0a75);
4240 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4241 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4242 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4245 /* Clear pending interrupts... */
4246 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4247 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4249 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4250 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4251 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4252 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4255 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4256 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4257 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4258 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4260 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4263 current_link_up = 0;
4264 current_speed = SPEED_UNKNOWN;
4265 current_duplex = DUPLEX_UNKNOWN;
4266 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4267 tp->link_config.rmt_adv = 0;
4269 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4270 err = tg3_phy_auxctl_read(tp,
4271 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4273 if (!err && !(val & (1 << 10))) {
4274 tg3_phy_auxctl_write(tp,
4275 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4282 for (i = 0; i < 100; i++) {
4283 tg3_readphy(tp, MII_BMSR, &bmsr);
4284 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4285 (bmsr & BMSR_LSTATUS))
4290 if (bmsr & BMSR_LSTATUS) {
4293 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4294 for (i = 0; i < 2000; i++) {
4296 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4301 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4306 for (i = 0; i < 200; i++) {
4307 tg3_readphy(tp, MII_BMCR, &bmcr);
4308 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4310 if (bmcr && bmcr != 0x7fff)
4318 tp->link_config.active_speed = current_speed;
4319 tp->link_config.active_duplex = current_duplex;
4321 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4322 if ((bmcr & BMCR_ANENABLE) &&
4323 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4324 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4325 current_link_up = 1;
4327 if (!(bmcr & BMCR_ANENABLE) &&
4328 tp->link_config.speed == current_speed &&
4329 tp->link_config.duplex == current_duplex &&
4330 tp->link_config.flowctrl ==
4331 tp->link_config.active_flowctrl) {
4332 current_link_up = 1;
4336 if (current_link_up == 1 &&
4337 tp->link_config.active_duplex == DUPLEX_FULL) {
4340 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4341 reg = MII_TG3_FET_GEN_STAT;
4342 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4344 reg = MII_TG3_EXT_STAT;
4345 bit = MII_TG3_EXT_STAT_MDIX;
4348 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4349 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4351 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4356 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4357 tg3_phy_copper_begin(tp);
4359 tg3_readphy(tp, MII_BMSR, &bmsr);
4360 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4361 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4362 current_link_up = 1;
4365 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4366 if (current_link_up == 1) {
4367 if (tp->link_config.active_speed == SPEED_100 ||
4368 tp->link_config.active_speed == SPEED_10)
4369 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4371 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4372 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4373 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4375 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4377 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4378 if (tp->link_config.active_duplex == DUPLEX_HALF)
4379 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4382 if (current_link_up == 1 &&
4383 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4384 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4386 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4389 /* ??? Without this setting Netgear GA302T PHY does not
4390 * ??? send/receive packets...
4392 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4393 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4394 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4395 tw32_f(MAC_MI_MODE, tp->mi_mode);
4399 tw32_f(MAC_MODE, tp->mac_mode);
4402 tg3_phy_eee_adjust(tp, current_link_up);
4404 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4405 /* Polled via timer. */
4406 tw32_f(MAC_EVENT, 0);
4408 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4413 current_link_up == 1 &&
4414 tp->link_config.active_speed == SPEED_1000 &&
4415 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4418 (MAC_STATUS_SYNC_CHANGED |
4419 MAC_STATUS_CFG_CHANGED));
4422 NIC_SRAM_FIRMWARE_MBOX,
4423 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4426 /* Prevent send BD corruption. */
4427 if (tg3_flag(tp, CLKREQ_BUG)) {
4428 if (tp->link_config.active_speed == SPEED_100 ||
4429 tp->link_config.active_speed == SPEED_10)
4430 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4431 PCI_EXP_LNKCTL_CLKREQ_EN);
4433 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4434 PCI_EXP_LNKCTL_CLKREQ_EN);
4437 if (current_link_up != netif_carrier_ok(tp->dev)) {
4438 if (current_link_up)
4439 netif_carrier_on(tp->dev);
4441 netif_carrier_off(tp->dev);
4442 tg3_link_report(tp);
4448 struct tg3_fiber_aneginfo {
4450 #define ANEG_STATE_UNKNOWN 0
4451 #define ANEG_STATE_AN_ENABLE 1
4452 #define ANEG_STATE_RESTART_INIT 2
4453 #define ANEG_STATE_RESTART 3
4454 #define ANEG_STATE_DISABLE_LINK_OK 4
4455 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4456 #define ANEG_STATE_ABILITY_DETECT 6
4457 #define ANEG_STATE_ACK_DETECT_INIT 7
4458 #define ANEG_STATE_ACK_DETECT 8
4459 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4460 #define ANEG_STATE_COMPLETE_ACK 10
4461 #define ANEG_STATE_IDLE_DETECT_INIT 11
4462 #define ANEG_STATE_IDLE_DETECT 12
4463 #define ANEG_STATE_LINK_OK 13
4464 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4465 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4468 #define MR_AN_ENABLE 0x00000001
4469 #define MR_RESTART_AN 0x00000002
4470 #define MR_AN_COMPLETE 0x00000004
4471 #define MR_PAGE_RX 0x00000008
4472 #define MR_NP_LOADED 0x00000010
4473 #define MR_TOGGLE_TX 0x00000020
4474 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4475 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4476 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4477 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4478 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4479 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4480 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4481 #define MR_TOGGLE_RX 0x00002000
4482 #define MR_NP_RX 0x00004000
4484 #define MR_LINK_OK 0x80000000
4486 unsigned long link_time, cur_time;
4488 u32 ability_match_cfg;
4489 int ability_match_count;
4491 char ability_match, idle_match, ack_match;
4493 u32 txconfig, rxconfig;
4494 #define ANEG_CFG_NP 0x00000080
4495 #define ANEG_CFG_ACK 0x00000040
4496 #define ANEG_CFG_RF2 0x00000020
4497 #define ANEG_CFG_RF1 0x00000010
4498 #define ANEG_CFG_PS2 0x00000001
4499 #define ANEG_CFG_PS1 0x00008000
4500 #define ANEG_CFG_HD 0x00004000
4501 #define ANEG_CFG_FD 0x00002000
4502 #define ANEG_CFG_INVAL 0x00001f06
4507 #define ANEG_TIMER_ENAB 2
4508 #define ANEG_FAILED -1
4510 #define ANEG_STATE_SETTLE_TIME 10000
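/* Flow sketch (illustrative summary of the state machine below): a
 * successful fiber autoneg walks
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART
 *	  -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *	  -> ACK_DETECT_INIT -> ACK_DETECT
 *	  -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *	  -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * with ANEG_STATE_SETTLE_TIME enforced between the timed transitions
 * and any ability-match hiccup dropping back to AN_ENABLE.
 */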
4512 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4513 struct tg3_fiber_aneginfo *ap)
4516 unsigned long delta;
4520 if (ap->state == ANEG_STATE_UNKNOWN) {
4524 ap->ability_match_cfg = 0;
4525 ap->ability_match_count = 0;
4526 ap->ability_match = 0;
4532 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4533 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4535 if (rx_cfg_reg != ap->ability_match_cfg) {
4536 ap->ability_match_cfg = rx_cfg_reg;
4537 ap->ability_match = 0;
4538 ap->ability_match_count = 0;
4540 if (++ap->ability_match_count > 1) {
4541 ap->ability_match = 1;
4542 ap->ability_match_cfg = rx_cfg_reg;
4545 if (rx_cfg_reg & ANEG_CFG_ACK)
4553 ap->ability_match_cfg = 0;
4554 ap->ability_match_count = 0;
4555 ap->ability_match = 0;
4561 ap->rxconfig = rx_cfg_reg;
4564 switch (ap->state) {
4565 case ANEG_STATE_UNKNOWN:
4566 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4567 ap->state = ANEG_STATE_AN_ENABLE;
4570 case ANEG_STATE_AN_ENABLE:
4571 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4572 if (ap->flags & MR_AN_ENABLE) {
4575 ap->ability_match_cfg = 0;
4576 ap->ability_match_count = 0;
4577 ap->ability_match = 0;
4581 ap->state = ANEG_STATE_RESTART_INIT;
4583 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4587 case ANEG_STATE_RESTART_INIT:
4588 ap->link_time = ap->cur_time;
4589 ap->flags &= ~(MR_NP_LOADED);
4591 tw32(MAC_TX_AUTO_NEG, 0);
4592 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4593 tw32_f(MAC_MODE, tp->mac_mode);
4596 ret = ANEG_TIMER_ENAB;
4597 ap->state = ANEG_STATE_RESTART;
4600 case ANEG_STATE_RESTART:
4601 delta = ap->cur_time - ap->link_time;
4602 if (delta > ANEG_STATE_SETTLE_TIME)
4603 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4605 ret = ANEG_TIMER_ENAB;
4608 case ANEG_STATE_DISABLE_LINK_OK:
4612 case ANEG_STATE_ABILITY_DETECT_INIT:
4613 ap->flags &= ~(MR_TOGGLE_TX);
4614 ap->txconfig = ANEG_CFG_FD;
4615 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4616 if (flowctrl & ADVERTISE_1000XPAUSE)
4617 ap->txconfig |= ANEG_CFG_PS1;
4618 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4619 ap->txconfig |= ANEG_CFG_PS2;
4620 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4621 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4622 tw32_f(MAC_MODE, tp->mac_mode);
4625 ap->state = ANEG_STATE_ABILITY_DETECT;
4628 case ANEG_STATE_ABILITY_DETECT:
4629 if (ap->ability_match != 0 && ap->rxconfig != 0)
4630 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4633 case ANEG_STATE_ACK_DETECT_INIT:
4634 ap->txconfig |= ANEG_CFG_ACK;
4635 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4636 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4637 tw32_f(MAC_MODE, tp->mac_mode);
4640 ap->state = ANEG_STATE_ACK_DETECT;
4643 case ANEG_STATE_ACK_DETECT:
4644 if (ap->ack_match != 0) {
4645 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4646 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4647 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4649 ap->state = ANEG_STATE_AN_ENABLE;
4651 } else if (ap->ability_match != 0 &&
4652 ap->rxconfig == 0) {
4653 ap->state = ANEG_STATE_AN_ENABLE;
4657 case ANEG_STATE_COMPLETE_ACK_INIT:
4658 if (ap->rxconfig & ANEG_CFG_INVAL) {
4662 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4663 MR_LP_ADV_HALF_DUPLEX |
4664 MR_LP_ADV_SYM_PAUSE |
4665 MR_LP_ADV_ASYM_PAUSE |
4666 MR_LP_ADV_REMOTE_FAULT1 |
4667 MR_LP_ADV_REMOTE_FAULT2 |
4668 MR_LP_ADV_NEXT_PAGE |
4671 if (ap->rxconfig & ANEG_CFG_FD)
4672 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4673 if (ap->rxconfig & ANEG_CFG_HD)
4674 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4675 if (ap->rxconfig & ANEG_CFG_PS1)
4676 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4677 if (ap->rxconfig & ANEG_CFG_PS2)
4678 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4679 if (ap->rxconfig & ANEG_CFG_RF1)
4680 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4681 if (ap->rxconfig & ANEG_CFG_RF2)
4682 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4683 if (ap->rxconfig & ANEG_CFG_NP)
4684 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4686 ap->link_time = ap->cur_time;
4688 ap->flags ^= (MR_TOGGLE_TX);
4689 if (ap->rxconfig & 0x0008)
4690 ap->flags |= MR_TOGGLE_RX;
4691 if (ap->rxconfig & ANEG_CFG_NP)
4692 ap->flags |= MR_NP_RX;
4693 ap->flags |= MR_PAGE_RX;
4695 ap->state = ANEG_STATE_COMPLETE_ACK;
4696 ret = ANEG_TIMER_ENAB;
4699 case ANEG_STATE_COMPLETE_ACK:
4700 if (ap->ability_match != 0 &&
4701 ap->rxconfig == 0) {
4702 ap->state = ANEG_STATE_AN_ENABLE;
4705 delta = ap->cur_time - ap->link_time;
4706 if (delta > ANEG_STATE_SETTLE_TIME) {
4707 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4708 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4710 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4711 !(ap->flags & MR_NP_RX)) {
4712 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4720 case ANEG_STATE_IDLE_DETECT_INIT:
4721 ap->link_time = ap->cur_time;
4722 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4723 tw32_f(MAC_MODE, tp->mac_mode);
4726 ap->state = ANEG_STATE_IDLE_DETECT;
4727 ret = ANEG_TIMER_ENAB;
4730 case ANEG_STATE_IDLE_DETECT:
4731 if (ap->ability_match != 0 &&
4732 ap->rxconfig == 0) {
4733 ap->state = ANEG_STATE_AN_ENABLE;
4736 delta = ap->cur_time - ap->link_time;
4737 if (delta > ANEG_STATE_SETTLE_TIME) {
4738 /* XXX another gem from the Broadcom driver :( */
4739 ap->state = ANEG_STATE_LINK_OK;
4743 case ANEG_STATE_LINK_OK:
4744 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4748 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4749 /* ??? unimplemented */
4752 case ANEG_STATE_NEXT_PAGE_WAIT:
4753 /* ??? unimplemented */
4764 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4767 struct tg3_fiber_aneginfo aninfo;
4768 int status = ANEG_FAILED;
4772 tw32_f(MAC_TX_AUTO_NEG, 0);
4774 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4775 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4778 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4781 memset(&aninfo, 0, sizeof(aninfo));
4782 aninfo.flags |= MR_AN_ENABLE;
4783 aninfo.state = ANEG_STATE_UNKNOWN;
4784 aninfo.cur_time = 0;
4786 while (++tick < 195000) {
4787 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4788 if (status == ANEG_DONE || status == ANEG_FAILED)
4794 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4795 tw32_f(MAC_MODE, tp->mac_mode);
4798 *txflags = aninfo.txconfig;
4799 *rxflags = aninfo.flags;
4801 if (status == ANEG_DONE &&
4802 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4803 MR_LP_ADV_FULL_DUPLEX)))
4809 static void tg3_init_bcm8002(struct tg3 *tp)
4811 u32 mac_status = tr32(MAC_STATUS);
4814 /* Reset when initializing for the first time or when we have a link. */
4815 if (tg3_flag(tp, INIT_COMPLETE) &&
4816 !(mac_status & MAC_STATUS_PCS_SYNCED))
4819 /* Set PLL lock range. */
4820 tg3_writephy(tp, 0x16, 0x8007);
4823 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4825 /* Wait for reset to complete. */
4826 /* XXX schedule_timeout() ... */
4827 for (i = 0; i < 500; i++)
4830 /* Config mode; select PMA/Ch 1 regs. */
4831 tg3_writephy(tp, 0x10, 0x8411);
4833 /* Enable auto-lock and comdet, select txclk for tx. */
4834 tg3_writephy(tp, 0x11, 0x0a10);
4836 tg3_writephy(tp, 0x18, 0x00a0);
4837 tg3_writephy(tp, 0x16, 0x41ff);
4839 /* Assert and deassert POR. */
4840 tg3_writephy(tp, 0x13, 0x0400);
4842 tg3_writephy(tp, 0x13, 0x0000);
4844 tg3_writephy(tp, 0x11, 0x0a50);
4846 tg3_writephy(tp, 0x11, 0x0a10);
4848 /* Wait for signal to stabilize */
4849 /* XXX schedule_timeout() ... */
4850 for (i = 0; i < 15000; i++)
4853 /* Deselect the channel register so we can read the PHYID later. */
4856 tg3_writephy(tp, 0x10, 0x8011);
4859 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4862 u32 sg_dig_ctrl, sg_dig_status;
4863 u32 serdes_cfg, expected_sg_dig_ctrl;
4864 int workaround, port_a;
4865 int current_link_up;
4868 expected_sg_dig_ctrl = 0;
4871 current_link_up = 0;
4873 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4874 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4876 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4879 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4880 /* preserve bits 20-23 for voltage regulator */
4881 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4884 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4886 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4887 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4889 u32 val = serdes_cfg;
4895 tw32_f(MAC_SERDES_CFG, val);
4898 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4900 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4901 tg3_setup_flow_control(tp, 0, 0);
4902 current_link_up = 1;
4907 /* Want auto-negotiation. */
4908 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4910 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4911 if (flowctrl & ADVERTISE_1000XPAUSE)
4912 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4913 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4914 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4916 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4917 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4918 tp->serdes_counter &&
4919 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4920 MAC_STATUS_RCVD_CFG)) ==
4921 MAC_STATUS_PCS_SYNCED)) {
4922 tp->serdes_counter--;
4923 current_link_up = 1;
4928 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4929 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4931 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4933 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4934 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4935 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4936 MAC_STATUS_SIGNAL_DET)) {
4937 sg_dig_status = tr32(SG_DIG_STATUS);
4938 mac_status = tr32(MAC_STATUS);
4940 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4941 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4942 u32 local_adv = 0, remote_adv = 0;
4944 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4945 local_adv |= ADVERTISE_1000XPAUSE;
4946 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4947 local_adv |= ADVERTISE_1000XPSE_ASYM;
4949 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4950 remote_adv |= LPA_1000XPAUSE;
4951 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4952 remote_adv |= LPA_1000XPAUSE_ASYM;
4954 tp->link_config.rmt_adv =
4955 mii_adv_to_ethtool_adv_x(remote_adv);
4957 tg3_setup_flow_control(tp, local_adv, remote_adv);
4958 current_link_up = 1;
4959 tp->serdes_counter = 0;
4960 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4961 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4962 if (tp->serdes_counter)
4963 tp->serdes_counter--;
4966 u32 val = serdes_cfg;
4973 tw32_f(MAC_SERDES_CFG, val);
4976 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4979 /* Parallel detection: the link is up only if we
4980  * have PCS_SYNC and are not receiving config
4981  * code words. */
4982 mac_status = tr32(MAC_STATUS);
4983 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4984 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4985 tg3_setup_flow_control(tp, 0, 0);
4986 current_link_up = 1;
4988 TG3_PHYFLG_PARALLEL_DETECT;
4989 tp->serdes_counter =
4990 SERDES_PARALLEL_DET_TIMEOUT;
4992 goto restart_autoneg;
4996 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4997 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5001 return current_link_up;
5004 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5006 int current_link_up = 0;
5008 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5011 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5012 u32 txflags, rxflags;
5015 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5016 u32 local_adv = 0, remote_adv = 0;
5018 if (txflags & ANEG_CFG_PS1)
5019 local_adv |= ADVERTISE_1000XPAUSE;
5020 if (txflags & ANEG_CFG_PS2)
5021 local_adv |= ADVERTISE_1000XPSE_ASYM;
5023 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5024 remote_adv |= LPA_1000XPAUSE;
5025 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5026 remote_adv |= LPA_1000XPAUSE_ASYM;
5028 tp->link_config.rmt_adv =
5029 mii_adv_to_ethtool_adv_x(remote_adv);
5031 tg3_setup_flow_control(tp, local_adv, remote_adv);
5033 current_link_up = 1;
5035 for (i = 0; i < 30; i++) {
5038 (MAC_STATUS_SYNC_CHANGED |
5039 MAC_STATUS_CFG_CHANGED));
5041 if ((tr32(MAC_STATUS) &
5042 (MAC_STATUS_SYNC_CHANGED |
5043 MAC_STATUS_CFG_CHANGED)) == 0)
5047 mac_status = tr32(MAC_STATUS);
5048 if (current_link_up == 0 &&
5049 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5050 !(mac_status & MAC_STATUS_RCVD_CFG))
5051 current_link_up = 1;
5053 tg3_setup_flow_control(tp, 0, 0);
5055 /* Forcing 1000FD link up. */
5056 current_link_up = 1;
5058 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5061 tw32_f(MAC_MODE, tp->mac_mode);
5066 return current_link_up;
5069 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5072 u16 orig_active_speed;
5073 u8 orig_active_duplex;
5075 int current_link_up;
5078 orig_pause_cfg = tp->link_config.active_flowctrl;
5079 orig_active_speed = tp->link_config.active_speed;
5080 orig_active_duplex = tp->link_config.active_duplex;
5082 if (!tg3_flag(tp, HW_AUTONEG) &&
5083 netif_carrier_ok(tp->dev) &&
5084 tg3_flag(tp, INIT_COMPLETE)) {
5085 mac_status = tr32(MAC_STATUS);
5086 mac_status &= (MAC_STATUS_PCS_SYNCED |
5087 MAC_STATUS_SIGNAL_DET |
5088 MAC_STATUS_CFG_CHANGED |
5089 MAC_STATUS_RCVD_CFG);
5090 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5091 MAC_STATUS_SIGNAL_DET)) {
5092 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5093 MAC_STATUS_CFG_CHANGED));
5098 tw32_f(MAC_TX_AUTO_NEG, 0);
5100 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5101 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5102 tw32_f(MAC_MODE, tp->mac_mode);
5105 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5106 tg3_init_bcm8002(tp);
5108 /* Enable link change event even when serdes polling. */
5109 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5112 current_link_up = 0;
5113 tp->link_config.rmt_adv = 0;
5114 mac_status = tr32(MAC_STATUS);
5116 if (tg3_flag(tp, HW_AUTONEG))
5117 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5119 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5121 tp->napi[0].hw_status->status =
5122 (SD_STATUS_UPDATED |
5123 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5125 for (i = 0; i < 100; i++) {
5126 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5127 MAC_STATUS_CFG_CHANGED));
5129 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5130 MAC_STATUS_CFG_CHANGED |
5131 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5135 mac_status = tr32(MAC_STATUS);
5136 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5137 current_link_up = 0;
5138 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5139 tp->serdes_counter == 0) {
5140 tw32_f(MAC_MODE, (tp->mac_mode |
5141 MAC_MODE_SEND_CONFIGS));
5143 tw32_f(MAC_MODE, tp->mac_mode);
5147 if (current_link_up == 1) {
5148 tp->link_config.active_speed = SPEED_1000;
5149 tp->link_config.active_duplex = DUPLEX_FULL;
5150 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5151 LED_CTRL_LNKLED_OVERRIDE |
5152 LED_CTRL_1000MBPS_ON));
5154 tp->link_config.active_speed = SPEED_UNKNOWN;
5155 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5156 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5157 LED_CTRL_LNKLED_OVERRIDE |
5158 LED_CTRL_TRAFFIC_OVERRIDE));
5161 if (current_link_up != netif_carrier_ok(tp->dev)) {
5162 if (current_link_up)
5163 netif_carrier_on(tp->dev);
5165 netif_carrier_off(tp->dev);
5166 tg3_link_report(tp);
5168 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5169 if (orig_pause_cfg != now_pause_cfg ||
5170 orig_active_speed != tp->link_config.active_speed ||
5171 orig_active_duplex != tp->link_config.active_duplex)
5172 tg3_link_report(tp);
5178 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5180 int current_link_up, err = 0;
5184 u32 local_adv, remote_adv;
5186 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5187 tw32_f(MAC_MODE, tp->mac_mode);
5193 (MAC_STATUS_SYNC_CHANGED |
5194 MAC_STATUS_CFG_CHANGED |
5195 MAC_STATUS_MI_COMPLETION |
5196 MAC_STATUS_LNKSTATE_CHANGED));
5202 current_link_up = 0;
5203 current_speed = SPEED_UNKNOWN;
5204 current_duplex = DUPLEX_UNKNOWN;
5205 tp->link_config.rmt_adv = 0;
5207 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5208 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5209 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5210 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5211 bmsr |= BMSR_LSTATUS;
5213 bmsr &= ~BMSR_LSTATUS;
5216 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5218 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5219 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5220 /* do nothing, just check for link up at the end */
5221 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5224 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5225 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5226 ADVERTISE_1000XPAUSE |
5227 ADVERTISE_1000XPSE_ASYM |
5230 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5231 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5233 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5234 tg3_writephy(tp, MII_ADVERTISE, newadv);
5235 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5236 tg3_writephy(tp, MII_BMCR, bmcr);
5238 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5239 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5240 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5247 bmcr &= ~BMCR_SPEED1000;
5248 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5250 if (tp->link_config.duplex == DUPLEX_FULL)
5251 new_bmcr |= BMCR_FULLDPLX;
5253 if (new_bmcr != bmcr) {
5254 /* BMCR_SPEED1000 is a reserved bit that needs
5255 * to be set on write.
5257 new_bmcr |= BMCR_SPEED1000;
5259 /* Force a linkdown */
5260 if (netif_carrier_ok(tp->dev)) {
5263 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5264 adv &= ~(ADVERTISE_1000XFULL |
5265 ADVERTISE_1000XHALF |
5267 tg3_writephy(tp, MII_ADVERTISE, adv);
5268 tg3_writephy(tp, MII_BMCR, bmcr |
5272 netif_carrier_off(tp->dev);
5274 tg3_writephy(tp, MII_BMCR, new_bmcr);
5276 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5277 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5278 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5280 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5281 bmsr |= BMSR_LSTATUS;
5283 bmsr &= ~BMSR_LSTATUS;
5285 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5289 if (bmsr & BMSR_LSTATUS) {
5290 current_speed = SPEED_1000;
5291 current_link_up = 1;
5292 if (bmcr & BMCR_FULLDPLX)
5293 current_duplex = DUPLEX_FULL;
5295 current_duplex = DUPLEX_HALF;
5300 if (bmcr & BMCR_ANENABLE) {
5303 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5304 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5305 common = local_adv & remote_adv;
5306 if (common & (ADVERTISE_1000XHALF |
5307 ADVERTISE_1000XFULL)) {
5308 if (common & ADVERTISE_1000XFULL)
5309 current_duplex = DUPLEX_FULL;
5311 current_duplex = DUPLEX_HALF;
5313 tp->link_config.rmt_adv =
5314 mii_adv_to_ethtool_adv_x(remote_adv);
5315 } else if (!tg3_flag(tp, 5780_CLASS)) {
5316 /* Link is up via parallel detect */
5318 current_link_up = 0;
5323 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5324 tg3_setup_flow_control(tp, local_adv, remote_adv);
5326 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5327 if (tp->link_config.active_duplex == DUPLEX_HALF)
5328 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5330 tw32_f(MAC_MODE, tp->mac_mode);
5333 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5335 tp->link_config.active_speed = current_speed;
5336 tp->link_config.active_duplex = current_duplex;
5338 if (current_link_up != netif_carrier_ok(tp->dev)) {
5339 if (current_link_up)
5340 netif_carrier_on(tp->dev);
5342 netif_carrier_off(tp->dev);
5343 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5345 tg3_link_report(tp);
5350 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5352 if (tp->serdes_counter) {
5353 /* Give autoneg time to complete. */
5354 tp->serdes_counter--;
5358 if (!netif_carrier_ok(tp->dev) &&
5359 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5362 tg3_readphy(tp, MII_BMCR, &bmcr);
5363 if (bmcr & BMCR_ANENABLE) {
5366 /* Select shadow register 0x1f */
5367 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5368 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5370 /* Select expansion interrupt status register */
5371 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5372 MII_TG3_DSP_EXP1_INT_STAT);
5373 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5374 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5376 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5377 /* We have signal detect and are not receiving config
5378  * code words; the link is up by parallel detection. */
5382 bmcr &= ~BMCR_ANENABLE;
5383 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5384 tg3_writephy(tp, MII_BMCR, bmcr);
5385 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5388 } else if (netif_carrier_ok(tp->dev) &&
5389 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5390 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5393 /* Select expansion interrupt status register */
5394 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5395 MII_TG3_DSP_EXP1_INT_STAT);
5396 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5400 /* Config code words received, turn on autoneg. */
5401 tg3_readphy(tp, MII_BMCR, &bmcr);
5402 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5404 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5410 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5415 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5416 err = tg3_setup_fiber_phy(tp, force_reset);
5417 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5418 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5420 err = tg3_setup_copper_phy(tp, force_reset);
5422 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5425 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5426 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5428 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5433 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5434 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5435 tw32(GRC_MISC_CFG, val);
5438 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5439 (6 << TX_LENGTHS_IPG_SHIFT);
5440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5441 val |= tr32(MAC_TX_LENGTHS) &
5442 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5443 TX_LENGTHS_CNT_DWN_VAL_MSK);
5445 if (tp->link_config.active_speed == SPEED_1000 &&
5446 tp->link_config.active_duplex == DUPLEX_HALF)
5447 tw32(MAC_TX_LENGTHS, val |
5448 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5450 tw32(MAC_TX_LENGTHS, val |
5451 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5453 if (!tg3_flag(tp, 5705_PLUS)) {
5454 if (netif_carrier_ok(tp->dev)) {
5455 tw32(HOSTCC_STAT_COAL_TICKS,
5456 tp->coal.stats_block_coalesce_usecs);
5458 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5462 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5463 val = tr32(PCIE_PWR_MGMT_THRESH);
5464 if (!netif_carrier_ok(tp->dev))
5465 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5468 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5469 tw32(PCIE_PWR_MGMT_THRESH, val);
5475 static inline int tg3_irq_sync(struct tg3 *tp)
5477 return tp->irq_sync;
5480 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5484 dst = (u32 *)((u8 *)dst + off);
5485 for (i = 0; i < len; i += sizeof(u32))
5486 *dst++ = tr32(off + i);
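/* Note: dst is advanced by off before the loop, so each register
 * value lands at its own offset within the caller's dump buffer,
 * i.e. the word read from register off + i is stored at
 * dst[(off + i) / sizeof(u32)] relative to the original pointer.
 */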
5489 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5491 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5492 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5493 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5494 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5495 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5496 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5497 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5498 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5499 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5500 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5501 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5502 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5503 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5504 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5505 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5506 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5507 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5508 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5509 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5511 if (tg3_flag(tp, SUPPORT_MSIX))
5512 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5514 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5515 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5516 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5517 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5518 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5519 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5520 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5521 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5523 if (!tg3_flag(tp, 5705_PLUS)) {
5524 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5525 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5526 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5529 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5530 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5531 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5532 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5533 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5535 if (tg3_flag(tp, NVRAM))
5536 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5539 static void tg3_dump_state(struct tg3 *tp)
5544 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5546 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5550 if (tg3_flag(tp, PCI_EXPRESS)) {
5551 /* Read up to but not including private PCI registers */
5552 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5553 regs[i / sizeof(u32)] = tr32(i);
5555 tg3_dump_legacy_regs(tp, regs);
5557 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5558 if (!regs[i + 0] && !regs[i + 1] &&
5559 !regs[i + 2] && !regs[i + 3])
5562 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5564 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5569 for (i = 0; i < tp->irq_cnt; i++) {
5570 struct tg3_napi *tnapi = &tp->napi[i];
5572 /* SW status block */
5574 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5576 tnapi->hw_status->status,
5577 tnapi->hw_status->status_tag,
5578 tnapi->hw_status->rx_jumbo_consumer,
5579 tnapi->hw_status->rx_consumer,
5580 tnapi->hw_status->rx_mini_consumer,
5581 tnapi->hw_status->idx[0].rx_producer,
5582 tnapi->hw_status->idx[0].tx_consumer);
5585 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5587 tnapi->last_tag, tnapi->last_irq_tag,
5588 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5590 tnapi->prodring.rx_std_prod_idx,
5591 tnapi->prodring.rx_std_cons_idx,
5592 tnapi->prodring.rx_jmb_prod_idx,
5593 tnapi->prodring.rx_jmb_cons_idx);
5597 /* This is called whenever we suspect that the system chipset is re-
5598 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5599 * is bogus tx completions. We try to recover by setting the
5600 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5603 static void tg3_tx_recover(struct tg3 *tp)
5605 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5606 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5608 netdev_warn(tp->dev,
5609 "The system may be re-ordering memory-mapped I/O "
5610 "cycles to the network device, attempting to recover. "
5611 "Please report the problem to the driver maintainer "
5612 "and include system chipset information.\n");
5614 spin_lock(&tp->lock);
5615 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5616 spin_unlock(&tp->lock);
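/* The flag set above is observed by the NAPI poll path (see
 * tg3_poll_work() and tg3_poll() below), which responds by calling
 * tg3_reset_task_schedule() to reset the chip outside IRQ context.
 */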
5619 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5621 /* Tell compiler to fetch tx indices from memory. */
5623 return tnapi->tx_pending -
5624 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
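/* Worked example of the arithmetic above (values assumed): with
 * TG3_TX_RING_SIZE = 512, tx_pending = 511, tx_prod = 5 and
 * tx_cons = 510, the in-flight count is (5 - 510) & 511 = 7 and
 * tg3_tx_avail() returns 511 - 7 = 504. The mask keeps the
 * subtraction correct when the producer index wraps past the
 * consumer index.
 */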
5627 /* Tigon3 never reports partial packet sends. So we do not
5628 * need special logic to handle SKBs that have not had all
5629 * of their frags sent yet, like SunGEM does.
5631 static void tg3_tx(struct tg3_napi *tnapi)
5633 struct tg3 *tp = tnapi->tp;
5634 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5635 u32 sw_idx = tnapi->tx_cons;
5636 struct netdev_queue *txq;
5637 int index = tnapi - tp->napi;
5638 unsigned int pkts_compl = 0, bytes_compl = 0;
5640 if (tg3_flag(tp, ENABLE_TSS))
5643 txq = netdev_get_tx_queue(tp->dev, index);
5645 while (sw_idx != hw_idx) {
5646 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5647 struct sk_buff *skb = ri->skb;
5650 if (unlikely(skb == NULL)) {
5655 pci_unmap_single(tp->pdev,
5656 dma_unmap_addr(ri, mapping),
5662 while (ri->fragmented) {
5663 ri->fragmented = false;
5664 sw_idx = NEXT_TX(sw_idx);
5665 ri = &tnapi->tx_buffers[sw_idx];
5668 sw_idx = NEXT_TX(sw_idx);
5670 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5671 ri = &tnapi->tx_buffers[sw_idx];
5672 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5675 pci_unmap_page(tp->pdev,
5676 dma_unmap_addr(ri, mapping),
5677 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5680 while (ri->fragmented) {
5681 ri->fragmented = false;
5682 sw_idx = NEXT_TX(sw_idx);
5683 ri = &tnapi->tx_buffers[sw_idx];
5686 sw_idx = NEXT_TX(sw_idx);
5690 bytes_compl += skb->len;
5694 if (unlikely(tx_bug)) {
5700 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5702 tnapi->tx_cons = sw_idx;
5704 /* Need to make the tx_cons update visible to tg3_start_xmit()
5705 * before checking for netif_queue_stopped(). Without the
5706 * memory barrier, there is a small possibility that tg3_start_xmit()
5707 * will miss it and cause the queue to be stopped forever.
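 *
 * A minimal sketch of the pairing assumed here (illustrative only):
 *
 *	tg3_tx() (consumer)		tg3_start_xmit() (producer)
 *	tnapi->tx_cons = sw_idx;	netif_tx_stop_queue(txq);
 *	smp_mb();			smp_mb();
 *	if queue stopped and		if tg3_tx_avail() >
 *	    avail > wakeup thresh:	    wakeup thresh:
 *		wake queue		wake queue
 *
 * Each side publishes its update before reading the other side's
 * state, so at least one of them sees the final values.
 */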
5711 if (unlikely(netif_tx_queue_stopped(txq) &&
5712 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5713 __netif_tx_lock(txq, smp_processor_id());
5714 if (netif_tx_queue_stopped(txq) &&
5715 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5716 netif_tx_wake_queue(txq);
5717 __netif_tx_unlock(txq);
5721 static void tg3_frag_free(bool is_frag, void *data)
5724 put_page(virt_to_head_page(data));
5729 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5731 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5732 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5737 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5738 map_sz, PCI_DMA_FROMDEVICE);
5739 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5744 /* Returns size of skb allocated or < 0 on error.
5746 * We only need to fill in the address because the other members
5747 * of the RX descriptor are invariant, see tg3_init_rings.
5749 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5750 * posting buffers we only dirty the first cache line of the RX
5751 * descriptor (containing the address). Whereas for the RX status
5752 * buffers the cpu only reads the last cache line of the RX descriptor
5753 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5755 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5756 u32 opaque_key, u32 dest_idx_unmasked,
5757 unsigned int *frag_size)
5759 struct tg3_rx_buffer_desc *desc;
5760 struct ring_info *map;
5763 int skb_size, data_size, dest_idx;
5765 switch (opaque_key) {
5766 case RXD_OPAQUE_RING_STD:
5767 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5768 desc = &tpr->rx_std[dest_idx];
5769 map = &tpr->rx_std_buffers[dest_idx];
5770 data_size = tp->rx_pkt_map_sz;
5773 case RXD_OPAQUE_RING_JUMBO:
5774 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5775 desc = &tpr->rx_jmb[dest_idx].std;
5776 map = &tpr->rx_jmb_buffers[dest_idx];
5777 data_size = TG3_RX_JMB_MAP_SZ;
5784 /* Do not overwrite any of the map or rp information
5785 * until we are sure we can commit to a new buffer.
5787 * Callers depend upon this behavior and assume that
5788 * we leave everything unchanged if we fail.
5790 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5791 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5792 if (skb_size <= PAGE_SIZE) {
5793 data = netdev_alloc_frag(skb_size);
5794 *frag_size = skb_size;
5796 data = kmalloc(skb_size, GFP_ATOMIC);
5802 mapping = pci_map_single(tp->pdev,
5803 data + TG3_RX_OFFSET(tp),
5805 PCI_DMA_FROMDEVICE);
5806 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5807 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5812 dma_unmap_addr_set(map, mapping, mapping);
5814 desc->addr_hi = ((u64)mapping >> 32);
5815 desc->addr_lo = ((u64)mapping & 0xffffffff);
5820 /* We only need to copy the address over because the other
5821 * members of the RX descriptor are invariant. See notes above
5822 * tg3_alloc_rx_data for full details.
5824 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5825 struct tg3_rx_prodring_set *dpr,
5826 u32 opaque_key, int src_idx,
5827 u32 dest_idx_unmasked)
5829 struct tg3 *tp = tnapi->tp;
5830 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5831 struct ring_info *src_map, *dest_map;
5832 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5835 switch (opaque_key) {
5836 case RXD_OPAQUE_RING_STD:
5837 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5838 dest_desc = &dpr->rx_std[dest_idx];
5839 dest_map = &dpr->rx_std_buffers[dest_idx];
5840 src_desc = &spr->rx_std[src_idx];
5841 src_map = &spr->rx_std_buffers[src_idx];
5844 case RXD_OPAQUE_RING_JUMBO:
5845 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5846 dest_desc = &dpr->rx_jmb[dest_idx].std;
5847 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5848 src_desc = &spr->rx_jmb[src_idx].std;
5849 src_map = &spr->rx_jmb_buffers[src_idx];
5856 dest_map->data = src_map->data;
5857 dma_unmap_addr_set(dest_map, mapping,
5858 dma_unmap_addr(src_map, mapping));
5859 dest_desc->addr_hi = src_desc->addr_hi;
5860 dest_desc->addr_lo = src_desc->addr_lo;
5862 /* Ensure that the update to the skb happens after the physical
5863 * addresses have been transferred to the new BD location.
5867 src_map->data = NULL;
5870 /* The RX ring scheme is composed of multiple rings which post fresh
5871 * buffers to the chip, and one special ring the chip uses to report
5872 * status back to the host.
5874 * The special ring reports the status of received packets to the
5875 * host. The chip does not write into the original descriptor the
5876 * RX buffer was obtained from. The chip simply takes the original
5877 * descriptor as provided by the host, updates the status and length
5878 * field, then writes this into the next status ring entry.
5880 * Each ring the host uses to post buffers to the chip is described
5881 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5882 * it is first placed into the on-chip ram. When the packet's length
5883 * is known, it walks down the TG3_BDINFO entries to select the ring.
5884 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5885 * which is within the range of the new packet's length is chosen.
5887 * The "separate ring for rx status" scheme may sound queer, but it makes
5888 * sense from a cache coherency perspective. If only the host writes
5889 * to the buffer post rings, and only the chip writes to the rx status
5890 * rings, then cache lines never move beyond shared-modified state.
5891 * If both the host and chip were to write into the same ring, cache line
5892 * eviction could occur since both entities want it in an exclusive state.
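 *
 * Illustrative decode of the opaque cookie (mirrors tg3_rx() below;
 * the names are the driver's own):
 *
 *	desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 *	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 *
 * The cookie is written by the host when a buffer is posted and is
 * echoed back untouched by the chip, so mapping a completion back to
 * its ring_info entry needs no extra lookup state.
 */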
5894 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5896 struct tg3 *tp = tnapi->tp;
5897 u32 work_mask, rx_std_posted = 0;
5898 u32 std_prod_idx, jmb_prod_idx;
5899 u32 sw_idx = tnapi->rx_rcb_ptr;
5902 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5904 hw_idx = *(tnapi->rx_rcb_prod_idx);
5906 * We need to order the read of hw_idx and the read of
5907 * the opaque cookie.
5912 std_prod_idx = tpr->rx_std_prod_idx;
5913 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5914 while (sw_idx != hw_idx && budget > 0) {
5915 struct ring_info *ri;
5916 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5918 struct sk_buff *skb;
5919 dma_addr_t dma_addr;
5920 u32 opaque_key, desc_idx, *post_ptr;
5923 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5924 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5925 if (opaque_key == RXD_OPAQUE_RING_STD) {
5926 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5927 dma_addr = dma_unmap_addr(ri, mapping);
5929 post_ptr = &std_prod_idx;
5931 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5932 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5933 dma_addr = dma_unmap_addr(ri, mapping);
5935 post_ptr = &jmb_prod_idx;
5937 goto next_pkt_nopost;
5939 work_mask |= opaque_key;
5941 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5942 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5944 tg3_recycle_rx(tnapi, tpr, opaque_key,
5945 desc_idx, *post_ptr);
5947 /* Other statistics kept track of by card. */
5952 prefetch(data + TG3_RX_OFFSET(tp));
5953 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5956 if (len > TG3_RX_COPY_THRESH(tp)) {
5958 unsigned int frag_size;
5960 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5961 *post_ptr, &frag_size);
5965 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5966 PCI_DMA_FROMDEVICE);
5968 skb = build_skb(data, frag_size);
5970 tg3_frag_free(frag_size != 0, data);
5971 goto drop_it_no_recycle;
5973 skb_reserve(skb, TG3_RX_OFFSET(tp));
5974 /* Ensure that the update to the data happens
5975 * after the usage of the old DMA mapping.
5982 tg3_recycle_rx(tnapi, tpr, opaque_key,
5983 desc_idx, *post_ptr);
5985 skb = netdev_alloc_skb(tp->dev,
5986 len + TG3_RAW_IP_ALIGN);
5988 goto drop_it_no_recycle;
5990 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5991 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5993 data + TG3_RX_OFFSET(tp),
5995 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5999 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6000 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6001 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6002 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6003 skb->ip_summed = CHECKSUM_UNNECESSARY;
6005 skb_checksum_none_assert(skb);
6007 skb->protocol = eth_type_trans(skb, tp->dev);
6009 if (len > (tp->dev->mtu + ETH_HLEN) &&
6010 skb->protocol != htons(ETH_P_8021Q)) {
6012 goto drop_it_no_recycle;
6015 if (desc->type_flags & RXD_FLAG_VLAN &&
6016 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6017 __vlan_hwaccel_put_tag(skb,
6018 desc->err_vlan & RXD_VLAN_MASK);
6020 napi_gro_receive(&tnapi->napi, skb);
6028 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6029 tpr->rx_std_prod_idx = std_prod_idx &
6030 tp->rx_std_ring_mask;
6031 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6032 tpr->rx_std_prod_idx);
6033 work_mask &= ~RXD_OPAQUE_RING_STD;
6038 sw_idx &= tp->rx_ret_ring_mask;
6040 /* Refresh hw_idx to see if there is new work */
6041 if (sw_idx == hw_idx) {
6042 hw_idx = *(tnapi->rx_rcb_prod_idx);
6047 /* ACK the status ring. */
6048 tnapi->rx_rcb_ptr = sw_idx;
6049 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6051 /* Refill RX ring(s). */
6052 if (!tg3_flag(tp, ENABLE_RSS)) {
6053 /* Sync BD data before updating mailbox */
6056 if (work_mask & RXD_OPAQUE_RING_STD) {
6057 tpr->rx_std_prod_idx = std_prod_idx &
6058 tp->rx_std_ring_mask;
6059 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6060 tpr->rx_std_prod_idx);
6062 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6063 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6064 tp->rx_jmb_ring_mask;
6065 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6066 tpr->rx_jmb_prod_idx);
6069 } else if (work_mask) {
6070 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6071 * updated before the producer indices can be updated.
6075 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6076 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6078 if (tnapi != &tp->napi[1]) {
6079 tp->rx_refill = true;
6080 napi_schedule(&tp->napi[1].napi);
6087 static void tg3_poll_link(struct tg3 *tp)
6089 /* handle link change and other phy events */
6090 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6091 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6093 if (sblk->status & SD_STATUS_LINK_CHG) {
6094 sblk->status = SD_STATUS_UPDATED |
6095 (sblk->status & ~SD_STATUS_LINK_CHG);
6096 spin_lock(&tp->lock);
6097 if (tg3_flag(tp, USE_PHYLIB)) {
6099 (MAC_STATUS_SYNC_CHANGED |
6100 MAC_STATUS_CFG_CHANGED |
6101 MAC_STATUS_MI_COMPLETION |
6102 MAC_STATUS_LNKSTATE_CHANGED));
6105 tg3_setup_phy(tp, 0);
6106 spin_unlock(&tp->lock);
6111 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6112 struct tg3_rx_prodring_set *dpr,
6113 struct tg3_rx_prodring_set *spr)
6115 u32 si, di, cpycnt, src_prod_idx;
6119 src_prod_idx = spr->rx_std_prod_idx;
6121 /* Make sure updates to the rx_std_buffers[] entries and the
6122 * standard producer index are seen in the correct order.
6126 if (spr->rx_std_cons_idx == src_prod_idx)
6129 if (spr->rx_std_cons_idx < src_prod_idx)
6130 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6132 cpycnt = tp->rx_std_ring_mask + 1 -
6133 spr->rx_std_cons_idx;
6135 cpycnt = min(cpycnt,
6136 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6138 si = spr->rx_std_cons_idx;
6139 di = dpr->rx_std_prod_idx;
6141 for (i = di; i < di + cpycnt; i++) {
6142 if (dpr->rx_std_buffers[i].data) {
6152 /* Ensure that updates to the rx_std_buffers ring and the
6153 * shadowed hardware producer ring from tg3_recycle_skb() are
6154 * ordered correctly WRT the skb check above.
6158 memcpy(&dpr->rx_std_buffers[di],
6159 &spr->rx_std_buffers[si],
6160 cpycnt * sizeof(struct ring_info));
6162 for (i = 0; i < cpycnt; i++, di++, si++) {
6163 struct tg3_rx_buffer_desc *sbd, *dbd;
6164 sbd = &spr->rx_std[si];
6165 dbd = &dpr->rx_std[di];
6166 dbd->addr_hi = sbd->addr_hi;
6167 dbd->addr_lo = sbd->addr_lo;
6170 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6171 tp->rx_std_ring_mask;
6172 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6173 tp->rx_std_ring_mask;
6177 src_prod_idx = spr->rx_jmb_prod_idx;
6179 /* Make sure updates to the rx_jmb_buffers[] entries and
6180 * the jumbo producer index are seen in the correct order.
6184 if (spr->rx_jmb_cons_idx == src_prod_idx)
6187 if (spr->rx_jmb_cons_idx < src_prod_idx)
6188 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6190 cpycnt = tp->rx_jmb_ring_mask + 1 -
6191 spr->rx_jmb_cons_idx;
6193 cpycnt = min(cpycnt,
6194 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6196 si = spr->rx_jmb_cons_idx;
6197 di = dpr->rx_jmb_prod_idx;
6199 for (i = di; i < di + cpycnt; i++) {
6200 if (dpr->rx_jmb_buffers[i].data) {
6210 /* Ensure that updates to the rx_jmb_buffers ring and the
6211 * shadowed hardware producer ring from tg3_recycle_skb() are
6212 * ordered correctly WRT the skb check above.
6216 memcpy(&dpr->rx_jmb_buffers[di],
6217 &spr->rx_jmb_buffers[si],
6218 cpycnt * sizeof(struct ring_info));
6220 for (i = 0; i < cpycnt; i++, di++, si++) {
6221 struct tg3_rx_buffer_desc *sbd, *dbd;
6222 sbd = &spr->rx_jmb[si].std;
6223 dbd = &dpr->rx_jmb[di].std;
6224 dbd->addr_hi = sbd->addr_hi;
6225 dbd->addr_lo = sbd->addr_lo;
6228 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6229 tp->rx_jmb_ring_mask;
6230 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6231 tp->rx_jmb_ring_mask;
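/* Summary: with RSS, only tp->napi[0]'s producer ring is posted to
 * the hardware, so buffers recycled into a per-vector ring (spr) are
 * copied back into the chip-visible ring (dpr) here. Copies proceed
 * in contiguous runs (two runs when an index has wrapped) and stop
 * early at any destination slot that still holds a buffer.
 */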
6237 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6239 struct tg3 *tp = tnapi->tp;
6241 /* run TX completion thread */
6242 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6244 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6248 if (!tnapi->rx_rcb_prod_idx)
6251 /* run RX thread, within the bounds set by NAPI.
6252 * All RX "locking" is done by ensuring outside
6253 * code synchronizes with tg3->napi.poll()
6255 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6256 work_done += tg3_rx(tnapi, budget - work_done);
6258 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6259 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6261 u32 std_prod_idx = dpr->rx_std_prod_idx;
6262 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6264 tp->rx_refill = false;
6265 for (i = 1; i <= tp->rxq_cnt; i++)
6266 err |= tg3_rx_prodring_xfer(tp, dpr,
6267 &tp->napi[i].prodring);
6271 if (std_prod_idx != dpr->rx_std_prod_idx)
6272 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6273 dpr->rx_std_prod_idx);
6275 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6276 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6277 dpr->rx_jmb_prod_idx);
6282 tw32_f(HOSTCC_MODE, tp->coal_now);
6288 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6290 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6291 schedule_work(&tp->reset_task);
6294 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6296 cancel_work_sync(&tp->reset_task);
6297 tg3_flag_clear(tp, RESET_TASK_PENDING);
6298 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6301 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6303 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6304 struct tg3 *tp = tnapi->tp;
6306 struct tg3_hw_status *sblk = tnapi->hw_status;
6309 work_done = tg3_poll_work(tnapi, work_done, budget);
6311 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6314 if (unlikely(work_done >= budget))
6317 /* tp->last_tag is used in tg3_int_reenable() below
6318 * to tell the hw how much work has been processed,
6319 * so we must read it before checking for more work.
6321 tnapi->last_tag = sblk->status_tag;
6322 tnapi->last_irq_tag = tnapi->last_tag;
6325 /* check for RX/TX work to do */
6326 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6327 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6329 /* This test is not race-free, but it reduces
6330 * the number of interrupts by looping again. */
6332 if (tnapi == &tp->napi[1] && tp->rx_refill)
6335 napi_complete(napi);
6336 /* Reenable interrupts. */
6337 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6339 /* This test is synchronized by napi_schedule()
6340 * and napi_complete() to close the race window. */
6342 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6343 tw32(HOSTCC_MODE, tp->coalesce_mode |
6344 HOSTCC_MODE_ENABLE |
6355 /* work_done is guaranteed to be less than budget. */
6356 napi_complete(napi);
6357 tg3_reset_task_schedule(tp);
6361 static void tg3_process_error(struct tg3 *tp)
6364 bool real_error = false;
6366 if (tg3_flag(tp, ERROR_PROCESSED))
6369 /* Check Flow Attention register */
6370 val = tr32(HOSTCC_FLOW_ATTN);
6371 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6372 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6376 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6377 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6381 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6382 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6391 tg3_flag_set(tp, ERROR_PROCESSED);
6392 tg3_reset_task_schedule(tp);
6395 static int tg3_poll(struct napi_struct *napi, int budget)
6397 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6398 struct tg3 *tp = tnapi->tp;
6400 struct tg3_hw_status *sblk = tnapi->hw_status;
6403 if (sblk->status & SD_STATUS_ERROR)
6404 tg3_process_error(tp);
6408 work_done = tg3_poll_work(tnapi, work_done, budget);
6410 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6413 if (unlikely(work_done >= budget))
6416 if (tg3_flag(tp, TAGGED_STATUS)) {
6417 /* tp->last_tag is used in tg3_int_reenable() below
6418 * to tell the hw how much work has been processed,
6419 * so we must read it before checking for more work.
6421 tnapi->last_tag = sblk->status_tag;
6422 tnapi->last_irq_tag = tnapi->last_tag;
6425 sblk->status &= ~SD_STATUS_UPDATED;
6427 if (likely(!tg3_has_work(tnapi))) {
6428 napi_complete(napi);
6429 tg3_int_reenable(tnapi);
6437 /* work_done is guaranteed to be less than budget. */
6438 napi_complete(napi);
6439 tg3_reset_task_schedule(tp);
6443 static void tg3_napi_disable(struct tg3 *tp)
6447 for (i = tp->irq_cnt - 1; i >= 0; i--)
6448 napi_disable(&tp->napi[i].napi);
6451 static void tg3_napi_enable(struct tg3 *tp)
6455 for (i = 0; i < tp->irq_cnt; i++)
6456 napi_enable(&tp->napi[i].napi);
6459 static void tg3_napi_init(struct tg3 *tp)
6463 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6464 for (i = 1; i < tp->irq_cnt; i++)
6465 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6468 static void tg3_napi_fini(struct tg3 *tp)
6472 for (i = 0; i < tp->irq_cnt; i++)
6473 netif_napi_del(&tp->napi[i].napi);
6476 static inline void tg3_netif_stop(struct tg3 *tp)
6478 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6479 tg3_napi_disable(tp);
6480 netif_tx_disable(tp->dev);
6483 static inline void tg3_netif_start(struct tg3 *tp)
6485 /* NOTE: unconditional netif_tx_wake_all_queues is only
6486 * appropriate so long as all callers are assured to
6487 * have free tx slots (such as after tg3_init_hw)
6489 netif_tx_wake_all_queues(tp->dev);
6491 tg3_napi_enable(tp);
6492 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6493 tg3_enable_ints(tp);
6496 static void tg3_irq_quiesce(struct tg3 *tp)
6500 BUG_ON(tp->irq_sync);
6505 for (i = 0; i < tp->irq_cnt; i++)
6506 synchronize_irq(tp->napi[i].irq_vec);
6509 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6510 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6511 * with as well. Most of the time, this is not necessary except when
6512 * shutting down the device.
6514 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6516 spin_lock_bh(&tp->lock);
6518 tg3_irq_quiesce(tp);
6521 static inline void tg3_full_unlock(struct tg3 *tp)
6523 spin_unlock_bh(&tp->lock);
6526 /* One-shot MSI handler - Chip automatically disables interrupt
6527 * after sending MSI so driver doesn't have to do it.
6529 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6531 struct tg3_napi *tnapi = dev_id;
6532 struct tg3 *tp = tnapi->tp;
6534 prefetch(tnapi->hw_status);
6536 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6538 if (likely(!tg3_irq_sync(tp)))
6539 napi_schedule(&tnapi->napi);
6544 /* MSI ISR - No need to check for interrupt sharing and no need to
6545 * flush status block and interrupt mailbox. PCI ordering rules
6546 * guarantee that MSI will arrive after the status block.
6548 static irqreturn_t tg3_msi(int irq, void *dev_id)
6550 struct tg3_napi *tnapi = dev_id;
6551 struct tg3 *tp = tnapi->tp;
6553 prefetch(tnapi->hw_status);
6555 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6557 * Writing any value to intr-mbox-0 clears PCI INTA# and
6558 * chip-internal interrupt pending events.
6559 * Writing non-zero to intr-mbox-0 additionally tells the
6560 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing. */
6563 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6564 if (likely(!tg3_irq_sync(tp)))
6565 napi_schedule(&tnapi->napi);
6567 return IRQ_RETVAL(1);
6570 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6572 struct tg3_napi *tnapi = dev_id;
6573 struct tg3 *tp = tnapi->tp;
6574 struct tg3_hw_status *sblk = tnapi->hw_status;
6575 unsigned int handled = 1;
6577 /* In INTx mode, it is possible for the interrupt to arrive at
6578 * the CPU before the status block posted prior to the interrupt.
6579 * Reading the PCI State register will confirm whether the
6580 * interrupt is ours and will flush the status block.
6582 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6583 if (tg3_flag(tp, CHIP_RESETTING) ||
6584 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6591 * Writing any value to intr-mbox-0 clears PCI INTA# and
6592 * chip-internal interrupt pending events.
6593 * Writing non-zero to intr-mbox-0 additionally tells the
6594 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
6597 * Flush the mailbox to de-assert the IRQ immediately to prevent
6598 * spurious interrupts. The flush impacts performance but
6599 * excessive spurious interrupts can be worse in some cases.
6601 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6602 if (tg3_irq_sync(tp))
6604 sblk->status &= ~SD_STATUS_UPDATED;
6605 if (likely(tg3_has_work(tnapi))) {
6606 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6607 napi_schedule(&tnapi->napi);
6609 /* No work, shared interrupt perhaps? re-enable
6610 * interrupts, and flush that PCI write
6612 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6616 return IRQ_RETVAL(handled);
6619 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6621 struct tg3_napi *tnapi = dev_id;
6622 struct tg3 *tp = tnapi->tp;
6623 struct tg3_hw_status *sblk = tnapi->hw_status;
6624 unsigned int handled = 1;
6626 /* In INTx mode, it is possible for the interrupt to arrive at
6627 * the CPU before the status block posted prior to the interrupt.
6628 * Reading the PCI State register will confirm whether the
6629 * interrupt is ours and will flush the status block.
6631 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6632 if (tg3_flag(tp, CHIP_RESETTING) ||
6633 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6640 * writing any value to intr-mbox-0 clears PCI INTA# and
6641 * chip-internal interrupt pending events.
6642 * writing non-zero to intr-mbox-0 additionally tells the
6643 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
6646 * Flush the mailbox to de-assert the IRQ immediately to prevent
6647 * spurious interrupts. The flush impacts performance but
6648 * excessive spurious interrupts can be worse in some cases.
6650 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6653 * In a shared interrupt configuration, sometimes other devices'
6654 * interrupts will scream. We record the current status tag here
6655 * so that the above check can report that the screaming interrupts
6656 * are unhandled. Eventually they will be silenced.
6658 tnapi->last_irq_tag = sblk->status_tag;
6660 if (tg3_irq_sync(tp))
6663 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6665 napi_schedule(&tnapi->napi);
6668 return IRQ_RETVAL(handled);
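/* Sketch of the tagged-status handshake used above (illustrative):
 * the chip bumps sblk->status_tag each time it posts a new status
 * block. The ISR records the tag in last_irq_tag, and the NAPI poll
 * later acknowledges it by writing the tag back to the interrupt
 * mailbox (last_tag << 24, see tg3_poll_msix()). An IRQ whose tag
 * equals last_irq_tag therefore carries no new work and can be
 * flagged as unhandled.
 */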
6671 /* ISR for interrupt test */
6672 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6674 struct tg3_napi *tnapi = dev_id;
6675 struct tg3 *tp = tnapi->tp;
6676 struct tg3_hw_status *sblk = tnapi->hw_status;
6678 if ((sblk->status & SD_STATUS_UPDATED) ||
6679 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6680 tg3_disable_ints(tp);
6681 return IRQ_RETVAL(1);
6683 return IRQ_RETVAL(0);
6686 #ifdef CONFIG_NET_POLL_CONTROLLER
6687 static void tg3_poll_controller(struct net_device *dev)
6690 struct tg3 *tp = netdev_priv(dev);
6692 for (i = 0; i < tp->irq_cnt; i++)
6693 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6697 static void tg3_tx_timeout(struct net_device *dev)
6699 struct tg3 *tp = netdev_priv(dev);
6701 if (netif_msg_tx_err(tp)) {
6702 netdev_err(dev, "transmit timed out, resetting\n");
6706 tg3_reset_task_schedule(tp);
6709 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6710 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6712 u32 base = (u32) mapping & 0xffffffff;
6714 return (base > 0xffffdcc0) && (base + len + 8 < base);
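/* Worked example (values assumed): base = 0xfffff000, len = 0x2000
 * gives base + len + 8 = 0x100001008, which truncates to 0x1008 in
 * u32 and is < base, so the buffer straddles the 4 GB line and needs
 * the workaround. The base > 0xffffdcc0 pre-check cheaply skips
 * buffers ending well short of the boundary; the constant leaves
 * roughly a max-size jumbo frame of headroom, and the wrap test that
 * follows is the authoritative check.
 */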
6717 /* Test for DMA addresses > 40-bit */
6718 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6721 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6722 if (tg3_flag(tp, 40BIT_DMA_BUG))
6723 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6730 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6731 dma_addr_t mapping, u32 len, u32 flags,
6734 txbd->addr_hi = ((u64) mapping >> 32);
6735 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6736 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6737 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
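/* Illustrative encoding (values assumed): len = 1514, flags =
 * TXD_FLAG_END, mss = 0, vlan = 0 yields
 *
 *	len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *
 * i.e. the DMA length sits in the upper 16 bits and the flag bits in
 * the lower 16; vlan_tag packs the MSS and VLAN tag the same way.
 */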
6740 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6741 dma_addr_t map, u32 len, u32 flags,
6744 struct tg3 *tp = tnapi->tp;
6747 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6750 if (tg3_4g_overflow_test(map, len))
6753 if (tg3_40bit_overflow_test(tp, map, len))
6756 if (tp->dma_limit) {
6757 u32 prvidx = *entry;
6758 u32 tmp_flag = flags & ~TXD_FLAG_END;
6759 while (len > tp->dma_limit && *budget) {
6760 u32 frag_len = tp->dma_limit;
6761 len -= tp->dma_limit;
6763 /* Avoid the 8-byte DMA problem */
6765 len += tp->dma_limit / 2;
6766 frag_len = tp->dma_limit / 2;
6769 tnapi->tx_buffers[*entry].fragmented = true;
6771 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6772 frag_len, tmp_flag, mss, vlan);
6775 *entry = NEXT_TX(*entry);
6782 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6783 len, flags, mss, vlan);
6785 *entry = NEXT_TX(*entry);
6788 tnapi->tx_buffers[prvidx].fragmented = false;
6792 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6793 len, flags, mss, vlan);
6794 *entry = NEXT_TX(*entry);
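/* Worked example of the dma_limit split above (values assumed): with
 * tp->dma_limit = 4096 and len = 4100, a straight split would emit a
 * 4096-byte BD and leave a 4-byte tail, tripping the short-DMA bug.
 * Instead the chunk is halved: 2048 bytes are emitted and 2052 are
 * left for the final BD, so no BD ends up 8 bytes or shorter.
 */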
6800 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6803 struct sk_buff *skb;
6804 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6809 pci_unmap_single(tnapi->tp->pdev,
6810 dma_unmap_addr(txb, mapping),
6814 while (txb->fragmented) {
6815 txb->fragmented = false;
6816 entry = NEXT_TX(entry);
6817 txb = &tnapi->tx_buffers[entry];
6820 for (i = 0; i <= last; i++) {
6821 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6823 entry = NEXT_TX(entry);
6824 txb = &tnapi->tx_buffers[entry];
6826 pci_unmap_page(tnapi->tp->pdev,
6827 dma_unmap_addr(txb, mapping),
6828 skb_frag_size(frag), PCI_DMA_TODEVICE);
6830 while (txb->fragmented) {
6831 txb->fragmented = false;
6832 entry = NEXT_TX(entry);
6833 txb = &tnapi->tx_buffers[entry];
6838 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6839 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6840 struct sk_buff **pskb,
6841 u32 *entry, u32 *budget,
6842 u32 base_flags, u32 mss, u32 vlan)
6844 struct tg3 *tp = tnapi->tp;
6845 struct sk_buff *new_skb, *skb = *pskb;
6846 dma_addr_t new_addr = 0;
6849 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6850 new_skb = skb_copy(skb, GFP_ATOMIC);
6852 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6854 new_skb = skb_copy_expand(skb,
6855 skb_headroom(skb) + more_headroom,
6856 skb_tailroom(skb), GFP_ATOMIC);
6862 /* New SKB is guaranteed to be linear. */
6863 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6865 /* Make sure the mapping succeeded */
6866 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6867 dev_kfree_skb(new_skb);
6870 u32 save_entry = *entry;
6872 base_flags |= TXD_FLAG_END;
6874 tnapi->tx_buffers[*entry].skb = new_skb;
6875 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6878 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6879 new_skb->len, base_flags,
6881 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6882 dev_kfree_skb(new_skb);
6893 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6895 /* Use GSO to work around a rare TSO bug that may be triggered when the
6896 * TSO header is greater than 80 bytes.
6898 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6900 struct sk_buff *segs, *nskb;
6901 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6903 /* Estimate the number of fragments in the worst case */
6904 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6905 netif_stop_queue(tp->dev);
6907 /* netif_tx_stop_queue() must be done before checking
6908 * tx index in tg3_tx_avail() below, because in
6909 * tg3_tx(), we update tx index before checking for
6910 * netif_tx_queue_stopped().
6913 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6914 return NETDEV_TX_BUSY;
6916 netif_wake_queue(tp->dev);
6919 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6921 goto tg3_tso_bug_end;
6927 tg3_start_xmit(nskb, tp->dev);
6933 return NETDEV_TX_OK;
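/* Note on the estimate above: gso_segs * 3 is a heuristic worst-case
 * budget of roughly three descriptors per software-segmented packet
 * (header plus a couple of data fragments), not an exact count.
 */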
6936 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6937 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6939 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6941 struct tg3 *tp = netdev_priv(dev);
6942 u32 len, entry, base_flags, mss, vlan = 0;
6944 int i = -1, would_hit_hwbug;
6946 struct tg3_napi *tnapi;
6947 struct netdev_queue *txq;
6950 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6951 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6952 if (tg3_flag(tp, ENABLE_TSS))
6955 budget = tg3_tx_avail(tnapi);
6957 /* We are running in BH disabled context with netif_tx_lock
6958 * and TX reclaim runs via tp->napi.poll inside of a software
6959 * interrupt. Furthermore, IRQ processing runs lockless so we have
6960 * no IRQ context deadlocks to worry about either. Rejoice!
6962 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6963 if (!netif_tx_queue_stopped(txq)) {
6964 netif_tx_stop_queue(txq);
6966 /* This is a hard error, log it. */
6968 "BUG! Tx Ring full when queue awake!\n");
6970 return NETDEV_TX_BUSY;
6973 entry = tnapi->tx_prod;
6975 if (skb->ip_summed == CHECKSUM_PARTIAL)
6976 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6978 mss = skb_shinfo(skb)->gso_size;
6981 u32 tcp_opt_len, hdr_len;
6983 if (skb_header_cloned(skb) &&
6984 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6988 tcp_opt_len = tcp_optlen(skb);
6990 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6992 if (!skb_is_gso_v6(skb)) {
6994 iph->tot_len = htons(mss + hdr_len);
6997 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6998 tg3_flag(tp, TSO_BUG))
6999 return tg3_tso_bug(tp, skb);
7001 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7002 TXD_FLAG_CPU_POST_DMA);
7004 if (tg3_flag(tp, HW_TSO_1) ||
7005 tg3_flag(tp, HW_TSO_2) ||
7006 tg3_flag(tp, HW_TSO_3)) {
7007 tcp_hdr(skb)->check = 0;
7008 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7010 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7015 if (tg3_flag(tp, HW_TSO_3)) {
7016 mss |= (hdr_len & 0xc) << 12;
7018 base_flags |= 0x00000010;
7019 base_flags |= (hdr_len & 0x3e0) << 5;
7020 } else if (tg3_flag(tp, HW_TSO_2))
7021 mss |= hdr_len << 9;
7022 else if (tg3_flag(tp, HW_TSO_1) ||
7023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7024 if (tcp_opt_len || iph->ihl > 5) {
7027 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7028 mss |= (tsflags << 11);
7031 if (tcp_opt_len || iph->ihl > 5) {
7034 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7035 base_flags |= tsflags << 12;
7040 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7041 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7042 base_flags |= TXD_FLAG_JMB_PKT;
	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
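
/* Worked example of the HW_TSO_3 header-length encoding above (an
 * illustration only, not additional driver logic): for a TSO frame with
 * a 20-byte IP header and a 32-byte TCP header, hdr_len = 52 (0x34).
 * Bits 2-3 of hdr_len are folded into mss bits 14-15, bit 4 becomes TXD
 * flag bit 4 (0x00000010), and bits 5-9 are shifted into base_flags
 * bits 10-14.  The chip reassembles the full header length from these
 * scattered fields when it segments the frame.
 */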
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
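
/* Usage note (illustrative; the feature-name string is defined by the
 * core networking stack, not by this driver): the NETIF_F_LOOPBACK bit
 * handled above is normally toggled from user space with ethtool,
 * e.g. "ethtool -K eth0 loopback on".
 */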
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
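
/* Note on the index arithmetic above: the producer rings are
 * power-of-two sized, so "i = (i + 1) & tp->rx_std_ring_mask" walks
 * from the consumer index to the producer index with wraparound.  For
 * example, with a 512-entry ring (mask 0x1ff), index 511 advances to
 * index 0.
 */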
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings; we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
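
/* The rxd->opaque values programmed above round-trip through the chip:
 * when a receive completes, the opaque field copied into the return
 * ring descriptor identifies both the source ring (RXD_OPAQUE_RING_STD
 * vs RXD_OPAQUE_RING_JUMBO) and the slot index
 * (i << RXD_OPAQUE_INDEX_SHIFT), so the rx path can locate the
 * original buffer without any additional lookup state.
 */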
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
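
/* Design note: each tx ring needs two allocations, a kzalloc'd
 * tg3_tx_ring_info array for host-side bookkeeping (skb pointers and
 * DMA unmap addresses) and a DMA-coherent TG3_TX_RING_BYTES block
 * holding the descriptors the chip actually reads.  Keeping them
 * separate keeps the chip-visible region free of host-only state.
 */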
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
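
/* Summary of the RSS producer-index remapping above (one rx return
 * ring per MSI-X vector, reported through otherwise unused status
 * block fields):
 *
 *	vector 1 -> sblk->idx[0].rx_producer
 *	vector 2 -> sblk->rx_jumbo_consumer
 *	vector 3 -> sblk->reserved
 *	vector 4 -> sblk->rx_mini_consumer
 */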
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
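
/* The shutdown order above is deliberate: the receive-side blocks are
 * quiesced first so no new work enters the chip, then the send-side
 * blocks and DMA engines, and finally the host coalescing engine,
 * buffer manager, and memory arbiter that the other blocks depend on.
 */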
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so we need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupts
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}
	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	if (err)
		return err;

	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
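
/* A TG3_BDINFO control block in NIC SRAM is four 32-bit words: the
 * 64-bit host DMA address of the ring (high word first), a
 * maxlen/flags word, and (on pre-5705 devices) the NIC-local SRAM
 * address of the descriptors.  tg3_reset_hw() uses this same layout
 * when it programs the standard and jumbo rx rings directly.
 */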
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	for (; i < tp->txq_cnt; i++) {
		u32 reg;

		reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->tx_coalesce_usecs);
		reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames);
		reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
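
/* The per-vector coalescing registers used above sit at a fixed
 * 0x18-byte stride, so for MSI-X vector n (n >= 1) the tx ticks
 * register is, for example:
 *
 *	reg = HOSTCC_TXCOL_TICKS_VEC1 + (n - 1) * 0x18;
 *
 * Vector 3's register is thus 0x30 bytes past vector 1's.
 */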
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}
	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}
	}
}
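
/* Each additional MSI-X vector has its own status block address pair;
 * the registers at HOSTCC_STATBLCK_RING1 are laid out 8 bytes apart
 * (one high/low pair per vector), which is why the loop above advances
 * stblk by 8 on each iteration.
 */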
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
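
/* Example of the threshold math above: with rx_pending == 200,
 * host_rep_thresh = max(200 / 8, 1) = 25, so the chip signals a
 * standard-ring replenish once 25 buffers have been consumed, or
 * sooner if the NIC-side BD cache limit divided by two is smaller.
 */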
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
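
/* calc_crc() is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320), the same CRC used for the Ethernet FCS; the MAC
 * computes the identical value in hardware when it hashes incoming
 * multicast destination addresses against the filter programmed in
 * __tg3_set_rx_mode() below.
 */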
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
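
/* Worked example of the hash bucket selection above: if calc_crc()
 * returns a CRC whose low 7 bits are 0x34, then bit = ~crc & 0x7f =
 * 0x4b, regidx = (0x4b & 0x60) >> 5 = 2, and bit & 0x1f = 0x0b, so
 * that address maps to bit 11 of MAC_HASH_REG_2.  All multicast
 * addresses fold into these 128 hash bits.
 */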
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->irq_cnt <= 2) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
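
/* Packing example for the loop above: eight 4-bit indirection table
 * entries fill one 32-bit register, first entry in the most
 * significant nibble.  A table beginning 1, 2, 3, 0, 1, 2, 3, 0 is
 * therefore written to MAC_RSS_INDIR_TBL_0 as 0x12301230.
 */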
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);
	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to set up the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Set up the timer prescalar register.  Clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}
	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location
	 * is configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS))
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate the RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Set up the host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs.  The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
9271 if (tg3_flag(tp, USING_MSIX)) {
9272 val = tr32(MSGINT_MODE);
9273 val |= MSGINT_MODE_ENABLE;
9274 if (tp->irq_cnt > 1)
9275 val |= MSGINT_MODE_MULTIVEC_EN;
9276 if (!tg3_flag(tp, 1SHOT_MSI))
9277 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9278 tw32(MSGINT_MODE, val);
9281 if (!tg3_flag(tp, 5705_PLUS)) {
9282 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9286 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9287 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9288 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9289 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9290 WDMAC_MODE_LNGREAD_ENAB);
9292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9293 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9294 if (tg3_flag(tp, TSO_CAPABLE) &&
9295 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9296 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9298 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9299 !tg3_flag(tp, IS_5788)) {
9300 val |= WDMAC_MODE_RX_ACCEL;
9304 /* Enable host coalescing bug fix */
9305 if (tg3_flag(tp, 5755_PLUS))
9306 val |= WDMAC_MODE_STATUS_TAG_FIX;
9308 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9309 val |= WDMAC_MODE_BURST_ALL_DATA;
9311 tw32_f(WDMAC_MODE, val);
9314 if (tg3_flag(tp, PCIX_MODE)) {
9317 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9320 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9321 pcix_cmd |= PCI_X_CMD_READ_2K;
9322 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9323 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9324 pcix_cmd |= PCI_X_CMD_READ_2K;
9326 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9330 tw32_f(RDMAC_MODE, rdmac_mode);
9333 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9334 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9335 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9338 if (i < TG3_NUM_RDMA_CHANNELS) {
9339 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9340 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9341 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9342 tg3_flag_set(tp, 5719_RDMA_BUG);
9346 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9347 if (!tg3_flag(tp, 5705_PLUS))
9348 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9352 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9354 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9356 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9357 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9358 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9359 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9360 val |= RCVDBDI_MODE_LRG_RING_SZ;
9361 tw32(RCVDBDI_MODE, val);
9362 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9363 if (tg3_flag(tp, HW_TSO_1) ||
9364 tg3_flag(tp, HW_TSO_2) ||
9365 tg3_flag(tp, HW_TSO_3))
9366 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9367 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9368 if (tg3_flag(tp, ENABLE_TSS))
9369 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9370 tw32(SNDBDI_MODE, val);
9371 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9373 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9374 err = tg3_load_5701_a0_firmware_fix(tp);
9379 if (tg3_flag(tp, TSO_CAPABLE)) {
9380 err = tg3_load_tso_firmware(tp);
9385 tp->tx_mode = TX_MODE_ENABLE;
9387 if (tg3_flag(tp, 5755_PLUS) ||
9388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9389 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9392 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9393 tp->tx_mode &= ~val;
9394 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9397 tw32_f(MAC_TX_MODE, tp->tx_mode);
9400 if (tg3_flag(tp, ENABLE_RSS)) {
9401 tg3_rss_write_indir_tbl(tp);
9403 /* Setup the "secret" hash key. */
9404 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9405 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9406 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9407 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9408 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9409 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9410 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9411 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9412 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9413 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
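	/* Note: the ten 32-bit writes above supply the 40-byte key consumed
	 * by the RSS hash engine; RX_MODE_RSS_ITBL_HASH_BITS_7 (set below)
	 * then uses 7 bits of the resulting hash to index the indirection
	 * table written by tg3_rss_write_indir_tbl() and pick an rx ring.
	 */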
9416 tp->rx_mode = RX_MODE_ENABLE;
9417 if (tg3_flag(tp, 5755_PLUS))
9418 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9420 if (tg3_flag(tp, ENABLE_RSS))
9421 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9422 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9423 RX_MODE_RSS_IPV6_HASH_EN |
9424 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9425 RX_MODE_RSS_IPV4_HASH_EN |
9426 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9428 tw32_f(MAC_RX_MODE, tp->rx_mode);
9431 tw32(MAC_LED_CTRL, tp->led_ctrl);
9433 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9434 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9435 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9438 tw32_f(MAC_RX_MODE, tp->rx_mode);
9441 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9442 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9443 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9444 /* Set drive transmission level to 1.2V */
9445 /* only if the signal pre-emphasis bit is not set */
9446 val = tr32(MAC_SERDES_CFG);
9449 tw32(MAC_SERDES_CFG, val);
9451 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9452 tw32(MAC_SERDES_CFG, 0x616000);
	/* Prevent chip from dropping frames when flow control is enabled. */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9464 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9465 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9466 /* Use hardware link auto-negotiation */
9467 tg3_flag_set(tp, HW_AUTONEG);
9470 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9474 tmp = tr32(SERDES_RX_CTRL);
9475 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9476 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9477 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9478 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9481 if (!tg3_flag(tp, USE_PHYLIB)) {
9482 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9483 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9485 err = tg3_setup_phy(tp, 0);
9489 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9490 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9493 /* Clear CRC stats. */
9494 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9495 tg3_writephy(tp, MII_TG3_TEST1,
9496 tmp | MII_TG3_TEST1_CRC_EN);
9497 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9502 __tg3_set_rx_mode(tp->dev);
9504 /* Initialize receive rules. */
9505 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9506 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9507 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9508 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9510 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9514 if (tg3_flag(tp, ENABLE_ASF))
9518 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9520 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9522 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9524 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9526 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9528 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9530 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9532 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9534 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9536 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9538 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9540 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9542 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9544 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9552 if (tg3_flag(tp, ENABLE_APE))
9553 /* Write our heartbeat update interval to APE. */
9554 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9555 APE_HOST_HEARTBEAT_INT_DISABLE);
9557 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
9565 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9567 tg3_switch_clocks(tp);
9569 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9571 return tg3_reset_hw(tp, reset_phy);
9574 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9578 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9579 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9581 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9584 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9585 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9586 memset(ocir, 0, TG3_OCIR_LEN);
9590 /* sysfs attributes for hwmon */
9591 static ssize_t tg3_show_temp(struct device *dev,
9592 struct device_attribute *devattr, char *buf)
9594 struct pci_dev *pdev = to_pci_dev(dev);
9595 struct net_device *netdev = pci_get_drvdata(pdev);
9596 struct tg3 *tp = netdev_priv(netdev);
9597 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
9608 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9609 TG3_TEMP_SENSOR_OFFSET);
9610 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9611 TG3_TEMP_CAUTION_OFFSET);
9612 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9613 TG3_TEMP_MAX_OFFSET);
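/* The three attributes above appear under the hwmon class device as
 * temp1_input, temp1_crit and temp1_max; each read lands in
 * tg3_show_temp(), which pulls the value from the APE scratchpad at the
 * offset passed as the attribute index.
 */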
9615 static struct attribute *tg3_attributes[] = {
9616 &sensor_dev_attr_temp1_input.dev_attr.attr,
9617 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9618 &sensor_dev_attr_temp1_max.dev_attr.attr,
9622 static const struct attribute_group tg3_group = {
9623 .attrs = tg3_attributes,
9626 static void tg3_hwmon_close(struct tg3 *tp)
9628 if (tp->hwmon_dev) {
9629 hwmon_device_unregister(tp->hwmon_dev);
9630 tp->hwmon_dev = NULL;
9631 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9635 static void tg3_hwmon_open(struct tg3 *tp)
9639 struct pci_dev *pdev = tp->pdev;
9640 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9642 tg3_sd_scan_scratchpad(tp, ocirs);
9644 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9645 if (!ocirs[i].src_data_length)
9648 size += ocirs[i].src_hdr_length;
9649 size += ocirs[i].src_data_length;
9655 /* Register hwmon sysfs hooks */
9656 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9658 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9662 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9663 if (IS_ERR(tp->hwmon_dev)) {
9664 tp->hwmon_dev = NULL;
9665 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9666 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
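/* Illustrative sketch (not driver code): TG3_STAT_ADD32 widens a 32-bit
 * hardware counter into a monotonically growing 64-bit software counter.
 * After the 32-bit addition, (PSTAT)->low < __val exactly when the add
 * wrapped around, so the high word picks up the carry.  A standalone
 * model of the same idea, using hypothetical names:
 */
#if 0	/* example only, never compiled into the driver */
#include <stdint.h>

struct stat64_sketch { uint32_t high, low; };

static void stat_add32_sketch(struct stat64_sketch *s, uint32_t val)
{
	s->low += val;
	if (s->low < val)	/* unsigned wraparound occurred */
		s->high += 1;
}
#endif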
9678 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9680 struct tg3_hw_stats *sp = tp->hw_stats;
9682 if (!netif_carrier_ok(tp->dev))
9685 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9686 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9687 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9688 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9689 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9690 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9691 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9692 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9693 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9694 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9695 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9696 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9697 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9698 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9699 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9700 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9703 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9704 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9705 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9706 tg3_flag_clear(tp, 5719_RDMA_BUG);
9709 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9710 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9711 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9712 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9713 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9714 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9715 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9716 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9717 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9718 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9719 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9720 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9721 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9722 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9724 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9725 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9726 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9727 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9728 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}

		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9740 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9743 static void tg3_chk_missed_msi(struct tg3 *tp)
9747 for (i = 0; i < tp->irq_cnt; i++) {
9748 struct tg3_napi *tnapi = &tp->napi[i];
9750 if (tg3_has_work(tnapi)) {
9751 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9752 tnapi->last_tx_cons == tnapi->tx_cons) {
9753 if (tnapi->chk_msi_cnt < 1) {
9754 tnapi->chk_msi_cnt++;
9760 tnapi->chk_msi_cnt = 0;
9761 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9762 tnapi->last_tx_cons = tnapi->tx_cons;
9766 static void tg3_timer(unsigned long __opaque)
9768 struct tg3 *tp = (struct tg3 *) __opaque;
9770 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9773 spin_lock(&tp->lock);
9775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9776 tg3_flag(tp, 57765_CLASS))
9777 tg3_chk_missed_msi(tp);
9779 if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage exists because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
9784 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9785 tw32(GRC_LOCAL_CTRL,
9786 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9788 tw32(HOSTCC_MODE, tp->coalesce_mode |
9789 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9792 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9793 spin_unlock(&tp->lock);
9794 tg3_reset_task_schedule(tp);
9799 /* This part only runs once per second. */
9800 if (!--tp->timer_counter) {
9801 if (tg3_flag(tp, 5705_PLUS))
9802 tg3_periodic_fetch_stats(tp);
9804 if (tp->setlpicnt && !--tp->setlpicnt)
9805 tg3_phy_eee_enable(tp);
9807 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9811 mac_stat = tr32(MAC_STATUS);
9814 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9815 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9817 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9821 tg3_setup_phy(tp, 0);
9822 } else if (tg3_flag(tp, POLL_SERDES)) {
9823 u32 mac_stat = tr32(MAC_STATUS);
9826 if (netif_carrier_ok(tp->dev) &&
9827 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9830 if (!netif_carrier_ok(tp->dev) &&
9831 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9832 MAC_STATUS_SIGNAL_DET))) {
9836 if (!tp->serdes_counter) {
9839 ~MAC_MODE_PORT_MODE_MASK));
9841 tw32_f(MAC_MODE, tp->mac_mode);
9844 tg3_setup_phy(tp, 0);
9846 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9847 tg3_flag(tp, 5780_CLASS)) {
9848 tg3_serdes_parallel_detect(tp);
9851 tp->timer_counter = tp->timer_multiplier;
	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
9871 if (!--tp->asf_counter) {
9872 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9873 tg3_wait_for_event_ack(tp);
9875 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9876 FWCMD_NICDRV_ALIVE3);
9877 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9878 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9879 TG3_FW_UPDATE_TIMEOUT_SEC);
9881 tg3_generate_fw_event(tp);
9883 tp->asf_counter = tp->asf_multiplier;
9886 spin_unlock(&tp->lock);
9889 tp->timer.expires = jiffies + tp->timer_offset;
9890 add_timer(&tp->timer);
9893 static void __devinit tg3_timer_init(struct tg3 *tp)
9895 if (tg3_flag(tp, TAGGED_STATUS) &&
9896 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9897 !tg3_flag(tp, 57765_CLASS))
9898 tp->timer_offset = HZ;
9900 tp->timer_offset = HZ / 10;
9902 BUG_ON(tp->timer_offset > HZ);
9904 tp->timer_multiplier = (HZ / tp->timer_offset);
9905 tp->asf_multiplier = (HZ / tp->timer_offset) *
9906 TG3_FW_UPDATE_FREQ_SEC;
9908 init_timer(&tp->timer);
9909 tp->timer.data = (unsigned long) tp;
9910 tp->timer.function = tg3_timer;
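/* Worked example: on a chip without usable tagged status, tp->timer_offset
 * is HZ/10, so tg3_timer() runs ten times a second; timer_multiplier = 10
 * makes the "once per second" block in tg3_timer() fire every tenth tick,
 * and with the 2 second heartbeat period (TG3_FW_UPDATE_FREQ_SEC) the ASF
 * counter expires every twentieth tick.
 */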
9913 static void tg3_timer_start(struct tg3 *tp)
9915 tp->asf_counter = tp->asf_multiplier;
9916 tp->timer_counter = tp->timer_multiplier;
9918 tp->timer.expires = jiffies + tp->timer_offset;
9919 add_timer(&tp->timer);
9922 static void tg3_timer_stop(struct tg3 *tp)
9924 del_timer_sync(&tp->timer);
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
9930 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9931 __releases(tp->lock)
9932 __acquires(tp->lock)
9936 err = tg3_init_hw(tp, reset_phy);
9939 "Failed to re-initialize device, aborting\n");
9940 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9941 tg3_full_unlock(tp);
9944 tg3_napi_enable(tp);
9946 tg3_full_lock(tp, 0);
9951 static void tg3_reset_task(struct work_struct *work)
9953 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9956 tg3_full_lock(tp, 0);
9958 if (!netif_running(tp->dev)) {
9959 tg3_flag_clear(tp, RESET_TASK_PENDING);
9960 tg3_full_unlock(tp);
9964 tg3_full_unlock(tp);
9970 tg3_full_lock(tp, 1);
9972 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9973 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9974 tp->write32_rx_mbox = tg3_write_flush_reg32;
9975 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9976 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9979 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9980 err = tg3_init_hw(tp, 1);
9984 tg3_netif_start(tp);
9987 tg3_full_unlock(tp);
9992 tg3_flag_clear(tp, RESET_TASK_PENDING);
9995 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9998 unsigned long flags;
10000 struct tg3_napi *tnapi = &tp->napi[irq_num];
10002 if (tp->irq_cnt == 1)
10003 name = tp->dev->name;
10005 name = &tnapi->irq_lbl[0];
10006 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10007 name[IFNAMSIZ-1] = 0;
10010 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10012 if (tg3_flag(tp, 1SHOT_MSI))
10013 fn = tg3_msi_1shot;
10016 fn = tg3_interrupt;
10017 if (tg3_flag(tp, TAGGED_STATUS))
10018 fn = tg3_interrupt_tagged;
10019 flags = IRQF_SHARED;
10022 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
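	/* e.g. with three MSI-X vectors on a device named "eth0", the
	 * per-vector labels built above come out as "eth0-0", "eth0-1" and
	 * "eth0-2"; with a single vector the plain device name is used.
	 */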
10025 static int tg3_test_interrupt(struct tg3 *tp)
10027 struct tg3_napi *tnapi = &tp->napi[0];
10028 struct net_device *dev = tp->dev;
10029 int err, i, intr_ok = 0;
10032 if (!netif_running(dev))
10035 tg3_disable_ints(tp);
10037 free_irq(tnapi->irq_vec, tnapi);
	/* Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
10043 if (tg3_flag(tp, 57765_PLUS)) {
10044 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10045 tw32(MSGINT_MODE, val);
10048 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10049 IRQF_SHARED, dev->name, tnapi);
10053 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10054 tg3_enable_ints(tp);
10056 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10059 for (i = 0; i < 5; i++) {
10060 u32 int_mbox, misc_host_ctrl;
10062 int_mbox = tr32_mailbox(tnapi->int_mbox);
10063 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10065 if ((int_mbox != 0) ||
10066 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10071 if (tg3_flag(tp, 57765_PLUS) &&
10072 tnapi->hw_status->status_tag != tnapi->last_tag)
10073 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10078 tg3_disable_ints(tp);
10080 free_irq(tnapi->irq_vec, tnapi);
10082 err = tg3_request_irq(tp, 0);
10088 /* Reenable MSI one shot mode. */
10089 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10090 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10091 tw32(MSGINT_MODE, val);
/* Returns 0 if MSI test succeeds, or if the MSI test fails and INTx
 * mode is successfully restored.
 */
10102 static int tg3_test_msi(struct tg3 *tp)
10107 if (!tg3_flag(tp, USING_MSI))
	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10114 pci_write_config_word(tp->pdev, PCI_COMMAND,
10115 pci_cmd & ~PCI_COMMAND_SERR);
10117 err = tg3_test_interrupt(tp);
10119 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10124 /* other failures */
10128 /* MSI test failed, go back to INTx mode */
10129 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10130 "to INTx mode. Please report this failure to the PCI "
10131 "maintainer and include system chipset information\n");
10133 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10135 pci_disable_msi(tp->pdev);
10137 tg3_flag_clear(tp, USING_MSI);
10138 tp->napi[0].irq_vec = tp->pdev->irq;
10140 err = tg3_request_irq(tp, 0);
10144 /* Need to reset the chip because the MSI cycle may have terminated
10145 * with Master Abort.
10147 tg3_full_lock(tp, 1);
10149 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10150 err = tg3_init_hw(tp, 1);
10152 tg3_full_unlock(tp);
10155 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10160 static int tg3_request_firmware(struct tg3 *tp)
10162 const __be32 *fw_data;
10164 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10165 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10170 fw_data = (void *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
10177 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10178 if (tp->fw_len < (tp->fw->size - 12)) {
10179 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10180 tp->fw_len, tp->fw_needed);
10181 release_firmware(tp->fw);
10186 /* We no longer need firmware; we have it. */
10187 tp->fw_needed = NULL;
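/* Illustrative sketch (not driver code) of the blob layout implied by the
 * parsing above; the struct and field names are hypothetical:
 */
#if 0	/* example only */
struct tg3_fw_hdr_sketch {
	__be32 version;		/* fw_data[0]: firmware version */
	__be32 base_addr;	/* fw_data[1]: target load address */
	__be32 len;		/* fw_data[2]: full length, including BSS */
	/* instruction words follow the 12-byte header */
};
#endif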
10191 static u32 tg3_irq_count(struct tg3 *tp)
10193 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
	/* We want as many rx rings enabled as there are cpus.
	 * In multiqueue MSI-X mode, the first MSI-X vector
	 * only deals with link interrupts, etc, so we add
	 * one to the number of vectors we are requesting.
	 */
10201 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
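	/* Worked example (hypothetical numbers): with rxq_cnt = 4,
	 * txq_cnt = 1 and tp->irq_max = 5, max() gives 4 and the extra
	 * vector reserved for link/aux interrupts brings the request to
	 * min(4 + 1, 5) = 5 vectors.
	 */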
10207 static bool tg3_enable_msix(struct tg3 *tp)
10210 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10212 tp->txq_cnt = tp->txq_req;
10213 tp->rxq_cnt = tp->rxq_req;
10215 tp->rxq_cnt = netif_get_num_default_rss_queues();
10216 if (tp->rxq_cnt > tp->rxq_max)
10217 tp->rxq_cnt = tp->rxq_max;
10219 /* Disable multiple TX rings by default. Simple round-robin hardware
10220 * scheduling of the TX rings can cause starvation of rings with
10221 * small packets when other rings have TSO or jumbo packets.
10226 tp->irq_cnt = tg3_irq_count(tp);
10228 for (i = 0; i < tp->irq_max; i++) {
10229 msix_ent[i].entry = i;
10230 msix_ent[i].vector = 0;
10233 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10236 } else if (rc != 0) {
10237 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10239 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10242 tp->rxq_cnt = max(rc - 1, 1);
10244 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10247 for (i = 0; i < tp->irq_max; i++)
10248 tp->napi[i].irq_vec = msix_ent[i].vector;
10250 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10251 pci_disable_msix(tp->pdev);
10255 if (tp->irq_cnt == 1)
10258 tg3_flag_set(tp, ENABLE_RSS);
10260 if (tp->txq_cnt > 1)
10261 tg3_flag_set(tp, ENABLE_TSS);
10263 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10268 static void tg3_ints_init(struct tg3 *tp)
10270 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10271 !tg3_flag(tp, TAGGED_STATUS)) {
10272 /* All MSI supporting chips should support tagged
10273 * status. Assert that this is the case.
10275 netdev_warn(tp->dev,
10276 "MSI without TAGGED_STATUS? Not using MSI\n");
10280 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10281 tg3_flag_set(tp, USING_MSIX);
10282 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10283 tg3_flag_set(tp, USING_MSI);
10285 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10286 u32 msi_mode = tr32(MSGINT_MODE);
10287 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10288 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10289 if (!tg3_flag(tp, 1SHOT_MSI))
10290 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10291 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10294 if (!tg3_flag(tp, USING_MSIX)) {
10296 tp->napi[0].irq_vec = tp->pdev->irq;
10299 if (tp->irq_cnt == 1) {
10302 netif_set_real_num_tx_queues(tp->dev, 1);
10303 netif_set_real_num_rx_queues(tp->dev, 1);
10307 static void tg3_ints_fini(struct tg3 *tp)
10309 if (tg3_flag(tp, USING_MSIX))
10310 pci_disable_msix(tp->pdev);
10311 else if (tg3_flag(tp, USING_MSI))
10312 pci_disable_msi(tp->pdev);
10313 tg3_flag_clear(tp, USING_MSI);
10314 tg3_flag_clear(tp, USING_MSIX);
10315 tg3_flag_clear(tp, ENABLE_RSS);
10316 tg3_flag_clear(tp, ENABLE_TSS);
10319 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10321 struct net_device *dev = tp->dev;
10325 * Setup interrupts first so we know how
10326 * many NAPI resources to allocate
10330 tg3_rss_check_indir_tbl(tp);
10332 /* The placement of this call is tied
10333 * to the setup and use of Host TX descriptors.
10335 err = tg3_alloc_consistent(tp);
10341 tg3_napi_enable(tp);
10343 for (i = 0; i < tp->irq_cnt; i++) {
10344 struct tg3_napi *tnapi = &tp->napi[i];
10345 err = tg3_request_irq(tp, i);
10347 for (i--; i >= 0; i--) {
10348 tnapi = &tp->napi[i];
10349 free_irq(tnapi->irq_vec, tnapi);
10355 tg3_full_lock(tp, 0);
10357 err = tg3_init_hw(tp, reset_phy);
10359 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10360 tg3_free_rings(tp);
10363 tg3_full_unlock(tp);
10368 if (test_irq && tg3_flag(tp, USING_MSI)) {
10369 err = tg3_test_msi(tp);
10372 tg3_full_lock(tp, 0);
10373 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10374 tg3_free_rings(tp);
10375 tg3_full_unlock(tp);
10380 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10381 u32 val = tr32(PCIE_TRANSACTION_CFG);
10383 tw32(PCIE_TRANSACTION_CFG,
10384 val | PCIE_TRANS_CFG_1SHOT_MSI);
10390 tg3_hwmon_open(tp);
10392 tg3_full_lock(tp, 0);
10394 tg3_timer_start(tp);
10395 tg3_flag_set(tp, INIT_COMPLETE);
10396 tg3_enable_ints(tp);
10398 tg3_full_unlock(tp);
10400 netif_tx_start_all_queues(dev);
	/* Reset the loopback feature if it was turned on while the device
	 * was down, to make sure that it is reinstated properly now.
	 */
10406 if (dev->features & NETIF_F_LOOPBACK)
10407 tg3_set_loopback(dev, dev->features);
10412 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10413 struct tg3_napi *tnapi = &tp->napi[i];
10414 free_irq(tnapi->irq_vec, tnapi);
10418 tg3_napi_disable(tp);
10420 tg3_free_consistent(tp);
10428 static void tg3_stop(struct tg3 *tp)
10432 tg3_napi_disable(tp);
10433 tg3_reset_task_cancel(tp);
10435 netif_tx_disable(tp->dev);
10437 tg3_timer_stop(tp);
10439 tg3_hwmon_close(tp);
10443 tg3_full_lock(tp, 1);
10445 tg3_disable_ints(tp);
10447 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10448 tg3_free_rings(tp);
10449 tg3_flag_clear(tp, INIT_COMPLETE);
10451 tg3_full_unlock(tp);
10453 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10454 struct tg3_napi *tnapi = &tp->napi[i];
10455 free_irq(tnapi->irq_vec, tnapi);
10462 tg3_free_consistent(tp);
10465 static int tg3_open(struct net_device *dev)
10467 struct tg3 *tp = netdev_priv(dev);
10470 if (tp->fw_needed) {
10471 err = tg3_request_firmware(tp);
10472 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10476 netdev_warn(tp->dev, "TSO capability disabled\n");
10477 tg3_flag_clear(tp, TSO_CAPABLE);
10478 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10479 netdev_notice(tp->dev, "TSO capability restored\n");
10480 tg3_flag_set(tp, TSO_CAPABLE);
10484 netif_carrier_off(tp->dev);
10486 err = tg3_power_up(tp);
10490 tg3_full_lock(tp, 0);
10492 tg3_disable_ints(tp);
10493 tg3_flag_clear(tp, INIT_COMPLETE);
10495 tg3_full_unlock(tp);
10497 err = tg3_start(tp, true, true);
10499 tg3_frob_aux_power(tp, false);
10500 pci_set_power_state(tp->pdev, PCI_D3hot);
10505 static int tg3_close(struct net_device *dev)
10507 struct tg3 *tp = netdev_priv(dev);
10511 /* Clear stats across close / open calls */
10512 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10513 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10515 tg3_power_down(tp);
10517 netif_carrier_off(tp->dev);
10522 static inline u64 get_stat64(tg3_stat64_t *val)
10524 return ((u64)val->high << 32) | ((u64)val->low);
10527 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10529 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10531 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10532 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10533 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10536 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10537 tg3_writephy(tp, MII_TG3_TEST1,
10538 val | MII_TG3_TEST1_CRC_EN);
10539 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10543 tp->phy_crc_errors += val;
10545 return tp->phy_crc_errors;
10548 return get_stat64(&hw_stats->rx_fcs_errors);
10551 #define ESTAT_ADD(member) \
10552 estats->member = old_estats->member + \
10553 get_stat64(&hw_stats->member)
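/* Illustrative sketch (not driver code): hardware counters restart from
 * zero whenever the chip is halted, so cumulative ethtool stats are the
 * counters saved at the last halt (tp->estats_prev) plus whatever the
 * live hardware block shows now.  Minimal model with hypothetical names:
 */
#if 0	/* example only */
#include <stdint.h>

static uint64_t report_stat_sketch(uint64_t saved_prev, uint64_t live_hw)
{
	/* saved_prev accumulates at each halt; live_hw restarts from 0 */
	return saved_prev + live_hw;
}
#endif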
10555 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10557 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10558 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10560 ESTAT_ADD(rx_octets);
10561 ESTAT_ADD(rx_fragments);
10562 ESTAT_ADD(rx_ucast_packets);
10563 ESTAT_ADD(rx_mcast_packets);
10564 ESTAT_ADD(rx_bcast_packets);
10565 ESTAT_ADD(rx_fcs_errors);
10566 ESTAT_ADD(rx_align_errors);
10567 ESTAT_ADD(rx_xon_pause_rcvd);
10568 ESTAT_ADD(rx_xoff_pause_rcvd);
10569 ESTAT_ADD(rx_mac_ctrl_rcvd);
10570 ESTAT_ADD(rx_xoff_entered);
10571 ESTAT_ADD(rx_frame_too_long_errors);
10572 ESTAT_ADD(rx_jabbers);
10573 ESTAT_ADD(rx_undersize_packets);
10574 ESTAT_ADD(rx_in_length_errors);
10575 ESTAT_ADD(rx_out_length_errors);
10576 ESTAT_ADD(rx_64_or_less_octet_packets);
10577 ESTAT_ADD(rx_65_to_127_octet_packets);
10578 ESTAT_ADD(rx_128_to_255_octet_packets);
10579 ESTAT_ADD(rx_256_to_511_octet_packets);
10580 ESTAT_ADD(rx_512_to_1023_octet_packets);
10581 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10582 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10583 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10584 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10585 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10587 ESTAT_ADD(tx_octets);
10588 ESTAT_ADD(tx_collisions);
10589 ESTAT_ADD(tx_xon_sent);
10590 ESTAT_ADD(tx_xoff_sent);
10591 ESTAT_ADD(tx_flow_control);
10592 ESTAT_ADD(tx_mac_errors);
10593 ESTAT_ADD(tx_single_collisions);
10594 ESTAT_ADD(tx_mult_collisions);
10595 ESTAT_ADD(tx_deferred);
10596 ESTAT_ADD(tx_excessive_collisions);
10597 ESTAT_ADD(tx_late_collisions);
10598 ESTAT_ADD(tx_collide_2times);
10599 ESTAT_ADD(tx_collide_3times);
10600 ESTAT_ADD(tx_collide_4times);
10601 ESTAT_ADD(tx_collide_5times);
10602 ESTAT_ADD(tx_collide_6times);
10603 ESTAT_ADD(tx_collide_7times);
10604 ESTAT_ADD(tx_collide_8times);
10605 ESTAT_ADD(tx_collide_9times);
10606 ESTAT_ADD(tx_collide_10times);
10607 ESTAT_ADD(tx_collide_11times);
10608 ESTAT_ADD(tx_collide_12times);
10609 ESTAT_ADD(tx_collide_13times);
10610 ESTAT_ADD(tx_collide_14times);
10611 ESTAT_ADD(tx_collide_15times);
10612 ESTAT_ADD(tx_ucast_packets);
10613 ESTAT_ADD(tx_mcast_packets);
10614 ESTAT_ADD(tx_bcast_packets);
10615 ESTAT_ADD(tx_carrier_sense_errors);
10616 ESTAT_ADD(tx_discards);
10617 ESTAT_ADD(tx_errors);
10619 ESTAT_ADD(dma_writeq_full);
10620 ESTAT_ADD(dma_write_prioq_full);
10621 ESTAT_ADD(rxbds_empty);
10622 ESTAT_ADD(rx_discards);
10623 ESTAT_ADD(rx_errors);
10624 ESTAT_ADD(rx_threshold_hit);
10626 ESTAT_ADD(dma_readq_full);
10627 ESTAT_ADD(dma_read_prioq_full);
10628 ESTAT_ADD(tx_comp_queue_full);
10630 ESTAT_ADD(ring_set_send_prod_index);
10631 ESTAT_ADD(ring_status_update);
10632 ESTAT_ADD(nic_irqs);
10633 ESTAT_ADD(nic_avoided_irqs);
10634 ESTAT_ADD(nic_tx_threshold_hit);
10636 ESTAT_ADD(mbuf_lwm_thresh_hit);
10639 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10641 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10642 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10644 stats->rx_packets = old_stats->rx_packets +
10645 get_stat64(&hw_stats->rx_ucast_packets) +
10646 get_stat64(&hw_stats->rx_mcast_packets) +
10647 get_stat64(&hw_stats->rx_bcast_packets);
10649 stats->tx_packets = old_stats->tx_packets +
10650 get_stat64(&hw_stats->tx_ucast_packets) +
10651 get_stat64(&hw_stats->tx_mcast_packets) +
10652 get_stat64(&hw_stats->tx_bcast_packets);
10654 stats->rx_bytes = old_stats->rx_bytes +
10655 get_stat64(&hw_stats->rx_octets);
10656 stats->tx_bytes = old_stats->tx_bytes +
10657 get_stat64(&hw_stats->tx_octets);
10659 stats->rx_errors = old_stats->rx_errors +
10660 get_stat64(&hw_stats->rx_errors);
10661 stats->tx_errors = old_stats->tx_errors +
10662 get_stat64(&hw_stats->tx_errors) +
10663 get_stat64(&hw_stats->tx_mac_errors) +
10664 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10665 get_stat64(&hw_stats->tx_discards);
10667 stats->multicast = old_stats->multicast +
10668 get_stat64(&hw_stats->rx_mcast_packets);
10669 stats->collisions = old_stats->collisions +
10670 get_stat64(&hw_stats->tx_collisions);
10672 stats->rx_length_errors = old_stats->rx_length_errors +
10673 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10674 get_stat64(&hw_stats->rx_undersize_packets);
10676 stats->rx_over_errors = old_stats->rx_over_errors +
10677 get_stat64(&hw_stats->rxbds_empty);
10678 stats->rx_frame_errors = old_stats->rx_frame_errors +
10679 get_stat64(&hw_stats->rx_align_errors);
10680 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10681 get_stat64(&hw_stats->tx_discards);
10682 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10683 get_stat64(&hw_stats->tx_carrier_sense_errors);
10685 stats->rx_crc_errors = old_stats->rx_crc_errors +
10686 tg3_calc_crc_errors(tp);
10688 stats->rx_missed_errors = old_stats->rx_missed_errors +
10689 get_stat64(&hw_stats->rx_discards);
10691 stats->rx_dropped = tp->rx_dropped;
10692 stats->tx_dropped = tp->tx_dropped;
10695 static int tg3_get_regs_len(struct net_device *dev)
10697 return TG3_REG_BLK_SIZE;
10700 static void tg3_get_regs(struct net_device *dev,
10701 struct ethtool_regs *regs, void *_p)
10703 struct tg3 *tp = netdev_priv(dev);
10707 memset(_p, 0, TG3_REG_BLK_SIZE);
10709 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10712 tg3_full_lock(tp, 0);
10714 tg3_dump_legacy_regs(tp, (u32 *)_p);
10716 tg3_full_unlock(tp);
10719 static int tg3_get_eeprom_len(struct net_device *dev)
10721 struct tg3 *tp = netdev_priv(dev);
10723 return tp->nvram_size;
10726 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10728 struct tg3 *tp = netdev_priv(dev);
10731 u32 i, offset, len, b_offset, b_count;
10734 if (tg3_flag(tp, NO_NVRAM))
10737 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10740 offset = eeprom->offset;
10744 eeprom->magic = TG3_EEPROM_MAGIC;
10747 /* adjustments to start on required 4 byte boundary */
10748 b_offset = offset & 3;
10749 b_count = 4 - b_offset;
10750 if (b_count > len) {
10751 /* i.e. offset=1 len=2 */
10754 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10757 memcpy(data, ((char *)&val) + b_offset, b_count);
10760 eeprom->len += b_count;
10763 /* read bytes up to the last 4 byte boundary */
10764 pd = &data[eeprom->len];
10765 for (i = 0; i < (len - (len & 3)); i += 4) {
10766 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10771 memcpy(pd + i, &val, 4);
10776 /* read last bytes not ending on 4 byte boundary */
10777 pd = &data[eeprom->len];
10779 b_offset = offset + len - b_count;
10780 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10783 memcpy(pd, &val, b_count);
10784 eeprom->len += b_count;
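	/* Worked example: offset = 5, len = 9 (bytes 5..13).  The head read
	 * fetches the aligned word at 4 and keeps 3 bytes (b_offset = 1,
	 * b_count = 3); the middle loop copies the one full word at 8; the
	 * tail read fetches the word at 12 and keeps the remaining 2 bytes.
	 */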
10789 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10791 struct tg3 *tp = netdev_priv(dev);
10793 u32 offset, len, b_offset, odd_len;
10797 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10800 if (tg3_flag(tp, NO_NVRAM) ||
10801 eeprom->magic != TG3_EEPROM_MAGIC)
10804 offset = eeprom->offset;
10807 if ((b_offset = (offset & 3))) {
10808 /* adjustments to start on required 4 byte boundary */
10809 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10820 /* adjustments to end on required 4 byte boundary */
10822 len = (len + 3) & ~3;
10823 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10829 if (b_offset || odd_len) {
10830 buf = kmalloc(len, GFP_KERNEL);
10834 memcpy(buf, &start, 4);
10836 memcpy(buf+len-4, &end, 4);
10837 memcpy(buf + b_offset, data, eeprom->len);
10840 ret = tg3_nvram_write_block(tp, offset, len, buf);
10848 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10850 struct tg3 *tp = netdev_priv(dev);
10852 if (tg3_flag(tp, USE_PHYLIB)) {
10853 struct phy_device *phydev;
10854 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10856 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10857 return phy_ethtool_gset(phydev, cmd);
10860 cmd->supported = (SUPPORTED_Autoneg);
10862 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10863 cmd->supported |= (SUPPORTED_1000baseT_Half |
10864 SUPPORTED_1000baseT_Full);
10866 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10867 cmd->supported |= (SUPPORTED_100baseT_Half |
10868 SUPPORTED_100baseT_Full |
10869 SUPPORTED_10baseT_Half |
10870 SUPPORTED_10baseT_Full |
10872 cmd->port = PORT_TP;
10874 cmd->supported |= SUPPORTED_FIBRE;
10875 cmd->port = PORT_FIBRE;
10878 cmd->advertising = tp->link_config.advertising;
10879 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10880 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10881 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10882 cmd->advertising |= ADVERTISED_Pause;
10884 cmd->advertising |= ADVERTISED_Pause |
10885 ADVERTISED_Asym_Pause;
10887 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10888 cmd->advertising |= ADVERTISED_Asym_Pause;
10891 if (netif_running(dev) && netif_carrier_ok(dev)) {
10892 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10893 cmd->duplex = tp->link_config.active_duplex;
10894 cmd->lp_advertising = tp->link_config.rmt_adv;
10895 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10896 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10897 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10899 cmd->eth_tp_mdix = ETH_TP_MDI;
10902 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10903 cmd->duplex = DUPLEX_UNKNOWN;
10904 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10906 cmd->phy_address = tp->phy_addr;
10907 cmd->transceiver = XCVR_INTERNAL;
10908 cmd->autoneg = tp->link_config.autoneg;
10914 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10916 struct tg3 *tp = netdev_priv(dev);
10917 u32 speed = ethtool_cmd_speed(cmd);
10919 if (tg3_flag(tp, USE_PHYLIB)) {
10920 struct phy_device *phydev;
10921 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10923 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10924 return phy_ethtool_sset(phydev, cmd);
10927 if (cmd->autoneg != AUTONEG_ENABLE &&
10928 cmd->autoneg != AUTONEG_DISABLE)
10931 if (cmd->autoneg == AUTONEG_DISABLE &&
10932 cmd->duplex != DUPLEX_FULL &&
10933 cmd->duplex != DUPLEX_HALF)
10936 if (cmd->autoneg == AUTONEG_ENABLE) {
10937 u32 mask = ADVERTISED_Autoneg |
10939 ADVERTISED_Asym_Pause;
10941 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10942 mask |= ADVERTISED_1000baseT_Half |
10943 ADVERTISED_1000baseT_Full;
10945 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10946 mask |= ADVERTISED_100baseT_Half |
10947 ADVERTISED_100baseT_Full |
10948 ADVERTISED_10baseT_Half |
10949 ADVERTISED_10baseT_Full |
10952 mask |= ADVERTISED_FIBRE;
10954 if (cmd->advertising & ~mask)
10957 mask &= (ADVERTISED_1000baseT_Half |
10958 ADVERTISED_1000baseT_Full |
10959 ADVERTISED_100baseT_Half |
10960 ADVERTISED_100baseT_Full |
10961 ADVERTISED_10baseT_Half |
10962 ADVERTISED_10baseT_Full);
10964 cmd->advertising &= mask;
10966 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10967 if (speed != SPEED_1000)
10970 if (cmd->duplex != DUPLEX_FULL)
10973 if (speed != SPEED_100 &&
10979 tg3_full_lock(tp, 0);
10981 tp->link_config.autoneg = cmd->autoneg;
10982 if (cmd->autoneg == AUTONEG_ENABLE) {
10983 tp->link_config.advertising = (cmd->advertising |
10984 ADVERTISED_Autoneg);
10985 tp->link_config.speed = SPEED_UNKNOWN;
10986 tp->link_config.duplex = DUPLEX_UNKNOWN;
10988 tp->link_config.advertising = 0;
10989 tp->link_config.speed = speed;
10990 tp->link_config.duplex = cmd->duplex;
10993 if (netif_running(dev))
10994 tg3_setup_phy(tp, 1);
10996 tg3_full_unlock(tp);
11001 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11003 struct tg3 *tp = netdev_priv(dev);
11005 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11006 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11007 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11008 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11011 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11013 struct tg3 *tp = netdev_priv(dev);
11015 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11016 wol->supported = WAKE_MAGIC;
11018 wol->supported = 0;
11020 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11021 wol->wolopts = WAKE_MAGIC;
11022 memset(&wol->sopass, 0, sizeof(wol->sopass));
11025 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11027 struct tg3 *tp = netdev_priv(dev);
11028 struct device *dp = &tp->pdev->dev;
11030 if (wol->wolopts & ~WAKE_MAGIC)
11032 if ((wol->wolopts & WAKE_MAGIC) &&
11033 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11036 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11038 spin_lock_bh(&tp->lock);
11039 if (device_may_wakeup(dp))
11040 tg3_flag_set(tp, WOL_ENABLE);
11042 tg3_flag_clear(tp, WOL_ENABLE);
11043 spin_unlock_bh(&tp->lock);
11048 static u32 tg3_get_msglevel(struct net_device *dev)
11050 struct tg3 *tp = netdev_priv(dev);
11051 return tp->msg_enable;
11054 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11056 struct tg3 *tp = netdev_priv(dev);
11057 tp->msg_enable = value;
11060 static int tg3_nway_reset(struct net_device *dev)
11062 struct tg3 *tp = netdev_priv(dev);
11065 if (!netif_running(dev))
11068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11071 if (tg3_flag(tp, USE_PHYLIB)) {
11072 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11074 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11078 spin_lock_bh(&tp->lock);
11080 tg3_readphy(tp, MII_BMCR, &bmcr);
11081 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11082 ((bmcr & BMCR_ANENABLE) ||
11083 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11084 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11088 spin_unlock_bh(&tp->lock);
11094 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11096 struct tg3 *tp = netdev_priv(dev);
11098 ering->rx_max_pending = tp->rx_std_ring_mask;
11099 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11100 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11102 ering->rx_jumbo_max_pending = 0;
11104 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11106 ering->rx_pending = tp->rx_pending;
11107 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11108 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11110 ering->rx_jumbo_pending = 0;
11112 ering->tx_pending = tp->napi[0].tx_pending;
11115 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11117 struct tg3 *tp = netdev_priv(dev);
11118 int i, irq_sync = 0, err = 0;
11120 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11121 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11122 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11123 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11124 (tg3_flag(tp, TSO_BUG) &&
11125 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11128 if (netif_running(dev)) {
11130 tg3_netif_stop(tp);
11134 tg3_full_lock(tp, irq_sync);
11136 tp->rx_pending = ering->rx_pending;
11138 if (tg3_flag(tp, MAX_RXPEND_64) &&
11139 tp->rx_pending > 63)
11140 tp->rx_pending = 63;
11141 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11143 for (i = 0; i < tp->irq_max; i++)
11144 tp->napi[i].tx_pending = ering->tx_pending;
11146 if (netif_running(dev)) {
11147 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11148 err = tg3_restart_hw(tp, 1);
11150 tg3_netif_start(tp);
11153 tg3_full_unlock(tp);
11155 if (irq_sync && !err)
11161 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11163 struct tg3 *tp = netdev_priv(dev);
11165 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11167 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11168 epause->rx_pause = 1;
11170 epause->rx_pause = 0;
11172 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11173 epause->tx_pause = 1;
11175 epause->tx_pause = 0;
11178 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11180 struct tg3 *tp = netdev_priv(dev);
11183 if (tg3_flag(tp, USE_PHYLIB)) {
11185 struct phy_device *phydev;
11187 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11189 if (!(phydev->supported & SUPPORTED_Pause) ||
11190 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11191 (epause->rx_pause != epause->tx_pause)))
11194 tp->link_config.flowctrl = 0;
11195 if (epause->rx_pause) {
11196 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11198 if (epause->tx_pause) {
11199 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11200 newadv = ADVERTISED_Pause;
11202 newadv = ADVERTISED_Pause |
11203 ADVERTISED_Asym_Pause;
11204 } else if (epause->tx_pause) {
11205 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11206 newadv = ADVERTISED_Asym_Pause;
11210 if (epause->autoneg)
11211 tg3_flag_set(tp, PAUSE_AUTONEG);
11213 tg3_flag_clear(tp, PAUSE_AUTONEG);
11215 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11216 u32 oldadv = phydev->advertising &
11217 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11218 if (oldadv != newadv) {
11219 phydev->advertising &=
11220 ~(ADVERTISED_Pause |
11221 ADVERTISED_Asym_Pause);
11222 phydev->advertising |= newadv;
11223 if (phydev->autoneg) {
				/* Always renegotiate the link to
				 * inform our link partner of our
				 * flow control settings, even if the
				 * flow control is forced.  Let
				 * tg3_adjust_link() do the final
				 * flow control setup.
				 */
11232 return phy_start_aneg(phydev);
11236 if (!epause->autoneg)
11237 tg3_setup_flow_control(tp, 0, 0);
11239 tp->link_config.advertising &=
11240 ~(ADVERTISED_Pause |
11241 ADVERTISED_Asym_Pause);
11242 tp->link_config.advertising |= newadv;
11247 if (netif_running(dev)) {
11248 tg3_netif_stop(tp);
11252 tg3_full_lock(tp, irq_sync);
11254 if (epause->autoneg)
11255 tg3_flag_set(tp, PAUSE_AUTONEG);
11257 tg3_flag_clear(tp, PAUSE_AUTONEG);
11258 if (epause->rx_pause)
11259 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11261 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11262 if (epause->tx_pause)
11263 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11265 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11267 if (netif_running(dev)) {
11268 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11269 err = tg3_restart_hw(tp, 1);
11271 tg3_netif_start(tp);
11274 tg3_full_unlock(tp);
11280 static int tg3_get_sset_count(struct net_device *dev, int sset)
11284 return TG3_NUM_TEST;
11286 return TG3_NUM_STATS;
11288 return -EOPNOTSUPP;
11292 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11293 u32 *rules __always_unused)
11295 struct tg3 *tp = netdev_priv(dev);
11297 if (!tg3_flag(tp, SUPPORT_MSIX))
11298 return -EOPNOTSUPP;
11300 switch (info->cmd) {
11301 case ETHTOOL_GRXRINGS:
11302 if (netif_running(tp->dev))
11303 info->data = tp->rxq_cnt;
11305 info->data = num_online_cpus();
11306 if (info->data > TG3_RSS_MAX_NUM_QS)
11307 info->data = TG3_RSS_MAX_NUM_QS;
		/* The first interrupt vector only
		 * handles link interrupts.
		 */
11317 return -EOPNOTSUPP;
11321 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11324 struct tg3 *tp = netdev_priv(dev);
11326 if (tg3_flag(tp, SUPPORT_MSIX))
11327 size = TG3_RSS_INDIR_TBL_SIZE;
11332 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11334 struct tg3 *tp = netdev_priv(dev);
11337 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11338 indir[i] = tp->rss_ind_tbl[i];
11343 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11345 struct tg3 *tp = netdev_priv(dev);
11348 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11349 tp->rss_ind_tbl[i] = indir[i];
11351 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
	/* It is legal to write the indirection
	 * table while the device is running.
	 */
11357 tg3_full_lock(tp, 0);
11358 tg3_rss_write_indir_tbl(tp);
11359 tg3_full_unlock(tp);
11364 static void tg3_get_channels(struct net_device *dev,
11365 struct ethtool_channels *channel)
11367 struct tg3 *tp = netdev_priv(dev);
11368 u32 deflt_qs = netif_get_num_default_rss_queues();
11370 channel->max_rx = tp->rxq_max;
11371 channel->max_tx = tp->txq_max;
11373 if (netif_running(dev)) {
11374 channel->rx_count = tp->rxq_cnt;
11375 channel->tx_count = tp->txq_cnt;
11378 channel->rx_count = tp->rxq_req;
11380 channel->rx_count = min(deflt_qs, tp->rxq_max);
11383 channel->tx_count = tp->txq_req;
11385 channel->tx_count = min(deflt_qs, tp->txq_max);
11389 static int tg3_set_channels(struct net_device *dev,
11390 struct ethtool_channels *channel)
11392 struct tg3 *tp = netdev_priv(dev);
11394 if (!tg3_flag(tp, SUPPORT_MSIX))
11395 return -EOPNOTSUPP;
11397 if (channel->rx_count > tp->rxq_max ||
11398 channel->tx_count > tp->txq_max)
11401 tp->rxq_req = channel->rx_count;
11402 tp->txq_req = channel->tx_count;
11404 if (!netif_running(dev))
11409 netif_carrier_off(dev);
11411 tg3_start(tp, true, false);
11416 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11418 switch (stringset) {
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11426 WARN_ON(1); /* we need a WARN() */
11431 static int tg3_set_phys_id(struct net_device *dev,
11432 enum ethtool_phys_id_state state)
11434 struct tg3 *tp = netdev_priv(dev);
11436 if (!netif_running(tp->dev))
11440 case ETHTOOL_ID_ACTIVE:
11441 return 1; /* cycle on/off once per second */
11443 case ETHTOOL_ID_ON:
11444 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11445 LED_CTRL_1000MBPS_ON |
11446 LED_CTRL_100MBPS_ON |
11447 LED_CTRL_10MBPS_ON |
11448 LED_CTRL_TRAFFIC_OVERRIDE |
11449 LED_CTRL_TRAFFIC_BLINK |
11450 LED_CTRL_TRAFFIC_LED);
11453 case ETHTOOL_ID_OFF:
11454 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11455 LED_CTRL_TRAFFIC_OVERRIDE);
11458 case ETHTOOL_ID_INACTIVE:
11459 tw32(MAC_LED_CTRL, tp->led_ctrl);
11466 static void tg3_get_ethtool_stats(struct net_device *dev,
11467 struct ethtool_stats *estats, u64 *tmp_stats)
11469 struct tg3 *tp = netdev_priv(dev);
11472 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11474 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11477 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11481 u32 offset = 0, len = 0;
11484 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11487 if (magic == TG3_EEPROM_MAGIC) {
11488 for (offset = TG3_NVM_DIR_START;
11489 offset < TG3_NVM_DIR_END;
11490 offset += TG3_NVM_DIRENT_SIZE) {
11491 if (tg3_nvram_read(tp, offset, &val))
11494 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11495 TG3_NVM_DIRTYPE_EXTVPD)
11499 if (offset != TG3_NVM_DIR_END) {
11500 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11501 if (tg3_nvram_read(tp, offset + 4, &offset))
11504 offset = tg3_nvram_logical_addr(tp, offset);
11508 if (!offset || !len) {
11509 offset = TG3_NVM_VPD_OFF;
11510 len = TG3_NVM_VPD_LEN;
11513 buf = kmalloc(len, GFP_KERNEL);
11517 if (magic == TG3_EEPROM_MAGIC) {
11518 for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
11523 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11529 unsigned int pos = 0;
11531 ptr = (u8 *)&buf[0];
11532 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11533 cnt = pci_read_vpd(tp->pdev, pos,
11535 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11553 #define NVRAM_TEST_SIZE 0x100
11554 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11555 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11556 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11557 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11558 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11559 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11560 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11561 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11563 static int tg3_test_nvram(struct tg3 *tp)
11565 u32 csum, magic, len;
11567 int i, j, k, err = 0, size;
11569 if (tg3_flag(tp, NO_NVRAM))
11572 if (tg3_nvram_read(tp, 0, &magic) != 0)
11575 if (magic == TG3_EEPROM_MAGIC)
11576 size = NVRAM_TEST_SIZE;
11577 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11578 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11579 TG3_EEPROM_SB_FORMAT_1) {
11580 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11581 case TG3_EEPROM_SB_REVISION_0:
11582 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11584 case TG3_EEPROM_SB_REVISION_2:
11585 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11587 case TG3_EEPROM_SB_REVISION_3:
11588 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11590 case TG3_EEPROM_SB_REVISION_4:
11591 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11593 case TG3_EEPROM_SB_REVISION_5:
11594 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11596 case TG3_EEPROM_SB_REVISION_6:
11597 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11604 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11605 size = NVRAM_SELFBOOT_HW_SIZE;
11609 buf = kmalloc(size, GFP_KERNEL);
11614 for (i = 0, j = 0; i < size; i += 4, j++) {
11615 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11622 /* Selfboot format */
11623 magic = be32_to_cpu(buf[0]);
11624 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11625 TG3_EEPROM_MAGIC_FW) {
11626 u8 *buf8 = (u8 *) buf, csum8 = 0;
11628 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11629 TG3_EEPROM_SB_REVISION_2) {
11630 /* For rev 2, the csum doesn't include the MBA. */
11631 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11633 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11636 for (i = 0; i < size; i++)
11649 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11650 TG3_EEPROM_MAGIC_HW) {
11651 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11652 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11653 u8 *buf8 = (u8 *) buf;
11655 /* Separate the parity bits and the data bytes. */
11656 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11657 if ((i == 0) || (i == 8)) {
11661 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11662 parity[k++] = buf8[i] & msk;
11664 } else if (i == 16) {
11668 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11669 parity[k++] = buf8[i] & msk;
11672 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11673 parity[k++] = buf8[i] & msk;
11676 data[j++] = buf8[i];
11680 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11681 u8 hw8 = hweight8(data[i]);
11683 if ((hw8 & 0x1) && parity[i])
11685 else if (!(hw8 & 0x1) && !parity[i])
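/* Worked example of the check above (illustrative): the scheme is odd
 * parity over each data byte plus its stored parity bit.  If data[i] is
 * 0xa5, hweight8() returns 4 (an even count of one-bits), so parity[i]
 * must be nonzero to make the combined count odd; an even weight with a
 * clear parity bit, or an odd weight with a set one, fails the test.
 */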
11694 /* Bootstrap checksum at offset 0x10 */
11695 csum = calc_crc((unsigned char *) buf, 0x10);
11696 if (csum != le32_to_cpu(buf[0x10/4]))
11699 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11700 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11701 if (csum != le32_to_cpu(buf[0xfc/4]))
11706 buf = tg3_vpd_readblock(tp, &len);
11710 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11712 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11716 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11719 i += PCI_VPD_LRDT_TAG_SIZE;
11720 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11721 PCI_VPD_RO_KEYWORD_CHKSUM);
11725 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11727 for (i = 0; i <= j; i++)
11728 csum8 += ((u8 *)buf)[i];
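/* Note (summary of the PCI VPD convention): the byte stored under the RV
 * keyword is chosen so that every byte from the start of the VPD block up
 * to and including it sums to zero modulo 256; a nonzero csum8 from the
 * loop above therefore indicates a corrupt read-only section.
 */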
11742 #define TG3_SERDES_TIMEOUT_SEC 2
11743 #define TG3_COPPER_TIMEOUT_SEC 6
11745 static int tg3_test_link(struct tg3 *tp)
11749 if (!netif_running(tp->dev))
11752 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11753 max = TG3_SERDES_TIMEOUT_SEC;
11755 max = TG3_COPPER_TIMEOUT_SEC;
11757 for (i = 0; i < max; i++) {
11758 if (netif_carrier_ok(tp->dev))
11761 if (msleep_interruptible(1000))
11768 /* Only test the commonly used registers */
11769 static int tg3_test_registers(struct tg3 *tp)
11771 int i, is_5705, is_5750;
11772 u32 offset, read_mask, write_mask, val, save_val, read_val;
11776 #define TG3_FL_5705 0x1
11777 #define TG3_FL_NOT_5705 0x2
11778 #define TG3_FL_NOT_5788 0x4
11779 #define TG3_FL_NOT_5750 0x8
11783 /* MAC Control Registers */
11784 { MAC_MODE, TG3_FL_NOT_5705,
11785 0x00000000, 0x00ef6f8c },
11786 { MAC_MODE, TG3_FL_5705,
11787 0x00000000, 0x01ef6b8c },
11788 { MAC_STATUS, TG3_FL_NOT_5705,
11789 0x03800107, 0x00000000 },
11790 { MAC_STATUS, TG3_FL_5705,
11791 0x03800100, 0x00000000 },
11792 { MAC_ADDR_0_HIGH, 0x0000,
11793 0x00000000, 0x0000ffff },
11794 { MAC_ADDR_0_LOW, 0x0000,
11795 0x00000000, 0xffffffff },
11796 { MAC_RX_MTU_SIZE, 0x0000,
11797 0x00000000, 0x0000ffff },
11798 { MAC_TX_MODE, 0x0000,
11799 0x00000000, 0x00000070 },
11800 { MAC_TX_LENGTHS, 0x0000,
11801 0x00000000, 0x00003fff },
11802 { MAC_RX_MODE, TG3_FL_NOT_5705,
11803 0x00000000, 0x000007fc },
11804 { MAC_RX_MODE, TG3_FL_5705,
11805 0x00000000, 0x000007dc },
11806 { MAC_HASH_REG_0, 0x0000,
11807 0x00000000, 0xffffffff },
11808 { MAC_HASH_REG_1, 0x0000,
11809 0x00000000, 0xffffffff },
11810 { MAC_HASH_REG_2, 0x0000,
11811 0x00000000, 0xffffffff },
11812 { MAC_HASH_REG_3, 0x0000,
11813 0x00000000, 0xffffffff },
11815 /* Receive Data and Receive BD Initiator Control Registers. */
11816 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11817 0x00000000, 0xffffffff },
11818 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11819 0x00000000, 0xffffffff },
11820 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11821 0x00000000, 0x00000003 },
11822 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11823 0x00000000, 0xffffffff },
11824 { RCVDBDI_STD_BD+0, 0x0000,
11825 0x00000000, 0xffffffff },
11826 { RCVDBDI_STD_BD+4, 0x0000,
11827 0x00000000, 0xffffffff },
11828 { RCVDBDI_STD_BD+8, 0x0000,
11829 0x00000000, 0xffff0002 },
11830 { RCVDBDI_STD_BD+0xc, 0x0000,
11831 0x00000000, 0xffffffff },
11833 /* Receive BD Initiator Control Registers. */
11834 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11835 0x00000000, 0xffffffff },
11836 { RCVBDI_STD_THRESH, TG3_FL_5705,
11837 0x00000000, 0x000003ff },
11838 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11839 0x00000000, 0xffffffff },
11841 /* Host Coalescing Control Registers. */
11842 { HOSTCC_MODE, TG3_FL_NOT_5705,
11843 0x00000000, 0x00000004 },
11844 { HOSTCC_MODE, TG3_FL_5705,
11845 0x00000000, 0x000000f6 },
11846 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11847 0x00000000, 0xffffffff },
11848 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11849 0x00000000, 0x000003ff },
11850 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11851 0x00000000, 0xffffffff },
11852 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11853 0x00000000, 0x000003ff },
11854 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11855 0x00000000, 0xffffffff },
11856 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11857 0x00000000, 0x000000ff },
11858 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11859 0x00000000, 0xffffffff },
11860 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11861 0x00000000, 0x000000ff },
11862 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11863 0x00000000, 0xffffffff },
11864 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11865 0x00000000, 0xffffffff },
11866 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11867 0x00000000, 0xffffffff },
11868 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11869 0x00000000, 0x000000ff },
11870 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11871 0x00000000, 0xffffffff },
11872 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11873 0x00000000, 0x000000ff },
11874 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11875 0x00000000, 0xffffffff },
11876 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11877 0x00000000, 0xffffffff },
11878 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11879 0x00000000, 0xffffffff },
11880 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11881 0x00000000, 0xffffffff },
11882 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11883 0x00000000, 0xffffffff },
11884 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11885 0xffffffff, 0x00000000 },
11886 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11887 0xffffffff, 0x00000000 },
11889 /* Buffer Manager Control Registers. */
11890 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11891 0x00000000, 0x007fff80 },
11892 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11893 0x00000000, 0x007fffff },
11894 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11895 0x00000000, 0x0000003f },
11896 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11897 0x00000000, 0x000001ff },
11898 { BUFMGR_MB_HIGH_WATER, 0x0000,
11899 0x00000000, 0x000001ff },
11900 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11901 0xffffffff, 0x00000000 },
11902 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11903 0xffffffff, 0x00000000 },
11905 /* Mailbox Registers */
11906 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11907 0x00000000, 0x000001ff },
11908 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11909 0x00000000, 0x000001ff },
11910 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11911 0x00000000, 0x000007ff },
11912 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11913 0x00000000, 0x000001ff },
11915 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11918 is_5705 = is_5750 = 0;
11919 if (tg3_flag(tp, 5705_PLUS)) {
11921 if (tg3_flag(tp, 5750_PLUS))
11925 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11926 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11929 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11932 if (tg3_flag(tp, IS_5788) &&
11933 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11936 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11939 offset = (u32) reg_tbl[i].offset;
11940 read_mask = reg_tbl[i].read_mask;
11941 write_mask = reg_tbl[i].write_mask;
11943 /* Save the original register content */
11944 save_val = tr32(offset);
11946 /* Determine the read-only value. */
11947 read_val = save_val & read_mask;
11949 /* Write zero to the register, then make sure the read-only bits
11950 * are not changed and the read/write bits are all zeros.
11954 val = tr32(offset);
11956 /* Test the read-only and read/write bits. */
11957 if (((val & read_mask) != read_val) || (val & write_mask))
11960 /* Write ones to all the bits defined by RdMask and WrMask, then
11961 * make sure the read-only bits are not changed and the
11962 * read/write bits are all ones.
11964 tw32(offset, read_mask | write_mask);
11966 val = tr32(offset);
11968 /* Test the read-only bits. */
11969 if ((val & read_mask) != read_val)
11972 /* Test the read/write bits. */
11973 if ((val & write_mask) != write_mask)
11976 tw32(offset, save_val);
11982 if (netif_msg_hw(tp))
11983 netdev_err(tp->dev,
11984 "Register test failed at offset %x\n", offset);
11985 tw32(offset, save_val);
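/* Illustrative sketch, not part of the driver: the per-register mask test
 * above in miniature.  reg_read()/reg_write() are hypothetical stand-ins
 * for tr32()/tw32().  For an entry with read-only mask R and read/write
 * mask W, writing 0 and then R|W must leave the R bits untouched and
 * latch exactly the W bits.
 */
static int mask_test_one(u32 off, u32 r_mask, u32 w_mask)
{
	u32 save = reg_read(off);		/* original contents */
	u32 ro = save & r_mask;			/* expected read-only value */
	int bad = 0;

	reg_write(off, 0);			/* phase 1: all zeros */
	bad |= (reg_read(off) & r_mask) != ro;	/* RO bits must survive */
	bad |= (reg_read(off) & w_mask) != 0;	/* RW bits must clear */

	reg_write(off, r_mask | w_mask);	/* phase 2: all ones */
	bad |= (reg_read(off) & r_mask) != ro;	/* RO bits still intact */
	bad |= (reg_read(off) & w_mask) != w_mask; /* RW bits must latch */

	reg_write(off, save);			/* restore original value */
	return bad ? -EIO : 0;
}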
11989 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11991 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
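/* Illustrative note on the pattern choice: all-zeros and all-ones catch
 * bits stuck low or high, while the mixed 0xaa55a55a pattern alternates
 * neighboring bits to expose shorted or cross-coupled data lines.
 */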
11995 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11996 for (j = 0; j < len; j += 4) {
11999 tg3_write_mem(tp, offset + j, test_pattern[i]);
12000 tg3_read_mem(tp, offset + j, &val);
12001 if (val != test_pattern[i])
12008 static int tg3_test_memory(struct tg3 *tp)
12010 static struct mem_entry {
12013 } mem_tbl_570x[] = {
12014 { 0x00000000, 0x00b50},
12015 { 0x00002000, 0x1c000},
12016 { 0xffffffff, 0x00000}
12017 }, mem_tbl_5705[] = {
12018 { 0x00000100, 0x0000c},
12019 { 0x00000200, 0x00008},
12020 { 0x00004000, 0x00800},
12021 { 0x00006000, 0x01000},
12022 { 0x00008000, 0x02000},
12023 { 0x00010000, 0x0e000},
12024 { 0xffffffff, 0x00000}
12025 }, mem_tbl_5755[] = {
12026 { 0x00000200, 0x00008},
12027 { 0x00004000, 0x00800},
12028 { 0x00006000, 0x00800},
12029 { 0x00008000, 0x02000},
12030 { 0x00010000, 0x0c000},
12031 { 0xffffffff, 0x00000}
12032 }, mem_tbl_5906[] = {
12033 { 0x00000200, 0x00008},
12034 { 0x00004000, 0x00400},
12035 { 0x00006000, 0x00400},
12036 { 0x00008000, 0x01000},
12037 { 0x00010000, 0x01000},
12038 { 0xffffffff, 0x00000}
12039 }, mem_tbl_5717[] = {
12040 { 0x00000200, 0x00008},
12041 { 0x00010000, 0x0a000},
12042 { 0x00020000, 0x13c00},
12043 { 0xffffffff, 0x00000}
12044 }, mem_tbl_57765[] = {
12045 { 0x00000200, 0x00008},
12046 { 0x00004000, 0x00800},
12047 { 0x00006000, 0x09800},
12048 { 0x00010000, 0x0a000},
12049 { 0xffffffff, 0x00000}
12051 struct mem_entry *mem_tbl;
12055 if (tg3_flag(tp, 5717_PLUS))
12056 mem_tbl = mem_tbl_5717;
12057 else if (tg3_flag(tp, 57765_CLASS))
12058 mem_tbl = mem_tbl_57765;
12059 else if (tg3_flag(tp, 5755_PLUS))
12060 mem_tbl = mem_tbl_5755;
12061 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12062 mem_tbl = mem_tbl_5906;
12063 else if (tg3_flag(tp, 5705_PLUS))
12064 mem_tbl = mem_tbl_5705;
12066 mem_tbl = mem_tbl_570x;
12068 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12069 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12077 #define TG3_TSO_MSS 500
12079 #define TG3_TSO_IP_HDR_LEN 20
12080 #define TG3_TSO_TCP_HDR_LEN 20
12081 #define TG3_TSO_TCP_OPT_LEN 12
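/* Note on the canned header below (illustrative summary): its bytes form
 * a 20-byte IPv4 header (0x45, ...) followed by a TCP header whose
 * data-offset nibble is 8, i.e. 32 bytes including 12 bytes of options -
 * matching the TG3_TSO_*_LEN sizes above.  The addresses and ports in it
 * are placeholders used only for the loopback self-test.
 */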
12083 static const u8 tg3_tso_header[] = {
12085 0x45, 0x00, 0x00, 0x00,
12086 0x00, 0x00, 0x40, 0x00,
12087 0x40, 0x06, 0x00, 0x00,
12088 0x0a, 0x00, 0x00, 0x01,
12089 0x0a, 0x00, 0x00, 0x02,
12090 0x0d, 0x00, 0xe0, 0x00,
12091 0x00, 0x00, 0x01, 0x00,
12092 0x00, 0x00, 0x02, 0x00,
12093 0x80, 0x10, 0x10, 0x00,
12094 0x14, 0x09, 0x00, 0x00,
12095 0x01, 0x01, 0x08, 0x0a,
12096 0x11, 0x11, 0x11, 0x11,
12097 0x11, 0x11, 0x11, 0x11,
12100 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12102 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12103 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12105 struct sk_buff *skb;
12106 u8 *tx_data, *rx_data;
12108 int num_pkts, tx_len, rx_len, i, err;
12109 struct tg3_rx_buffer_desc *desc;
12110 struct tg3_napi *tnapi, *rnapi;
12111 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12113 tnapi = &tp->napi[0];
12114 rnapi = &tp->napi[0];
12115 if (tp->irq_cnt > 1) {
12116 if (tg3_flag(tp, ENABLE_RSS))
12117 rnapi = &tp->napi[1];
12118 if (tg3_flag(tp, ENABLE_TSS))
12119 tnapi = &tp->napi[1];
12121 coal_now = tnapi->coal_now | rnapi->coal_now;
12126 skb = netdev_alloc_skb(tp->dev, tx_len);
12130 tx_data = skb_put(skb, tx_len);
12131 memcpy(tx_data, tp->dev->dev_addr, 6);
12132 memset(tx_data + 6, 0x0, 8);
12134 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12136 if (tso_loopback) {
12137 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12139 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12140 TG3_TSO_TCP_OPT_LEN;
12142 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12143 sizeof(tg3_tso_header));
12146 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12147 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12149 /* Set the total length field in the IP header */
12150 iph->tot_len = htons((u16)(mss + hdr_len));
12152 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12153 TXD_FLAG_CPU_POST_DMA);
12155 if (tg3_flag(tp, HW_TSO_1) ||
12156 tg3_flag(tp, HW_TSO_2) ||
12157 tg3_flag(tp, HW_TSO_3)) {
12159 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12160 th = (struct tcphdr *)&tx_data[val];
12163 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12165 if (tg3_flag(tp, HW_TSO_3)) {
12166 mss |= (hdr_len & 0xc) << 12;
12167 if (hdr_len & 0x10)
12168 base_flags |= 0x00000010;
12169 base_flags |= (hdr_len & 0x3e0) << 5;
12170 } else if (tg3_flag(tp, HW_TSO_2))
12171 mss |= hdr_len << 9;
12172 else if (tg3_flag(tp, HW_TSO_1) ||
12173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12174 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12176 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
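			/* Worked example (illustrative): with hdr_len = 52
			 * (0x34) from the sizes above, the HW_TSO_3 encoding
			 * scatters the header length as
			 * mss |= (0x34 & 0xc) << 12 = 0x4000, base_flags bit
			 * 4 set because 0x34 & 0x10 is nonzero, and
			 * base_flags |= (0x34 & 0x3e0) << 5 = 0x400.  The
			 * simpler HW_TSO_2 encoding is just mss |= 0x34 << 9.
			 */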
12179 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12182 data_off = ETH_HLEN;
12184 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12185 tx_len > VLAN_ETH_FRAME_LEN)
12186 base_flags |= TXD_FLAG_JMB_PKT;
12189 for (i = data_off; i < tx_len; i++)
12190 tx_data[i] = (u8) (i & 0xff);
12192 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12193 if (pci_dma_mapping_error(tp->pdev, map)) {
12194 dev_kfree_skb(skb);
12198 val = tnapi->tx_prod;
12199 tnapi->tx_buffers[val].skb = skb;
12200 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12202 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12207 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12209 budget = tg3_tx_avail(tnapi);
12210 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12211 base_flags | TXD_FLAG_END, mss, 0)) {
12212 tnapi->tx_buffers[val].skb = NULL;
12213 dev_kfree_skb(skb);
12219 /* Sync BD data before updating mailbox */
12222 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12223 tr32_mailbox(tnapi->prodmbox);
12227 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12228 for (i = 0; i < 35; i++) {
12229 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12234 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12235 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12236 if ((tx_idx == tnapi->tx_prod) &&
12237 (rx_idx == (rx_start_idx + num_pkts)))
12241 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12242 dev_kfree_skb(skb);
12244 if (tx_idx != tnapi->tx_prod)
12247 if (rx_idx != rx_start_idx + num_pkts)
12251 while (rx_idx != rx_start_idx) {
12252 desc = &rnapi->rx_rcb[rx_start_idx++];
12253 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12254 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12256 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12257 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12260 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12263 if (!tso_loopback) {
12264 if (rx_len != tx_len)
12267 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12268 if (opaque_key != RXD_OPAQUE_RING_STD)
12271 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12274 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12275 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12276 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12280 if (opaque_key == RXD_OPAQUE_RING_STD) {
12281 rx_data = tpr->rx_std_buffers[desc_idx].data;
12282 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12284 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12285 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12286 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12291 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12292 PCI_DMA_FROMDEVICE);
12294 rx_data += TG3_RX_OFFSET(tp);
12295 for (i = data_off; i < rx_len; i++, val++) {
12296 if (*(rx_data + i) != (u8) (val & 0xff))
12303 /* tg3_free_rings will unmap and free the rx_data */
12308 #define TG3_STD_LOOPBACK_FAILED 1
12309 #define TG3_JMB_LOOPBACK_FAILED 2
12310 #define TG3_TSO_LOOPBACK_FAILED 4
12311 #define TG3_LOOPBACK_FAILED \
12312 (TG3_STD_LOOPBACK_FAILED | \
12313 TG3_JMB_LOOPBACK_FAILED | \
12314 TG3_TSO_LOOPBACK_FAILED)
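/* Result layout used below (summary): data[0] accumulates MAC-loopback
 * failures, data[1] internal-PHY-loopback failures and data[2] external
 * loopback failures; each word is a bitmask of the TG3_*_LOOPBACK_FAILED
 * flags above, one bit per standard/jumbo/TSO packet variant.
 */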
12316 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12320 u32 jmb_pkt_sz = 9000;
12323 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12325 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12326 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12328 if (!netif_running(tp->dev)) {
12329 data[0] = TG3_LOOPBACK_FAILED;
12330 data[1] = TG3_LOOPBACK_FAILED;
12332 data[2] = TG3_LOOPBACK_FAILED;
12336 err = tg3_reset_hw(tp, 1);
12338 data[0] = TG3_LOOPBACK_FAILED;
12339 data[1] = TG3_LOOPBACK_FAILED;
12341 data[2] = TG3_LOOPBACK_FAILED;
12345 if (tg3_flag(tp, ENABLE_RSS)) {
12348 /* Reroute all rx packets to the 1st queue */
12349 for (i = MAC_RSS_INDIR_TBL_0;
12350 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12354 /* HW erratum - MAC loopback fails in some cases on 5780.
12355 * Normal traffic and PHY loopback are not affected by this
12356 * erratum. Also, the MAC loopback test is deprecated for
12357 * all newer ASIC revisions.
12359 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12360 !tg3_flag(tp, CPMU_PRESENT)) {
12361 tg3_mac_loopback(tp, true);
12363 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12364 data[0] |= TG3_STD_LOOPBACK_FAILED;
12366 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12367 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12368 data[0] |= TG3_JMB_LOOPBACK_FAILED;
12370 tg3_mac_loopback(tp, false);
12373 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12374 !tg3_flag(tp, USE_PHYLIB)) {
12377 tg3_phy_lpbk_set(tp, 0, false);
12379 /* Wait for link */
12380 for (i = 0; i < 100; i++) {
12381 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12386 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12387 data[1] |= TG3_STD_LOOPBACK_FAILED;
12388 if (tg3_flag(tp, TSO_CAPABLE) &&
12389 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12390 data[1] |= TG3_TSO_LOOPBACK_FAILED;
12391 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12392 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12393 data[1] |= TG3_JMB_LOOPBACK_FAILED;
12396 tg3_phy_lpbk_set(tp, 0, true);
12398 /* All link indications report up, but the hardware
12399 * isn't really ready for about 20 msec. Double it to be sure. */
12404 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12405 data[2] |= TG3_STD_LOOPBACK_FAILED;
12406 if (tg3_flag(tp, TSO_CAPABLE) &&
12407 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12408 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12409 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12410 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12411 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12414 /* Re-enable gphy autopowerdown. */
12415 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12416 tg3_phy_toggle_apd(tp, true);
12419 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12422 tp->phy_flags |= eee_cap;
12427 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12430 struct tg3 *tp = netdev_priv(dev);
12431 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12433 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12434 tg3_power_up(tp)) {
12435 etest->flags |= ETH_TEST_FL_FAILED;
12436 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12440 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12442 if (tg3_test_nvram(tp) != 0) {
12443 etest->flags |= ETH_TEST_FL_FAILED;
12446 if (!doextlpbk && tg3_test_link(tp)) {
12447 etest->flags |= ETH_TEST_FL_FAILED;
12450 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12451 int err, err2 = 0, irq_sync = 0;
12453 if (netif_running(dev)) {
12455 tg3_netif_stop(tp);
12459 tg3_full_lock(tp, irq_sync);
12461 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12462 err = tg3_nvram_lock(tp);
12463 tg3_halt_cpu(tp, RX_CPU_BASE);
12464 if (!tg3_flag(tp, 5705_PLUS))
12465 tg3_halt_cpu(tp, TX_CPU_BASE);
12467 tg3_nvram_unlock(tp);
12469 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12472 if (tg3_test_registers(tp) != 0) {
12473 etest->flags |= ETH_TEST_FL_FAILED;
12477 if (tg3_test_memory(tp) != 0) {
12478 etest->flags |= ETH_TEST_FL_FAILED;
12483 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12485 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12486 etest->flags |= ETH_TEST_FL_FAILED;
12488 tg3_full_unlock(tp);
12490 if (tg3_test_interrupt(tp) != 0) {
12491 etest->flags |= ETH_TEST_FL_FAILED;
12495 tg3_full_lock(tp, 0);
12497 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12498 if (netif_running(dev)) {
12499 tg3_flag_set(tp, INIT_COMPLETE);
12500 err2 = tg3_restart_hw(tp, 1);
12502 tg3_netif_start(tp);
12505 tg3_full_unlock(tp);
12507 if (irq_sync && !err2)
12510 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12511 tg3_power_down(tp);
12515 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12517 struct mii_ioctl_data *data = if_mii(ifr);
12518 struct tg3 *tp = netdev_priv(dev);
12521 if (tg3_flag(tp, USE_PHYLIB)) {
12522 struct phy_device *phydev;
12523 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12525 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12526 return phy_mii_ioctl(phydev, ifr, cmd);
12531 data->phy_id = tp->phy_addr;
12534 case SIOCGMIIREG: {
12537 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12538 break; /* We have no PHY */
12540 if (!netif_running(dev))
12543 spin_lock_bh(&tp->lock);
12544 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12545 spin_unlock_bh(&tp->lock);
12547 data->val_out = mii_regval;
12553 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12554 break; /* We have no PHY */
12556 if (!netif_running(dev))
12559 spin_lock_bh(&tp->lock);
12560 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12561 spin_unlock_bh(&tp->lock);
12569 return -EOPNOTSUPP;
12572 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12574 struct tg3 *tp = netdev_priv(dev);
12576 memcpy(ec, &tp->coal, sizeof(*ec));
12580 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12582 struct tg3 *tp = netdev_priv(dev);
12583 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12584 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12586 if (!tg3_flag(tp, 5705_PLUS)) {
12587 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12588 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12589 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12590 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12593 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12594 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12595 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12596 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12597 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12598 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12599 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12600 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12601 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12602 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12605 /* No rx interrupts will be generated if both are zero */
12606 if ((ec->rx_coalesce_usecs == 0) &&
12607 (ec->rx_max_coalesced_frames == 0))
12610 /* No tx interrupts will be generated if both are zero */
12611 if ((ec->tx_coalesce_usecs == 0) &&
12612 (ec->tx_max_coalesced_frames == 0))
12615 /* Only copy relevant parameters, ignore all others. */
12616 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12617 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12618 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12619 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12620 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12621 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12622 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12623 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12624 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
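	/* Usage sketch (illustrative, "eth0" is a placeholder): these
	 * fields map directly onto the ethtool -C knobs, e.g.
	 *
	 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
	 *
	 * stores 20 in rx_coalesce_usecs and 5 in
	 * rx_max_coalesced_frames, subject to the range checks above.
	 */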
12626 if (netif_running(dev)) {
12627 tg3_full_lock(tp, 0);
12628 __tg3_set_coalesce(tp, &tp->coal);
12629 tg3_full_unlock(tp);
12634 static const struct ethtool_ops tg3_ethtool_ops = {
12635 .get_settings = tg3_get_settings,
12636 .set_settings = tg3_set_settings,
12637 .get_drvinfo = tg3_get_drvinfo,
12638 .get_regs_len = tg3_get_regs_len,
12639 .get_regs = tg3_get_regs,
12640 .get_wol = tg3_get_wol,
12641 .set_wol = tg3_set_wol,
12642 .get_msglevel = tg3_get_msglevel,
12643 .set_msglevel = tg3_set_msglevel,
12644 .nway_reset = tg3_nway_reset,
12645 .get_link = ethtool_op_get_link,
12646 .get_eeprom_len = tg3_get_eeprom_len,
12647 .get_eeprom = tg3_get_eeprom,
12648 .set_eeprom = tg3_set_eeprom,
12649 .get_ringparam = tg3_get_ringparam,
12650 .set_ringparam = tg3_set_ringparam,
12651 .get_pauseparam = tg3_get_pauseparam,
12652 .set_pauseparam = tg3_set_pauseparam,
12653 .self_test = tg3_self_test,
12654 .get_strings = tg3_get_strings,
12655 .set_phys_id = tg3_set_phys_id,
12656 .get_ethtool_stats = tg3_get_ethtool_stats,
12657 .get_coalesce = tg3_get_coalesce,
12658 .set_coalesce = tg3_set_coalesce,
12659 .get_sset_count = tg3_get_sset_count,
12660 .get_rxnfc = tg3_get_rxnfc,
12661 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12662 .get_rxfh_indir = tg3_get_rxfh_indir,
12663 .set_rxfh_indir = tg3_set_rxfh_indir,
12664 .get_channels = tg3_get_channels,
12665 .set_channels = tg3_set_channels,
12666 .get_ts_info = ethtool_op_get_ts_info,
12669 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12670 struct rtnl_link_stats64 *stats)
12672 struct tg3 *tp = netdev_priv(dev);
12674 spin_lock_bh(&tp->lock);
12675 if (!tp->hw_stats) {
12676 spin_unlock_bh(&tp->lock);
12677 return &tp->net_stats_prev;
12680 tg3_get_nstats(tp, stats);
12681 spin_unlock_bh(&tp->lock);
12686 static void tg3_set_rx_mode(struct net_device *dev)
12688 struct tg3 *tp = netdev_priv(dev);
12690 if (!netif_running(dev))
12693 tg3_full_lock(tp, 0);
12694 __tg3_set_rx_mode(dev);
12695 tg3_full_unlock(tp);
12698 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12701 dev->mtu = new_mtu;
12703 if (new_mtu > ETH_DATA_LEN) {
12704 if (tg3_flag(tp, 5780_CLASS)) {
12705 netdev_update_features(dev);
12706 tg3_flag_clear(tp, TSO_CAPABLE);
12708 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12711 if (tg3_flag(tp, 5780_CLASS)) {
12712 tg3_flag_set(tp, TSO_CAPABLE);
12713 netdev_update_features(dev);
12715 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12719 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12721 struct tg3 *tp = netdev_priv(dev);
12722 int err, reset_phy = 0;
12724 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12727 if (!netif_running(dev)) {
12728 /* We'll just catch it later when the device is brought up. */
12731 tg3_set_mtu(dev, tp, new_mtu);
12737 tg3_netif_stop(tp);
12739 tg3_full_lock(tp, 1);
12741 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12743 tg3_set_mtu(dev, tp, new_mtu);
12745 /* Reset PHY, otherwise the read DMA engine will be in a mode that
12746 * breaks all requests to 256 bytes.
12748 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12751 err = tg3_restart_hw(tp, reset_phy);
12754 tg3_netif_start(tp);
12756 tg3_full_unlock(tp);
12764 static const struct net_device_ops tg3_netdev_ops = {
12765 .ndo_open = tg3_open,
12766 .ndo_stop = tg3_close,
12767 .ndo_start_xmit = tg3_start_xmit,
12768 .ndo_get_stats64 = tg3_get_stats64,
12769 .ndo_validate_addr = eth_validate_addr,
12770 .ndo_set_rx_mode = tg3_set_rx_mode,
12771 .ndo_set_mac_address = tg3_set_mac_addr,
12772 .ndo_do_ioctl = tg3_ioctl,
12773 .ndo_tx_timeout = tg3_tx_timeout,
12774 .ndo_change_mtu = tg3_change_mtu,
12775 .ndo_fix_features = tg3_fix_features,
12776 .ndo_set_features = tg3_set_features,
12777 #ifdef CONFIG_NET_POLL_CONTROLLER
12778 .ndo_poll_controller = tg3_poll_controller,
12782 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12784 u32 cursize, val, magic;
12786 tp->nvram_size = EEPROM_CHIP_SIZE;
12788 if (tg3_nvram_read(tp, 0, &magic) != 0)
12791 if ((magic != TG3_EEPROM_MAGIC) &&
12792 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12793 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12797 * Size the chip by reading offsets at increasing powers of two.
12798 * When we encounter our validation signature, we know the addressing
12799 * has wrapped around, and thus have our chip size.
12803 while (cursize < tp->nvram_size) {
12804 if (tg3_nvram_read(tp, cursize, &val) != 0)
12813 tp->nvram_size = cursize;
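/* Illustrative example of the wrap-around probe above: an EEPROM that
 * decodes only 9 address bits aliases offset 512 back to offset 0, so a
 * read at cursize == 512 returns the magic signature from offset 0 and
 * the probe concludes the part is 512 bytes.
 */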
12816 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12820 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12823 /* Selfboot format */
12824 if (val != TG3_EEPROM_MAGIC) {
12825 tg3_get_eeprom_size(tp);
12829 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12831 /* This is confusing. We want to operate on the
12832 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12833 * call will read from NVRAM and byteswap the data
12834 * according to the byteswapping settings for all
12835 * other register accesses. This ensures the data we
12836 * want will always reside in the lower 16-bits.
12837 * However, the data in NVRAM is in LE format, which
12838 * means the data from the NVRAM read will always be
12839 * opposite the endianness of the CPU. The 16-bit
12840 * byteswap then brings the data to CPU endianness.
12842 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
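		/* Worked example (illustrative): a 512 KB part stores the
		 * halfword 0x0200 (= 512) at offset 0xf2.  Per the comment
		 * above, the raw read delivers it opposite to CPU
		 * endianness, e.g. as 0x0002; swab16() restores 0x0200 and
		 * the multiply yields tp->nvram_size = 512 * 1024.
		 */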
12846 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12849 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12853 nvcfg1 = tr32(NVRAM_CFG1);
12854 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12855 tg3_flag_set(tp, FLASH);
12857 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12858 tw32(NVRAM_CFG1, nvcfg1);
12861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12862 tg3_flag(tp, 5780_CLASS)) {
12863 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12864 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12865 tp->nvram_jedecnum = JEDEC_ATMEL;
12866 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12867 tg3_flag_set(tp, NVRAM_BUFFERED);
12869 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12870 tp->nvram_jedecnum = JEDEC_ATMEL;
12871 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12873 case FLASH_VENDOR_ATMEL_EEPROM:
12874 tp->nvram_jedecnum = JEDEC_ATMEL;
12875 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12876 tg3_flag_set(tp, NVRAM_BUFFERED);
12878 case FLASH_VENDOR_ST:
12879 tp->nvram_jedecnum = JEDEC_ST;
12880 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12881 tg3_flag_set(tp, NVRAM_BUFFERED);
12883 case FLASH_VENDOR_SAIFUN:
12884 tp->nvram_jedecnum = JEDEC_SAIFUN;
12885 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12887 case FLASH_VENDOR_SST_SMALL:
12888 case FLASH_VENDOR_SST_LARGE:
12889 tp->nvram_jedecnum = JEDEC_SST;
12890 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12894 tp->nvram_jedecnum = JEDEC_ATMEL;
12895 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12896 tg3_flag_set(tp, NVRAM_BUFFERED);
12900 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12902 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12903 case FLASH_5752PAGE_SIZE_256:
12904 tp->nvram_pagesize = 256;
12906 case FLASH_5752PAGE_SIZE_512:
12907 tp->nvram_pagesize = 512;
12909 case FLASH_5752PAGE_SIZE_1K:
12910 tp->nvram_pagesize = 1024;
12912 case FLASH_5752PAGE_SIZE_2K:
12913 tp->nvram_pagesize = 2048;
12915 case FLASH_5752PAGE_SIZE_4K:
12916 tp->nvram_pagesize = 4096;
12918 case FLASH_5752PAGE_SIZE_264:
12919 tp->nvram_pagesize = 264;
12921 case FLASH_5752PAGE_SIZE_528:
12922 tp->nvram_pagesize = 528;
12927 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12931 nvcfg1 = tr32(NVRAM_CFG1);
12933 /* NVRAM protection for TPM */
12934 if (nvcfg1 & (1 << 27))
12935 tg3_flag_set(tp, PROTECTED_NVRAM);
12937 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12938 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12939 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12940 tp->nvram_jedecnum = JEDEC_ATMEL;
12941 tg3_flag_set(tp, NVRAM_BUFFERED);
12943 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12944 tp->nvram_jedecnum = JEDEC_ATMEL;
12945 tg3_flag_set(tp, NVRAM_BUFFERED);
12946 tg3_flag_set(tp, FLASH);
12948 case FLASH_5752VENDOR_ST_M45PE10:
12949 case FLASH_5752VENDOR_ST_M45PE20:
12950 case FLASH_5752VENDOR_ST_M45PE40:
12951 tp->nvram_jedecnum = JEDEC_ST;
12952 tg3_flag_set(tp, NVRAM_BUFFERED);
12953 tg3_flag_set(tp, FLASH);
12957 if (tg3_flag(tp, FLASH)) {
12958 tg3_nvram_get_pagesize(tp, nvcfg1);
12960 /* For eeprom, set pagesize to maximum eeprom size */
12961 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12963 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12964 tw32(NVRAM_CFG1, nvcfg1);
12968 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12970 u32 nvcfg1, protect = 0;
12972 nvcfg1 = tr32(NVRAM_CFG1);
12974 /* NVRAM protection for TPM */
12975 if (nvcfg1 & (1 << 27)) {
12976 tg3_flag_set(tp, PROTECTED_NVRAM);
12980 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12982 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12983 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12984 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12985 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12986 tp->nvram_jedecnum = JEDEC_ATMEL;
12987 tg3_flag_set(tp, NVRAM_BUFFERED);
12988 tg3_flag_set(tp, FLASH);
12989 tp->nvram_pagesize = 264;
12990 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12991 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12992 tp->nvram_size = (protect ? 0x3e200 :
12993 TG3_NVRAM_SIZE_512KB);
12994 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12995 tp->nvram_size = (protect ? 0x1f200 :
12996 TG3_NVRAM_SIZE_256KB);
12998 tp->nvram_size = (protect ? 0x1f200 :
12999 TG3_NVRAM_SIZE_128KB);
13001 case FLASH_5752VENDOR_ST_M45PE10:
13002 case FLASH_5752VENDOR_ST_M45PE20:
13003 case FLASH_5752VENDOR_ST_M45PE40:
13004 tp->nvram_jedecnum = JEDEC_ST;
13005 tg3_flag_set(tp, NVRAM_BUFFERED);
13006 tg3_flag_set(tp, FLASH);
13007 tp->nvram_pagesize = 256;
13008 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13009 tp->nvram_size = (protect ?
13010 TG3_NVRAM_SIZE_64KB :
13011 TG3_NVRAM_SIZE_128KB);
13012 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13013 tp->nvram_size = (protect ?
13014 TG3_NVRAM_SIZE_64KB :
13015 TG3_NVRAM_SIZE_256KB);
13017 tp->nvram_size = (protect ?
13018 TG3_NVRAM_SIZE_128KB :
13019 TG3_NVRAM_SIZE_512KB);
13024 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
13028 nvcfg1 = tr32(NVRAM_CFG1);
13030 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13031 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13032 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13033 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13034 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13035 tp->nvram_jedecnum = JEDEC_ATMEL;
13036 tg3_flag_set(tp, NVRAM_BUFFERED);
13037 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13039 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13040 tw32(NVRAM_CFG1, nvcfg1);
13042 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13043 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13044 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13045 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13046 tp->nvram_jedecnum = JEDEC_ATMEL;
13047 tg3_flag_set(tp, NVRAM_BUFFERED);
13048 tg3_flag_set(tp, FLASH);
13049 tp->nvram_pagesize = 264;
13051 case FLASH_5752VENDOR_ST_M45PE10:
13052 case FLASH_5752VENDOR_ST_M45PE20:
13053 case FLASH_5752VENDOR_ST_M45PE40:
13054 tp->nvram_jedecnum = JEDEC_ST;
13055 tg3_flag_set(tp, NVRAM_BUFFERED);
13056 tg3_flag_set(tp, FLASH);
13057 tp->nvram_pagesize = 256;
13062 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
13064 u32 nvcfg1, protect = 0;
13066 nvcfg1 = tr32(NVRAM_CFG1);
13068 /* NVRAM protection for TPM */
13069 if (nvcfg1 & (1 << 27)) {
13070 tg3_flag_set(tp, PROTECTED_NVRAM);
13074 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13076 case FLASH_5761VENDOR_ATMEL_ADB021D:
13077 case FLASH_5761VENDOR_ATMEL_ADB041D:
13078 case FLASH_5761VENDOR_ATMEL_ADB081D:
13079 case FLASH_5761VENDOR_ATMEL_ADB161D:
13080 case FLASH_5761VENDOR_ATMEL_MDB021D:
13081 case FLASH_5761VENDOR_ATMEL_MDB041D:
13082 case FLASH_5761VENDOR_ATMEL_MDB081D:
13083 case FLASH_5761VENDOR_ATMEL_MDB161D:
13084 tp->nvram_jedecnum = JEDEC_ATMEL;
13085 tg3_flag_set(tp, NVRAM_BUFFERED);
13086 tg3_flag_set(tp, FLASH);
13087 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13088 tp->nvram_pagesize = 256;
13090 case FLASH_5761VENDOR_ST_A_M45PE20:
13091 case FLASH_5761VENDOR_ST_A_M45PE40:
13092 case FLASH_5761VENDOR_ST_A_M45PE80:
13093 case FLASH_5761VENDOR_ST_A_M45PE16:
13094 case FLASH_5761VENDOR_ST_M_M45PE20:
13095 case FLASH_5761VENDOR_ST_M_M45PE40:
13096 case FLASH_5761VENDOR_ST_M_M45PE80:
13097 case FLASH_5761VENDOR_ST_M_M45PE16:
13098 tp->nvram_jedecnum = JEDEC_ST;
13099 tg3_flag_set(tp, NVRAM_BUFFERED);
13100 tg3_flag_set(tp, FLASH);
13101 tp->nvram_pagesize = 256;
13106 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13109 case FLASH_5761VENDOR_ATMEL_ADB161D:
13110 case FLASH_5761VENDOR_ATMEL_MDB161D:
13111 case FLASH_5761VENDOR_ST_A_M45PE16:
13112 case FLASH_5761VENDOR_ST_M_M45PE16:
13113 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13115 case FLASH_5761VENDOR_ATMEL_ADB081D:
13116 case FLASH_5761VENDOR_ATMEL_MDB081D:
13117 case FLASH_5761VENDOR_ST_A_M45PE80:
13118 case FLASH_5761VENDOR_ST_M_M45PE80:
13119 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13121 case FLASH_5761VENDOR_ATMEL_ADB041D:
13122 case FLASH_5761VENDOR_ATMEL_MDB041D:
13123 case FLASH_5761VENDOR_ST_A_M45PE40:
13124 case FLASH_5761VENDOR_ST_M_M45PE40:
13125 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13127 case FLASH_5761VENDOR_ATMEL_ADB021D:
13128 case FLASH_5761VENDOR_ATMEL_MDB021D:
13129 case FLASH_5761VENDOR_ST_A_M45PE20:
13130 case FLASH_5761VENDOR_ST_M_M45PE20:
13131 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13137 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13139 tp->nvram_jedecnum = JEDEC_ATMEL;
13140 tg3_flag_set(tp, NVRAM_BUFFERED);
13141 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13144 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
13148 nvcfg1 = tr32(NVRAM_CFG1);
13150 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13151 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13152 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13153 tp->nvram_jedecnum = JEDEC_ATMEL;
13154 tg3_flag_set(tp, NVRAM_BUFFERED);
13155 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13157 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13158 tw32(NVRAM_CFG1, nvcfg1);
13160 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13161 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13162 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13163 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13164 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13165 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13166 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13167 tp->nvram_jedecnum = JEDEC_ATMEL;
13168 tg3_flag_set(tp, NVRAM_BUFFERED);
13169 tg3_flag_set(tp, FLASH);
13171 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13172 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13173 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13174 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13175 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13177 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13178 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13179 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13181 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13182 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13183 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13187 case FLASH_5752VENDOR_ST_M45PE10:
13188 case FLASH_5752VENDOR_ST_M45PE20:
13189 case FLASH_5752VENDOR_ST_M45PE40:
13190 tp->nvram_jedecnum = JEDEC_ST;
13191 tg3_flag_set(tp, NVRAM_BUFFERED);
13192 tg3_flag_set(tp, FLASH);
13194 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13195 case FLASH_5752VENDOR_ST_M45PE10:
13196 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13198 case FLASH_5752VENDOR_ST_M45PE20:
13199 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13201 case FLASH_5752VENDOR_ST_M45PE40:
13202 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13207 tg3_flag_set(tp, NO_NVRAM);
13211 tg3_nvram_get_pagesize(tp, nvcfg1);
13212 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13213 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13217 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13221 nvcfg1 = tr32(NVRAM_CFG1);
13223 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13224 case FLASH_5717VENDOR_ATMEL_EEPROM:
13225 case FLASH_5717VENDOR_MICRO_EEPROM:
13226 tp->nvram_jedecnum = JEDEC_ATMEL;
13227 tg3_flag_set(tp, NVRAM_BUFFERED);
13228 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13230 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13231 tw32(NVRAM_CFG1, nvcfg1);
13233 case FLASH_5717VENDOR_ATMEL_MDB011D:
13234 case FLASH_5717VENDOR_ATMEL_ADB011B:
13235 case FLASH_5717VENDOR_ATMEL_ADB011D:
13236 case FLASH_5717VENDOR_ATMEL_MDB021D:
13237 case FLASH_5717VENDOR_ATMEL_ADB021B:
13238 case FLASH_5717VENDOR_ATMEL_ADB021D:
13239 case FLASH_5717VENDOR_ATMEL_45USPT:
13240 tp->nvram_jedecnum = JEDEC_ATMEL;
13241 tg3_flag_set(tp, NVRAM_BUFFERED);
13242 tg3_flag_set(tp, FLASH);
13244 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13245 case FLASH_5717VENDOR_ATMEL_MDB021D:
13246 /* Detect size with tg3_get_nvram_size() */
13248 case FLASH_5717VENDOR_ATMEL_ADB021B:
13249 case FLASH_5717VENDOR_ATMEL_ADB021D:
13250 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13253 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13257 case FLASH_5717VENDOR_ST_M_M25PE10:
13258 case FLASH_5717VENDOR_ST_A_M25PE10:
13259 case FLASH_5717VENDOR_ST_M_M45PE10:
13260 case FLASH_5717VENDOR_ST_A_M45PE10:
13261 case FLASH_5717VENDOR_ST_M_M25PE20:
13262 case FLASH_5717VENDOR_ST_A_M25PE20:
13263 case FLASH_5717VENDOR_ST_M_M45PE20:
13264 case FLASH_5717VENDOR_ST_A_M45PE20:
13265 case FLASH_5717VENDOR_ST_25USPT:
13266 case FLASH_5717VENDOR_ST_45USPT:
13267 tp->nvram_jedecnum = JEDEC_ST;
13268 tg3_flag_set(tp, NVRAM_BUFFERED);
13269 tg3_flag_set(tp, FLASH);
13271 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13272 case FLASH_5717VENDOR_ST_M_M25PE20:
13273 case FLASH_5717VENDOR_ST_M_M45PE20:
13274 /* Detect size with tg3_get_nvram_size() */
13276 case FLASH_5717VENDOR_ST_A_M25PE20:
13277 case FLASH_5717VENDOR_ST_A_M45PE20:
13278 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13281 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13286 tg3_flag_set(tp, NO_NVRAM);
13290 tg3_nvram_get_pagesize(tp, nvcfg1);
13291 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13292 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13295 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13297 u32 nvcfg1, nvmpinstrp;
13299 nvcfg1 = tr32(NVRAM_CFG1);
13300 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13302 switch (nvmpinstrp) {
13303 case FLASH_5720_EEPROM_HD:
13304 case FLASH_5720_EEPROM_LD:
13305 tp->nvram_jedecnum = JEDEC_ATMEL;
13306 tg3_flag_set(tp, NVRAM_BUFFERED);
13308 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13309 tw32(NVRAM_CFG1, nvcfg1);
13310 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13311 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13313 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13315 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13316 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13317 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13318 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13319 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13320 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13321 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13322 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13323 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13324 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13325 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13326 case FLASH_5720VENDOR_ATMEL_45USPT:
13327 tp->nvram_jedecnum = JEDEC_ATMEL;
13328 tg3_flag_set(tp, NVRAM_BUFFERED);
13329 tg3_flag_set(tp, FLASH);
13331 switch (nvmpinstrp) {
13332 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13333 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13334 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13335 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13337 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13338 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13339 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13340 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13342 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13343 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13344 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13347 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13351 case FLASH_5720VENDOR_M_ST_M25PE10:
13352 case FLASH_5720VENDOR_M_ST_M45PE10:
13353 case FLASH_5720VENDOR_A_ST_M25PE10:
13354 case FLASH_5720VENDOR_A_ST_M45PE10:
13355 case FLASH_5720VENDOR_M_ST_M25PE20:
13356 case FLASH_5720VENDOR_M_ST_M45PE20:
13357 case FLASH_5720VENDOR_A_ST_M25PE20:
13358 case FLASH_5720VENDOR_A_ST_M45PE20:
13359 case FLASH_5720VENDOR_M_ST_M25PE40:
13360 case FLASH_5720VENDOR_M_ST_M45PE40:
13361 case FLASH_5720VENDOR_A_ST_M25PE40:
13362 case FLASH_5720VENDOR_A_ST_M45PE40:
13363 case FLASH_5720VENDOR_M_ST_M25PE80:
13364 case FLASH_5720VENDOR_M_ST_M45PE80:
13365 case FLASH_5720VENDOR_A_ST_M25PE80:
13366 case FLASH_5720VENDOR_A_ST_M45PE80:
13367 case FLASH_5720VENDOR_ST_25USPT:
13368 case FLASH_5720VENDOR_ST_45USPT:
13369 tp->nvram_jedecnum = JEDEC_ST;
13370 tg3_flag_set(tp, NVRAM_BUFFERED);
13371 tg3_flag_set(tp, FLASH);
13373 switch (nvmpinstrp) {
13374 case FLASH_5720VENDOR_M_ST_M25PE20:
13375 case FLASH_5720VENDOR_M_ST_M45PE20:
13376 case FLASH_5720VENDOR_A_ST_M25PE20:
13377 case FLASH_5720VENDOR_A_ST_M45PE20:
13378 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13380 case FLASH_5720VENDOR_M_ST_M25PE40:
13381 case FLASH_5720VENDOR_M_ST_M45PE40:
13382 case FLASH_5720VENDOR_A_ST_M25PE40:
13383 case FLASH_5720VENDOR_A_ST_M45PE40:
13384 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13386 case FLASH_5720VENDOR_M_ST_M25PE80:
13387 case FLASH_5720VENDOR_M_ST_M45PE80:
13388 case FLASH_5720VENDOR_A_ST_M25PE80:
13389 case FLASH_5720VENDOR_A_ST_M45PE80:
13390 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13393 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13398 tg3_flag_set(tp, NO_NVRAM);
13402 tg3_nvram_get_pagesize(tp, nvcfg1);
13403 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13404 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13407 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13408 static void __devinit tg3_nvram_init(struct tg3 *tp)
13410 tw32_f(GRC_EEPROM_ADDR,
13411 (EEPROM_ADDR_FSM_RESET |
13412 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13413 EEPROM_ADDR_CLKPERD_SHIFT)));
13417 /* Enable seeprom accesses. */
13418 tw32_f(GRC_LOCAL_CTRL,
13419 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13422 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13423 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13424 tg3_flag_set(tp, NVRAM);
13426 if (tg3_nvram_lock(tp)) {
13427 netdev_warn(tp->dev,
13428 "Cannot get nvram lock, %s failed\n",
13432 tg3_enable_nvram_access(tp);
13434 tp->nvram_size = 0;
13436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13437 tg3_get_5752_nvram_info(tp);
13438 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13439 tg3_get_5755_nvram_info(tp);
13440 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13443 tg3_get_5787_nvram_info(tp);
13444 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13445 tg3_get_5761_nvram_info(tp);
13446 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13447 tg3_get_5906_nvram_info(tp);
13448 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13449 tg3_flag(tp, 57765_CLASS))
13450 tg3_get_57780_nvram_info(tp);
13451 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13452 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13453 tg3_get_5717_nvram_info(tp);
13454 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13455 tg3_get_5720_nvram_info(tp);
13457 tg3_get_nvram_info(tp);
13459 if (tp->nvram_size == 0)
13460 tg3_get_nvram_size(tp);
13462 tg3_disable_nvram_access(tp);
13463 tg3_nvram_unlock(tp);
13466 tg3_flag_clear(tp, NVRAM);
13467 tg3_flag_clear(tp, NVRAM_BUFFERED);
13469 tg3_get_eeprom_size(tp);
13473 struct subsys_tbl_ent {
13474 u16 subsys_vendor, subsys_devid;
13478 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13479 /* Broadcom boards. */
13480 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13481 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13482 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13483 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13484 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13485 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13486 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13487 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13488 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13489 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13490 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13491 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13492 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13493 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13494 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13495 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13496 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13497 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13498 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13499 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13500 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13501 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13504 { TG3PCI_SUBVENDOR_ID_3COM,
13505 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13506 { TG3PCI_SUBVENDOR_ID_3COM,
13507 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13508 { TG3PCI_SUBVENDOR_ID_3COM,
13509 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13510 { TG3PCI_SUBVENDOR_ID_3COM,
13511 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13512 { TG3PCI_SUBVENDOR_ID_3COM,
13513 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13516 { TG3PCI_SUBVENDOR_ID_DELL,
13517 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13518 { TG3PCI_SUBVENDOR_ID_DELL,
13519 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13520 { TG3PCI_SUBVENDOR_ID_DELL,
13521 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13522 { TG3PCI_SUBVENDOR_ID_DELL,
13523 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13525 /* Compaq boards. */
13526 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13527 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13528 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13529 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13530 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13531 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13532 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13533 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13534 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13535 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13538 { TG3PCI_SUBVENDOR_ID_IBM,
13539 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13542 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13546 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13547 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13548 tp->pdev->subsystem_vendor) &&
13549 (subsys_id_to_phy_id[i].subsys_devid ==
13550 tp->pdev->subsystem_device))
13551 return &subsys_id_to_phy_id[i];
13556 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13560 tp->phy_id = TG3_PHY_ID_INVALID;
13561 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13563 /* Assume an onboard device and WOL capability by default. */
13564 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13565 tg3_flag_set(tp, WOL_CAP);
13567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13568 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13569 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13570 tg3_flag_set(tp, IS_NIC);
13572 val = tr32(VCPU_CFGSHDW);
13573 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13574 tg3_flag_set(tp, ASPM_WORKAROUND);
13575 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13576 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13577 tg3_flag_set(tp, WOL_ENABLE);
13578 device_set_wakeup_enable(&tp->pdev->dev, true);
13583 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13584 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13585 u32 nic_cfg, led_cfg;
13586 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13587 int eeprom_phy_serdes = 0;
13589 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13590 tp->nic_sram_data_cfg = nic_cfg;
13592 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13593 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13594 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13595 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13596 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13597 (ver > 0) && (ver < 0x100))
13598 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13600 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13601 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13603 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13604 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13605 eeprom_phy_serdes = 1;
13607 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13608 if (nic_phy_id != 0) {
13609 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13610 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13612 eeprom_phy_id = (id1 >> 16) << 10;
13613 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13614 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13618 tp->phy_id = eeprom_phy_id;
13619 if (eeprom_phy_serdes) {
13620 if (!tg3_flag(tp, 5705_PLUS))
13621 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13623 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13626 if (tg3_flag(tp, 5750_PLUS))
13627 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13628 SHASTA_EXT_LED_MODE_MASK);
13630 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13634 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13635 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13638 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13639 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13642 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13643 tp->led_ctrl = LED_CTRL_MODE_MAC;
13645 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13646 * read, as happens with some older 5700/5701 bootcode.
13648 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13650 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13652 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13656 case SHASTA_EXT_LED_SHARED:
13657 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13658 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13659 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13660 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13661 LED_CTRL_MODE_PHY_2);
13664 case SHASTA_EXT_LED_MAC:
13665 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13668 case SHASTA_EXT_LED_COMBO:
13669 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13670 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13671 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13672 LED_CTRL_MODE_PHY_2);
13677 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13678 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13679 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13680 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13682 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13683 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13685 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13686 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13687 if ((tp->pdev->subsystem_vendor ==
13688 PCI_VENDOR_ID_ARIMA) &&
13689 (tp->pdev->subsystem_device == 0x205a ||
13690 tp->pdev->subsystem_device == 0x2063))
13691 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13693 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13694 tg3_flag_set(tp, IS_NIC);
13697 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13698 tg3_flag_set(tp, ENABLE_ASF);
13699 if (tg3_flag(tp, 5750_PLUS))
13700 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13703 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13704 tg3_flag(tp, 5750_PLUS))
13705 tg3_flag_set(tp, ENABLE_APE);
13707 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13708 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13709 tg3_flag_clear(tp, WOL_CAP);
13711 if (tg3_flag(tp, WOL_CAP) &&
13712 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13713 tg3_flag_set(tp, WOL_ENABLE);
13714 device_set_wakeup_enable(&tp->pdev->dev, true);
13717 if (cfg2 & (1 << 17))
13718 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13720 /* serdes signal pre-emphasis in register 0x590 is set by
13721 * the bootcode if bit 18 is set */
13722 if (cfg2 & (1 << 18))
13723 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13725 if ((tg3_flag(tp, 57765_PLUS) ||
13726 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13727 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13728 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13729 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13731 if (tg3_flag(tp, PCI_EXPRESS) &&
13732 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13733 !tg3_flag(tp, 57765_PLUS)) {
13736 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13737 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13738 tg3_flag_set(tp, ASPM_WORKAROUND);
13741 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13742 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13743 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13744 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13745 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13746 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13749 if (tg3_flag(tp, WOL_CAP))
13750 device_set_wakeup_enable(&tp->pdev->dev,
13751 tg3_flag(tp, WOL_ENABLE));
13753 device_set_wakeup_capable(&tp->pdev->dev, false);
13756 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13761 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13762 tw32(OTP_CTRL, cmd);
13764 /* Wait for up to 1 ms for command to execute. */
13765 for (i = 0; i < 100; i++) {
13766 val = tr32(OTP_STATUS);
13767 if (val & OTP_STATUS_CMD_DONE)
13772 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
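/* Worked example of the merge above (values are illustrative only): if the
 * two OTP words read back as thalf_otp = 0x1234abcd and
 * bhalf_otp = 0x5678ef01, the function returns
 * (0xabcd << 16) | (0x5678ef01 >> 16) = 0xabcd5678, i.e. the low 16 bits of
 * the first word become the high half of the result and the high 16 bits of
 * the second word become the low half.
 */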
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID or, failing
		 * that, the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
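
		/* Note (added): the two MII ID registers are 16 bits each;
		 * the composition above packs them into tg3's internal
		 * 32-bit PHY ID format: PHYSID1 occupies bits 25:10, the
		 * OUI portion of PHYSID2 (its top six bits, 0xfc00) is
		 * shifted into bits 31:26, and the model/revision bits 9:0
		 * land in the low 10 bits. TG3_PHY_ID_MASK then strips the
		 * low revision bits so the ID can be compared against the
		 * known-PHY table.
		 */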
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0x00000000)
		return 0;

	return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
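/* Note (added): the build number is encoded as a trailing letter, build 1
 * mapping to 'a' and build 26 to 'z', which is why values above 26 were
 * rejected earlier. A resulting version string therefore looks like
 * "sb v1.02a" (example only).
 */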
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
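/* Note (added): this table is consumed via pci_dev_present() in
 * tg3_get_invariants() below; if any listed host bridge is present in the
 * system and the NIC is not PCIe, mailbox writes get the read-back flush
 * treatment (MBOX_WRITE_REORDER).
 */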
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
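/* Note (added): the *_PLUS flags set above nest. 5717_PLUS and 57765_CLASS
 * imply 57765_PLUS; 57765_PLUS (together with several standalone ASIC revs)
 * implies 5755_PLUS; 5755_PLUS implies 5750_PLUS; and 5750_PLUS implies
 * 5705_PLUS. Later code can therefore test the broadest applicable flag
 * instead of enumerating chip revisions.
 */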
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time so it never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tp->fw_needed) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;
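
	/* Note (added): summary of the TSO ladder above. HW_TSO_3 (57765+),
	 * HW_TSO_2 (5755+/5906), and HW_TSO_1 (other 5750+) offload
	 * segmentation in hardware; older chips fall back to firmware TSO
	 * and need FIRMWARE_TG3TSO or FIRMWARE_TG3TSO5 loaded; 5719 A0 gets
	 * no TSO at all due to a hardware bug.
	 */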
	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
			tp->pci_fn = tp->pci_fn ? 1 : 0;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
		}
	}
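
	/* Note (added): on 5717/5719/5720 the PCI function number visible
	 * to the OS may not correspond to the physical port, so the true
	 * function is recovered from the CPMU status word once the bootcode
	 * signature checks out; on 5719/5720 it is extracted with
	 * TG3_CPMU_STATUS_FMSK_5719 and shifted down by
	 * TG3_CPMU_STATUS_FSHFT_5719, as above.
	 */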
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want the Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;
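
		/* Note (added): 0x484b is ASCII "HK", the signature the
		 * bootcode writes to mark a valid MAC address mailbox; the
		 * remaining two bytes of the high word and the four bytes of
		 * the low word then unpack big-endian into dev_addr[0..5]
		 * as above.
		 */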
		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
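/* Note (added): in short, BOUNDARY_SINGLE_CACHELINE asks the chip to break
 * DMA bursts at one cacheline, BOUNDARY_MULTI_CACHELINE allows larger
 * boundaries (e.g. the 384-byte PCI-X setting), and goal == 0 leaves the
 * boundary bits at their reset defaults. For example, with a 64-byte
 * cacheline on plain PCI and a single-cacheline goal, the 64-byte
 * read/write boundary bits are selected above.
 */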
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
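/* Note (added): the completion poll above waits at most
 * 40 iterations * 100 us = 4 ms for the DMA engine to report the
 * descriptor address back through the completion FIFO; ret stays
 * -ENODEV on timeout.
 */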
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
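
/* Illustrative sketch (not part of the driver): the ladder above simply
 * selects one of three watermark profiles by device generation.  The
 * same selection could be written table-driven; the sketch_mb_profile
 * struct and all names below are invented for illustration.
 */
#if 0
struct sketch_mb_profile {
	u32 rdma_low, macrx_low, high;
};

static const struct sketch_mb_profile sketch_profiles[] = {
	{ DEFAULT_MB_RDMA_LOW_WATER_5705,
	  DEFAULT_MB_MACRX_LOW_WATER_57765, DEFAULT_MB_HIGH_WATER_57765 },
	{ DEFAULT_MB_RDMA_LOW_WATER_5705,
	  DEFAULT_MB_MACRX_LOW_WATER_5705, DEFAULT_MB_HIGH_WATER_5705 },
	{ DEFAULT_MB_RDMA_LOW_WATER,
	  DEFAULT_MB_MACRX_LOW_WATER, DEFAULT_MB_HIGH_WATER },
};

static void sketch_pick_profile(struct tg3 *tp)
{
	int idx = tg3_flag(tp, 57765_PLUS) ? 0 :
		  tg3_flag(tp, 5705_PLUS) ? 1 : 2;

	tp->bufmgr_config.mbuf_read_dma_low_water = sketch_profiles[idx].rdma_low;
	tp->bufmgr_config.mbuf_mac_rx_low_water = sketch_profiles[idx].macrx_low;
	tp->bufmgr_config.mbuf_high_water = sketch_profiles[idx].high;
}
#endif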
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400: return "5400";
	case TG3_PHY_ID_BCM5401: return "5401";
	case TG3_PHY_ID_BCM5411: return "5411";
	case TG3_PHY_ID_BCM5701: return "5701";
	case TG3_PHY_ID_BCM5703: return "5703";
	case TG3_PHY_ID_BCM5704: return "5704";
	case TG3_PHY_ID_BCM5705: return "5705";
	case TG3_PHY_ID_BCM5750: return "5750";
	case TG3_PHY_ID_BCM5752: return "5752";
	case TG3_PHY_ID_BCM5714: return "5714";
	case TG3_PHY_ID_BCM5780: return "5780";
	case TG3_PHY_ID_BCM5755: return "5755";
	case TG3_PHY_ID_BCM5787: return "5787";
	case TG3_PHY_ID_BCM5784: return "5784";
	case TG3_PHY_ID_BCM5756: return "5722/5756";
	case TG3_PHY_ID_BCM5906: return "5906";
	case TG3_PHY_ID_BCM5761: return "5761";
	case TG3_PHY_ID_BCM5718C: return "5718C";
	case TG3_PHY_ID_BCM5718S: return "5718S";
	case TG3_PHY_ID_BCM57765: return "57765";
	case TG3_PHY_ID_BCM5719C: return "5719C";
	case TG3_PHY_ID_BCM5720C: return "5720C";
	case TG3_PHY_ID_BCM8002: return "8002/serdes";
	case 0: return "serdes";
	default: return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
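
/* Illustrative sketch (not part of the driver): these defaults are what
 * "ethtool -c ethX" reports until userspace overrides them.  An override
 * such as "ethtool -C ethX rx-usecs 50 rx-frames 5" reaches the driver
 * as a struct ethtool_coalesce whose fields mirror the ones initialized
 * above, conceptually:
 */
#if 0
	struct ethtool_coalesce ec = {
		.cmd = ETHTOOL_SCOALESCE,
		.rx_coalesce_usecs = 50,	/* fire the IRQ after 50us... */
		.rx_max_coalesced_frames = 5,	/* ...or after 5 frames */
	};
#endif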
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
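
	/* Illustrative sketch (not part of the driver): the block above is
	 * the standard "widest mask first, then fall back to 32-bit" DMA
	 * negotiation, reduced to its skeleton.  sketch_setup_dma() is an
	 * invented name shown for illustration only.
	 */
#if 0
	static int sketch_setup_dma(struct pci_dev *pdev, u64 wide_mask)
	{
		if (wide_mask > DMA_BIT_MASK(32) &&
		    !pci_set_dma_mask(pdev, wide_mask) &&
		    !pci_set_consistent_dma_mask(pdev, wide_mask))
			return 0;		/* wide DMA granted */

		/* Every PCI device must support 32-bit addressing. */
		return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}
#endif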
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
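
	/* Illustrative sketch (not part of the driver): bits in
	 * dev->hw_features are user-togglable, while dev->features holds
	 * the currently active set.  An "ethtool -K ethX tso off" is
	 * roughly equivalent to the following:
	 */
#if 0
	dev->wanted_features = dev->features & ~NETIF_F_TSO;
	netdev_update_features(dev);	/* revalidates and applies the set */
#endif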
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut down DMA.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
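
/* Illustrative sketch (not part of the driver): the error ladder above
 * follows the usual kernel idiom -- acquire resources in order, and on
 * failure jump to the label that releases everything acquired so far,
 * falling through the remaining cleanup stages in reverse order.  All
 * names below are invented for illustration.
 */
#if 0
static int sketch_probe(struct pci_dev *pdev)
{
	int err;

	err = sketch_grab_a(pdev);
	if (err)
		return err;		/* nothing to undo yet */

	err = sketch_grab_b(pdev);
	if (err)
		goto undo_a;		/* undo only what succeeded */

	return 0;

undo_a:
	sketch_release_a(pdev);
	return err;
}
#endif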
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
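
/* Illustrative sketch (not part of the driver): SIMPLE_DEV_PM_OPS()
 * wires the same pair of callbacks into every system-sleep slot of a
 * struct dev_pm_ops, roughly equivalent to writing:
 */
#if 0
static const struct dev_pm_ops tg3_pm_ops = {
	.suspend = tg3_suspend, .resume = tg3_resume,
	.freeze = tg3_suspend, .thaw = tg3_resume,
	.poweroff = tg3_suspend, .restore = tg3_resume,
};
#endif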
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
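
/* Illustrative sketch (not part of the driver): for a recoverable error
 * the PCI error-recovery core invokes these handlers in a fixed order,
 * conceptually:
 */
#if 0
	state = tg3_io_error_detected(pdev, pci_channel_io_frozen);
	if (state == PCI_ERS_RESULT_NEED_RESET &&
	    tg3_io_slot_reset(pdev) == PCI_ERS_RESULT_RECOVERED)
		tg3_io_resume(pdev);	/* traffic may flow again */
#endif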
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
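
/* Equivalent shorthand: module_pci_driver() generates the same init/exit
 * pair and module_init/module_exit wiring, so the three stanzas above
 * collapse to a single line (shown for illustration):
 */
#if 0
module_pci_driver(tg3_driver);
#endif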