// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2008 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/prefetch.h>
#include "ixgb.h"
char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgb_pci_tbl[] = {
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(struct timer_list *t);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid);
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe = ixgb_probe,
	.remove = ixgb_remove,
	.err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init
ixgb_init_module(void)
{
	pr_info("%s\n", ixgb_driver_string);
	pr_info("%s\n", ixgb_copyright);

	return pci_register_driver(&ixgb_driver);
}
module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}
module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
		  IXGB_INT_TXDW | IXGB_INT_LSC;
	if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
		val |= IXGB_INT_GPI0;
	IXGB_WRITE_REG(&adapter->hw, IMS, val);
	IXGB_WRITE_FLUSH(&adapter->hw);
}

int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err, irq_flags = IRQF_SHARED;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* only enable MSI if bus is in PCI-X mode */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->have_msi = true;
			irq_flags = 0;
		}
		/* proceed to try to request regular interrupt */
	}

	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
			  netdev->name, netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate interrupt Error: %d\n", err);
		return err;
	}

	if ((hw->max_frame_size != max_frame) ||
	    (hw->max_frame_size !=
	     (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
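
		/* Illustrative arithmetic (not in the original source): a
		 * 9000-byte MTU gives max_frame = 9000 + ENET_HEADER_SIZE
		 * (14) + ENET_FCS_LENGTH (4) = 9018 bytes, larger than a
		 * standard 1518-byte frame, so the jumbo frame enable
		 * (JFE) bit is turned on below. */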
		if (hw->max_frame_size >
		    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	clear_bit(__IXGB_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);
	ixgb_irq_enable(adapter);

	netif_wake_queue(netdev);

	mod_timer(&adapter->watchdog_timer, jiffies);

	return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	/* prevent the interrupt handler from restarting watchdog */
	set_bit(__IXGB_DOWN, &adapter->flags);

	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);
	/* waiting for NAPI to complete can re-enable interrupts */
	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);

	if (kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;

	ixgb_adapter_stop(hw);
	if (!ixgb_init_hw(hw))
		netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

	/* restore frame size information */
	IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
	if (hw->max_frame_size >
	    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
		u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
		if (!(ctrl0 & IXGB_CTRL0_JFE)) {
			ctrl0 |= IXGB_CTRL0_JFE;
			IXGB_WRITE_REG(hw, CTRL0, ctrl0);
		}
	}
}

static netdev_features_t
ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
	/* Tx VLAN insertion does not work per HW design when Rx stripping is
	 * disabled.
	 */
	if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int
ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
		return 0;

	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev)) {
		ixgb_down(adapter, true);
		ixgb_up(adapter);
		ixgb_set_speed_duplex(netdev);
	} else
		ixgb_reset(adapter);

	return 0;
}

static const struct net_device_ops ixgb_netdev_ops = {
	.ndo_open		= ixgb_open,
	.ndo_stop		= ixgb_close,
	.ndo_start_xmit		= ixgb_xmit_frame,
	.ndo_set_rx_mode	= ixgb_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgb_set_mac,
	.ndo_change_mtu		= ixgb_change_mtu,
	.ndo_tx_timeout		= ixgb_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgb_vlan_rx_kill_vid,
	.ndo_fix_features	= ixgb_fix_features,
	.ndo_set_features	= ixgb_set_features,
};

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	u8 addr[ETH_ALEN];
	int i;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		pr_err("No usable DMA configuration, aborting\n");
		goto err_dma_mask;
	}

	err = pci_request_regions(pdev, ixgb_driver_name);
	if (err)
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->netdev_ops = &ixgb_netdev_ops;
	ixgb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */
	err = ixgb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_HW_CSUM |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_RX;
	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_RXCSUM;
	netdev->features |= NETIF_F_HIGHDMA;
	netdev->vlan_features |= NETIF_F_HIGHDMA;

	/* MTU range: 68 - 16114 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
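	/* IXGB_MAX_JUMBO_FRAME_SIZE is 16128 and ETH_HLEN is 14, so max_mtu
	 * is the 16114 quoted in the range comment above (16128 - 14). */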
	/* make sure the EEPROM is good */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, addr);
	eth_hw_addr_set(netdev, addr);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
	timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);
	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	netif_info(adapter, probe, adapter->netdev,
		   "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */
	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
	pci_disable_device(pdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

	if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	set_bit(__IXGB_DOWN, &adapter->flags);
	return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = ixgb_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	netif_carrier_off(netdev);

	/* allocate receive descriptors */
	err = ixgb_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	err = ixgb_up(adapter);
	if (err)
		goto err_up;

	netif_start_queue(netdev);
	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	ixgb_reset(adapter);
	return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	ixgb_down(adapter, true);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);
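	/* Illustrative sizing (not from the original source): each
	 * ixgb_tx_desc is 16 bytes, so e.g. a 256-descriptor ring needs
	 * exactly 4096 bytes and the ALIGN() above leaves it unchanged. */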
	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}
	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	u64 tdba = adapter->tx_ring.dma;
	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	u32 tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */
	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */
	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */
	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type = IXGB_TX_DESC_TYPE |
		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/
static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	u32 rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	rctl |= IXGB_RCTL_SECRC;
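
	/* Worked example (illustrative, not in the original source): at the
	 * default 1500-byte MTU, rx_buffer_len is 1500 + 14 + 4 + 8 = 1526
	 * bytes, so the first branch below selects the 2048-byte hardware
	 * receive buffer size. */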
	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
		rctl |= IXGB_RCTL_BSIZE_2048;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
		rctl |= IXGB_RCTL_BSIZE_4096;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
		rctl |= IXGB_RCTL_BSIZE_8192;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
		rctl |= IXGB_RCTL_BSIZE_16384;

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	u64 rdba = adapter->rx_ring.dma;
	u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	u32 rctl;
	u32 rxcsum;

	/* make sure receives are disabled while setting up the descriptors */
	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */
	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* due to the hardware errata with RXDCTL, we are unable to use any of
	 * the performance enhancing features of it without causing other
	 * subtle bugs, some of the bugs could include receive length
	 * corruption at high data rates (WTHRESH > 0) and/or receive
	 * descriptor ring irregularities (particularly in hardware cache) */
	IXGB_WRITE_REG(hw, RXDCTL, 0);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if (adapter->rx_csum) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */
	IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
			  adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}

	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* these fields must always be initialized in tx
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/
static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(&pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->length = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 rctl;

	/* Check for Promiscuous and All Multicast modes */
	rctl = IXGB_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		/* disable VLAN filtering */
		rctl &= ~IXGB_RCTL_CFIEN;
		rctl &= ~IXGB_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IXGB_RCTL_MPE;
			rctl &= ~IXGB_RCTL_UPE;
		} else {
			rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		}
		/* enable VLAN filtering */
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
	}

	if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		u8 *mta = kmalloc_array(ETH_ALEN,
					IXGB_MAX_NUM_MULTICAST_ADDRESSES,
					GFP_ATOMIC);
		u8 *addr;
		if (!mta)
			goto alloc_failed;

		IXGB_WRITE_REG(hw, RCTL, rctl);

		addr = mta;
		netdev_for_each_mc_addr(ha, netdev) {
			memcpy(addr, ha->addr, ETH_ALEN);
			addr += ETH_ALEN;
		}

		ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
		kfree(mta);
	}

alloc_failed:
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		ixgb_vlan_strip_enable(adapter);
	else
		ixgb_vlan_strip_disable(adapter);
}

/**
 * ixgb_watchdog - Timer Call-back
 * @t: pointer to timer_list containing our private info pointer
 **/
static void
ixgb_watchdog(struct timer_list *t)
{
	struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if (adapter->hw.link_up) {
		if (!netif_carrier_ok(netdev)) {
			netdev_info(netdev,
				    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
				    (adapter->hw.fc.type == ixgb_fc_full) ?
				    "RX/TX" :
				    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
				    "RX" :
				    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
				    "TX" : "None");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			netdev_info(netdev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
		}
	}

	ixgb_update_stats(adapter);

	if (!netif_carrier_ok(netdev)) {
		if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM	0x00000001
#define IXGB_TX_FLAGS_VLAN	0x00000002
#define IXGB_TX_FLAGS_TSO	0x00000004

static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	u16 ipcse, tucse, mss;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;
		struct iphdr *iph;
		int err;

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(iph->check) - (void *)skb->data;
		ipcse = skb_transport_offset(skb) - 1;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						  IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}

	return 0;
}

static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
		css = skb_checksum_start_offset(skb);
		cso = css + skb->csum_offset;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(u32 *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return true;
	}

	return false;
}

#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)

static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_buffer *buffer_info;
	int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;

		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && (f == (nr_frags - 1))
				     && size == len && size > 8))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma =
				skb_frag_dma_map(&pdev->dev, frag, offset, size,
						 DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
		}
	}
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}

static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	u32 cmd_type_len = adapter->tx_cmd_type;
	u8 status = 0;
	u8 popts = 0;
	unsigned int i;

	if (tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if (tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if (tx_flags & IXGB_TX_FLAGS_VLAN)
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if (++i == tx_ring->count) i = 0;
	}

	tx_desc->cmd_type_len |=
		cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
			      struct ixgb_desc_ring *tx_ring, int size)
{
	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgb_maybe_stop_tx(netdev, size);
}

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			  (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
	+ 1 /* one more needed for sentinel TSO workaround */
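
/* Worked example (illustrative, not from the original source):
 * TXD_USE_COUNT(S) is ceil(S / 16384), the descriptors needed to carry S
 * bytes at IXGB_MAX_DATA_PER_TXD bytes apiece.  On a machine with 4 KiB
 * pages (MAX_SKB_FRAGS == 17) this makes DESC_NEEDED =
 * 1 + 17 * 1 + 1 + 1 = 20 free descriptors per worst-case transmit. */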

static netdev_tx_t
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	int vlan_id = 0;
	int count = 0;
	int tso;

	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
		     DESC_NEEDED)))
		return NETDEV_TX_BUSY;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = skb_vlan_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if (ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	count = ixgb_tx_map(adapter, skb, first);

	if (count) {
		ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
		/* Make sure there is space in the ring for the next send. */
		ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
	} else {
		dev_kfree_skb_any(skb);
		adapter->tx_ring.buffer_info[first].time_stamp = 0;
		adapter->tx_ring.next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue hanging (unused)
 **/
static void
ixgb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

static void
ixgb_tx_timeout_task(struct work_struct *work)
{
	struct ixgb_adapter *adapter =
		container_of(work, struct ixgb_adapter, tx_timeout_task);

	adapter->tx_timeout_count++;
	ixgb_down(adapter, true);
	ixgb_up(adapter);
}

/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */

	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgb_up(adapter);

	return 0;
}

/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pci_channel_offline(pdev))
		return;

	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
		/* fix up multicast stats by removing broadcasts */
		if (multi >= bcast)
			multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

	/* Fill out the OS statistics structure */
	netdev->stats.rx_packets = adapter->stats.gprcl;
	netdev->stats.tx_packets = adapter->stats.gptcl;
	netdev->stats.rx_bytes = adapter->stats.gorcl;
	netdev->stats.tx_bytes = adapter->stats.gotcl;
	netdev->stats.multicast = adapter->stats.mprcl;
	netdev->stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	netdev->stats.rx_errors =
	/*	adapter->stats.rnbc + */ adapter->stats.crcerrs +
		adapter->stats.ruc +
		adapter->stats.roc /*+ adapter->stats.rlec */ +
		adapter->stats.icbc +
		adapter->stats.ecbc + adapter->stats.mpc;

	/* see above
	 * netdev->stats.rx_length_errors = adapter->stats.rlec;
	 */

	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_fifo_errors = adapter->stats.mpc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;
	netdev->stats.rx_over_errors = adapter->stats.mpc;

	netdev->stats.tx_errors = 0;
	netdev->stats.rx_frame_errors = 0;
	netdev->stats.tx_aborted_errors = 0;
	netdev->stats.tx_carrier_errors = 0;
	netdev->stats.tx_fifo_errors = 0;
	netdev->stats.tx_heartbeat_errors = 0;
	netdev->stats.tx_window_errors = 0;
}

/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t
ixgb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	u32 icr = IXGB_READ_REG(hw, ICR);

	if (unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies);

	if (napi_schedule_prep(&adapter->napi)) {
		/* Disable interrupts and register for poll. The flush
		 * of the posted write is intentionally left out. */
		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

/**
 * ixgb_clean - NAPI Rx polling callback
 * @napi: napi struct pointer
 * @budget: max number of receives to clean
 **/
static int
ixgb_clean(struct napi_struct *napi, int budget)
{
	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
	int work_done = 0;

	ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, budget);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			ixgb_irq_enable(adapter);
	}

	return work_done;
}

/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);

	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		rmb(); /* read buffer_info after eop_desc */

		for (cleaned = false; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			if (tx_desc->popts &
			    (IXGB_TX_DESC_POPTS_TXSM |
			     IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			*(u32 *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if (++i == tx_ring->count) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean. */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
		    && !(IXGB_READ_REG(&adapter->hw, STATUS) &
			 IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			netif_err(adapter, drv, adapter->netdev,
				  "Detected Tx Unit Hang\n"
				  "  TDH                  <%x>\n"
				  "  TDT                  <%x>\n"
				  "  next_to_use          <%x>\n"
				  "  next_to_clean        <%x>\n"
				  "buffer_info[next_to_clean]\n"
				  "  time_stamp           <%lx>\n"
				  "  next_to_watch        <%x>\n"
				  "  jiffies              <%lx>\n"
				  "  next_to_watch.status <%x>\n",
				  IXGB_READ_REG(&adapter->hw, TDH),
				  IXGB_READ_REG(&adapter->hw, TDT),
				  tx_ring->next_to_use,
				  tx_ring->next_to_clean,
				  tx_ring->buffer_info[eop].time_stamp,
				  eop,
				  jiffies,
				  eop_desc->status);
			netif_stop_queue(netdev);
		}
	}

	return cleaned;
}

/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
 **/
static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
		 struct ixgb_rx_desc *rx_desc,
		 struct sk_buff *skb)
{
	/* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
	    (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb_checksum_none_assert(skb);
		return;
	}

	/* At this point we know the hardware did the TCP checksum */
	/* now look at the TCP checksum error bit */
	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
		/* let the stack verify checksum errors */
		skb_checksum_none_assert(skb);
		adapter->hw_csum_rx_error++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_rx_good++;
	}
}

/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void ixgb_check_copybreak(struct napi_struct *napi,
				 struct ixgb_buffer *buffer_info,
				 u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = napi_alloc_skb(napi, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
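	/* The copy begins NET_IP_ALIGN bytes before the packet data so the
	 * replacement skb keeps the same IP-header alignment padding the
	 * original receive buffer was allocated with. */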
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}

/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 * @work_done: output pointer to the number of packets cleaned
 * @work_to_do: how much work we can complete
 **/
static bool
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc, *next_rxd;
	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
	u32 length;
	unsigned int i, j;
	int cleaned_count = 0;
	bool cleaned = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		j = i + 1;
		if (j == rx_ring->count)
			j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 buffer_info->length,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		rx_desc->length = 0;

		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
			/* All receives must fit into a single buffer */
			pr_debug("Receive packet consumed multiple buffers length<%x>\n",
				 length);
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		if (unlikely(rx_desc->errors &
			     (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
			      IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		if (status & IXGB_RX_DESC_STATUS_VP)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rx_desc->special));

		netif_receive_skb(skb);

rxdesc_done:
		/* clean up descriptor, might be written over by hw */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
			ixgb_alloc_rx_buffers(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}

	rx_ring->next_to_clean = i;

	cleaned_count = IXGB_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		ixgb_alloc_rx_buffers(adapter, cleaned_count);

	return cleaned;
}

/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: how many buffers to allocate
 **/
static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct ixgb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	long cleancount;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	cleancount = IXGB_DESC_UNUSED(rx_ring);

	/* leave three descriptors unused */
	while (--cleancount > 2 && cleaned_count--) {
		/* recycle! it's good for you */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee DD bit not set now before h/w gets descriptor
		 * this is the rest of the workaround for h/w double
		 * writeback. */
		rx_desc->status = 0;

		if (++i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs, such
		 * as IA-64). */
		wmb();
		IXGB_WRITE_REG(&adapter->hw, RDT, i);
	}
}

static void
ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
{
	u32 ctrl;

	/* enable VLAN tag insert/strip */
	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl |= IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}

static void
ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
{
	u32 ctrl;

	/* disable VLAN tag insert/strip */
	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl &= ~IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}

static int
ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
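	/* For example, vid 100 selects index 100 >> 5 = 3 and bit
	 * 100 & 0x1F = 4, i.e. bit 4 of the 32-bit word VFTA[3]. */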
	ixgb_write_vfta(&adapter->hw, index, vfta);
	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int
ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * ixgb_io_error_detected - called when PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 **/
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the ixgb_probe() routine.
 **/
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u8 addr[ETH_ALEN];

	if (pci_enable_device(pdev)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform card reset only on one instance of the card */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;

	pci_set_master(pdev);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);

	/* Make sure the EEPROM is good */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, the EEPROM checksum is not valid\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, addr);
	eth_hw_addr_set(netdev, addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, invalid MAC address\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgb_io_resume - called when it's OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it's OK to resume
 * normal operation. Implementation resembles the second-half
 * of the ixgb_probe() routine.
 **/
static void ixgb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		if (ixgb_up(adapter)) {
			pr_err("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
	mod_timer(&adapter->watchdog_timer, jiffies);
}