e1000: fix lockdep warning in e1000_reset_task
[platform/adaptation/renesas_rcar/renesas_kernel.git] drivers/net/ethernet/intel/e1000/e1000_main.c
1 /*******************************************************************************
2
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
50         INTEL_E1000_ETHERNET_DEVICE(0x1000),
51         INTEL_E1000_ETHERNET_DEVICE(0x1001),
52         INTEL_E1000_ETHERNET_DEVICE(0x1004),
53         INTEL_E1000_ETHERNET_DEVICE(0x1008),
54         INTEL_E1000_ETHERNET_DEVICE(0x1009),
55         INTEL_E1000_ETHERNET_DEVICE(0x100C),
56         INTEL_E1000_ETHERNET_DEVICE(0x100D),
57         INTEL_E1000_ETHERNET_DEVICE(0x100E),
58         INTEL_E1000_ETHERNET_DEVICE(0x100F),
59         INTEL_E1000_ETHERNET_DEVICE(0x1010),
60         INTEL_E1000_ETHERNET_DEVICE(0x1011),
61         INTEL_E1000_ETHERNET_DEVICE(0x1012),
62         INTEL_E1000_ETHERNET_DEVICE(0x1013),
63         INTEL_E1000_ETHERNET_DEVICE(0x1014),
64         INTEL_E1000_ETHERNET_DEVICE(0x1015),
65         INTEL_E1000_ETHERNET_DEVICE(0x1016),
66         INTEL_E1000_ETHERNET_DEVICE(0x1017),
67         INTEL_E1000_ETHERNET_DEVICE(0x1018),
68         INTEL_E1000_ETHERNET_DEVICE(0x1019),
69         INTEL_E1000_ETHERNET_DEVICE(0x101A),
70         INTEL_E1000_ETHERNET_DEVICE(0x101D),
71         INTEL_E1000_ETHERNET_DEVICE(0x101E),
72         INTEL_E1000_ETHERNET_DEVICE(0x1026),
73         INTEL_E1000_ETHERNET_DEVICE(0x1027),
74         INTEL_E1000_ETHERNET_DEVICE(0x1028),
75         INTEL_E1000_ETHERNET_DEVICE(0x1075),
76         INTEL_E1000_ETHERNET_DEVICE(0x1076),
77         INTEL_E1000_ETHERNET_DEVICE(0x1077),
78         INTEL_E1000_ETHERNET_DEVICE(0x1078),
79         INTEL_E1000_ETHERNET_DEVICE(0x1079),
80         INTEL_E1000_ETHERNET_DEVICE(0x107A),
81         INTEL_E1000_ETHERNET_DEVICE(0x107B),
82         INTEL_E1000_ETHERNET_DEVICE(0x107C),
83         INTEL_E1000_ETHERNET_DEVICE(0x108A),
84         INTEL_E1000_ETHERNET_DEVICE(0x1099),
85         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87         /* required last entry */
88         {0,}
89 };
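/* For reference, each INTEL_E1000_ETHERNET_DEVICE(id) entry above expands,
 * via PCI_DEVICE(), to an initializer along these lines, with subvendor and
 * subdevice left as wildcards so any board built around the part matches:
 *
 *   { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x100E,
 *     .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }
 */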
90
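/* MODULE_DEVICE_TABLE(pci, ...) exports the ID table above as module alias
 * information, so depmod/udev can autoload e1000 when a matching PCI device
 * is discovered.
 */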
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102                              struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104                              struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106                              struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108                              struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 static int e1000_open(struct net_device *netdev);
118 static int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125                                 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127                                 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133                                     struct net_device *netdev);
134 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139                                struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142                                struct e1000_rx_ring *rx_ring,
143                                int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145                                      struct e1000_rx_ring *rx_ring,
146                                      int *work_done, int work_to_do);
147 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
148                                    struct e1000_rx_ring *rx_ring,
149                                    int cleaned_count);
150 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151                                          struct e1000_rx_ring *rx_ring,
152                                          int cleaned_count);
153 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155                            int cmd);
156 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158 static void e1000_tx_timeout(struct net_device *dev);
159 static void e1000_reset_task(struct work_struct *work);
160 static void e1000_smartspeed(struct e1000_adapter *adapter);
161 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162                                        struct sk_buff *skb);
163
164 static bool e1000_vlan_used(struct e1000_adapter *adapter);
165 static void e1000_vlan_mode(struct net_device *netdev,
166                             netdev_features_t features);
167 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168                                      bool filter_on);
169 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
170                                  __be16 proto, u16 vid);
171 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
172                                   __be16 proto, u16 vid);
173 static void e1000_restore_vlan(struct e1000_adapter *adapter);
174
175 #ifdef CONFIG_PM
176 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
177 static int e1000_resume(struct pci_dev *pdev);
178 #endif
179 static void e1000_shutdown(struct pci_dev *pdev);
180
181 #ifdef CONFIG_NET_POLL_CONTROLLER
182 /* for netdump / net console */
183 static void e1000_netpoll (struct net_device *netdev);
184 #endif
185
186 #define COPYBREAK_DEFAULT 256
187 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
188 module_param(copybreak, uint, 0644);
189 MODULE_PARM_DESC(copybreak,
190         "Maximum size of packet that is copied to a new buffer on receive");
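/* copybreak is a plain module parameter; with the usual parameter plumbing it
 * can be set at load time (modprobe e1000 copybreak=128), on the kernel
 * command line (e1000.copybreak=128), or, since it is registered with mode
 * 0644, changed at runtime via /sys/module/e1000/parameters/copybreak.
 */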
191
192 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
193                      pci_channel_state_t state);
194 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
195 static void e1000_io_resume(struct pci_dev *pdev);
196
197 static const struct pci_error_handlers e1000_err_handler = {
198         .error_detected = e1000_io_error_detected,
199         .slot_reset = e1000_io_slot_reset,
200         .resume = e1000_io_resume,
201 };
202
203 static struct pci_driver e1000_driver = {
204         .name     = e1000_driver_name,
205         .id_table = e1000_pci_tbl,
206         .probe    = e1000_probe,
207         .remove   = e1000_remove,
208 #ifdef CONFIG_PM
209         /* Power Management Hooks */
210         .suspend  = e1000_suspend,
211         .resume   = e1000_resume,
212 #endif
213         .shutdown = e1000_shutdown,
214         .err_handler = &e1000_err_handler
215 };
216
217 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
218 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
219 MODULE_LICENSE("GPL");
220 MODULE_VERSION(DRV_VERSION);
221
222 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
223 static int debug = -1;
224 module_param(debug, int, 0);
225 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
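/* debug defaults to -1, which netif_msg_init() (used in e1000_probe below)
 * treats as out of range and therefore falls back to DEFAULT_MSG_ENABLE
 * (driver, probe and link messages); debug=0..16 selects a level explicitly,
 * with 0 meaning no messages.
 */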
226
227 /**
228  * e1000_get_hw_dev - return device
229  * used by hardware layer to print debugging information
230  *
231  **/
232 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
233 {
234         struct e1000_adapter *adapter = hw->back;
235         return adapter->netdev;
236 }
237
238 /**
239  * e1000_init_module - Driver Registration Routine
240  *
241  * e1000_init_module is the first routine called when the driver is
242  * loaded. All it does is register with the PCI subsystem.
243  **/
244 static int __init e1000_init_module(void)
245 {
246         int ret;
247         pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
248
249         pr_info("%s\n", e1000_copyright);
250
251         ret = pci_register_driver(&e1000_driver);
252         if (copybreak != COPYBREAK_DEFAULT) {
253                 if (copybreak == 0)
254                         pr_info("copybreak disabled\n");
255                 else
256                         pr_info("copybreak enabled for "
257                                    "packets <= %u bytes\n", copybreak);
258         }
259         return ret;
260 }
261
262 module_init(e1000_init_module);
263
264 /**
265  * e1000_exit_module - Driver Exit Cleanup Routine
266  *
267  * e1000_exit_module is called just before the driver is removed
268  * from memory.
269  **/
270 static void __exit e1000_exit_module(void)
271 {
272         pci_unregister_driver(&e1000_driver);
273 }
274
275 module_exit(e1000_exit_module);
276
277 static int e1000_request_irq(struct e1000_adapter *adapter)
278 {
279         struct net_device *netdev = adapter->netdev;
280         irq_handler_t handler = e1000_intr;
281         int irq_flags = IRQF_SHARED;
282         int err;
283
284         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285                           netdev);
286         if (err) {
287                 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
288         }
289
290         return err;
291 }
292
293 static void e1000_free_irq(struct e1000_adapter *adapter)
294 {
295         struct net_device *netdev = adapter->netdev;
296
297         free_irq(adapter->pdev->irq, netdev);
298 }
299
300 /**
301  * e1000_irq_disable - Mask off interrupt generation on the NIC
302  * @adapter: board private structure
303  **/
304 static void e1000_irq_disable(struct e1000_adapter *adapter)
305 {
306         struct e1000_hw *hw = &adapter->hw;
307
308         ew32(IMC, ~0);
309         E1000_WRITE_FLUSH();
310         synchronize_irq(adapter->pdev->irq);
311 }
312
313 /**
314  * e1000_irq_enable - Enable default interrupt generation settings
315  * @adapter: board private structure
316  **/
317 static void e1000_irq_enable(struct e1000_adapter *adapter)
318 {
319         struct e1000_hw *hw = &adapter->hw;
320
321         ew32(IMS, IMS_ENABLE_MASK);
322         E1000_WRITE_FLUSH();
323 }
324
325 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
326 {
327         struct e1000_hw *hw = &adapter->hw;
328         struct net_device *netdev = adapter->netdev;
329         u16 vid = hw->mng_cookie.vlan_id;
330         u16 old_vid = adapter->mng_vlan_id;
331
332         if (!e1000_vlan_used(adapter))
333                 return;
334
335         if (!test_bit(vid, adapter->active_vlans)) {
336                 if (hw->mng_cookie.status &
337                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
338                         e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
339                         adapter->mng_vlan_id = vid;
340                 } else {
341                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
342                 }
343                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
344                     (vid != old_vid) &&
345                     !test_bit(old_vid, adapter->active_vlans))
346                         e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
347                                                old_vid);
348         } else {
349                 adapter->mng_vlan_id = vid;
350         }
351 }
352
353 static void e1000_init_manageability(struct e1000_adapter *adapter)
354 {
355         struct e1000_hw *hw = &adapter->hw;
356
357         if (adapter->en_mng_pt) {
358                 u32 manc = er32(MANC);
359
360                 /* disable hardware interception of ARP */
361                 manc &= ~(E1000_MANC_ARP_EN);
362
363                 ew32(MANC, manc);
364         }
365 }
366
367 static void e1000_release_manageability(struct e1000_adapter *adapter)
368 {
369         struct e1000_hw *hw = &adapter->hw;
370
371         if (adapter->en_mng_pt) {
372                 u32 manc = er32(MANC);
373
374                 /* re-enable hardware interception of ARP */
375                 manc |= E1000_MANC_ARP_EN;
376
377                 ew32(MANC, manc);
378         }
379 }
380
381 /**
382  * e1000_configure - configure the hardware for RX and TX
383  * @adapter: board private structure
384  **/
385 static void e1000_configure(struct e1000_adapter *adapter)
386 {
387         struct net_device *netdev = adapter->netdev;
388         int i;
389
390         e1000_set_rx_mode(netdev);
391
392         e1000_restore_vlan(adapter);
393         e1000_init_manageability(adapter);
394
395         e1000_configure_tx(adapter);
396         e1000_setup_rctl(adapter);
397         e1000_configure_rx(adapter);
398         /* call E1000_DESC_UNUSED which always leaves
399          * at least 1 descriptor unused to make sure
400          * next_to_use != next_to_clean
401          */
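        /* For reference, E1000_DESC_UNUSED(R) (defined in e1000.h, not shown
         * in this file) is roughly:
         *   (((R)->next_to_clean > (R)->next_to_use ? 0 : (R)->count)
         *    + (R)->next_to_clean - (R)->next_to_use - 1)
         * i.e. the number of free descriptors minus one, so next_to_use can
         * never catch up with next_to_clean.
         */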
402         for (i = 0; i < adapter->num_rx_queues; i++) {
403                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
404                 adapter->alloc_rx_buf(adapter, ring,
405                                       E1000_DESC_UNUSED(ring));
406         }
407 }
408
409 int e1000_up(struct e1000_adapter *adapter)
410 {
411         struct e1000_hw *hw = &adapter->hw;
412
413         /* hardware has been reset, we need to reload some things */
414         e1000_configure(adapter);
415
416         clear_bit(__E1000_DOWN, &adapter->flags);
417
418         napi_enable(&adapter->napi);
419
420         e1000_irq_enable(adapter);
421
422         netif_wake_queue(adapter->netdev);
423
424         /* fire a link change interrupt to start the watchdog */
425         ew32(ICS, E1000_ICS_LSC);
426         return 0;
427 }
428
429 /**
430  * e1000_power_up_phy - restore link in case the phy was powered down
431  * @adapter: address of board private structure
432  *
433  * The phy may be powered down to save power and turn off link when the
434  * driver is unloaded and wake on lan is not enabled (among others)
435  * *** this routine MUST be followed by a call to e1000_reset ***
436  **/
437 void e1000_power_up_phy(struct e1000_adapter *adapter)
438 {
439         struct e1000_hw *hw = &adapter->hw;
440         u16 mii_reg = 0;
441
442         /* Just clear the power down bit to wake the phy back up */
443         if (hw->media_type == e1000_media_type_copper) {
444                 /* according to the manual, the phy will retain its
445                  * settings across a power-down/up cycle
446                  */
447                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
448                 mii_reg &= ~MII_CR_POWER_DOWN;
449                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
450         }
451 }
452
453 static void e1000_power_down_phy(struct e1000_adapter *adapter)
454 {
455         struct e1000_hw *hw = &adapter->hw;
456
457         /* Power down the PHY so no link is implied when interface is down *
458          * The PHY cannot be powered down if any of the following is true *
459          * (a) WoL is enabled
460          * (b) AMT is active
461          * (c) SoL/IDER session is active
462          */
463         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
464            hw->media_type == e1000_media_type_copper) {
465                 u16 mii_reg = 0;
466
467                 switch (hw->mac_type) {
468                 case e1000_82540:
469                 case e1000_82545:
470                 case e1000_82545_rev_3:
471                 case e1000_82546:
472                 case e1000_ce4100:
473                 case e1000_82546_rev_3:
474                 case e1000_82541:
475                 case e1000_82541_rev_2:
476                 case e1000_82547:
477                 case e1000_82547_rev_2:
478                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
479                                 goto out;
480                         break;
481                 default:
482                         goto out;
483                 }
484                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
485                 mii_reg |= MII_CR_POWER_DOWN;
486                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
487                 msleep(1);
488         }
489 out:
490         return;
491 }
492
493 static void e1000_down_and_stop(struct e1000_adapter *adapter)
494 {
495         set_bit(__E1000_DOWN, &adapter->flags);
496
497         /* Only kill reset task if adapter is not resetting */
498         if (!test_bit(__E1000_RESETTING, &adapter->flags))
499                 cancel_work_sync(&adapter->reset_task);
500
501         cancel_delayed_work_sync(&adapter->watchdog_task);
502         cancel_delayed_work_sync(&adapter->phy_info_task);
503         cancel_delayed_work_sync(&adapter->fifo_stall_task);
504 }
505
506 void e1000_down(struct e1000_adapter *adapter)
507 {
508         struct e1000_hw *hw = &adapter->hw;
509         struct net_device *netdev = adapter->netdev;
510         u32 rctl, tctl;
511
512
513         /* disable receives in the hardware */
514         rctl = er32(RCTL);
515         ew32(RCTL, rctl & ~E1000_RCTL_EN);
516         /* flush and sleep below */
517
518         netif_tx_disable(netdev);
519
520         /* disable transmits in the hardware */
521         tctl = er32(TCTL);
522         tctl &= ~E1000_TCTL_EN;
523         ew32(TCTL, tctl);
524         /* flush both disables and wait for them to finish */
525         E1000_WRITE_FLUSH();
526         msleep(10);
527
528         napi_disable(&adapter->napi);
529
530         e1000_irq_disable(adapter);
531
532         /* Setting DOWN must be after irq_disable to prevent
533          * a screaming interrupt.  Setting DOWN also prevents
534          * tasks from rescheduling.
535          */
536         e1000_down_and_stop(adapter);
537
538         adapter->link_speed = 0;
539         adapter->link_duplex = 0;
540         netif_carrier_off(netdev);
541
542         e1000_reset(adapter);
543         e1000_clean_all_tx_rings(adapter);
544         e1000_clean_all_rx_rings(adapter);
545 }
546
547 void e1000_reinit_locked(struct e1000_adapter *adapter)
548 {
549         WARN_ON(in_interrupt());
550         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
551                 msleep(1);
552         e1000_down(adapter);
553         e1000_up(adapter);
554         clear_bit(__E1000_RESETTING, &adapter->flags);
555 }
556
557 void e1000_reset(struct e1000_adapter *adapter)
558 {
559         struct e1000_hw *hw = &adapter->hw;
560         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
561         bool legacy_pba_adjust = false;
562         u16 hwm;
563
564         /* Repartition Pba for greater than 9k mtu
565          * To take effect CTRL.RST is required.
566          */
567
568         switch (hw->mac_type) {
569         case e1000_82542_rev2_0:
570         case e1000_82542_rev2_1:
571         case e1000_82543:
572         case e1000_82544:
573         case e1000_82540:
574         case e1000_82541:
575         case e1000_82541_rev_2:
576                 legacy_pba_adjust = true;
577                 pba = E1000_PBA_48K;
578                 break;
579         case e1000_82545:
580         case e1000_82545_rev_3:
581         case e1000_82546:
582         case e1000_ce4100:
583         case e1000_82546_rev_3:
584                 pba = E1000_PBA_48K;
585                 break;
586         case e1000_82547:
587         case e1000_82547_rev_2:
588                 legacy_pba_adjust = true;
589                 pba = E1000_PBA_30K;
590                 break;
591         case e1000_undefined:
592         case e1000_num_macs:
593                 break;
594         }
595
596         if (legacy_pba_adjust) {
597                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
598                         pba -= 8; /* allocate more FIFO for Tx */
599
600                 if (hw->mac_type == e1000_82547) {
601                         adapter->tx_fifo_head = 0;
602                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
603                         adapter->tx_fifo_size =
604                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
605                         atomic_set(&adapter->tx_fifo_stall, 0);
606                 }
607         } else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
608                 /* adjust PBA for jumbo frames */
609                 ew32(PBA, pba);
610
611                 /* To maintain wire speed transmits, the Tx FIFO should be
612                  * large enough to accommodate two full transmit packets,
613                  * rounded up to the next 1KB and expressed in KB.  Likewise,
614                  * the Rx FIFO should be large enough to accommodate at least
615                  * one full receive packet and is similarly rounded up and
616                  * expressed in KB.
617                  */
618                 pba = er32(PBA);
619                 /* upper 16 bits has Tx packet buffer allocation size in KB */
620                 tx_space = pba >> 16;
621                 /* lower 16 bits has Rx packet buffer allocation size in KB */
622                 pba &= 0xffff;
623                 /* the Tx fifo also stores 16 bytes of information about the Tx
624                  * but don't include ethernet FCS because hardware appends it
625                  */
626                 min_tx_space = (hw->max_frame_size +
627                                 sizeof(struct e1000_tx_desc) -
628                                 ETH_FCS_LEN) * 2;
629                 min_tx_space = ALIGN(min_tx_space, 1024);
630                 min_tx_space >>= 10;
631                 /* software strips receive CRC, so leave room for it */
632                 min_rx_space = hw->max_frame_size;
633                 min_rx_space = ALIGN(min_rx_space, 1024);
634                 min_rx_space >>= 10;
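                /* Worked example (values assumed, not taken from this file):
                 * with a 9000-byte MTU, max_frame_size is 9018, so
                 * min_tx_space is (9018 + 16 - 4) * 2 = 18060 -> rounded up
                 * to 18432 -> 18 KB, and min_rx_space is 9018 -> rounded up
                 * to 9216 -> 9 KB.
                 */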
635
636                 /* If current Tx allocation is less than the min Tx FIFO size,
637                  * and the min Tx FIFO size is less than the current Rx FIFO
638                  * allocation, take space away from current Rx allocation
639                  */
640                 if (tx_space < min_tx_space &&
641                     ((min_tx_space - tx_space) < pba)) {
642                         pba = pba - (min_tx_space - tx_space);
643
644                         /* PCI/PCIx hardware has PBA alignment constraints */
645                         switch (hw->mac_type) {
646                         case e1000_82545 ... e1000_82546_rev_3:
647                                 pba &= ~(E1000_PBA_8K - 1);
648                                 break;
649                         default:
650                                 break;
651                         }
652
653                         /* if short on Rx space, Rx wins and must trump Tx
654                          * adjustment or use Early Receive if available
655                          */
656                         if (pba < min_rx_space)
657                                 pba = min_rx_space;
658                 }
659         }
660
661         ew32(PBA, pba);
662
663         /* flow control settings:
664          * The high water mark must be low enough to fit one full frame
665          * (or the size used for early receive) above it in the Rx FIFO.
666          * Set it to the lower of:
667          * - 90% of the Rx FIFO size, and
668          * - the full Rx FIFO size minus the early receive size (for parts
669          *   with ERT support assuming ERT set to E1000_ERT_2048), or
670          * - the full Rx FIFO size minus one full frame
671          */
672         hwm = min(((pba << 10) * 9 / 10),
673                   ((pba << 10) - hw->max_frame_size));
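        /* Example (assuming E1000_PBA_48K == 48 and a standard 1500-byte MTU,
         * i.e. max_frame_size == 1518): hwm = min(49152 * 9 / 10,
         * 49152 - 1518) = min(44236, 47634) = 44236, which the 8-byte
         * alignment below turns into fc_high_water = 0xACC8.
         */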
674
675         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
676         hw->fc_low_water = hw->fc_high_water - 8;
677         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
678         hw->fc_send_xon = 1;
679         hw->fc = hw->original_fc;
680
681         /* Allow time for pending master requests to run */
682         e1000_reset_hw(hw);
683         if (hw->mac_type >= e1000_82544)
684                 ew32(WUC, 0);
685
686         if (e1000_init_hw(hw))
687                 e_dev_err("Hardware Error\n");
688         e1000_update_mng_vlan(adapter);
689
690         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
691         if (hw->mac_type >= e1000_82544 &&
692             hw->autoneg == 1 &&
693             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
694                 u32 ctrl = er32(CTRL);
695                 /* clear phy power management bit if we are in gig only mode,
696                  * which if enabled will attempt negotiation to 100Mb, which
697                  * can cause a loss of link at power off or driver unload
698                  */
699                 ctrl &= ~E1000_CTRL_SWDPIN3;
700                 ew32(CTRL, ctrl);
701         }
702
703         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
704         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
705
706         e1000_reset_adaptive(hw);
707         e1000_phy_get_info(hw, &adapter->phy_info);
708
709         e1000_release_manageability(adapter);
710 }
711
712 /* Dump the eeprom for users having checksum issues */
713 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
714 {
715         struct net_device *netdev = adapter->netdev;
716         struct ethtool_eeprom eeprom;
717         const struct ethtool_ops *ops = netdev->ethtool_ops;
718         u8 *data;
719         int i;
720         u16 csum_old, csum_new = 0;
721
722         eeprom.len = ops->get_eeprom_len(netdev);
723         eeprom.offset = 0;
724
725         data = kmalloc(eeprom.len, GFP_KERNEL);
726         if (!data)
727                 return;
728
729         ops->get_eeprom(netdev, &eeprom, data);
730
731         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
732                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
733         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
734                 csum_new += data[i] + (data[i + 1] << 8);
735         csum_new = EEPROM_SUM - csum_new;
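        /* The EEPROM convention (constants from e1000_hw.h, not shown here)
         * is that words 0x00-0x3F, including the checksum word at
         * EEPROM_CHECKSUM_REG, sum to EEPROM_SUM (0xBABA); csum_new above is
         * therefore the value the checksum word would need to hold for the
         * image to validate.
         */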
736
737         pr_err("/*********************/\n");
738         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
739         pr_err("Calculated              : 0x%04x\n", csum_new);
740
741         pr_err("Offset    Values\n");
742         pr_err("========  ======\n");
743         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
744
745         pr_err("Include this output when contacting your support provider.\n");
746         pr_err("This is not a software error! Something bad happened to\n");
747         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
748         pr_err("result in further problems, possibly loss of data,\n");
749         pr_err("corruption or system hangs!\n");
750         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
751         pr_err("which is invalid and requires you to set the proper MAC\n");
752         pr_err("address manually before continuing to enable this network\n");
753         pr_err("device. Please inspect the EEPROM dump and report the\n");
754         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
755         pr_err("/*********************/\n");
756
757         kfree(data);
758 }
759
760 /**
761  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
762  * @pdev: PCI device information struct
763  *
764  * Return true if an adapter needs ioport resources
765  **/
766 static int e1000_is_need_ioport(struct pci_dev *pdev)
767 {
768         switch (pdev->device) {
769         case E1000_DEV_ID_82540EM:
770         case E1000_DEV_ID_82540EM_LOM:
771         case E1000_DEV_ID_82540EP:
772         case E1000_DEV_ID_82540EP_LOM:
773         case E1000_DEV_ID_82540EP_LP:
774         case E1000_DEV_ID_82541EI:
775         case E1000_DEV_ID_82541EI_MOBILE:
776         case E1000_DEV_ID_82541ER:
777         case E1000_DEV_ID_82541ER_LOM:
778         case E1000_DEV_ID_82541GI:
779         case E1000_DEV_ID_82541GI_LF:
780         case E1000_DEV_ID_82541GI_MOBILE:
781         case E1000_DEV_ID_82544EI_COPPER:
782         case E1000_DEV_ID_82544EI_FIBER:
783         case E1000_DEV_ID_82544GC_COPPER:
784         case E1000_DEV_ID_82544GC_LOM:
785         case E1000_DEV_ID_82545EM_COPPER:
786         case E1000_DEV_ID_82545EM_FIBER:
787         case E1000_DEV_ID_82546EB_COPPER:
788         case E1000_DEV_ID_82546EB_FIBER:
789         case E1000_DEV_ID_82546EB_QUAD_COPPER:
790                 return true;
791         default:
792                 return false;
793         }
794 }
795
796 static netdev_features_t e1000_fix_features(struct net_device *netdev,
797         netdev_features_t features)
798 {
799         /* Since there is no support for separate Rx/Tx vlan accel
800          * enable/disable make sure Tx flag is always in same state as Rx.
801          */
802         if (features & NETIF_F_HW_VLAN_CTAG_RX)
803                 features |= NETIF_F_HW_VLAN_CTAG_TX;
804         else
805                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
806
807         return features;
808 }
809
810 static int e1000_set_features(struct net_device *netdev,
811         netdev_features_t features)
812 {
813         struct e1000_adapter *adapter = netdev_priv(netdev);
814         netdev_features_t changed = features ^ netdev->features;
815
816         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
817                 e1000_vlan_mode(netdev, features);
818
819         if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
820                 return 0;
821
822         netdev->features = features;
823         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
824
825         if (netif_running(netdev))
826                 e1000_reinit_locked(adapter);
827         else
828                 e1000_reset(adapter);
829
830         return 0;
831 }
832
833 static const struct net_device_ops e1000_netdev_ops = {
834         .ndo_open               = e1000_open,
835         .ndo_stop               = e1000_close,
836         .ndo_start_xmit         = e1000_xmit_frame,
837         .ndo_get_stats          = e1000_get_stats,
838         .ndo_set_rx_mode        = e1000_set_rx_mode,
839         .ndo_set_mac_address    = e1000_set_mac,
840         .ndo_tx_timeout         = e1000_tx_timeout,
841         .ndo_change_mtu         = e1000_change_mtu,
842         .ndo_do_ioctl           = e1000_ioctl,
843         .ndo_validate_addr      = eth_validate_addr,
844         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
845         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
846 #ifdef CONFIG_NET_POLL_CONTROLLER
847         .ndo_poll_controller    = e1000_netpoll,
848 #endif
849         .ndo_fix_features       = e1000_fix_features,
850         .ndo_set_features       = e1000_set_features,
851 };
852
853 /**
854  * e1000_init_hw_struct - initialize members of hw struct
855  * @adapter: board private struct
856  * @hw: structure used by e1000_hw.c
857  *
858  * Factors out initialization of the e1000_hw struct to its own function
859  * that can be called very early at init (just after struct allocation).
860  * Fields are initialized based on PCI device information and
861  * OS network device settings (MTU size).
862  * Returns negative error codes if MAC type setup fails.
863  */
864 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
865                                 struct e1000_hw *hw)
866 {
867         struct pci_dev *pdev = adapter->pdev;
868
869         /* PCI config space info */
870         hw->vendor_id = pdev->vendor;
871         hw->device_id = pdev->device;
872         hw->subsystem_vendor_id = pdev->subsystem_vendor;
873         hw->subsystem_id = pdev->subsystem_device;
874         hw->revision_id = pdev->revision;
875
876         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
877
878         hw->max_frame_size = adapter->netdev->mtu +
879                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
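        /* e.g. a standard 1500-byte MTU plus the 14-byte Ethernet header and
         * the 4-byte FCS gives a max_frame_size of 1518
         */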
880         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
881
882         /* identify the MAC */
883         if (e1000_set_mac_type(hw)) {
884                 e_err(probe, "Unknown MAC Type\n");
885                 return -EIO;
886         }
887
888         switch (hw->mac_type) {
889         default:
890                 break;
891         case e1000_82541:
892         case e1000_82547:
893         case e1000_82541_rev_2:
894         case e1000_82547_rev_2:
895                 hw->phy_init_script = 1;
896                 break;
897         }
898
899         e1000_set_media_type(hw);
900         e1000_get_bus_info(hw);
901
902         hw->wait_autoneg_complete = false;
903         hw->tbi_compatibility_en = true;
904         hw->adaptive_ifs = true;
905
906         /* Copper options */
907
908         if (hw->media_type == e1000_media_type_copper) {
909                 hw->mdix = AUTO_ALL_MODES;
910                 hw->disable_polarity_correction = false;
911                 hw->master_slave = E1000_MASTER_SLAVE;
912         }
913
914         return 0;
915 }
916
917 /**
918  * e1000_probe - Device Initialization Routine
919  * @pdev: PCI device information struct
920  * @ent: entry in e1000_pci_tbl
921  *
922  * Returns 0 on success, negative on failure
923  *
924  * e1000_probe initializes an adapter identified by a pci_dev structure.
925  * The OS initialization, configuring of the adapter private structure,
926  * and a hardware reset occur.
927  **/
928 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
929 {
930         struct net_device *netdev;
931         struct e1000_adapter *adapter;
932         struct e1000_hw *hw;
933
934         static int cards_found = 0;
935         static int global_quad_port_a = 0; /* global ksp3 port a indication */
936         int i, err, pci_using_dac;
937         u16 eeprom_data = 0;
938         u16 tmp = 0;
939         u16 eeprom_apme_mask = E1000_EEPROM_APME;
940         int bars, need_ioport;
941
942         /* do not allocate ioport bars when not needed */
943         need_ioport = e1000_is_need_ioport(pdev);
944         if (need_ioport) {
945                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
946                 err = pci_enable_device(pdev);
947         } else {
948                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
949                 err = pci_enable_device_mem(pdev);
950         }
951         if (err)
952                 return err;
953
954         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
955         if (err)
956                 goto err_pci_reg;
957
958         pci_set_master(pdev);
959         err = pci_save_state(pdev);
960         if (err)
961                 goto err_alloc_etherdev;
962
963         err = -ENOMEM;
964         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
965         if (!netdev)
966                 goto err_alloc_etherdev;
967
968         SET_NETDEV_DEV(netdev, &pdev->dev);
969
970         pci_set_drvdata(pdev, netdev);
971         adapter = netdev_priv(netdev);
972         adapter->netdev = netdev;
973         adapter->pdev = pdev;
974         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
975         adapter->bars = bars;
976         adapter->need_ioport = need_ioport;
977
978         hw = &adapter->hw;
979         hw->back = adapter;
980
981         err = -EIO;
982         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
983         if (!hw->hw_addr)
984                 goto err_ioremap;
985
986         if (adapter->need_ioport) {
987                 for (i = BAR_1; i <= BAR_5; i++) {
988                         if (pci_resource_len(pdev, i) == 0)
989                                 continue;
990                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
991                                 hw->io_base = pci_resource_start(pdev, i);
992                                 break;
993                         }
994                 }
995         }
996
997         /* make ready for any if (hw->...) below */
998         err = e1000_init_hw_struct(adapter, hw);
999         if (err)
1000                 goto err_sw_init;
1001
1002         /* there is a workaround being applied below that limits
1003          * 64-bit DMA addresses to 64-bit hardware.  There are some
1004          * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1005          */
1006         pci_using_dac = 0;
1007         if ((hw->bus_type == e1000_bus_type_pcix) &&
1008             !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1009                 pci_using_dac = 1;
1010         } else {
1011                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1012                 if (err) {
1013                         pr_err("No usable DMA config, aborting\n");
1014                         goto err_dma;
1015                 }
1016         }
1017
1018         netdev->netdev_ops = &e1000_netdev_ops;
1019         e1000_set_ethtool_ops(netdev);
1020         netdev->watchdog_timeo = 5 * HZ;
1021         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1022
1023         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1024
1025         adapter->bd_number = cards_found;
1026
1027         /* setup the private structure */
1028
1029         err = e1000_sw_init(adapter);
1030         if (err)
1031                 goto err_sw_init;
1032
1033         err = -EIO;
1034         if (hw->mac_type == e1000_ce4100) {
1035                 hw->ce4100_gbe_mdio_base_virt =
1036                                         ioremap(pci_resource_start(pdev, BAR_1),
1037                                                 pci_resource_len(pdev, BAR_1));
1038
1039                 if (!hw->ce4100_gbe_mdio_base_virt)
1040                         goto err_mdio_ioremap;
1041         }
1042
1043         if (hw->mac_type >= e1000_82543) {
1044                 netdev->hw_features = NETIF_F_SG |
1045                                    NETIF_F_HW_CSUM |
1046                                    NETIF_F_HW_VLAN_CTAG_RX;
1047                 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1048                                    NETIF_F_HW_VLAN_CTAG_FILTER;
1049         }
1050
1051         if ((hw->mac_type >= e1000_82544) &&
1052            (hw->mac_type != e1000_82547))
1053                 netdev->hw_features |= NETIF_F_TSO;
1054
1055         netdev->priv_flags |= IFF_SUPP_NOFCS;
1056
1057         netdev->features |= netdev->hw_features;
1058         netdev->hw_features |= (NETIF_F_RXCSUM |
1059                                 NETIF_F_RXALL |
1060                                 NETIF_F_RXFCS);
1061
1062         if (pci_using_dac) {
1063                 netdev->features |= NETIF_F_HIGHDMA;
1064                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1065         }
1066
1067         netdev->vlan_features |= (NETIF_F_TSO |
1068                                   NETIF_F_HW_CSUM |
1069                                   NETIF_F_SG);
1070
1071         netdev->priv_flags |= IFF_UNICAST_FLT;
1072
1073         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1074
1075         /* initialize eeprom parameters */
1076         if (e1000_init_eeprom_params(hw)) {
1077                 e_err(probe, "EEPROM initialization failed\n");
1078                 goto err_eeprom;
1079         }
1080
1081         /* before reading the EEPROM, reset the controller to
1082          * put the device in a known good starting state
1083          */
1084
1085         e1000_reset_hw(hw);
1086
1087         /* make sure the EEPROM is good */
1088         if (e1000_validate_eeprom_checksum(hw) < 0) {
1089                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1090                 e1000_dump_eeprom(adapter);
1091                 /* set MAC address to all zeroes to invalidate and temporarily
1092                  * disable this device for the user. This blocks regular
1093                  * traffic while still permitting ethtool ioctls from reaching
1094                  * the hardware as well as allowing the user to run the
1095                  * interface after manually setting a hw addr using
1096          * `ip link set address`
1097                  */
1098                 memset(hw->mac_addr, 0, netdev->addr_len);
1099         } else {
1100                 /* copy the MAC address out of the EEPROM */
1101                 if (e1000_read_mac_addr(hw))
1102                         e_err(probe, "EEPROM Read Error\n");
1103         }
1104         /* don't block initialization here due to bad MAC address */
1105         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1106
1107         if (!is_valid_ether_addr(netdev->dev_addr))
1108                 e_err(probe, "Invalid MAC Address\n");
1109
1110
1111         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1112         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1113                           e1000_82547_tx_fifo_stall_task);
1114         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1115         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1116
1117         e1000_check_options(adapter);
1118
1119         /* Initial Wake on LAN setting
1120          * If APM wake is enabled in the EEPROM,
1121          * enable the ACPI Magic Packet filter
1122          */
1123
1124         switch (hw->mac_type) {
1125         case e1000_82542_rev2_0:
1126         case e1000_82542_rev2_1:
1127         case e1000_82543:
1128                 break;
1129         case e1000_82544:
1130                 e1000_read_eeprom(hw,
1131                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1132                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1133                 break;
1134         case e1000_82546:
1135         case e1000_82546_rev_3:
1136                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1137                         e1000_read_eeprom(hw,
1138                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1139                         break;
1140                 }
1141                 /* Fall Through */
1142         default:
1143                 e1000_read_eeprom(hw,
1144                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1145                 break;
1146         }
1147         if (eeprom_data & eeprom_apme_mask)
1148                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1149
1150         /* now that we have the eeprom settings, apply the special cases
1151          * where the eeprom may be wrong or the board simply won't support
1152          * wake on lan on a particular port
1153          */
1154         switch (pdev->device) {
1155         case E1000_DEV_ID_82546GB_PCIE:
1156                 adapter->eeprom_wol = 0;
1157                 break;
1158         case E1000_DEV_ID_82546EB_FIBER:
1159         case E1000_DEV_ID_82546GB_FIBER:
1160                 /* Wake events only supported on port A for dual fiber
1161                  * regardless of eeprom setting
1162                  */
1163                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1164                         adapter->eeprom_wol = 0;
1165                 break;
1166         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1167                 /* if quad port adapter, disable WoL on all but port A */
1168                 if (global_quad_port_a != 0)
1169                         adapter->eeprom_wol = 0;
1170                 else
1171                         adapter->quad_port_a = true;
1172                 /* Reset for multiple quad port adapters */
1173                 if (++global_quad_port_a == 4)
1174                         global_quad_port_a = 0;
1175                 break;
1176         }
1177
1178         /* initialize the wol settings based on the eeprom settings */
1179         adapter->wol = adapter->eeprom_wol;
1180         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1181
1182         /* Auto detect PHY address */
1183         if (hw->mac_type == e1000_ce4100) {
1184                 for (i = 0; i < 32; i++) {
1185                         hw->phy_addr = i;
1186                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1187                         if (tmp == 0 || tmp == 0xFF) {
1188                                 if (i == 31)
1189                                         goto err_eeprom;
1190                                 continue;
1191                         } else
1192                                 break;
1193                 }
1194         }
1195
1196         /* reset the hardware with the new settings */
1197         e1000_reset(adapter);
1198
1199         strcpy(netdev->name, "eth%d");
1200         err = register_netdev(netdev);
1201         if (err)
1202                 goto err_register;
1203
1204         e1000_vlan_filter_on_off(adapter, false);
1205
1206         /* print bus type/speed/width info */
1207         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1208                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1209                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1210                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1211                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1212                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1213                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1214                netdev->dev_addr);
1215
1216         /* carrier off reporting is important to ethtool even BEFORE open */
1217         netif_carrier_off(netdev);
1218
1219         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1220
1221         cards_found++;
1222         return 0;
1223
1224 err_register:
1225 err_eeprom:
1226         e1000_phy_hw_reset(hw);
1227
1228         if (hw->flash_address)
1229                 iounmap(hw->flash_address);
1230         kfree(adapter->tx_ring);
1231         kfree(adapter->rx_ring);
1232 err_dma:
1233 err_sw_init:
1234 err_mdio_ioremap:
1235         iounmap(hw->ce4100_gbe_mdio_base_virt);
1236         iounmap(hw->hw_addr);
1237 err_ioremap:
1238         free_netdev(netdev);
1239 err_alloc_etherdev:
1240         pci_release_selected_regions(pdev, bars);
1241 err_pci_reg:
1242         pci_disable_device(pdev);
1243         return err;
1244 }
1245
1246 /**
1247  * e1000_remove - Device Removal Routine
1248  * @pdev: PCI device information struct
1249  *
1250  * e1000_remove is called by the PCI subsystem to alert the driver
1251  * that it should release a PCI device.  The could be caused by a
1252  * Hot-Plug event, or because the driver is going to be removed from
1253  * memory.
1254  **/
1255 static void e1000_remove(struct pci_dev *pdev)
1256 {
1257         struct net_device *netdev = pci_get_drvdata(pdev);
1258         struct e1000_adapter *adapter = netdev_priv(netdev);
1259         struct e1000_hw *hw = &adapter->hw;
1260
1261         e1000_down_and_stop(adapter);
1262         e1000_release_manageability(adapter);
1263
1264         unregister_netdev(netdev);
1265
1266         e1000_phy_hw_reset(hw);
1267
1268         kfree(adapter->tx_ring);
1269         kfree(adapter->rx_ring);
1270
1271         if (hw->mac_type == e1000_ce4100)
1272                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1273         iounmap(hw->hw_addr);
1274         if (hw->flash_address)
1275                 iounmap(hw->flash_address);
1276         pci_release_selected_regions(pdev, adapter->bars);
1277
1278         free_netdev(netdev);
1279
1280         pci_disable_device(pdev);
1281 }
1282
1283 /**
1284  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1285  * @adapter: board private structure to initialize
1286  *
1287  * e1000_sw_init initializes the Adapter private data structure.
1288  * e1000_init_hw_struct MUST be called before this function
1289  **/
1290 static int e1000_sw_init(struct e1000_adapter *adapter)
1291 {
1292         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1293
1294         adapter->num_tx_queues = 1;
1295         adapter->num_rx_queues = 1;
1296
1297         if (e1000_alloc_queues(adapter)) {
1298                 e_err(probe, "Unable to allocate memory for queues\n");
1299                 return -ENOMEM;
1300         }
1301
1302         /* Explicitly disable IRQ since the NIC can be in any state. */
1303         e1000_irq_disable(adapter);
1304
1305         spin_lock_init(&adapter->stats_lock);
1306
1307         set_bit(__E1000_DOWN, &adapter->flags);
1308
1309         return 0;
1310 }
1311
1312 /**
1313  * e1000_alloc_queues - Allocate memory for all rings
1314  * @adapter: board private structure to initialize
1315  *
1316  * We allocate one ring per queue at run-time since we don't know the
1317  * number of queues at compile-time.
1318  **/
1319 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1320 {
1321         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1322                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1323         if (!adapter->tx_ring)
1324                 return -ENOMEM;
1325
1326         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1327                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1328         if (!adapter->rx_ring) {
1329                 kfree(adapter->tx_ring);
1330                 return -ENOMEM;
1331         }
1332
1333         return E1000_SUCCESS;
1334 }
1335
1336 /**
1337  * e1000_open - Called when a network interface is made active
1338  * @netdev: network interface device structure
1339  *
1340  * Returns 0 on success, negative value on failure
1341  *
1342  * The open entry point is called when a network interface is made
1343  * active by the system (IFF_UP).  At this point all resources needed
1344  * for transmit and receive operations are allocated, the interrupt
1345  * handler is registered with the OS, the watchdog task is started,
1346  * and the stack is notified that the interface is ready.
1347  **/
1348 static int e1000_open(struct net_device *netdev)
1349 {
1350         struct e1000_adapter *adapter = netdev_priv(netdev);
1351         struct e1000_hw *hw = &adapter->hw;
1352         int err;
1353
1354         /* disallow open during test */
1355         if (test_bit(__E1000_TESTING, &adapter->flags))
1356                 return -EBUSY;
1357
1358         netif_carrier_off(netdev);
1359
1360         /* allocate transmit descriptors */
1361         err = e1000_setup_all_tx_resources(adapter);
1362         if (err)
1363                 goto err_setup_tx;
1364
1365         /* allocate receive descriptors */
1366         err = e1000_setup_all_rx_resources(adapter);
1367         if (err)
1368                 goto err_setup_rx;
1369
1370         e1000_power_up_phy(adapter);
1371
1372         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1373         if ((hw->mng_cookie.status &
1374                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1375                 e1000_update_mng_vlan(adapter);
1376         }
1377
1378         /* before we allocate an interrupt, we must be ready to handle it.
1379          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1380          * as soon as we call pci_request_irq, so we have to setup our
1381          * clean_rx handler before we do so.
1382          */
1383         e1000_configure(adapter);
1384
1385         err = e1000_request_irq(adapter);
1386         if (err)
1387                 goto err_req_irq;
1388
1389         /* From here on the code is the same as e1000_up() */
1390         clear_bit(__E1000_DOWN, &adapter->flags);
1391
1392         napi_enable(&adapter->napi);
1393
1394         e1000_irq_enable(adapter);
1395
1396         netif_start_queue(netdev);
1397
1398         /* fire a link status change interrupt to start the watchdog */
1399         ew32(ICS, E1000_ICS_LSC);
1400
1401         return E1000_SUCCESS;
1402
1403 err_req_irq:
1404         e1000_power_down_phy(adapter);
1405         e1000_free_all_rx_resources(adapter);
1406 err_setup_rx:
1407         e1000_free_all_tx_resources(adapter);
1408 err_setup_tx:
1409         e1000_reset(adapter);
1410
1411         return err;
1412 }
1413
1414 /**
1415  * e1000_close - Disables a network interface
1416  * @netdev: network interface device structure
1417  *
1418  * Returns 0, this is not allowed to fail
1419  *
1420  * The close entry point is called when an interface is de-activated
1421  * by the OS.  The hardware is still under the drivers control, but
1422  * needs to be disabled.  A global MAC reset is issued to stop the
1423  * hardware, and all transmit and receive resources are freed.
1424  **/
1425 static int e1000_close(struct net_device *netdev)
1426 {
1427         struct e1000_adapter *adapter = netdev_priv(netdev);
1428         struct e1000_hw *hw = &adapter->hw;
1429         int count = E1000_CHECK_RESET_COUNT;
1430
1431         while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1432                 usleep_range(10000, 20000);
1433
1434         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1435         e1000_down(adapter);
1436         e1000_power_down_phy(adapter);
1437         e1000_free_irq(adapter);
1438
1439         e1000_free_all_tx_resources(adapter);
1440         e1000_free_all_rx_resources(adapter);
1441
1442         /* kill manageability vlan ID if supported, but not if a vlan with
1443          * the same ID is registered on the host OS (let 8021q kill it)
1444          */
1445         if ((hw->mng_cookie.status &
1446              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1447             !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1448                 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1449                                        adapter->mng_vlan_id);
1450         }
1451
1452         return 0;
1453 }
1454
1455 /**
1456  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1457  * @adapter: address of board private structure
1458  * @start: address of beginning of memory
1459  * @len: length of memory
1460  **/
1461 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1462                                   unsigned long len)
1463 {
1464         struct e1000_hw *hw = &adapter->hw;
1465         unsigned long begin = (unsigned long)start;
1466         unsigned long end = begin + len;
1467
1468         /* First rev 82545 and 82546 must not allow any memory
1469          * write location to cross a 64kB boundary due to errata 23
1470          */
1471         if (hw->mac_type == e1000_82545 ||
1472             hw->mac_type == e1000_ce4100 ||
1473             hw->mac_type == e1000_82546) {
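                     /* begin and end - 1 fall in the same 64kB-aligned region
                      * only if their upper bits match, so any difference above
                      * bit 15 means the buffer crosses a 64kB boundary
                      */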
1474                 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1475         }
1476
1477         return true;
1478 }
1479
1480 /**
1481  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1482  * @adapter: board private structure
1483  * @txdr:    tx descriptor ring (for a specific queue) to setup
1484  *
1485  * Return 0 on success, negative on failure
1486  **/
1487 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1488                                     struct e1000_tx_ring *txdr)
1489 {
1490         struct pci_dev *pdev = adapter->pdev;
1491         int size;
1492
1493         size = sizeof(struct e1000_buffer) * txdr->count;
1494         txdr->buffer_info = vzalloc(size);
1495         if (!txdr->buffer_info)
1496                 return -ENOMEM;
1497
1498         /* round up to nearest 4K */
1499
1500         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1501         txdr->size = ALIGN(txdr->size, 4096);
1502
1503         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1504                                         GFP_KERNEL);
1505         if (!txdr->desc) {
1506 setup_tx_desc_die:
1507                 vfree(txdr->buffer_info);
1508                 return -ENOMEM;
1509         }
1510
1511         /* Fix for errata 23, can't cross 64kB boundary */
1512         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1513                 void *olddesc = txdr->desc;
1514                 dma_addr_t olddma = txdr->dma;
1515                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1516                       txdr->size, txdr->desc);
1517                 /* Try again, without freeing the previous */
1518                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1519                                                 &txdr->dma, GFP_KERNEL);
1520                 /* Failed allocation, critical failure */
1521                 if (!txdr->desc) {
1522                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1523                                           olddma);
1524                         goto setup_tx_desc_die;
1525                 }
1526
1527                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1528                         /* give up */
1529                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1530                                           txdr->dma);
1531                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1532                                           olddma);
1533                         e_err(probe, "Unable to allocate aligned memory "
1534                               "for the transmit descriptor ring\n");
1535                         vfree(txdr->buffer_info);
1536                         return -ENOMEM;
1537                 } else {
1538                         /* Free old allocation, new allocation was successful */
1539                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1540                                           olddma);
1541                 }
1542         }
1543         memset(txdr->desc, 0, txdr->size);
1544
1545         txdr->next_to_use = 0;
1546         txdr->next_to_clean = 0;
1547
1548         return 0;
1549 }
1550
1551 /**
1552  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1553  *                                (Descriptors) for all queues
1554  * @adapter: board private structure
1555  *
1556  * Return 0 on success, negative on failure
1557  **/
1558 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1559 {
1560         int i, err = 0;
1561
1562         for (i = 0; i < adapter->num_tx_queues; i++) {
1563                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1564                 if (err) {
1565                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1566                         for (i-- ; i >= 0; i--)
1567                                 e1000_free_tx_resources(adapter,
1568                                                         &adapter->tx_ring[i]);
1569                         break;
1570                 }
1571         }
1572
1573         return err;
1574 }
1575
1576 /**
1577  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1578  * @adapter: board private structure
1579  *
1580  * Configure the Tx unit of the MAC after a reset.
1581  **/
1582 static void e1000_configure_tx(struct e1000_adapter *adapter)
1583 {
1584         u64 tdba;
1585         struct e1000_hw *hw = &adapter->hw;
1586         u32 tdlen, tctl, tipg;
1587         u32 ipgr1, ipgr2;
1588
1589         /* Setup the HW Tx Head and Tail descriptor pointers */
1590
1591         switch (adapter->num_tx_queues) {
1592         case 1:
1593         default:
1594                 tdba = adapter->tx_ring[0].dma;
1595                 tdlen = adapter->tx_ring[0].count *
1596                         sizeof(struct e1000_tx_desc);
1597                 ew32(TDLEN, tdlen);
1598                 ew32(TDBAH, (tdba >> 32));
1599                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1600                 ew32(TDT, 0);
1601                 ew32(TDH, 0);
1602                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1603                                            E1000_TDH : E1000_82542_TDH);
1604                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1605                                            E1000_TDT : E1000_82542_TDT);
1606                 break;
1607         }
1608
1609         /* Set the default values for the Tx Inter Packet Gap timer */
1610         if ((hw->media_type == e1000_media_type_fiber ||
1611              hw->media_type == e1000_media_type_internal_serdes))
1612                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1613         else
1614                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1615
1616         switch (hw->mac_type) {
1617         case e1000_82542_rev2_0:
1618         case e1000_82542_rev2_1:
1619                 tipg = DEFAULT_82542_TIPG_IPGT;
1620                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1621                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1622                 break;
1623         default:
1624                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1625                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1626                 break;
1627         }
1628         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1629         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1630         ew32(TIPG, tipg);
1631
1632         /* Set the Tx Interrupt Delay register */
1633
1634         ew32(TIDV, adapter->tx_int_delay);
1635         if (hw->mac_type >= e1000_82540)
1636                 ew32(TADV, adapter->tx_abs_int_delay);
1637
1638         /* Program the Transmit Control Register */
1639
1640         tctl = er32(TCTL);
1641         tctl &= ~E1000_TCTL_CT;
1642         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1643                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1644
1645         e1000_config_collision_dist(hw);
1646
1647         /* Setup Transmit Descriptor Settings for eop descriptor */
1648         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1649
1650         /* only set IDE if we are delaying interrupts using the timers */
1651         if (adapter->tx_int_delay)
1652                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1653
1654         if (hw->mac_type < e1000_82543)
1655                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1656         else
1657                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1658
1659         /* Cache if we're 82544 running in PCI-X because we'll
1660          * need this to apply a workaround later in the send path.
1661          */
1662         if (hw->mac_type == e1000_82544 &&
1663             hw->bus_type == e1000_bus_type_pcix)
1664                 adapter->pcix_82544 = true;
1665
1666         ew32(TCTL, tctl);
1667
1668 }
1669
1670 /**
1671  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1672  * @adapter: board private structure
1673  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1674  *
1675  * Returns 0 on success, negative on failure
1676  **/
1677 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1678                                     struct e1000_rx_ring *rxdr)
1679 {
1680         struct pci_dev *pdev = adapter->pdev;
1681         int size, desc_len;
1682
1683         size = sizeof(struct e1000_buffer) * rxdr->count;
1684         rxdr->buffer_info = vzalloc(size);
1685         if (!rxdr->buffer_info)
1686                 return -ENOMEM;
1687
1688         desc_len = sizeof(struct e1000_rx_desc);
1689
1690         /* Round up to nearest 4K */
1691
1692         rxdr->size = rxdr->count * desc_len;
1693         rxdr->size = ALIGN(rxdr->size, 4096);
1694
1695         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1696                                         GFP_KERNEL);
1697         if (!rxdr->desc) {
1698 setup_rx_desc_die:
1699                 vfree(rxdr->buffer_info);
1700                 return -ENOMEM;
1701         }
1702
1703         /* Fix for errata 23, can't cross 64kB boundary */
1704         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1705                 void *olddesc = rxdr->desc;
1706                 dma_addr_t olddma = rxdr->dma;
1707                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1708                       rxdr->size, rxdr->desc);
1709                 /* Try again, without freeing the previous */
1710                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1711                                                 &rxdr->dma, GFP_KERNEL);
1712                 /* Failed allocation, critical failure */
1713                 if (!rxdr->desc) {
1714                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1715                                           olddma);
1716                         goto setup_rx_desc_die;
1717                 }
1718
1719                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1720                         /* give up */
1721                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1722                                           rxdr->dma);
1723                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1724                                           olddma);
1725                         e_err(probe, "Unable to allocate aligned memory for "
1726                               "the Rx descriptor ring\n");
1727                         goto setup_rx_desc_die;
1728                 } else {
1729                         /* Free old allocation, new allocation was successful */
1730                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1731                                           olddma);
1732                 }
1733         }
1734         memset(rxdr->desc, 0, rxdr->size);
1735
1736         rxdr->next_to_clean = 0;
1737         rxdr->next_to_use = 0;
1738         rxdr->rx_skb_top = NULL;
1739
1740         return 0;
1741 }
1742
1743 /**
1744  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1745  *                                (Descriptors) for all queues
1746  * @adapter: board private structure
1747  *
1748  * Return 0 on success, negative on failure
1749  **/
1750 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1751 {
1752         int i, err = 0;
1753
1754         for (i = 0; i < adapter->num_rx_queues; i++) {
1755                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1756                 if (err) {
1757                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1758                         for (i-- ; i >= 0; i--)
1759                                 e1000_free_rx_resources(adapter,
1760                                                         &adapter->rx_ring[i]);
1761                         break;
1762                 }
1763         }
1764
1765         return err;
1766 }
1767
1768 /**
1769  * e1000_setup_rctl - configure the receive control registers
1770  * @adapter: Board private structure
1771  **/
1772 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1773 {
1774         struct e1000_hw *hw = &adapter->hw;
1775         u32 rctl;
1776
1777         rctl = er32(RCTL);
1778
1779         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1780
1781         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1782                 E1000_RCTL_RDMTS_HALF |
1783                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1784
1785         if (hw->tbi_compatibility_on == 1)
1786                 rctl |= E1000_RCTL_SBP;
1787         else
1788                 rctl &= ~E1000_RCTL_SBP;
1789
1790         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1791                 rctl &= ~E1000_RCTL_LPE;
1792         else
1793                 rctl |= E1000_RCTL_LPE;
1794
1795         /* Setup buffer sizes */
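             /* Start from the extended (BSEX) size encoding; the default
              * 2048-byte case below clears BSEX again since it does not
              * need the extension.
              */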
1796         rctl &= ~E1000_RCTL_SZ_4096;
1797         rctl |= E1000_RCTL_BSEX;
1798         switch (adapter->rx_buffer_len) {
1799                 case E1000_RXBUFFER_2048:
1800                 default:
1801                         rctl |= E1000_RCTL_SZ_2048;
1802                         rctl &= ~E1000_RCTL_BSEX;
1803                         break;
1804                 case E1000_RXBUFFER_4096:
1805                         rctl |= E1000_RCTL_SZ_4096;
1806                         break;
1807                 case E1000_RXBUFFER_8192:
1808                         rctl |= E1000_RCTL_SZ_8192;
1809                         break;
1810                 case E1000_RXBUFFER_16384:
1811                         rctl |= E1000_RCTL_SZ_16384;
1812                         break;
1813         }
1814
1815         /* This is useful for sniffing bad packets. */
1816         if (adapter->netdev->features & NETIF_F_RXALL) {
1817                 /* UPE and MPE will be handled by normal PROMISC logic
1818                  * in e1000_set_rx_mode
1819                  */
1820                 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1821                          E1000_RCTL_BAM | /* RX All Bcast Pkts */
1822                          E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1823
1824                 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1825                           E1000_RCTL_DPF | /* Allow filtered pause */
1826                           E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1827                 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1828                  * and that breaks VLANs.
1829                  */
1830         }
1831
1832         ew32(RCTL, rctl);
1833 }
1834
1835 /**
1836  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1837  * @adapter: board private structure
1838  *
1839  * Configure the Rx unit of the MAC after a reset.
1840  **/
1841 static void e1000_configure_rx(struct e1000_adapter *adapter)
1842 {
1843         u64 rdba;
1844         struct e1000_hw *hw = &adapter->hw;
1845         u32 rdlen, rctl, rxcsum;
1846
1847         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1848                 rdlen = adapter->rx_ring[0].count *
1849                         sizeof(struct e1000_rx_desc);
1850                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1851                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1852         } else {
1853                 rdlen = adapter->rx_ring[0].count *
1854                         sizeof(struct e1000_rx_desc);
1855                 adapter->clean_rx = e1000_clean_rx_irq;
1856                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1857         }
1858
1859         /* disable receives while setting up the descriptors */
1860         rctl = er32(RCTL);
1861         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1862
1863         /* set the Receive Delay Timer Register */
1864         ew32(RDTR, adapter->rx_int_delay);
1865
1866         if (hw->mac_type >= e1000_82540) {
1867                 ew32(RADV, adapter->rx_abs_int_delay);
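                     /* the ITR register interval is in 256 ns units, so this
                      * converts the interrupts/sec value into register units
                      */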
1868                 if (adapter->itr_setting != 0)
1869                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1870         }
1871
1872         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1873          * the Base and Length of the Rx Descriptor Ring
1874          */
1875         switch (adapter->num_rx_queues) {
1876         case 1:
1877         default:
1878                 rdba = adapter->rx_ring[0].dma;
1879                 ew32(RDLEN, rdlen);
1880                 ew32(RDBAH, (rdba >> 32));
1881                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1882                 ew32(RDT, 0);
1883                 ew32(RDH, 0);
1884                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1885                                            E1000_RDH : E1000_82542_RDH);
1886                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1887                                            E1000_RDT : E1000_82542_RDT);
1888                 break;
1889         }
1890
1891         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1892         if (hw->mac_type >= e1000_82543) {
1893                 rxcsum = er32(RXCSUM);
1894                 if (adapter->rx_csum)
1895                         rxcsum |= E1000_RXCSUM_TUOFL;
1896                 else
1897                         /* don't need to clear IPPCSE as it defaults to 0 */
1898                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1899                 ew32(RXCSUM, rxcsum);
1900         }
1901
1902         /* Enable Receives */
1903         ew32(RCTL, rctl | E1000_RCTL_EN);
1904 }
1905
1906 /**
1907  * e1000_free_tx_resources - Free Tx Resources per Queue
1908  * @adapter: board private structure
1909  * @tx_ring: Tx descriptor ring for a specific queue
1910  *
1911  * Free all transmit software resources
1912  **/
1913 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1914                                     struct e1000_tx_ring *tx_ring)
1915 {
1916         struct pci_dev *pdev = adapter->pdev;
1917
1918         e1000_clean_tx_ring(adapter, tx_ring);
1919
1920         vfree(tx_ring->buffer_info);
1921         tx_ring->buffer_info = NULL;
1922
1923         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1924                           tx_ring->dma);
1925
1926         tx_ring->desc = NULL;
1927 }
1928
1929 /**
1930  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1931  * @adapter: board private structure
1932  *
1933  * Free all transmit software resources
1934  **/
1935 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1936 {
1937         int i;
1938
1939         for (i = 0; i < adapter->num_tx_queues; i++)
1940                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1941 }
1942
1943 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1944                                              struct e1000_buffer *buffer_info)
1945 {
1946         if (buffer_info->dma) {
1947                 if (buffer_info->mapped_as_page)
1948                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1949                                        buffer_info->length, DMA_TO_DEVICE);
1950                 else
1951                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1952                                          buffer_info->length,
1953                                          DMA_TO_DEVICE);
1954                 buffer_info->dma = 0;
1955         }
1956         if (buffer_info->skb) {
1957                 dev_kfree_skb_any(buffer_info->skb);
1958                 buffer_info->skb = NULL;
1959         }
1960         buffer_info->time_stamp = 0;
1961         /* buffer_info must be completely set up in the transmit path */
1962 }
1963
1964 /**
1965  * e1000_clean_tx_ring - Free Tx Buffers
1966  * @adapter: board private structure
1967  * @tx_ring: ring to be cleaned
1968  **/
1969 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1970                                 struct e1000_tx_ring *tx_ring)
1971 {
1972         struct e1000_hw *hw = &adapter->hw;
1973         struct e1000_buffer *buffer_info;
1974         unsigned long size;
1975         unsigned int i;
1976
1977         /* Free all the Tx ring sk_buffs */
1978
1979         for (i = 0; i < tx_ring->count; i++) {
1980                 buffer_info = &tx_ring->buffer_info[i];
1981                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1982         }
1983
1984         netdev_reset_queue(adapter->netdev);
1985         size = sizeof(struct e1000_buffer) * tx_ring->count;
1986         memset(tx_ring->buffer_info, 0, size);
1987
1988         /* Zero out the descriptor ring */
1989
1990         memset(tx_ring->desc, 0, tx_ring->size);
1991
1992         tx_ring->next_to_use = 0;
1993         tx_ring->next_to_clean = 0;
1994         tx_ring->last_tx_tso = false;
1995
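             /* reset the hardware head and tail registers to match the
              * cleared software ring state
              */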
1996         writel(0, hw->hw_addr + tx_ring->tdh);
1997         writel(0, hw->hw_addr + tx_ring->tdt);
1998 }
1999
2000 /**
2001  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2002  * @adapter: board private structure
2003  **/
2004 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2005 {
2006         int i;
2007
2008         for (i = 0; i < adapter->num_tx_queues; i++)
2009                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2010 }
2011
2012 /**
2013  * e1000_free_rx_resources - Free Rx Resources
2014  * @adapter: board private structure
2015  * @rx_ring: ring to clean the resources from
2016  *
2017  * Free all receive software resources
2018  **/
2019 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2020                                     struct e1000_rx_ring *rx_ring)
2021 {
2022         struct pci_dev *pdev = adapter->pdev;
2023
2024         e1000_clean_rx_ring(adapter, rx_ring);
2025
2026         vfree(rx_ring->buffer_info);
2027         rx_ring->buffer_info = NULL;
2028
2029         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2030                           rx_ring->dma);
2031
2032         rx_ring->desc = NULL;
2033 }
2034
2035 /**
2036  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2037  * @adapter: board private structure
2038  *
2039  * Free all receive software resources
2040  **/
2041 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2042 {
2043         int i;
2044
2045         for (i = 0; i < adapter->num_rx_queues; i++)
2046                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2047 }
2048
2049 /**
2050  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2051  * @adapter: board private structure
2052  * @rx_ring: ring to free buffers from
2053  **/
2054 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2055                                 struct e1000_rx_ring *rx_ring)
2056 {
2057         struct e1000_hw *hw = &adapter->hw;
2058         struct e1000_buffer *buffer_info;
2059         struct pci_dev *pdev = adapter->pdev;
2060         unsigned long size;
2061         unsigned int i;
2062
2063         /* Free all the Rx ring sk_buffs */
2064         for (i = 0; i < rx_ring->count; i++) {
2065                 buffer_info = &rx_ring->buffer_info[i];
2066                 if (buffer_info->dma &&
2067                     adapter->clean_rx == e1000_clean_rx_irq) {
2068                         dma_unmap_single(&pdev->dev, buffer_info->dma,
2069                                          buffer_info->length,
2070                                          DMA_FROM_DEVICE);
2071                 } else if (buffer_info->dma &&
2072                            adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2073                         dma_unmap_page(&pdev->dev, buffer_info->dma,
2074                                        buffer_info->length,
2075                                        DMA_FROM_DEVICE);
2076                 }
2077
2078                 buffer_info->dma = 0;
2079                 if (buffer_info->page) {
2080                         put_page(buffer_info->page);
2081                         buffer_info->page = NULL;
2082                 }
2083                 if (buffer_info->skb) {
2084                         dev_kfree_skb(buffer_info->skb);
2085                         buffer_info->skb = NULL;
2086                 }
2087         }
2088
2089         /* there also may be some cached data from a chained receive */
2090         if (rx_ring->rx_skb_top) {
2091                 dev_kfree_skb(rx_ring->rx_skb_top);
2092                 rx_ring->rx_skb_top = NULL;
2093         }
2094
2095         size = sizeof(struct e1000_buffer) * rx_ring->count;
2096         memset(rx_ring->buffer_info, 0, size);
2097
2098         /* Zero out the descriptor ring */
2099         memset(rx_ring->desc, 0, rx_ring->size);
2100
2101         rx_ring->next_to_clean = 0;
2102         rx_ring->next_to_use = 0;
2103
2104         writel(0, hw->hw_addr + rx_ring->rdh);
2105         writel(0, hw->hw_addr + rx_ring->rdt);
2106 }
2107
2108 /**
2109  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2110  * @adapter: board private structure
2111  **/
2112 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2113 {
2114         int i;
2115
2116         for (i = 0; i < adapter->num_rx_queues; i++)
2117                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2118 }
2119
2120 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2121  * and memory write-and-invalidate (MWI) disabled for certain operations
2122  */
2123 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2124 {
2125         struct e1000_hw *hw = &adapter->hw;
2126         struct net_device *netdev = adapter->netdev;
2127         u32 rctl;
2128
2129         e1000_pci_clear_mwi(hw);
2130
2131         rctl = er32(RCTL);
2132         rctl |= E1000_RCTL_RST;
2133         ew32(RCTL, rctl);
2134         E1000_WRITE_FLUSH();
2135         mdelay(5);
2136
2137         if (netif_running(netdev))
2138                 e1000_clean_all_rx_rings(adapter);
2139 }
2140
2141 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2142 {
2143         struct e1000_hw *hw = &adapter->hw;
2144         struct net_device *netdev = adapter->netdev;
2145         u32 rctl;
2146
2147         rctl = er32(RCTL);
2148         rctl &= ~E1000_RCTL_RST;
2149         ew32(RCTL, rctl);
2150         E1000_WRITE_FLUSH();
2151         mdelay(5);
2152
2153         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2154                 e1000_pci_set_mwi(hw);
2155
2156         if (netif_running(netdev)) {
2157                 /* No need to loop, because 82542 supports only 1 queue */
2158                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2159                 e1000_configure_rx(adapter);
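                     /* refill the Rx ring that was emptied while the receive
                      * unit was held in reset
                      */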
2160                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2161         }
2162 }
2163
2164 /**
2165  * e1000_set_mac - Change the Ethernet Address of the NIC
2166  * @netdev: network interface device structure
2167  * @p: pointer to an address structure
2168  *
2169  * Returns 0 on success, negative on failure
2170  **/
2171 static int e1000_set_mac(struct net_device *netdev, void *p)
2172 {
2173         struct e1000_adapter *adapter = netdev_priv(netdev);
2174         struct e1000_hw *hw = &adapter->hw;
2175         struct sockaddr *addr = p;
2176
2177         if (!is_valid_ether_addr(addr->sa_data))
2178                 return -EADDRNOTAVAIL;
2179
2180         /* 82542 2.0 needs to be in reset to write receive address registers */
2181
2182         if (hw->mac_type == e1000_82542_rev2_0)
2183                 e1000_enter_82542_rst(adapter);
2184
2185         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2186         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2187
2188         e1000_rar_set(hw, hw->mac_addr, 0);
2189
2190         if (hw->mac_type == e1000_82542_rev2_0)
2191                 e1000_leave_82542_rst(adapter);
2192
2193         return 0;
2194 }
2195
2196 /**
2197  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2198  * @netdev: network interface device structure
2199  *
2200  * The set_rx_mode entry point is called whenever the unicast or multicast
2201  * address lists or the network interface flags are updated. This routine is
2202  * responsible for configuring the hardware for proper unicast, multicast,
2203  * promiscuous mode, and all-multi behavior.
2204  **/
2205 static void e1000_set_rx_mode(struct net_device *netdev)
2206 {
2207         struct e1000_adapter *adapter = netdev_priv(netdev);
2208         struct e1000_hw *hw = &adapter->hw;
2209         struct netdev_hw_addr *ha;
2210         bool use_uc = false;
2211         u32 rctl;
2212         u32 hash_value;
2213         int i, rar_entries = E1000_RAR_ENTRIES;
2214         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2215         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2216
2217         if (!mcarray)
2218                 return;
2219
2220         /* Check for Promiscuous and All Multicast modes */
2221
2222         rctl = er32(RCTL);
2223
2224         if (netdev->flags & IFF_PROMISC) {
2225                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2226                 rctl &= ~E1000_RCTL_VFE;
2227         } else {
2228                 if (netdev->flags & IFF_ALLMULTI)
2229                         rctl |= E1000_RCTL_MPE;
2230                 else
2231                         rctl &= ~E1000_RCTL_MPE;
2232                 /* Enable VLAN filter if there is a VLAN */
2233                 if (e1000_vlan_used(adapter))
2234                         rctl |= E1000_RCTL_VFE;
2235         }
2236
2237         if (netdev_uc_count(netdev) > rar_entries - 1) {
2238                 rctl |= E1000_RCTL_UPE;
2239         } else if (!(netdev->flags & IFF_PROMISC)) {
2240                 rctl &= ~E1000_RCTL_UPE;
2241                 use_uc = true;
2242         }
2243
2244         ew32(RCTL, rctl);
2245
2246         /* 82542 2.0 needs to be in reset to write receive address registers */
2247
2248         if (hw->mac_type == e1000_82542_rev2_0)
2249                 e1000_enter_82542_rst(adapter);
2250
2251         /* load the first 14 addresses into the exact filters 1-14. Unicast
2252          * addresses take precedence to avoid disabling unicast filtering
2253          * when possible.
2254          *
2255          * RAR 0 is used for the station MAC address.
2256          * If there are not 14 addresses, go ahead and clear the filters.
2257          */
2258         i = 1;
2259         if (use_uc)
2260                 netdev_for_each_uc_addr(ha, netdev) {
2261                         if (i == rar_entries)
2262                                 break;
2263                         e1000_rar_set(hw, ha->addr, i++);
2264                 }
2265
2266         netdev_for_each_mc_addr(ha, netdev) {
2267                 if (i == rar_entries) {
2268                         /* load any remaining addresses into the hash table */
2269                         u32 hash_reg, hash_bit, mta;
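                             /* the multicast table array is mta_reg_count
                              * 32-bit registers: hash bits 11:5 select the
                              * register, bits 4:0 the bit within it
                              */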
2270                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
2271                         hash_reg = (hash_value >> 5) & 0x7F;
2272                         hash_bit = hash_value & 0x1F;
2273                         mta = (1 << hash_bit);
2274                         mcarray[hash_reg] |= mta;
2275                 } else {
2276                         e1000_rar_set(hw, ha->addr, i++);
2277                 }
2278         }
2279
2280         for (; i < rar_entries; i++) {
2281                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2282                 E1000_WRITE_FLUSH();
2283                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2284                 E1000_WRITE_FLUSH();
2285         }
2286
2287         /* Write the hash table completely; writing from the bottom avoids
2288          * both problems with write-combining chipsets and flushing each write
2289          */
2290         for (i = mta_reg_count - 1; i >= 0 ; i--) {
2291                 /* The 82544 has an errata where writing odd
2292                  * offsets overwrites the previous even offset, but writing
2293                  * backwards over the range solves the issue by always
2294                  * writing the odd offset first
2295                  */
2296                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2297         }
2298         E1000_WRITE_FLUSH();
2299
2300         if (hw->mac_type == e1000_82542_rev2_0)
2301                 e1000_leave_82542_rst(adapter);
2302
2303         kfree(mcarray);
2304 }
2305
2306 /**
2307  * e1000_update_phy_info_task - get phy info
2308  * @work: work struct contained inside adapter struct
2309  *
2310  * Need to wait a few seconds after link up to get diagnostic information from
2311  * the phy
2312  */
2313 static void e1000_update_phy_info_task(struct work_struct *work)
2314 {
2315         struct e1000_adapter *adapter = container_of(work,
2316                                                      struct e1000_adapter,
2317                                                      phy_info_task.work);
2318
2319         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2320 }
2321
2322 /**
2323  * e1000_82547_tx_fifo_stall_task - task to complete work
2324  * @work: work struct contained inside adapter struct
2325  **/
2326 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2327 {
2328         struct e1000_adapter *adapter = container_of(work,
2329                                                      struct e1000_adapter,
2330                                                      fifo_stall_task.work);
2331         struct e1000_hw *hw = &adapter->hw;
2332         struct net_device *netdev = adapter->netdev;
2333         u32 tctl;
2334
2335         if (atomic_read(&adapter->tx_fifo_stall)) {
2336                 if ((er32(TDT) == er32(TDH)) &&
2337                    (er32(TDFT) == er32(TDFH)) &&
2338                    (er32(TDFTS) == er32(TDFHS))) {
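                             /* both the Tx descriptor ring and the on-chip
                              * Tx FIFO have drained, so it is safe to rewrite
                              * the FIFO pointers and re-enable transmits
                              */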
2339                         tctl = er32(TCTL);
2340                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2341                         ew32(TDFT, adapter->tx_head_addr);
2342                         ew32(TDFH, adapter->tx_head_addr);
2343                         ew32(TDFTS, adapter->tx_head_addr);
2344                         ew32(TDFHS, adapter->tx_head_addr);
2345                         ew32(TCTL, tctl);
2346                         E1000_WRITE_FLUSH();
2347
2348                         adapter->tx_fifo_head = 0;
2349                         atomic_set(&adapter->tx_fifo_stall, 0);
2350                         netif_wake_queue(netdev);
2351                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2352                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2353                 }
2354         }
2355 }
2356
2357 bool e1000_has_link(struct e1000_adapter *adapter)
2358 {
2359         struct e1000_hw *hw = &adapter->hw;
2360         bool link_active = false;
2361
2362         /* get_link_status is set on LSC (link status) interrupt or rx
2363          * sequence error interrupt (except on intel ce4100).
2364          * get_link_status stays set until e1000_check_for_link
2365          * clears it once link is established; this applies to copper
2366          * adapters ONLY
2367          */
2368         switch (hw->media_type) {
2369         case e1000_media_type_copper:
2370                 if (hw->mac_type == e1000_ce4100)
2371                         hw->get_link_status = 1;
2372                 if (hw->get_link_status) {
2373                         e1000_check_for_link(hw);
2374                         link_active = !hw->get_link_status;
2375                 } else {
2376                         link_active = true;
2377                 }
2378                 break;
2379         case e1000_media_type_fiber:
2380                 e1000_check_for_link(hw);
2381                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2382                 break;
2383         case e1000_media_type_internal_serdes:
2384                 e1000_check_for_link(hw);
2385                 link_active = hw->serdes_has_link;
2386                 break;
2387         default:
2388                 break;
2389         }
2390
2391         return link_active;
2392 }
2393
2394 /**
2395  * e1000_watchdog - work function
2396  * @work: work struct contained inside adapter struct
2397  **/
2398 static void e1000_watchdog(struct work_struct *work)
2399 {
2400         struct e1000_adapter *adapter = container_of(work,
2401                                                      struct e1000_adapter,
2402                                                      watchdog_task.work);
2403         struct e1000_hw *hw = &adapter->hw;
2404         struct net_device *netdev = adapter->netdev;
2405         struct e1000_tx_ring *txdr = adapter->tx_ring;
2406         u32 link, tctl;
2407
2408         link = e1000_has_link(adapter);
2409         if ((netif_carrier_ok(netdev)) && link)
2410                 goto link_up;
2411
2412         if (link) {
2413                 if (!netif_carrier_ok(netdev)) {
2414                         u32 ctrl;
2415                         bool txb2b = true;
2416                         /* update snapshot of PHY registers on LSC */
2417                         e1000_get_speed_and_duplex(hw,
2418                                                    &adapter->link_speed,
2419                                                    &adapter->link_duplex);
2420
2421                         ctrl = er32(CTRL);
2422                         pr_info("%s NIC Link is Up %d Mbps %s, "
2423                                 "Flow Control: %s\n",
2424                                 netdev->name,
2425                                 adapter->link_speed,
2426                                 adapter->link_duplex == FULL_DUPLEX ?
2427                                 "Full Duplex" : "Half Duplex",
2428                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2429                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2430                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2431                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2432
2433                         /* adjust timeout factor according to speed/duplex */
2434                         adapter->tx_timeout_factor = 1;
2435                         switch (adapter->link_speed) {
2436                         case SPEED_10:
2437                                 txb2b = false;
2438                                 adapter->tx_timeout_factor = 16;
2439                                 break;
2440                         case SPEED_100:
2441                                 txb2b = false;
2442                                 /* maybe add some timeout factor ? */
2443                                 break;
2444                         }
2445
2446                         /* enable transmits in the hardware */
2447                         tctl = er32(TCTL);
2448                         tctl |= E1000_TCTL_EN;
2449                         ew32(TCTL, tctl);
2450
2451                         netif_carrier_on(netdev);
2452                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2453                                 schedule_delayed_work(&adapter->phy_info_task,
2454                                                       2 * HZ);
2455                         adapter->smartspeed = 0;
2456                 }
2457         } else {
2458                 if (netif_carrier_ok(netdev)) {
2459                         adapter->link_speed = 0;
2460                         adapter->link_duplex = 0;
2461                         pr_info("%s NIC Link is Down\n",
2462                                 netdev->name);
2463                         netif_carrier_off(netdev);
2464
2465                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2466                                 schedule_delayed_work(&adapter->phy_info_task,
2467                                                       2 * HZ);
2468                 }
2469
2470                 e1000_smartspeed(adapter);
2471         }
2472
2473 link_up:
2474         e1000_update_stats(adapter);
2475
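             /* per-interval deltas: the collision/tx packet deltas are used by
              * e1000_update_adaptive(), gorcl/gotcl by the ITR update below
              */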
2476         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2477         adapter->tpt_old = adapter->stats.tpt;
2478         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2479         adapter->colc_old = adapter->stats.colc;
2480
2481         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2482         adapter->gorcl_old = adapter->stats.gorcl;
2483         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2484         adapter->gotcl_old = adapter->stats.gotcl;
2485
2486         e1000_update_adaptive(hw);
2487
2488         if (!netif_carrier_ok(netdev)) {
2489                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2490                         /* We've lost link, so the controller stops DMA,
2491                          * but we've got queued Tx work that's never going
2492                          * to get done, so reset controller to flush Tx.
2493                          * (Do the reset outside of interrupt context).
2494                          */
2495                         adapter->tx_timeout_count++;
2496                         schedule_work(&adapter->reset_task);
2497                         /* exit immediately since reset is imminent */
2498                         return;
2499                 }
2500         }
2501
2502         /* Simple mode for Interrupt Throttle Rate (ITR) */
2503         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2504                 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2505                  * Total asymmetrical Tx or Rx gets ITR=8000;
2506                  * everyone else is between 2000-8000.
2507                  */
2508                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2509                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2510                             adapter->gotcl - adapter->gorcl :
2511                             adapter->gorcl - adapter->gotcl) / 10000;
2512                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2513
2514                 ew32(ITR, 1000000000 / (itr * 256));
2515         }
2516
2517         /* Cause software interrupt to ensure rx ring is cleaned */
2518         ew32(ICS, E1000_ICS_RXDMT0);
2519
2520         /* Force detection of hung controller every watchdog period */
2521         adapter->detect_tx_hung = true;
2522
2523         /* Reschedule the task */
2524         if (!test_bit(__E1000_DOWN, &adapter->flags))
2525                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2526 }
2527
2528 enum latency_range {
2529         lowest_latency = 0,
2530         low_latency = 1,
2531         bulk_latency = 2,
2532         latency_invalid = 255
2533 };
2534
2535 /**
2536  * e1000_update_itr - update the dynamic ITR value based on statistics
2537  * @adapter: pointer to adapter
2538  * @itr_setting: current adapter->itr
2539  * @packets: the number of packets during this measurement interval
2540  * @bytes: the number of bytes during this measurement interval
2541  *
2542  *      Stores a new ITR value based on packets and byte
2543  *      counts during the last interrupt.  The advantage of per interrupt
2544  *      computation is faster updates and more accurate ITR for the current
2545  *      traffic pattern.  Constants in this function were computed
2546  *      based on theoretical maximum wire speed and thresholds were set based
2547  *      on testing data as well as attempting to minimize response time
2548  *      while increasing bulk throughput.
2549  *      This functionality is controlled by the InterruptThrottleRate module
2550  *      parameter (see e1000_param.c).
2551  **/
2552 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2553                                      u16 itr_setting, int packets, int bytes)
2554 {
2555         unsigned int retval = itr_setting;
2556         struct e1000_hw *hw = &adapter->hw;
2557
2558         if (unlikely(hw->mac_type < e1000_82540))
2559                 goto update_itr_done;
2560
2561         if (packets == 0)
2562                 goto update_itr_done;
2563
2564         switch (itr_setting) {
2565         case lowest_latency:
2566                 /* jumbo frames get bulk treatment */
2567                 if (bytes/packets > 8000)
2568                         retval = bulk_latency;
2569                 else if ((packets < 5) && (bytes > 512))
2570                         retval = low_latency;
2571                 break;
2572         case low_latency:  /* 50 usec aka 20000 ints/s */
2573                 if (bytes > 10000) {
2574                         /* jumbo frames need bulk latency setting */
2575                         if (bytes/packets > 8000)
2576                                 retval = bulk_latency;
2577                         else if ((packets < 10) || ((bytes/packets) > 1200))
2578                                 retval = bulk_latency;
2579                         else if ((packets > 35))
2580                                 retval = lowest_latency;
2581                 } else if (bytes/packets > 2000)
2582                         retval = bulk_latency;
2583                 else if (packets <= 2 && bytes < 512)
2584                         retval = lowest_latency;
2585                 break;
2586         case bulk_latency: /* 250 usec aka 4000 ints/s */
2587                 if (bytes > 25000) {
2588                         if (packets > 35)
2589                                 retval = low_latency;
2590                 } else if (bytes < 6000) {
2591                         retval = low_latency;
2592                 }
2593                 break;
2594         }
2595
2596 update_itr_done:
2597         return retval;
2598 }
2599
2600 static void e1000_set_itr(struct e1000_adapter *adapter)
2601 {
2602         struct e1000_hw *hw = &adapter->hw;
2603         u16 current_itr;
2604         u32 new_itr = adapter->itr;
2605
2606         if (unlikely(hw->mac_type < e1000_82540))
2607                 return;
2608
2609         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2610         if (unlikely(adapter->link_speed != SPEED_1000)) {
2611                 current_itr = 0;
2612                 new_itr = 4000;
2613                 goto set_itr_now;
2614         }
2615
2616         adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2617                                            adapter->total_tx_packets,
2618                                            adapter->total_tx_bytes);
2619         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2620         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2621                 adapter->tx_itr = low_latency;
2622
2623         adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2624                                            adapter->total_rx_packets,
2625                                            adapter->total_rx_bytes);
2626         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2627         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2628                 adapter->rx_itr = low_latency;
2629
2630         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2631
2632         switch (current_itr) {
2633         /* counts and packets in update_itr are dependent on these numbers */
2634         case lowest_latency:
2635                 new_itr = 70000;
2636                 break;
2637         case low_latency:
2638                 new_itr = 20000; /* aka hwitr = ~200 */
2639                 break;
2640         case bulk_latency:
2641                 new_itr = 4000;
2642                 break;
2643         default:
2644                 break;
2645         }
2646
2647 set_itr_now:
2648         if (new_itr != adapter->itr) {
2649                 /* this attempts to bias the interrupt rate towards Bulk
2650                  * by adding intermediate steps when interrupt rate is
2651                  * increasing
2652                  */
2653                 new_itr = new_itr > adapter->itr ?
2654                           min(adapter->itr + (new_itr >> 2), new_itr) :
2655                           new_itr;
2656                 adapter->itr = new_itr;
2657                 ew32(ITR, 1000000000 / (new_itr * 256));
2658         }
2659 }
2660
2661 #define E1000_TX_FLAGS_CSUM             0x00000001
2662 #define E1000_TX_FLAGS_VLAN             0x00000002
2663 #define E1000_TX_FLAGS_TSO              0x00000004
2664 #define E1000_TX_FLAGS_IPV4             0x00000008
2665 #define E1000_TX_FLAGS_NO_FCS           0x00000010
2666 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2667 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2668
2669 static int e1000_tso(struct e1000_adapter *adapter,
2670                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2671 {
2672         struct e1000_context_desc *context_desc;
2673         struct e1000_buffer *buffer_info;
2674         unsigned int i;
2675         u32 cmd_length = 0;
2676         u16 ipcse = 0, tucse, mss;
2677         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2678         int err;
2679
2680         if (skb_is_gso(skb)) {
2681                 if (skb_header_cloned(skb)) {
2682                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2683                         if (err)
2684                                 return err;
2685                 }
2686
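                     /* hdr_len covers everything up to and including the TCP
                      * header; the remainder of the skb is the TSO payload
                      */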
2687                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2688                 mss = skb_shinfo(skb)->gso_size;
2689                 if (skb->protocol == htons(ETH_P_IP)) {
2690                         struct iphdr *iph = ip_hdr(skb);
2691                         iph->tot_len = 0;
2692                         iph->check = 0;
2693                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2694                                                                  iph->daddr, 0,
2695                                                                  IPPROTO_TCP,
2696                                                                  0);
2697                         cmd_length = E1000_TXD_CMD_IP;
2698                         ipcse = skb_transport_offset(skb) - 1;
2699                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2700                         ipv6_hdr(skb)->payload_len = 0;
2701                         tcp_hdr(skb)->check =
2702                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2703                                                  &ipv6_hdr(skb)->daddr,
2704                                                  0, IPPROTO_TCP, 0);
2705                         ipcse = 0;
2706                 }
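                     /* the css/cso/cse fields are byte offsets telling the
                      * hardware where each checksum starts, where to insert
                      * the result, and where it ends (0 = end of packet)
                      */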
2707                 ipcss = skb_network_offset(skb);
2708                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2709                 tucss = skb_transport_offset(skb);
2710                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2711                 tucse = 0;
2712
2713                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2714                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2715
2716                 i = tx_ring->next_to_use;
2717                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2718                 buffer_info = &tx_ring->buffer_info[i];
2719
2720                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2721                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2722                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2723                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2724                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2725                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2726                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2727                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2728                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2729
2730                 buffer_info->time_stamp = jiffies;
2731                 buffer_info->next_to_watch = i;
2732
2733                 if (++i == tx_ring->count) i = 0;
2734                 tx_ring->next_to_use = i;
2735
2736                 return true;
2737         }
2738         return false;
2739 }
2740
2741 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2742                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2743 {
2744         struct e1000_context_desc *context_desc;
2745         struct e1000_buffer *buffer_info;
2746         unsigned int i;
2747         u8 css;
2748         u32 cmd_len = E1000_TXD_CMD_DEXT;
2749
2750         if (skb->ip_summed != CHECKSUM_PARTIAL)
2751                 return false;
2752
2753         switch (skb->protocol) {
2754         case cpu_to_be16(ETH_P_IP):
2755                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2756                         cmd_len |= E1000_TXD_CMD_TCP;
2757                 break;
2758         case cpu_to_be16(ETH_P_IPV6):
2759                 /* XXX not handling all IPV6 headers */
2760                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2761                         cmd_len |= E1000_TXD_CMD_TCP;
2762                 break;
2763         default:
2764                 if (unlikely(net_ratelimit()))
2765                         e_warn(drv, "checksum_partial proto=%x!\n",
2766                                skb->protocol);
2767                 break;
2768         }
2769
2770         css = skb_checksum_start_offset(skb);
2771
2772         i = tx_ring->next_to_use;
2773         buffer_info = &tx_ring->buffer_info[i];
2774         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2775
2776         context_desc->lower_setup.ip_config = 0;
2777         context_desc->upper_setup.tcp_fields.tucss = css;
2778         context_desc->upper_setup.tcp_fields.tucso =
2779                 css + skb->csum_offset;
2780         context_desc->upper_setup.tcp_fields.tucse = 0;
2781         context_desc->tcp_seg_setup.data = 0;
2782         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2783
2784         buffer_info->time_stamp = jiffies;
2785         buffer_info->next_to_watch = i;
2786
2787         if (unlikely(++i == tx_ring->count)) i = 0;
2788         tx_ring->next_to_use = i;
2789
2790         return true;
2791 }
2792
2793 #define E1000_MAX_TXD_PWR       12
2794 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2795
2796 static int e1000_tx_map(struct e1000_adapter *adapter,
2797                         struct e1000_tx_ring *tx_ring,
2798                         struct sk_buff *skb, unsigned int first,
2799                         unsigned int max_per_txd, unsigned int nr_frags,
2800                         unsigned int mss)
2801 {
2802         struct e1000_hw *hw = &adapter->hw;
2803         struct pci_dev *pdev = adapter->pdev;
2804         struct e1000_buffer *buffer_info;
2805         unsigned int len = skb_headlen(skb);
2806         unsigned int offset = 0, size, count = 0, i;
2807         unsigned int f, bytecount, segs;
2808
2809         i = tx_ring->next_to_use;
2810
2811         while (len) {
2812                 buffer_info = &tx_ring->buffer_info[i];
2813                 size = min(len, max_per_txd);
2814                 /* Workaround for Controller erratum --
2815                  * descriptor for non-tso packet in a linear SKB that follows a
2816                  * tso gets written back prematurely before the data is fully
2817                  * DMA'd to the controller
2818                  */
2819                 if (!skb->data_len && tx_ring->last_tx_tso &&
2820                     !skb_is_gso(skb)) {
2821                         tx_ring->last_tx_tso = false;
2822                         size -= 4;
2823                 }
2824
2825                 /* Workaround for premature desc write-backs
2826                  * in TSO mode.  Append 4-byte sentinel desc
2827                  */
2828                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2829                         size -= 4;
2830                 /* Workaround for errata 10, which applies to all
2831                  * controllers in PCI-X mode.
2832                  * The fix is to make sure that the first descriptor of a
2833                  * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
2834                  */
2835                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2836                                 (size > 2015) && count == 0))
2837                         size = 2015;
2838
2839                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2840                  * terminating buffers within evenly-aligned dwords.
2841                  */
2842                 if (unlikely(adapter->pcix_82544 &&
2843                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2844                    size > 4))
2845                         size -= 4;
2846
2847                 buffer_info->length = size;
2848                 /* set time_stamp *before* dma to help avoid a possible race */
2849                 buffer_info->time_stamp = jiffies;
2850                 buffer_info->mapped_as_page = false;
2851                 buffer_info->dma = dma_map_single(&pdev->dev,
2852                                                   skb->data + offset,
2853                                                   size, DMA_TO_DEVICE);
2854                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2855                         goto dma_error;
2856                 buffer_info->next_to_watch = i;
2857
2858                 len -= size;
2859                 offset += size;
2860                 count++;
2861                 if (len) {
2862                         i++;
2863                         if (unlikely(i == tx_ring->count))
2864                                 i = 0;
2865                 }
2866         }
2867
2868         for (f = 0; f < nr_frags; f++) {
2869                 const struct skb_frag_struct *frag;
2870
2871                 frag = &skb_shinfo(skb)->frags[f];
2872                 len = skb_frag_size(frag);
2873                 offset = 0;
2874
2875                 while (len) {
2876                         unsigned long bufend;
2877                         i++;
2878                         if (unlikely(i == tx_ring->count))
2879                                 i = 0;
2880
2881                         buffer_info = &tx_ring->buffer_info[i];
2882                         size = min(len, max_per_txd);
2883                         /* Workaround for premature desc write-backs
2884                          * in TSO mode.  Append 4-byte sentinel desc
2885                          */
2886                         if (unlikely(mss && f == (nr_frags-1) &&
2887                             size == len && size > 8))
2888                                 size -= 4;
2889                         /* Workaround for potential 82544 hang in PCI-X.
2890                          * Avoid terminating buffers within evenly-aligned
2891                          * dwords.
2892                          */
2893                         bufend = (unsigned long)
2894                                 page_to_phys(skb_frag_page(frag));
2895                         bufend += offset + size - 1;
2896                         if (unlikely(adapter->pcix_82544 &&
2897                                      !(bufend & 4) &&
2898                                      size > 4))
2899                                 size -= 4;
2900
2901                         buffer_info->length = size;
2902                         buffer_info->time_stamp = jiffies;
2903                         buffer_info->mapped_as_page = true;
2904                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2905                                                 offset, size, DMA_TO_DEVICE);
2906                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2907                                 goto dma_error;
2908                         buffer_info->next_to_watch = i;
2909
2910                         len -= size;
2911                         offset += size;
2912                         count++;
2913                 }
2914         }
2915
2916         segs = skb_shinfo(skb)->gso_segs ?: 1;
2917         /* multiply data chunks by size of headers */
2918         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2919
2920         tx_ring->buffer_info[i].skb = skb;
2921         tx_ring->buffer_info[i].segs = segs;
2922         tx_ring->buffer_info[i].bytecount = bytecount;
2923         tx_ring->buffer_info[first].next_to_watch = i;
2924
2925         return count;
2926
2927 dma_error:
2928         dev_err(&pdev->dev, "TX DMA map failed\n");
2929         buffer_info->dma = 0;
2930         if (count)
2931                 count--;
2932
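        /* Unwind: walk backwards through the descriptors already mapped
         * and release their DMA mappings.
         */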
2933         while (count--) {
2934                 if (i == 0)
2935                         i += tx_ring->count;
2936                 i--;
2937                 buffer_info = &tx_ring->buffer_info[i];
2938                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2939         }
2940
2941         return 0;
2942 }
2943
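/**
 * e1000_tx_queue - write descriptors and bump the tail pointer
 * @adapter: board private structure
 * @tx_ring: ring to place the descriptors on
 * @tx_flags: TSO/checksum/VLAN flags gathered by e1000_xmit_frame
 * @count: number of descriptors to write
 **/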
2944 static void e1000_tx_queue(struct e1000_adapter *adapter,
2945                            struct e1000_tx_ring *tx_ring, int tx_flags,
2946                            int count)
2947 {
2948         struct e1000_hw *hw = &adapter->hw;
2949         struct e1000_tx_desc *tx_desc = NULL;
2950         struct e1000_buffer *buffer_info;
2951         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2952         unsigned int i;
2953
2954         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2955                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2956                              E1000_TXD_CMD_TSE;
2957                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2958
2959                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2960                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2961         }
2962
2963         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2964                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2965                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2966         }
2967
2968         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2969                 txd_lower |= E1000_TXD_CMD_VLE;
2970                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2971         }
2972
2973         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2974                 txd_lower &= ~(E1000_TXD_CMD_IFCS);
2975
2976         i = tx_ring->next_to_use;
2977
2978         while (count--) {
2979                 buffer_info = &tx_ring->buffer_info[i];
2980                 tx_desc = E1000_TX_DESC(*tx_ring, i);
2981                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2982                 tx_desc->lower.data =
2983                         cpu_to_le32(txd_lower | buffer_info->length);
2984                 tx_desc->upper.data = cpu_to_le32(txd_upper);
2985                 if (unlikely(++i == tx_ring->count)) i = 0;
2986         }
2987
2988         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
2989
2990         /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
2991         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2992                 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
2993
2994         /* Force memory writes to complete before letting h/w
2995          * know there are new descriptors to fetch.  (Only
2996          * applicable for weak-ordered memory model archs,
2997          * such as IA-64).
2998          */
2999         wmb();
3000
3001         tx_ring->next_to_use = i;
3002         writel(i, hw->hw_addr + tx_ring->tdt);
3003         /* We need this if more than one processor can write to our tail
3004          * at a time; it synchronizes IO on IA64/Altix systems.
3005          */
3006         mmiowb();
3007 }
3008
3009 /* 82547 workaround to avoid controller hang in half-duplex environment.
3010  * The workaround is to avoid queuing a large packet that would span
3011  * the internal Tx FIFO ring boundary by notifying the stack to resend
3012  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3013  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3014  * to the beginning of the Tx FIFO.
3015  */
3016
3017 #define E1000_FIFO_HDR                  0x10
3018 #define E1000_82547_PAD_LEN             0x3E0
3019
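/**
 * e1000_82547_fifo_workaround - check whether a packet must wait for a FIFO flush
 * @adapter: board private structure
 * @skb: packet about to be transmitted
 *
 * Returns 1 if the Tx FIFO must be flushed before this packet can be
 * queued, 0 if it can be transmitted immediately.
 **/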
3020 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3021                                        struct sk_buff *skb)
3022 {
3023         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3024         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3025
3026         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3027
3028         if (adapter->link_duplex != HALF_DUPLEX)
3029                 goto no_fifo_stall_required;
3030
3031         if (atomic_read(&adapter->tx_fifo_stall))
3032                 return 1;
3033
3034         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3035                 atomic_set(&adapter->tx_fifo_stall, 1);
3036                 return 1;
3037         }
3038
3039 no_fifo_stall_required:
3040         adapter->tx_fifo_head += skb_fifo_len;
3041         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3042                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3043         return 0;
3044 }
3045
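/**
 * __e1000_maybe_stop_tx - stop the Tx queue until descriptors are reclaimed
 * @netdev: network interface device structure
 * @size: number of unused descriptors required
 *
 * Returns -EBUSY if the queue had to stay stopped, 0 if enough room
 * became available in the meantime and the queue was restarted.
 **/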
3046 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3047 {
3048         struct e1000_adapter *adapter = netdev_priv(netdev);
3049         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3050
3051         netif_stop_queue(netdev);
3052         /* Herbert's original patch had:
3053          *  smp_mb__after_netif_stop_queue();
3054          * but since that doesn't exist yet, just open code it.
3055          */
3056         smp_mb();
3057
3058         /* We need to check again in case another CPU has just
3059          * made room available.
3060          */
3061         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3062                 return -EBUSY;
3063
3064         /* A reprieve! */
3065         netif_start_queue(netdev);
3066         ++adapter->restart_queue;
3067         return 0;
3068 }
3069
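/* Fast path: only take the slow path when fewer than size descriptors are free */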
3070 static int e1000_maybe_stop_tx(struct net_device *netdev,
3071                                struct e1000_tx_ring *tx_ring, int size)
3072 {
3073         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3074                 return 0;
3075         return __e1000_maybe_stop_tx(netdev, size);
3076 }
3077
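/* Worst-case number of descriptors needed to map S bytes when each
 * descriptor holds at most 2^X bytes.
 */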
3078 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
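/**
 * e1000_xmit_frame - queue an skb for transmission
 * @skb: packet to send
 * @netdev: network interface device structure
 *
 * Counts the descriptors the packet will need (including erratum
 * workarounds), sets up any TSO/checksum offload context, maps the
 * buffers and hands them to the hardware.  Returns NETDEV_TX_BUSY if
 * the ring cannot take the packet yet, NETDEV_TX_OK otherwise.
 **/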
3079 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3080                                     struct net_device *netdev)
3081 {
3082         struct e1000_adapter *adapter = netdev_priv(netdev);
3083         struct e1000_hw *hw = &adapter->hw;
3084         struct e1000_tx_ring *tx_ring;
3085         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3086         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3087         unsigned int tx_flags = 0;
3088         unsigned int len = skb_headlen(skb);
3089         unsigned int nr_frags;
3090         unsigned int mss;
3091         int count = 0;
3092         int tso;
3093         unsigned int f;
3094
3095         /* This goes back to the question of how to logically map a Tx queue
3096          * to a flow.  Right now, performance is impacted slightly negatively
3097          * if using multiple Tx queues.  If the stack breaks away from a
3098          * single qdisc implementation, we can look at this again.
3099          */
3100         tx_ring = adapter->tx_ring;
3101
3102         if (unlikely(skb->len <= 0)) {
3103                 dev_kfree_skb_any(skb);
3104                 return NETDEV_TX_OK;
3105         }
3106
3107         /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3108          * packets may get corrupted during padding by HW.
3109          * To work around this issue, pad all small packets manually.
3110          */
3111         if (skb->len < ETH_ZLEN) {
3112                 if (skb_pad(skb, ETH_ZLEN - skb->len))
3113                         return NETDEV_TX_OK;
3114                 skb->len = ETH_ZLEN;
3115                 skb_set_tail_pointer(skb, ETH_ZLEN);
3116         }
3117
3118         mss = skb_shinfo(skb)->gso_size;
3119         /* The controller does a simple calculation to
3120          * make sure there is enough room in the FIFO before
3121          * initiating the DMA for each buffer.  It assumes
3122          * ceil(buffer len/mss) <= 4, so to make sure we don't
3123          * overrun the FIFO, cap the max buffer len at 4 * mss
3124          * whenever mss drops.
3125          */
3126         if (mss) {
3127                 u8 hdr_len;
3128                 max_per_txd = min(mss << 2, max_per_txd);
3129                 max_txd_pwr = fls(max_per_txd) - 1;
3130
3131                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3132                 if (skb->data_len && hdr_len == len) {
3133                         switch (hw->mac_type) {
3134                                 unsigned int pull_size;
3135                         case e1000_82544:
3136                                 /* Make sure we have room to chop off 4 bytes,
3137                                  * and that the end alignment will work out to
3138                                  * this hardware's requirements
3139                                  * NOTE: this is a TSO only workaround
3140                                  * if end byte alignment not correct move us
3141                                  * into the next dword
3142                                  */
3143                                 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3144                                     & 4)
3145                                         break;
3146                                 /* fall through */
3147                                 pull_size = min((unsigned int)4, skb->data_len);
3148                                 if (!__pskb_pull_tail(skb, pull_size)) {
3149                                         e_err(drv, "__pskb_pull_tail "
3150                                               "failed.\n");
3151                                         dev_kfree_skb_any(skb);
3152                                         return NETDEV_TX_OK;
3153                                 }
3154                                 len = skb_headlen(skb);
3155                                 break;
3156                         default:
3157                                 /* do nothing */
3158                                 break;
3159                         }
3160                 }
3161         }
3162
3163         /* reserve a descriptor for the offload context */
3164         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3165                 count++;
3166         count++;
3167
3168         /* Controller Erratum workaround */
3169         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3170                 count++;
3171
3172         count += TXD_USE_COUNT(len, max_txd_pwr);
3173
3174         if (adapter->pcix_82544)
3175                 count++;
3176
3177         /* Workaround for errata 10, which applies to all controllers
3178          * in PCI-X mode, so add one more descriptor to the count
3179          */
3180         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3181                         (len > 2015)))
3182                 count++;
3183
3184         nr_frags = skb_shinfo(skb)->nr_frags;
3185         for (f = 0; f < nr_frags; f++)
3186                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3187                                        max_txd_pwr);
3188         if (adapter->pcix_82544)
3189                 count += nr_frags;
3190
3191         /* need: count + 2 desc gap to keep tail from touching
3192          * head, otherwise try next time
3193          */
3194         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3195                 return NETDEV_TX_BUSY;
3196
3197         if (unlikely((hw->mac_type == e1000_82547) &&
3198                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3199                 netif_stop_queue(netdev);
3200                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3201                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3202                 return NETDEV_TX_BUSY;
3203         }
3204
3205         if (vlan_tx_tag_present(skb)) {
3206                 tx_flags |= E1000_TX_FLAGS_VLAN;
3207                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3208         }
3209
3210         first = tx_ring->next_to_use;
3211
3212         tso = e1000_tso(adapter, tx_ring, skb);
3213         if (tso < 0) {
3214                 dev_kfree_skb_any(skb);
3215                 return NETDEV_TX_OK;
3216         }
3217
3218         if (likely(tso)) {
3219                 if (likely(hw->mac_type != e1000_82544))
3220                         tx_ring->last_tx_tso = true;
3221                 tx_flags |= E1000_TX_FLAGS_TSO;
3222         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3223                 tx_flags |= E1000_TX_FLAGS_CSUM;
3224
3225         if (likely(skb->protocol == htons(ETH_P_IP)))
3226                 tx_flags |= E1000_TX_FLAGS_IPV4;
3227
3228         if (unlikely(skb->no_fcs))
3229                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3230
3231         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3232                              nr_frags, mss);
3233
3234         if (count) {
3235                 netdev_sent_queue(netdev, skb->len);
3236                 skb_tx_timestamp(skb);
3237
3238                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3239                 /* Make sure there is space in the ring for the next send. */
3240                 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3241
3242         } else {
3243                 dev_kfree_skb_any(skb);
3244                 tx_ring->buffer_info[first].time_stamp = 0;
3245                 tx_ring->next_to_use = first;
3246         }
3247
3248         return NETDEV_TX_OK;
3249 }
3250
3251 #define NUM_REGS 38 /* 1 based count */
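/* Dump a fixed set of MAC registers, by name, to the kernel log */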
3252 static void e1000_regdump(struct e1000_adapter *adapter)
3253 {
3254         struct e1000_hw *hw = &adapter->hw;
3255         u32 regs[NUM_REGS];
3256         u32 *regs_buff = regs;
3257         int i = 0;
3258
3259         static const char * const reg_name[] = {
3260                 "CTRL",  "STATUS",
3261                 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3262                 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3263                 "TIDV", "TXDCTL", "TADV", "TARC0",
3264                 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3265                 "TXDCTL1", "TARC1",
3266                 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3267                 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3268                 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3269         };
3270
3271         regs_buff[0]  = er32(CTRL);
3272         regs_buff[1]  = er32(STATUS);
3273
3274         regs_buff[2]  = er32(RCTL);
3275         regs_buff[3]  = er32(RDLEN);
3276         regs_buff[4]  = er32(RDH);
3277         regs_buff[5]  = er32(RDT);
3278         regs_buff[6]  = er32(RDTR);
3279
3280         regs_buff[7]  = er32(TCTL);
3281         regs_buff[8]  = er32(TDBAL);
3282         regs_buff[9]  = er32(TDBAH);
3283         regs_buff[10] = er32(TDLEN);
3284         regs_buff[11] = er32(TDH);
3285         regs_buff[12] = er32(TDT);
3286         regs_buff[13] = er32(TIDV);
3287         regs_buff[14] = er32(TXDCTL);
3288         regs_buff[15] = er32(TADV);
3289         regs_buff[16] = er32(TARC0);
3290
3291         regs_buff[17] = er32(TDBAL1);
3292         regs_buff[18] = er32(TDBAH1);
3293         regs_buff[19] = er32(TDLEN1);
3294         regs_buff[20] = er32(TDH1);
3295         regs_buff[21] = er32(TDT1);
3296         regs_buff[22] = er32(TXDCTL1);
3297         regs_buff[23] = er32(TARC1);
3298         regs_buff[24] = er32(CTRL_EXT);
3299         regs_buff[25] = er32(ERT);
3300         regs_buff[26] = er32(RDBAL0);
3301         regs_buff[27] = er32(RDBAH0);
3302         regs_buff[28] = er32(TDFH);
3303         regs_buff[29] = er32(TDFT);
3304         regs_buff[30] = er32(TDFHS);
3305         regs_buff[31] = er32(TDFTS);
3306         regs_buff[32] = er32(TDFPC);
3307         regs_buff[33] = er32(RDFH);
3308         regs_buff[34] = er32(RDFT);
3309         regs_buff[35] = er32(RDFHS);
3310         regs_buff[36] = er32(RDFTS);
3311         regs_buff[37] = er32(RDFPC);
3312
3313         pr_info("Register dump\n");
3314         for (i = 0; i < NUM_REGS; i++)
3315                 pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3316 }
3317
3318 /**
3319  * e1000_dump - Print registers, Tx ring and Rx ring
3320  **/
3321 static void e1000_dump(struct e1000_adapter *adapter)
3322 {
3323         /* this code doesn't handle multiple rings */
3324         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3325         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3326         int i;
3327
3328         if (!netif_msg_hw(adapter))
3329                 return;
3330
3331         /* Print Registers */
3332         e1000_regdump(adapter);
3333
3334         /* transmit dump */
3335         pr_info("TX Desc ring0 dump\n");
3336
3337         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3338          *
3339          * Legacy Transmit Descriptor
3340          *   +--------------------------------------------------------------+
3341          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3342          *   +--------------------------------------------------------------+
3343          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3344          *   +--------------------------------------------------------------+
3345          *   63       48 47        36 35    32 31     24 23    16 15        0
3346          *
3347          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3348          *   63      48 47    40 39       32 31             16 15    8 7      0
3349          *   +----------------------------------------------------------------+
3350          * 0 |  TUCSE  | TUCSO  |   TUCSS   |     IPCSE       | IPCSO | IPCSS |
3351          *   +----------------------------------------------------------------+
3352          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3353          *   +----------------------------------------------------------------+
3354          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3355          *
3356          * Extended Data Descriptor (DTYP=0x1)
3357          *   +----------------------------------------------------------------+
3358          * 0 |                     Buffer Address [63:0]                      |
3359          *   +----------------------------------------------------------------+
3360          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3361          *   +----------------------------------------------------------------+
3362          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3363          */
3364         pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3365         pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3366
3367         if (!netif_msg_tx_done(adapter))
3368                 goto rx_ring_summary;
3369
3370         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3371                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3372                 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3373                 struct my_u { __le64 a; __le64 b; };
3374                 struct my_u *u = (struct my_u *)tx_desc;
3375                 const char *type;
3376
3377                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3378                         type = "NTC/U";
3379                 else if (i == tx_ring->next_to_use)
3380                         type = "NTU";
3381                 else if (i == tx_ring->next_to_clean)
3382                         type = "NTC";
3383                 else
3384                         type = "";
3385
3386                 pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3387                         ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3388                         le64_to_cpu(u->a), le64_to_cpu(u->b),
3389                         (u64)buffer_info->dma, buffer_info->length,
3390                         buffer_info->next_to_watch,
3391                         (u64)buffer_info->time_stamp, buffer_info->skb, type);
3392         }
3393
3394 rx_ring_summary:
3395         /* receive dump */
3396         pr_info("\nRX Desc ring dump\n");
3397
3398         /* Legacy Receive Descriptor Format
3399          *
3400          * +-----------------------------------------------------+
3401          * |                Buffer Address [63:0]                |
3402          * +-----------------------------------------------------+
3403          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3404          * +-----------------------------------------------------+
3405          * 63       48 47    40 39      32 31         16 15      0
3406          */
3407         pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3408
3409         if (!netif_msg_rx_status(adapter))
3410                 goto exit;
3411
3412         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3413                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3414                 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3415                 struct my_u { __le64 a; __le64 b; };
3416                 struct my_u *u = (struct my_u *)rx_desc;
3417                 const char *type;
3418
3419                 if (i == rx_ring->next_to_use)
3420                         type = "NTU";
3421                 else if (i == rx_ring->next_to_clean)
3422                         type = "NTC";
3423                 else
3424                         type = "";
3425
3426                 pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3427                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3428                         (u64)buffer_info->dma, buffer_info->skb, type);
3429         } /* for */
3430
3431         /* dump the descriptor caches */
3432         /* rx */
3433         pr_info("Rx descriptor cache in 64bit format\n");
3434         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3435                 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3436                         i,
3437                         readl(adapter->hw.hw_addr + i+4),
3438                         readl(adapter->hw.hw_addr + i),
3439                         readl(adapter->hw.hw_addr + i+12),
3440                         readl(adapter->hw.hw_addr + i+8));
3441         }
3442         /* tx */
3443         pr_info("Tx descriptor cache in 64bit format\n");
3444         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3445                 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3446                         i,
3447                         readl(adapter->hw.hw_addr + i+4),
3448                         readl(adapter->hw.hw_addr + i),
3449                         readl(adapter->hw.hw_addr + i+12),
3450                         readl(adapter->hw.hw_addr + i+8));
3451         }
3452 exit:
3453         return;
3454 }
3455
3456 /**
3457  * e1000_tx_timeout - Respond to a Tx Hang
3458  * @netdev: network interface device structure
3459  **/
3460 static void e1000_tx_timeout(struct net_device *netdev)
3461 {
3462         struct e1000_adapter *adapter = netdev_priv(netdev);
3463
3464         /* Do the reset outside of interrupt context */
3465         adapter->tx_timeout_count++;
3466         schedule_work(&adapter->reset_task);
3467 }
3468
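/**
 * e1000_reset_task - reset the adapter outside of interrupt context
 * @work: work_struct embedded in the adapter structure
 *
 * Scheduled by e1000_tx_timeout; performs the actual reinit.
 **/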
3469 static void e1000_reset_task(struct work_struct *work)
3470 {
3471         struct e1000_adapter *adapter =
3472                 container_of(work, struct e1000_adapter, reset_task);
3473
3474         e_err(drv, "Reset adapter\n");
3475         e1000_reinit_locked(adapter);
3476 }
3477
3478 /**
3479  * e1000_get_stats - Get System Network Statistics
3480  * @netdev: network interface device structure
3481  *
3482  * Returns the address of the device statistics structure.
3483  * The statistics are actually updated from the watchdog.
3484  **/
3485 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3486 {
3487         /* only return the current stats */
3488         return &netdev->stats;
3489 }
3490
3491 /**
3492  * e1000_change_mtu - Change the Maximum Transfer Unit
3493  * @netdev: network interface device structure
3494  * @new_mtu: new value for maximum frame size
3495  *
3496  * Returns 0 on success, negative on failure
3497  **/
3498 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3499 {
3500         struct e1000_adapter *adapter = netdev_priv(netdev);
3501         struct e1000_hw *hw = &adapter->hw;
3502         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3503
3504         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3505             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3506                 e_err(probe, "Invalid MTU setting\n");
3507                 return -EINVAL;
3508         }
3509
3510         /* Adapter-specific max frame size limits. */
3511         switch (hw->mac_type) {
3512         case e1000_undefined ... e1000_82542_rev2_1:
3513                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3514                         e_err(probe, "Jumbo Frames not supported.\n");
3515                         return -EINVAL;
3516                 }
3517                 break;
3518         default:
3519                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3520                 break;
3521         }
3522
3523         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3524                 msleep(1);
3525         /* e1000_down has a dependency on max_frame_size */
3526         hw->max_frame_size = max_frame;
3527         if (netif_running(netdev))
3528                 e1000_down(adapter);
3529
3530         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3531          * means we reserve 2 more, this pushes us to allocate from the next
3532          * larger slab size.
3533          * i.e. RXBUFFER_2048 --> size-4096 slab
3534          * however with the new *_jumbo_rx* routines, jumbo receives will use
3535          * fragmented skbs
3536          */
3537
3538         if (max_frame <= E1000_RXBUFFER_2048)
3539                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3540         else
3541 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3542                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3543 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3544                 adapter->rx_buffer_len = PAGE_SIZE;
3545 #endif
3546
3547         /* adjust allocation if LPE protects us, and we aren't using SBP */
3548         if (!hw->tbi_compatibility_on &&
3549             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3550              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3551                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3552
3553         pr_info("%s changing MTU from %d to %d\n",
3554                 netdev->name, netdev->mtu, new_mtu);
3555         netdev->mtu = new_mtu;
3556
3557         if (netif_running(netdev))
3558                 e1000_up(adapter);
3559         else
3560                 e1000_reset(adapter);
3561
3562         clear_bit(__E1000_RESETTING, &adapter->flags);
3563
3564         return 0;
3565 }
3566
3567 /**
3568  * e1000_update_stats - Update the board statistics counters
3569  * @adapter: board private structure
3570  **/
3571 void e1000_update_stats(struct e1000_adapter *adapter)
3572 {
3573         struct net_device *netdev = adapter->netdev;
3574         struct e1000_hw *hw = &adapter->hw;
3575         struct pci_dev *pdev = adapter->pdev;
3576         unsigned long flags;
3577         u16 phy_tmp;
3578
3579 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3580
3581         /* Prevent stats update while adapter is being reset, or if the pci
3582          * connection is down.
3583          */
3584         if (adapter->link_speed == 0)
3585                 return;
3586         if (pci_channel_offline(pdev))
3587                 return;
3588
3589         spin_lock_irqsave(&adapter->stats_lock, flags);
3590
3591         /* these counters are modified from e1000_tbi_adjust_stats,
3592          * called from the interrupt context, so they must only
3593          * be written while holding adapter->stats_lock
3594          */
3595
3596         adapter->stats.crcerrs += er32(CRCERRS);
3597         adapter->stats.gprc += er32(GPRC);
3598         adapter->stats.gorcl += er32(GORCL);
3599         adapter->stats.gorch += er32(GORCH);
3600         adapter->stats.bprc += er32(BPRC);
3601         adapter->stats.mprc += er32(MPRC);
3602         adapter->stats.roc += er32(ROC);
3603
3604         adapter->stats.prc64 += er32(PRC64);
3605         adapter->stats.prc127 += er32(PRC127);
3606         adapter->stats.prc255 += er32(PRC255);
3607         adapter->stats.prc511 += er32(PRC511);
3608         adapter->stats.prc1023 += er32(PRC1023);
3609         adapter->stats.prc1522 += er32(PRC1522);
3610
3611         adapter->stats.symerrs += er32(SYMERRS);
3612         adapter->stats.mpc += er32(MPC);
3613         adapter->stats.scc += er32(SCC);
3614         adapter->stats.ecol += er32(ECOL);
3615         adapter->stats.mcc += er32(MCC);
3616         adapter->stats.latecol += er32(LATECOL);
3617         adapter->stats.dc += er32(DC);
3618         adapter->stats.sec += er32(SEC);
3619         adapter->stats.rlec += er32(RLEC);
3620         adapter->stats.xonrxc += er32(XONRXC);
3621         adapter->stats.xontxc += er32(XONTXC);
3622         adapter->stats.xoffrxc += er32(XOFFRXC);
3623         adapter->stats.xofftxc += er32(XOFFTXC);
3624         adapter->stats.fcruc += er32(FCRUC);
3625         adapter->stats.gptc += er32(GPTC);
3626         adapter->stats.gotcl += er32(GOTCL);
3627         adapter->stats.gotch += er32(GOTCH);
3628         adapter->stats.rnbc += er32(RNBC);
3629         adapter->stats.ruc += er32(RUC);
3630         adapter->stats.rfc += er32(RFC);
3631         adapter->stats.rjc += er32(RJC);
3632         adapter->stats.torl += er32(TORL);
3633         adapter->stats.torh += er32(TORH);
3634         adapter->stats.totl += er32(TOTL);
3635         adapter->stats.toth += er32(TOTH);
3636         adapter->stats.tpr += er32(TPR);
3637
3638         adapter->stats.ptc64 += er32(PTC64);
3639         adapter->stats.ptc127 += er32(PTC127);
3640         adapter->stats.ptc255 += er32(PTC255);
3641         adapter->stats.ptc511 += er32(PTC511);
3642         adapter->stats.ptc1023 += er32(PTC1023);
3643         adapter->stats.ptc1522 += er32(PTC1522);
3644
3645         adapter->stats.mptc += er32(MPTC);
3646         adapter->stats.bptc += er32(BPTC);
3647
3648         /* used for adaptive IFS */
3649
3650         hw->tx_packet_delta = er32(TPT);
3651         adapter->stats.tpt += hw->tx_packet_delta;
3652         hw->collision_delta = er32(COLC);
3653         adapter->stats.colc += hw->collision_delta;
3654
3655         if (hw->mac_type >= e1000_82543) {
3656                 adapter->stats.algnerrc += er32(ALGNERRC);
3657                 adapter->stats.rxerrc += er32(RXERRC);
3658                 adapter->stats.tncrs += er32(TNCRS);
3659                 adapter->stats.cexterr += er32(CEXTERR);
3660                 adapter->stats.tsctc += er32(TSCTC);
3661                 adapter->stats.tsctfc += er32(TSCTFC);
3662         }
3663
3664         /* Fill out the OS statistics structure */
3665         netdev->stats.multicast = adapter->stats.mprc;
3666         netdev->stats.collisions = adapter->stats.colc;
3667
3668         /* Rx Errors */
3669
3670         /* RLEC on some newer hardware can be incorrect so build
3671          * our own version based on RUC and ROC
3672          */
3673         netdev->stats.rx_errors = adapter->stats.rxerrc +
3674                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3675                 adapter->stats.ruc + adapter->stats.roc +
3676                 adapter->stats.cexterr;
3677         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3678         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3679         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3680         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3681         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3682
3683         /* Tx Errors */
3684         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3685         netdev->stats.tx_errors = adapter->stats.txerrc;
3686         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3687         netdev->stats.tx_window_errors = adapter->stats.latecol;
3688         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3689         if (hw->bad_tx_carr_stats_fd &&
3690             adapter->link_duplex == FULL_DUPLEX) {
3691                 netdev->stats.tx_carrier_errors = 0;
3692                 adapter->stats.tncrs = 0;
3693         }
3694
3695         /* Tx Dropped needs to be maintained elsewhere */
3696
3697         /* Phy Stats */
3698         if (hw->media_type == e1000_media_type_copper) {
3699                 if ((adapter->link_speed == SPEED_1000) &&
3700                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3701                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3702                         adapter->phy_stats.idle_errors += phy_tmp;
3703                 }
3704
3705                 if ((hw->mac_type <= e1000_82546) &&
3706                    (hw->phy_type == e1000_phy_m88) &&
3707                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3708                         adapter->phy_stats.receive_errors += phy_tmp;
3709         }
3710
3711         /* Management Stats */
3712         if (hw->has_smbus) {
3713                 adapter->stats.mgptc += er32(MGTPTC);
3714                 adapter->stats.mgprc += er32(MGTPRC);
3715                 adapter->stats.mgpdc += er32(MGTPDC);
3716         }
3717
3718         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3719 }
3720
3721 /**
3722  * e1000_intr - Interrupt Handler
3723  * @irq: interrupt number
3724  * @data: pointer to a network interface device structure
3725  **/
3726 static irqreturn_t e1000_intr(int irq, void *data)
3727 {
3728         struct net_device *netdev = data;
3729         struct e1000_adapter *adapter = netdev_priv(netdev);
3730         struct e1000_hw *hw = &adapter->hw;
3731         u32 icr = er32(ICR);
3732
3733         if (unlikely((!icr)))
3734                 return IRQ_NONE;  /* Not our interrupt */
3735
3736         /* We might have caused the interrupt, but the above
3737          * read cleared it.  In case the driver is down there is
3738          * nothing to do, so return handled.
3739          */
3740         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3741                 return IRQ_HANDLED;
3742
3743         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3744                 hw->get_link_status = 1;
3745                 /* guard against interrupt when we're going down */
3746                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3747                         schedule_delayed_work(&adapter->watchdog_task, 1);
3748         }
3749
3750         /* disable interrupts, without the synchronize_irq bit */
3751         ew32(IMC, ~0);
3752         E1000_WRITE_FLUSH();
3753
3754         if (likely(napi_schedule_prep(&adapter->napi))) {
3755                 adapter->total_tx_bytes = 0;
3756                 adapter->total_tx_packets = 0;
3757                 adapter->total_rx_bytes = 0;
3758                 adapter->total_rx_packets = 0;
3759                 __napi_schedule(&adapter->napi);
3760         } else {
3761                 /* This really should not happen!  If it does, it is basically
3762                  * a bug, but not a hard error, so enable interrupts and continue.
3763                  */
3764                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3765                         e1000_irq_enable(adapter);
3766         }
3767
3768         return IRQ_HANDLED;
3769 }
3770
3771 /**
3772  * e1000_clean - NAPI Rx polling callback
3773  * @napi: napi struct embedded in the board private structure
3774  **/
3775 static int e1000_clean(struct napi_struct *napi, int budget)
3776 {
3777         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3778                                                      napi);
3779         int tx_clean_complete = 0, work_done = 0;
3780
3781         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3782
3783         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3784
3785         if (!tx_clean_complete)
3786                 work_done = budget;
3787
3788         /* If budget not fully consumed, exit the polling mode */
3789         if (work_done < budget) {
3790                 if (likely(adapter->itr_setting & 3))
3791                         e1000_set_itr(adapter);
3792                 napi_complete(napi);
3793                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3794                         e1000_irq_enable(adapter);
3795         }
3796
3797         return work_done;
3798 }
3799
3800 /**
3801  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3802  * @adapter: board private structure
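 * @tx_ring: ring to clean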
3803  **/
3804 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3805                                struct e1000_tx_ring *tx_ring)
3806 {
3807         struct e1000_hw *hw = &adapter->hw;
3808         struct net_device *netdev = adapter->netdev;
3809         struct e1000_tx_desc *tx_desc, *eop_desc;
3810         struct e1000_buffer *buffer_info;
3811         unsigned int i, eop;
3812         unsigned int count = 0;
3813         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3814         unsigned int bytes_compl = 0, pkts_compl = 0;
3815
3816         i = tx_ring->next_to_clean;
3817         eop = tx_ring->buffer_info[i].next_to_watch;
3818         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3819
3820         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3821                (count < tx_ring->count)) {
3822                 bool cleaned = false;
3823                 rmb();  /* read buffer_info after eop_desc */
3824                 for ( ; !cleaned; count++) {
3825                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3826                         buffer_info = &tx_ring->buffer_info[i];
3827                         cleaned = (i == eop);
3828
3829                         if (cleaned) {
3830                                 total_tx_packets += buffer_info->segs;
3831                                 total_tx_bytes += buffer_info->bytecount;
3832                                 if (buffer_info->skb) {
3833                                         bytes_compl += buffer_info->skb->len;
3834                                         pkts_compl++;
3835                                 }
3836
3837                         }
3838                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3839                         tx_desc->upper.data = 0;
3840
3841                         if (unlikely(++i == tx_ring->count)) i = 0;
3842                 }
3843
3844                 eop = tx_ring->buffer_info[i].next_to_watch;
3845                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3846         }
3847
3848         tx_ring->next_to_clean = i;
3849
3850         netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3851
3852 #define TX_WAKE_THRESHOLD 32
3853         if (unlikely(count && netif_carrier_ok(netdev) &&
3854                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3855                 /* Make sure that anybody stopping the queue after this
3856                  * sees the new next_to_clean.
3857                  */
3858                 smp_mb();
3859
3860                 if (netif_queue_stopped(netdev) &&
3861                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3862                         netif_wake_queue(netdev);
3863                         ++adapter->restart_queue;
3864                 }
3865         }
3866
3867         if (adapter->detect_tx_hung) {
3868                 /* Detect a transmit hang in hardware; this serializes the
3869                  * check with the clearing of time_stamp and movement of i.
3870                  */
3871                 adapter->detect_tx_hung = false;
3872                 if (tx_ring->buffer_info[eop].time_stamp &&
3873                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3874                                (adapter->tx_timeout_factor * HZ)) &&
3875                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3876
3877                         /* detected Tx unit hang */
3878                         e_err(drv, "Detected Tx Unit Hang\n"
3879                               "  Tx Queue             <%lu>\n"
3880                               "  TDH                  <%x>\n"
3881                               "  TDT                  <%x>\n"
3882                               "  next_to_use          <%x>\n"
3883                               "  next_to_clean        <%x>\n"
3884                               "buffer_info[next_to_clean]\n"
3885                               "  time_stamp           <%lx>\n"
3886                               "  next_to_watch        <%x>\n"
3887                               "  jiffies              <%lx>\n"
3888                               "  next_to_watch.status <%x>\n",
3889                                 (unsigned long)(tx_ring - adapter->tx_ring),
3890                                 readl(hw->hw_addr + tx_ring->tdh),
3891                                 readl(hw->hw_addr + tx_ring->tdt),
3892                                 tx_ring->next_to_use,
3893                                 tx_ring->next_to_clean,
3894                                 tx_ring->buffer_info[eop].time_stamp,
3895                                 eop,
3896                                 jiffies,
3897                                 eop_desc->upper.fields.status);
3898                         e1000_dump(adapter);
3899                         netif_stop_queue(netdev);
3900                 }
3901         }
3902         adapter->total_tx_bytes += total_tx_bytes;
3903         adapter->total_tx_packets += total_tx_packets;
3904         netdev->stats.tx_bytes += total_tx_bytes;
3905         netdev->stats.tx_packets += total_tx_packets;
3906         return count < tx_ring->count;
3907 }
3908
3909 /**
3910  * e1000_rx_checksum - Receive Checksum Offload for 82543
3911  * @adapter:     board private structure
3912  * @status_err:  receive descriptor status and error fields
3913  * @csum:        receive descriptor csum field
3914  * @skb:         socket buffer with received data
3915  **/
3916 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3917                               u32 csum, struct sk_buff *skb)
3918 {
3919         struct e1000_hw *hw = &adapter->hw;
3920         u16 status = (u16)status_err;
3921         u8 errors = (u8)(status_err >> 24);
3922
3923         skb_checksum_none_assert(skb);
3924
3925         /* 82543 or newer only */
3926         if (unlikely(hw->mac_type < e1000_82543)) return;
3927         /* Ignore Checksum bit is set */
3928         if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3929         /* TCP/UDP checksum error bit is set */
3930         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3931                 /* let the stack verify checksum errors */
3932                 adapter->hw_csum_err++;
3933                 return;
3934         }
3935         /* TCP/UDP Checksum has not been calculated */
3936         if (!(status & E1000_RXD_STAT_TCPCS))
3937                 return;
3938
3939         /* It must be a TCP or UDP packet with a valid checksum */
3940         if (likely(status & E1000_RXD_STAT_TCPCS)) {
3941                 /* TCP checksum is good */
3942                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3943         }
3944         adapter->hw_csum_good++;
3945 }
3946
3947 /**
3948  * e1000_consume_page - helper function
3949  **/
3950 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3951                                u16 length)
3952 {
3953         bi->page = NULL;
3954         skb->len += length;
3955         skb->data_len += length;
3956         skb->truesize += PAGE_SIZE;
3957 }
3958
3959 /**
3960  * e1000_receive_skb - helper function to handle rx indications
3961  * @adapter: board private structure
3962  * @status: descriptor status field as written by hardware
3963  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3964  * @skb: pointer to sk_buff to be indicated to stack
3965  */
3966 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3967                               __le16 vlan, struct sk_buff *skb)
3968 {
3969         skb->protocol = eth_type_trans(skb, adapter->netdev);
3970
3971         if (status & E1000_RXD_STAT_VP) {
3972                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3973
3974                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3975         }
3976         napi_gro_receive(&adapter->napi, skb);
3977 }
3978
3979 /**
3980  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
3981  * @adapter: board private structure
3982  * @rx_ring: ring to clean
3983  * @work_done: amount of napi work completed this call
3984  * @work_to_do: max amount of work allowed for this call to do
3985  *
3986  * the return value indicates whether actual cleaning was done, there
3987  * is no guarantee that everything was cleaned
3988  */
3989 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3990                                      struct e1000_rx_ring *rx_ring,
3991                                      int *work_done, int work_to_do)
3992 {
3993         struct e1000_hw *hw = &adapter->hw;
3994         struct net_device *netdev = adapter->netdev;
3995         struct pci_dev *pdev = adapter->pdev;
3996         struct e1000_rx_desc *rx_desc, *next_rxd;
3997         struct e1000_buffer *buffer_info, *next_buffer;
3998         unsigned long irq_flags;
3999         u32 length;
4000         unsigned int i;
4001         int cleaned_count = 0;
4002         bool cleaned = false;
4003         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4004
4005         i = rx_ring->next_to_clean;
4006         rx_desc = E1000_RX_DESC(*rx_ring, i);
4007         buffer_info = &rx_ring->buffer_info[i];
4008
4009         while (rx_desc->status & E1000_RXD_STAT_DD) {
4010                 struct sk_buff *skb;
4011                 u8 status;
4012
4013                 if (*work_done >= work_to_do)
4014                         break;
4015                 (*work_done)++;
4016                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4017
4018                 status = rx_desc->status;
4019                 skb = buffer_info->skb;
4020                 buffer_info->skb = NULL;
4021
4022                 if (++i == rx_ring->count) i = 0;
4023                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4024                 prefetch(next_rxd);
4025
4026                 next_buffer = &rx_ring->buffer_info[i];
4027
4028                 cleaned = true;
4029                 cleaned_count++;
4030                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4031                                buffer_info->length, DMA_FROM_DEVICE);
4032                 buffer_info->dma = 0;
4033
4034                 length = le16_to_cpu(rx_desc->length);
4035
4036                 /* errors is only valid for DD + EOP descriptors */
4037                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4038                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4039                         u8 *mapped;
4040                         u8 last_byte;
4041
4042                         mapped = page_address(buffer_info->page);
4043                         last_byte = *(mapped + length - 1);
4044                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4045                                        last_byte)) {
4046                                 spin_lock_irqsave(&adapter->stats_lock,
4047                                                   irq_flags);
4048                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4049                                                        length, mapped);
4050                                 spin_unlock_irqrestore(&adapter->stats_lock,
4051                                                        irq_flags);
4052                                 length--;
4053                         } else {
4054                                 if (netdev->features & NETIF_F_RXALL)
4055                                         goto process_skb;
4056                                 /* recycle both page and skb */
4057                                 buffer_info->skb = skb;
4058                                 /* an error means any chain goes out the window
4059                                  * too
4060                                  */
4061                                 if (rx_ring->rx_skb_top)
4062                                         dev_kfree_skb(rx_ring->rx_skb_top);
4063                                 rx_ring->rx_skb_top = NULL;
4064                                 goto next_desc;
4065                         }
4066                 }
4067
4068 #define rxtop rx_ring->rx_skb_top
4069 process_skb:
4070                 if (!(status & E1000_RXD_STAT_EOP)) {
4071                         /* this descriptor is only the beginning (or middle) */
4072                         if (!rxtop) {
4073                                 /* this is the beginning of a chain */
4074                                 rxtop = skb;
4075                                 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4076                                                    0, length);
4077                         } else {
4078                                 /* this is the middle of a chain */
4079                                 skb_fill_page_desc(rxtop,
4080                                     skb_shinfo(rxtop)->nr_frags,
4081                                     buffer_info->page, 0, length);
4082                                 /* re-use the skb, only consumed the page */
4083                                 buffer_info->skb = skb;
4084                         }
4085                         e1000_consume_page(buffer_info, rxtop, length);
4086                         goto next_desc;
4087                 } else {
4088                         if (rxtop) {
4089                                 /* end of the chain */
4090                                 skb_fill_page_desc(rxtop,
4091                                     skb_shinfo(rxtop)->nr_frags,
4092                                     buffer_info->page, 0, length);
4093                                 /* re-use the current skb, we only consumed the
4094                                  * page
4095                                  */
4096                                 buffer_info->skb = skb;
4097                                 skb = rxtop;
4098                                 rxtop = NULL;
4099                                 e1000_consume_page(buffer_info, skb, length);
4100                         } else {
4101                                 /* no chain, got EOP, this buf is the whole
4102                                  * packet; copybreak to save put_page/alloc_page
4103                                  */
4104                                 if (length <= copybreak &&
4105                                     skb_tailroom(skb) >= length) {
4106                                         u8 *vaddr;
4107                                         vaddr = kmap_atomic(buffer_info->page);
4108                                         memcpy(skb_tail_pointer(skb), vaddr,
4109                                                length);
4110                                         kunmap_atomic(vaddr);
4111                                         /* re-use the page, so don't erase
4112                                          * buffer_info->page
4113                                          */
4114                                         skb_put(skb, length);
4115                                 } else {
4116                                         skb_fill_page_desc(skb, 0,
4117                                                            buffer_info->page, 0,
4118                                                            length);
4119                                         e1000_consume_page(buffer_info, skb,
4120                                                            length);
4121                                 }
4122                         }
4123                 }
4124
4125                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4126                 e1000_rx_checksum(adapter,
4127                                   (u32)(status) |
4128                                   ((u32)(rx_desc->errors) << 24),
4129                                   le16_to_cpu(rx_desc->csum), skb);
4130
4131                 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4132                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4133                         pskb_trim(skb, skb->len - 4);
4134                 total_rx_packets++;
4135
4136                 /* eth type trans needs skb->data to point to something */
4137                 if (!pskb_may_pull(skb, ETH_HLEN)) {
4138                         e_err(drv, "pskb_may_pull failed.\n");
4139                         dev_kfree_skb(skb);
4140                         goto next_desc;
4141                 }
4142
4143                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4144
4145 next_desc:
4146                 rx_desc->status = 0;
4147
4148                 /* return some buffers to hardware, one at a time is too slow */
4149                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4150                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4151                         cleaned_count = 0;
4152                 }
4153
4154                 /* use prefetched values */
4155                 rx_desc = next_rxd;
4156                 buffer_info = next_buffer;
4157         }
4158         rx_ring->next_to_clean = i;
4159
4160         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4161         if (cleaned_count)
4162                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4163
4164         adapter->total_rx_packets += total_rx_packets;
4165         adapter->total_rx_bytes += total_rx_bytes;
4166         netdev->stats.rx_bytes += total_rx_bytes;
4167         netdev->stats.rx_packets += total_rx_packets;
4168         return cleaned;
4169 }
4170
4171 /* this should improve performance for small packets with large amounts
4172  * of reassembly being done in the stack
4173  */
4174 static void e1000_check_copybreak(struct net_device *netdev,
4175                                  struct e1000_buffer *buffer_info,
4176                                  u32 length, struct sk_buff **skb)
4177 {
4178         struct sk_buff *new_skb;
4179
4180         if (length > copybreak)
4181                 return;
4182
4183         new_skb = netdev_alloc_skb_ip_align(netdev, length);
4184         if (!new_skb)
4185                 return;
4186
4187         skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4188                                        (*skb)->data - NET_IP_ALIGN,
4189                                        length + NET_IP_ALIGN);
4190         /* save the skb in buffer_info as good */
4191         buffer_info->skb = *skb;
4192         *skb = new_skb;
4193 }
4194
4195 /**
4196  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4197  * @adapter: board private structure
4198  * @rx_ring: ring to clean
4199  * @work_done: amount of napi work completed this call
4200  * @work_to_do: max amount of work allowed for this call to do
4201  */
4202 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4203                                struct e1000_rx_ring *rx_ring,
4204                                int *work_done, int work_to_do)
4205 {
4206         struct e1000_hw *hw = &adapter->hw;
4207         struct net_device *netdev = adapter->netdev;
4208         struct pci_dev *pdev = adapter->pdev;
4209         struct e1000_rx_desc *rx_desc, *next_rxd;
4210         struct e1000_buffer *buffer_info, *next_buffer;
4211         unsigned long flags;
4212         u32 length;
4213         unsigned int i;
4214         int cleaned_count = 0;
4215         bool cleaned = false;
4216         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4217
4218         i = rx_ring->next_to_clean;
4219         rx_desc = E1000_RX_DESC(*rx_ring, i);
4220         buffer_info = &rx_ring->buffer_info[i];
4221
4222         while (rx_desc->status & E1000_RXD_STAT_DD) {
4223                 struct sk_buff *skb;
4224                 u8 status;
4225
4226                 if (*work_done >= work_to_do)
4227                         break;
4228                 (*work_done)++;
4229                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4230
4231                 status = rx_desc->status;
4232                 skb = buffer_info->skb;
4233                 buffer_info->skb = NULL;
4234
4235                 prefetch(skb->data - NET_IP_ALIGN);
4236
4237                 if (++i == rx_ring->count)
                        i = 0;
4238                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4239                 prefetch(next_rxd);
4240
4241                 next_buffer = &rx_ring->buffer_info[i];
4242
4243                 cleaned = true;
4244                 cleaned_count++;
4245                 dma_unmap_single(&pdev->dev, buffer_info->dma,
4246                                  buffer_info->length, DMA_FROM_DEVICE);
4247                 buffer_info->dma = 0;
4248
4249                 length = le16_to_cpu(rx_desc->length);
4250                 /* !EOP means multiple descriptors were used to store a single
4251                  * packet; if that's the case we need to toss it.  In fact, we
4252                  * need to toss every packet with the EOP bit clear and the
4253                  * next frame that _does_ have the EOP bit set, as it is by
4254                  * definition only a frame fragment.
4255                  */
4256                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4257                         adapter->discarding = true;
4258
4259                 if (adapter->discarding) {
4260                         /* All receives must fit into a single buffer */
4261                         e_dbg("Receive packet consumed multiple buffers\n");
4262                         /* recycle */
4263                         buffer_info->skb = skb;
4264                         if (status & E1000_RXD_STAT_EOP)
4265                                 adapter->discarding = false;
4266                         goto next_desc;
4267                 }
4268
4269                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4270                         u8 last_byte = *(skb->data + length - 1);
4271                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4272                                        last_byte)) {
4273                                 spin_lock_irqsave(&adapter->stats_lock, flags);
4274                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4275                                                        length, skb->data);
4276                                 spin_unlock_irqrestore(&adapter->stats_lock,
4277                                                        flags);
4278                                 length--;
4279                         } else {
4280                                 if (netdev->features & NETIF_F_RXALL)
4281                                         goto process_skb;
4282                                 /* recycle */
4283                                 buffer_info->skb = skb;
4284                                 goto next_desc;
4285                         }
4286                 }
4287
4288 process_skb:
4289                 total_rx_bytes += (length - 4); /* don't count FCS */
4290                 total_rx_packets++;
4291
4292                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4293                         /* adjust length to remove Ethernet CRC, this must be
4294                          * done after the TBI_ACCEPT workaround above
4295                          */
4296                         length -= 4;
4297
4298                 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4299
4300                 skb_put(skb, length);
4301
4302                 /* Receive Checksum Offload */
4303                 e1000_rx_checksum(adapter,
4304                                   (u32)(status) |
4305                                   ((u32)(rx_desc->errors) << 24),
4306                                   le16_to_cpu(rx_desc->csum), skb);
4307
4308                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4309
4310 next_desc:
4311                 rx_desc->status = 0;
4312
4313                 /* return some buffers to hardware, one at a time is too slow */
4314                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4315                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4316                         cleaned_count = 0;
4317                 }
4318
4319                 /* use prefetched values */
4320                 rx_desc = next_rxd;
4321                 buffer_info = next_buffer;
4322         }
4323         rx_ring->next_to_clean = i;
4324
4325         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4326         if (cleaned_count)
4327                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4328
4329         adapter->total_rx_packets += total_rx_packets;
4330         adapter->total_rx_bytes += total_rx_bytes;
4331         netdev->stats.rx_bytes += total_rx_bytes;
4332         netdev->stats.rx_packets += total_rx_packets;
4333         return cleaned;
4334 }
4335
4336 /**
4337  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4338  * @adapter: address of board private structure
4339  * @rx_ring: pointer to receive ring structure
4340  * @cleaned_count: number of buffers to allocate this pass
4341  **/
4342 static void
4343 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4344                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4345 {
4346         struct net_device *netdev = adapter->netdev;
4347         struct pci_dev *pdev = adapter->pdev;
4348         struct e1000_rx_desc *rx_desc;
4349         struct e1000_buffer *buffer_info;
4350         struct sk_buff *skb;
4351         unsigned int i;
4352         unsigned int bufsz = 256 - 16; /* for skb_reserve */
4353
4354         i = rx_ring->next_to_use;
4355         buffer_info = &rx_ring->buffer_info[i];
4356
4357         while (cleaned_count--) {
4358                 skb = buffer_info->skb;
4359                 if (skb) {
4360                         skb_trim(skb, 0);
4361                         goto check_page;
4362                 }
4363
4364                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4365                 if (unlikely(!skb)) {
4366                         /* Better luck next round */
4367                         adapter->alloc_rx_buff_failed++;
4368                         break;
4369                 }
4370
4371                 buffer_info->skb = skb;
4372                 buffer_info->length = adapter->rx_buffer_len;
4373 check_page:
4374                 /* allocate a new page if necessary */
4375                 if (!buffer_info->page) {
4376                         buffer_info->page = alloc_page(GFP_ATOMIC);
4377                         if (unlikely(!buffer_info->page)) {
4378                                 adapter->alloc_rx_buff_failed++;
4379                                 break;
4380                         }
4381                 }
4382
4383                 if (!buffer_info->dma) {
4384                         buffer_info->dma = dma_map_page(&pdev->dev,
4385                                                         buffer_info->page, 0,
4386                                                         buffer_info->length,
4387                                                         DMA_FROM_DEVICE);
4388                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4389                                 put_page(buffer_info->page);
4390                                 dev_kfree_skb(skb);
4391                                 buffer_info->page = NULL;
4392                                 buffer_info->skb = NULL;
4393                                 buffer_info->dma = 0;
4394                                 adapter->alloc_rx_buff_failed++;
4395                                 break; /* while !buffer_info->skb */
4396                         }
4397                 }
4398
4399                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4400                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4401
4402                 if (unlikely(++i == rx_ring->count))
4403                         i = 0;
4404                 buffer_info = &rx_ring->buffer_info[i];
4405         }
4406
4407         if (likely(rx_ring->next_to_use != i)) {
4408                 rx_ring->next_to_use = i;
4409                 if (unlikely(i-- == 0))
4410                         i = (rx_ring->count - 1);
4411
4412                 /* Force memory writes to complete before letting h/w
4413                  * know there are new descriptors to fetch.  (Only
4414                  * applicable for weak-ordered memory model archs,
4415                  * such as IA-64).
4416                  */
4417                 wmb();
4418                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4419         }
4420 }
4421
4422 /**
4423  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4424  * @adapter: address of board private structure
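 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass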
4425  **/
4426 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4427                                    struct e1000_rx_ring *rx_ring,
4428                                    int cleaned_count)
4429 {
4430         struct e1000_hw *hw = &adapter->hw;
4431         struct net_device *netdev = adapter->netdev;
4432         struct pci_dev *pdev = adapter->pdev;
4433         struct e1000_rx_desc *rx_desc;
4434         struct e1000_buffer *buffer_info;
4435         struct sk_buff *skb;
4436         unsigned int i;
4437         unsigned int bufsz = adapter->rx_buffer_len;
4438
4439         i = rx_ring->next_to_use;
4440         buffer_info = &rx_ring->buffer_info[i];
4441
4442         while (cleaned_count--) {
4443                 skb = buffer_info->skb;
4444                 if (skb) {
4445                         skb_trim(skb, 0);
4446                         goto map_skb;
4447                 }
4448
4449                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4450                 if (unlikely(!skb)) {
4451                         /* Better luck next round */
4452                         adapter->alloc_rx_buff_failed++;
4453                         break;
4454                 }
4455
4456                 /* Fix for errata 23, can't cross 64kB boundary */
4457                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4458                         struct sk_buff *oldskb = skb;
4459                         e_err(rx_err, "skb align check failed: %u bytes at "
4460                               "%p\n", bufsz, skb->data);
4461                         /* Try again, without freeing the previous */
4462                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4463                         /* Failed allocation, critical failure */
4464                         if (!skb) {
4465                                 dev_kfree_skb(oldskb);
4466                                 adapter->alloc_rx_buff_failed++;
4467                                 break;
4468                         }
4469
4470                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4471                                 /* give up */
4472                                 dev_kfree_skb(skb);
4473                                 dev_kfree_skb(oldskb);
4474                                 adapter->alloc_rx_buff_failed++;
4475                                 break; /* while !buffer_info->skb */
4476                         }
4477
4478                         /* Use new allocation */
4479                         dev_kfree_skb(oldskb);
4480                 }
4481                 buffer_info->skb = skb;
4482                 buffer_info->length = adapter->rx_buffer_len;
4483 map_skb:
4484                 buffer_info->dma = dma_map_single(&pdev->dev,
4485                                                   skb->data,
4486                                                   buffer_info->length,
4487                                                   DMA_FROM_DEVICE);
4488                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4489                         dev_kfree_skb(skb);
4490                         buffer_info->skb = NULL;
4491                         buffer_info->dma = 0;
4492                         adapter->alloc_rx_buff_failed++;
4493                         break; /* while !buffer_info->skb */
4494                 }
4495
4496                 /* XXX if it was allocated cleanly it will never map to a
4497                  * boundary crossing
4498                  */
4499
4500                 /* Fix for errata 23, can't cross 64kB boundary */
4501                 if (!e1000_check_64k_bound(adapter,
4502                                         (void *)(unsigned long)buffer_info->dma,
4503                                         adapter->rx_buffer_len)) {
4504                         e_err(rx_err, "dma align check failed: %u bytes at "
4505                               "%p\n", adapter->rx_buffer_len,
4506                               (void *)(unsigned long)buffer_info->dma);
4507                         dev_kfree_skb(skb);
4508                         buffer_info->skb = NULL;
4509
4510                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4511                                          adapter->rx_buffer_len,
4512                                          DMA_FROM_DEVICE);
4513                         buffer_info->dma = 0;
4514
4515                         adapter->alloc_rx_buff_failed++;
4516                         break; /* while !buffer_info->skb */
4517                 }
4518                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4519                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4520
4521                 if (unlikely(++i == rx_ring->count))
4522                         i = 0;
4523                 buffer_info = &rx_ring->buffer_info[i];
4524         }
4525
4526         if (likely(rx_ring->next_to_use != i)) {
4527                 rx_ring->next_to_use = i;
4528                 if (unlikely(i-- == 0))
4529                         i = (rx_ring->count - 1);
4530
4531                 /* Force memory writes to complete before letting h/w
4532                  * know there are new descriptors to fetch.  (Only
4533                  * applicable for weak-ordered memory model archs,
4534                  * such as IA-64).
4535                  */
4536                 wmb();
4537                 writel(i, hw->hw_addr + rx_ring->rdt);
4538         }
4539 }
4540
4541 /**
4542  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4543  * @adapter: board private structure
4544  **/
4545 static void e1000_smartspeed(struct e1000_adapter *adapter)
4546 {
4547         struct e1000_hw *hw = &adapter->hw;
4548         u16 phy_status;
4549         u16 phy_ctrl;
4550
4551         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4552            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4553                 return;
4554
4555         if (adapter->smartspeed == 0) {
4556                 /* If Master/Slave config fault is asserted twice,
4557                  * we assume back-to-back
4558                  */
4559                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4560                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
                        return;
4561                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4562                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
                        return;
4563                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4564                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4565                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4566                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4567                                             phy_ctrl);
4568                         adapter->smartspeed++;
4569                         if (!e1000_phy_setup_autoneg(hw) &&
4570                            !e1000_read_phy_reg(hw, PHY_CTRL,
4571                                                &phy_ctrl)) {
4572                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4573                                              MII_CR_RESTART_AUTO_NEG);
4574                                 e1000_write_phy_reg(hw, PHY_CTRL,
4575                                                     phy_ctrl);
4576                         }
4577                 }
4578                 return;
4579         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4580                 /* If still no link, perhaps using 2/3 pair cable */
4581                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4582                 phy_ctrl |= CR_1000T_MS_ENABLE;
4583                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4584                 if (!e1000_phy_setup_autoneg(hw) &&
4585                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4586                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4587                                      MII_CR_RESTART_AUTO_NEG);
4588                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4589                 }
4590         }
4591         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4592         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4593                 adapter->smartspeed = 0;
4594 }
4595
4596 /**
4597  * e1000_ioctl - handle ioctl requests for the interface
4598  * @netdev: network interface device structure
4599  * @ifr: interface request structure containing the ioctl data
4600  * @cmd: ioctl command to execute
4601  **/
4602 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4603 {
4604         switch (cmd) {
4605         case SIOCGMIIPHY:
4606         case SIOCGMIIREG:
4607         case SIOCSMIIREG:
4608                 return e1000_mii_ioctl(netdev, ifr, cmd);
4609         default:
4610                 return -EOPNOTSUPP;
4611         }
4612 }
4613
4614 /**
4615  * e1000_mii_ioctl - handle MII ioctl requests (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG)
4616  * @netdev: network interface device structure
4617  * @ifr: interface request structure containing the MII data
4618  * @cmd: ioctl command to execute
4619  **/
4620 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4621                            int cmd)
4622 {
4623         struct e1000_adapter *adapter = netdev_priv(netdev);
4624         struct e1000_hw *hw = &adapter->hw;
4625         struct mii_ioctl_data *data = if_mii(ifr);
4626         int retval;
4627         u16 mii_reg;
4628         unsigned long flags;
4629
4630         if (hw->media_type != e1000_media_type_copper)
4631                 return -EOPNOTSUPP;
4632
4633         switch (cmd) {
4634         case SIOCGMIIPHY:
4635                 data->phy_id = hw->phy_addr;
4636                 break;
4637         case SIOCGMIIREG:
4638                 spin_lock_irqsave(&adapter->stats_lock, flags);
4639                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4640                                    &data->val_out)) {
4641                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4642                         return -EIO;
4643                 }
4644                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4645                 break;
4646         case SIOCSMIIREG:
4647                 if (data->reg_num & ~(0x1F))
4648                         return -EFAULT;
4649                 mii_reg = data->val_in;
4650                 spin_lock_irqsave(&adapter->stats_lock, flags);
4651                 if (e1000_write_phy_reg(hw, data->reg_num,
4652                                         mii_reg)) {
4653                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4654                         return -EIO;
4655                 }
4656                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4657                 if (hw->media_type == e1000_media_type_copper) {
4658                         switch (data->reg_num) {
4659                         case PHY_CTRL:
4660                                 if (mii_reg & MII_CR_POWER_DOWN)
4661                                         break;
4662                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4663                                         hw->autoneg = 1;
4664                                         hw->autoneg_advertised = 0x2F;
4665                                 } else {
4666                                         u32 speed;
4667                                         if (mii_reg & 0x40)
4668                                                 speed = SPEED_1000;
4669                                         else if (mii_reg & 0x2000)
4670                                                 speed = SPEED_100;
4671                                         else
4672                                                 speed = SPEED_10;
4673                                         retval = e1000_set_spd_dplx(
4674                                                 adapter, speed,
4675                                                 ((mii_reg & 0x100)
4676                                                  ? DUPLEX_FULL :
4677                                                  DUPLEX_HALF));
4678                                         if (retval)
4679                                                 return retval;
4680                                 }
4681                                 if (netif_running(adapter->netdev))
4682                                         e1000_reinit_locked(adapter);
4683                                 else
4684                                         e1000_reset(adapter);
4685                                 break;
4686                         case M88E1000_PHY_SPEC_CTRL:
4687                         case M88E1000_EXT_PHY_SPEC_CTRL:
4688                                 if (e1000_phy_reset(hw))
4689                                         return -EIO;
4690                                 break;
4691                         }
4692                 } else {
4693                         switch (data->reg_num) {
4694                         case PHY_CTRL:
4695                                 if (mii_reg & MII_CR_POWER_DOWN)
4696                                         break;
4697                                 if (netif_running(adapter->netdev))
4698                                         e1000_reinit_locked(adapter);
4699                                 else
4700                                         e1000_reset(adapter);
4701                                 break;
4702                         }
4703                 }
4704                 break;
4705         default:
4706                 return -EOPNOTSUPP;
4707         }
4708         return E1000_SUCCESS;
4709 }
4710
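/* Enable PCI Memory Write and Invalidate (MWI) on the adapter's PCI device;
 * log an error if the PCI core rejects the request.
 */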
4711 void e1000_pci_set_mwi(struct e1000_hw *hw)
4712 {
4713         struct e1000_adapter *adapter = hw->back;
4714         int ret_val = pci_set_mwi(adapter->pdev);
4715
4716         if (ret_val)
4717                 e_err(probe, "Error in setting MWI\n");
4718 }
4719
4720 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4721 {
4722         struct e1000_adapter *adapter = hw->back;
4723
4724         pci_clear_mwi(adapter->pdev);
4725 }
4726
4727 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4728 {
4729         struct e1000_adapter *adapter = hw->back;
4730         return pcix_get_mmrbc(adapter->pdev);
4731 }
4732
4733 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4734 {
4735         struct e1000_adapter *adapter = hw->back;
4736         pcix_set_mmrbc(adapter->pdev, mmrbc);
4737 }
4738
4739 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4740 {
4741         outl(value, port);
4742 }
4743
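/* Return true if at least one VLAN ID is currently registered on the
 * adapter, false otherwise.
 */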
4744 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4745 {
4746         u16 vid;
4747
4748         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4749                 return true;
4750         return false;
4751 }
4752
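/* Set or clear CTRL.VME to enable or disable hardware VLAN tag insert/strip,
 * based on the NETIF_F_HW_VLAN_CTAG_RX feature bit.
 */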
4753 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4754                               netdev_features_t features)
4755 {
4756         struct e1000_hw *hw = &adapter->hw;
4757         u32 ctrl;
4758
4759         ctrl = er32(CTRL);
4760         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4761                 /* enable VLAN tag insert/strip */
4762                 ctrl |= E1000_CTRL_VME;
4763         } else {
4764                 /* disable VLAN tag insert/strip */
4765                 ctrl &= ~E1000_CTRL_VME;
4766         }
4767         ew32(CTRL, ctrl);
4768 }
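
/* Enable or disable VLAN receive filtering (RCTL.VFE) with interrupts
 * masked; filtering stays off while the interface is in promiscuous mode.
 */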
4769 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4770                                      bool filter_on)
4771 {
4772         struct e1000_hw *hw = &adapter->hw;
4773         u32 rctl;
4774
4775         if (!test_bit(__E1000_DOWN, &adapter->flags))
4776                 e1000_irq_disable(adapter);
4777
4778         __e1000_vlan_mode(adapter, adapter->netdev->features);
4779         if (filter_on) {
4780                 /* enable VLAN receive filtering */
4781                 rctl = er32(RCTL);
4782                 rctl &= ~E1000_RCTL_CFIEN;
4783                 if (!(adapter->netdev->flags & IFF_PROMISC))
4784                         rctl |= E1000_RCTL_VFE;
4785                 ew32(RCTL, rctl);
4786                 e1000_update_mng_vlan(adapter);
4787         } else {
4788                 /* disable VLAN receive filtering */
4789                 rctl = er32(RCTL);
4790                 rctl &= ~E1000_RCTL_VFE;
4791                 ew32(RCTL, rctl);
4792         }
4793
4794         if (!test_bit(__E1000_DOWN, &adapter->flags))
4795                 e1000_irq_enable(adapter);
4796 }
4797
4798 static void e1000_vlan_mode(struct net_device *netdev,
4799                             netdev_features_t features)
4800 {
4801         struct e1000_adapter *adapter = netdev_priv(netdev);
4802
4803         if (!test_bit(__E1000_DOWN, &adapter->flags))
4804                 e1000_irq_disable(adapter);
4805
4806         __e1000_vlan_mode(adapter, features);
4807
4808         if (!test_bit(__E1000_DOWN, &adapter->flags))
4809                 e1000_irq_enable(adapter);
4810 }
4811
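/* Add @vid to the hardware VLAN filter table (VFTA) and record it in
 * adapter->active_vlans; the management VLAN is left untouched.
 */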
4812 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4813                                  __be16 proto, u16 vid)
4814 {
4815         struct e1000_adapter *adapter = netdev_priv(netdev);
4816         struct e1000_hw *hw = &adapter->hw;
4817         u32 vfta, index;
4818
4819         if ((hw->mng_cookie.status &
4820              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4821             (vid == adapter->mng_vlan_id))
4822                 return 0;
4823
4824         if (!e1000_vlan_used(adapter))
4825                 e1000_vlan_filter_on_off(adapter, true);
4826
4827         /* add VID to filter table */
4828         index = (vid >> 5) & 0x7F;
4829         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4830         vfta |= (1 << (vid & 0x1F));
4831         e1000_write_vfta(hw, index, vfta);
4832
4833         set_bit(vid, adapter->active_vlans);
4834
4835         return 0;
4836 }
4837
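/* Remove @vid from the hardware VLAN filter table (VFTA) and from
 * adapter->active_vlans; VLAN filtering is switched off once no VLANs
 * remain in use.
 */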
4838 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4839                                   __be16 proto, u16 vid)
4840 {
4841         struct e1000_adapter *adapter = netdev_priv(netdev);
4842         struct e1000_hw *hw = &adapter->hw;
4843         u32 vfta, index;
4844
4845         if (!test_bit(__E1000_DOWN, &adapter->flags))
4846                 e1000_irq_disable(adapter);
4847         if (!test_bit(__E1000_DOWN, &adapter->flags))
4848                 e1000_irq_enable(adapter);
4849
4850         /* remove VID from filter table */
4851         index = (vid >> 5) & 0x7F;
4852         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4853         vfta &= ~(1 << (vid & 0x1F));
4854         e1000_write_vfta(hw, index, vfta);
4855
4856         clear_bit(vid, adapter->active_vlans);
4857
4858         if (!e1000_vlan_used(adapter))
4859                 e1000_vlan_filter_on_off(adapter, false);
4860
4861         return 0;
4862 }
4863
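/* Re-enable VLAN filtering and re-register every VLAN ID tracked in
 * adapter->active_vlans, typically after a reset has cleared the VFTA.
 */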
4864 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4865 {
4866         u16 vid;
4867
4868         if (!e1000_vlan_used(adapter))
4869                 return;
4870
4871         e1000_vlan_filter_on_off(adapter, true);
4872         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4873                 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4874 }
4875
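/* Force the requested speed/duplex combination, or re-enable autonegotiation
 * advertising 1000/Full when that pairing is requested; returns -EINVAL for
 * combinations the hardware cannot do.
 */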
4876 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4877 {
4878         struct e1000_hw *hw = &adapter->hw;
4879
4880         hw->autoneg = 0;
4881
4882         /* Make sure dplx is at most 1 bit and lsb of speed is not set
4883          * for the switch() below to work
4884          */
4885         if ((spd & 1) || (dplx & ~1))
4886                 goto err_inval;
4887
4888         /* Fiber NICs only allow 1000 Mbps full duplex */
4889         if ((hw->media_type == e1000_media_type_fiber) &&
4890             spd != SPEED_1000 &&
4891             dplx != DUPLEX_FULL)
4892                 goto err_inval;
4893
4894         switch (spd + dplx) {
4895         case SPEED_10 + DUPLEX_HALF:
4896                 hw->forced_speed_duplex = e1000_10_half;
4897                 break;
4898         case SPEED_10 + DUPLEX_FULL:
4899                 hw->forced_speed_duplex = e1000_10_full;
4900                 break;
4901         case SPEED_100 + DUPLEX_HALF:
4902                 hw->forced_speed_duplex = e1000_100_half;
4903                 break;
4904         case SPEED_100 + DUPLEX_FULL:
4905                 hw->forced_speed_duplex = e1000_100_full;
4906                 break;
4907         case SPEED_1000 + DUPLEX_FULL:
4908                 hw->autoneg = 1;
4909                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
4910                 break;
4911         case SPEED_1000 + DUPLEX_HALF: /* not supported */
4912         default:
4913                 goto err_inval;
4914         }
4915
4916         /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4917         hw->mdix = AUTO_ALL_MODES;
4918
4919         return 0;
4920
4921 err_inval:
4922         e_err(probe, "Unsupported Speed/Duplex configuration\n");
4923         return -EINVAL;
4924 }
4925
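/* Common suspend/shutdown path: stop the interface, program the Wake-on-LAN
 * filters (WUC/WUFC) when wake-up is requested, and report via @enable_wake
 * whether the device should be armed as a wake-up source.
 */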
4926 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4927 {
4928         struct net_device *netdev = pci_get_drvdata(pdev);
4929         struct e1000_adapter *adapter = netdev_priv(netdev);
4930         struct e1000_hw *hw = &adapter->hw;
4931         u32 ctrl, ctrl_ext, rctl, status;
4932         u32 wufc = adapter->wol;
4933 #ifdef CONFIG_PM
4934         int retval = 0;
4935 #endif
4936
4937         netif_device_detach(netdev);
4938
4939         if (netif_running(netdev)) {
4940                 int count = E1000_CHECK_RESET_COUNT;
4941
4942                 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
4943                         usleep_range(10000, 20000);
4944
4945                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4946                 e1000_down(adapter);
4947         }
4948
4949 #ifdef CONFIG_PM
4950         retval = pci_save_state(pdev);
4951         if (retval)
4952                 return retval;
4953 #endif
4954
4955         status = er32(STATUS);
4956         if (status & E1000_STATUS_LU)
4957                 wufc &= ~E1000_WUFC_LNKC;
4958
4959         if (wufc) {
4960                 e1000_setup_rctl(adapter);
4961                 e1000_set_rx_mode(netdev);
4962
4963                 rctl = er32(RCTL);
4964
4965                 /* turn on all-multi mode if wake on multicast is enabled */
4966                 if (wufc & E1000_WUFC_MC)
4967                         rctl |= E1000_RCTL_MPE;
4968
4969                 /* enable receives in the hardware */
4970                 ew32(RCTL, rctl | E1000_RCTL_EN);
4971
4972                 if (hw->mac_type >= e1000_82540) {
4973                         ctrl = er32(CTRL);
4974                         /* advertise wake from D3Cold */
4975                         #define E1000_CTRL_ADVD3WUC 0x00100000
4976                         /* phy power management enable */
4977                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4978                         ctrl |= E1000_CTRL_ADVD3WUC |
4979                                 E1000_CTRL_EN_PHY_PWR_MGMT;
4980                         ew32(CTRL, ctrl);
4981                 }
4982
4983                 if (hw->media_type == e1000_media_type_fiber ||
4984                     hw->media_type == e1000_media_type_internal_serdes) {
4985                         /* keep the laser running in D3 */
4986                         ctrl_ext = er32(CTRL_EXT);
4987                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
4988                         ew32(CTRL_EXT, ctrl_ext);
4989                 }
4990
4991                 ew32(WUC, E1000_WUC_PME_EN);
4992                 ew32(WUFC, wufc);
4993         } else {
4994                 ew32(WUC, 0);
4995                 ew32(WUFC, 0);
4996         }
4997
4998         e1000_release_manageability(adapter);
4999
5000         *enable_wake = !!wufc;
5001
5002         /* make sure adapter isn't asleep if manageability is enabled */
5003         if (adapter->en_mng_pt)
5004                 *enable_wake = true;
5005
5006         if (netif_running(netdev))
5007                 e1000_free_irq(adapter);
5008
5009         pci_disable_device(pdev);
5010
5011         return 0;
5012 }
5013
5014 #ifdef CONFIG_PM
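/* PCI power-management suspend hook: run the common shutdown path and put
 * the device into a low-power state (typically D3hot), arming it for
 * wake-up when WoL is configured.
 */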
5015 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5016 {
5017         int retval;
5018         bool wake;
5019
5020         retval = __e1000_shutdown(pdev, &wake);
5021         if (retval)
5022                 return retval;
5023
5024         if (wake) {
5025                 pci_prepare_to_sleep(pdev);
5026         } else {
5027                 pci_wake_from_d3(pdev, false);
5028                 pci_set_power_state(pdev, PCI_D3hot);
5029         }
5030
5031         return 0;
5032 }
5033
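/* PCI power-management resume hook: restore PCI state, re-enable the
 * device, clear any pending wake-up status (WUS) and bring the interface
 * back up.
 */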
5034 static int e1000_resume(struct pci_dev *pdev)
5035 {
5036         struct net_device *netdev = pci_get_drvdata(pdev);
5037         struct e1000_adapter *adapter = netdev_priv(netdev);
5038         struct e1000_hw *hw = &adapter->hw;
5039         u32 err;
5040
5041         pci_set_power_state(pdev, PCI_D0);
5042         pci_restore_state(pdev);
5043         pci_save_state(pdev);
5044
5045         if (adapter->need_ioport)
5046                 err = pci_enable_device(pdev);
5047         else
5048                 err = pci_enable_device_mem(pdev);
5049         if (err) {
5050                 pr_err("Cannot enable PCI device from suspend\n");
5051                 return err;
5052         }
5053         pci_set_master(pdev);
5054
5055         pci_enable_wake(pdev, PCI_D3hot, 0);
5056         pci_enable_wake(pdev, PCI_D3cold, 0);
5057
5058         if (netif_running(netdev)) {
5059                 err = e1000_request_irq(adapter);
5060                 if (err)
5061                         return err;
5062         }
5063
5064         e1000_power_up_phy(adapter);
5065         e1000_reset(adapter);
5066         ew32(WUS, ~0);
5067
5068         e1000_init_manageability(adapter);
5069
5070         if (netif_running(netdev))
5071                 e1000_up(adapter);
5072
5073         netif_device_attach(netdev);
5074
5075         return 0;
5076 }
5077 #endif
5078
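/* Reboot/power-off hook: run the common shutdown path and, when the system
 * is powering off, arm or disarm wake-up and drop the device into D3hot.
 */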
5079 static void e1000_shutdown(struct pci_dev *pdev)
5080 {
5081         bool wake;
5082
5083         __e1000_shutdown(pdev, &wake);
5084
5085         if (system_state == SYSTEM_POWER_OFF) {
5086                 pci_wake_from_d3(pdev, wake);
5087                 pci_set_power_state(pdev, PCI_D3hot);
5088         }
5089 }
5090
5091 #ifdef CONFIG_NET_POLL_CONTROLLER
5092 /* Polling 'interrupt' - used by things like netconsole to send skbs
5093  * without having to re-enable interrupts. It's not called while
5094  * the interrupt routine is executing.
5095  */
5096 static void e1000_netpoll(struct net_device *netdev)
5097 {
5098         struct e1000_adapter *adapter = netdev_priv(netdev);
5099
5100         disable_irq(adapter->pdev->irq);
5101         e1000_intr(adapter->pdev->irq, netdev);
5102         enable_irq(adapter->pdev->irq);
5103 }
5104 #endif
5105
5106 /**
5107  * e1000_io_error_detected - called when PCI error is detected
5108  * @pdev: Pointer to PCI device
5109  * @state: The current pci connection state
5110  *
5111  * This function is called after a PCI bus error affecting
5112  * this device has been detected.
5113  */
5114 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5115                                                 pci_channel_state_t state)
5116 {
5117         struct net_device *netdev = pci_get_drvdata(pdev);
5118         struct e1000_adapter *adapter = netdev_priv(netdev);
5119
5120         netif_device_detach(netdev);
5121
5122         if (state == pci_channel_io_perm_failure)
5123                 return PCI_ERS_RESULT_DISCONNECT;
5124
5125         if (netif_running(netdev))
5126                 e1000_down(adapter);
5127         pci_disable_device(pdev);
5128
5129         /* Request a slot reset. */
5130         return PCI_ERS_RESULT_NEED_RESET;
5131 }
5132
5133 /**
5134  * e1000_io_slot_reset - called after the pci bus has been reset.
5135  * @pdev: Pointer to PCI device
5136  *
5137  * Restart the card from scratch, as if from a cold-boot. Implementation
5138  * resembles the first-half of the e1000_resume routine.
5139  */
5140 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5141 {
5142         struct net_device *netdev = pci_get_drvdata(pdev);
5143         struct e1000_adapter *adapter = netdev_priv(netdev);
5144         struct e1000_hw *hw = &adapter->hw;
5145         int err;
5146
5147         if (adapter->need_ioport)
5148                 err = pci_enable_device(pdev);
5149         else
5150                 err = pci_enable_device_mem(pdev);
5151         if (err) {
5152                 pr_err("Cannot re-enable PCI device after reset.\n");
5153                 return PCI_ERS_RESULT_DISCONNECT;
5154         }
5155         pci_set_master(pdev);
5156
5157         pci_enable_wake(pdev, PCI_D3hot, 0);
5158         pci_enable_wake(pdev, PCI_D3cold, 0);
5159
5160         e1000_reset(adapter);
5161         ew32(WUS, ~0);
5162
5163         return PCI_ERS_RESULT_RECOVERED;
5164 }
5165
5166 /**
5167  * e1000_io_resume - called when traffic can start flowing again.
5168  * @pdev: Pointer to PCI device
5169  *
5170  * This callback is called when the error recovery driver tells us that
5171  * it's OK to resume normal operation. Implementation resembles the
5172  * second-half of the e1000_resume routine.
5173  */
5174 static void e1000_io_resume(struct pci_dev *pdev)
5175 {
5176         struct net_device *netdev = pci_get_drvdata(pdev);
5177         struct e1000_adapter *adapter = netdev_priv(netdev);
5178
5179         e1000_init_manageability(adapter);
5180
5181         if (netif_running(netdev)) {
5182                 if (e1000_up(adapter)) {
5183                         pr_info("can't bring device back up after reset\n");
5184                         return;
5185                 }
5186         }
5187
5188         netif_device_attach(netdev);
5189 }
5190
5191 /* e1000_main.c */