1 /*******************************************************************************
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/init.h>
34 #include <linux/pci.h>
35 #include <linux/vmalloc.h>
36 #include <linux/pagemap.h>
37 #include <linux/delay.h>
38 #include <linux/netdevice.h>
39 #include <linux/tcp.h>
40 #include <linux/ipv6.h>
41 #include <linux/slab.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <linux/mii.h>
45 #include <linux/ethtool.h>
46 #include <linux/if_vlan.h>
47 #include <linux/cpu.h>
48 #include <linux/smp.h>
49 #include <linux/pm_qos_params.h>
50 #include <linux/pm_runtime.h>
51 #include <linux/aer.h>
55 #define DRV_VERSION "1.0.2-k2"
56 char e1000e_driver_name[] = "e1000e";
57 const char e1000e_driver_version[] = DRV_VERSION;
59 static const struct e1000_info *e1000_info_tbl[] = {
60 [board_82571] = &e1000_82571_info,
61 [board_82572] = &e1000_82572_info,
62 [board_82573] = &e1000_82573_info,
63 [board_82574] = &e1000_82574_info,
64 [board_82583] = &e1000_82583_info,
65 [board_80003es2lan] = &e1000_es2_info,
66 [board_ich8lan] = &e1000_ich8_info,
67 [board_ich9lan] = &e1000_ich9_info,
68 [board_ich10lan] = &e1000_ich10_info,
69 [board_pchlan] = &e1000_pch_info,
73 * e1000_desc_unused - calculate the number of unused descriptors in the ring
75 static int e1000_desc_unused(struct e1000_ring *ring)
77 if (ring->next_to_clean > ring->next_to_use)
78 return ring->next_to_clean - ring->next_to_use - 1;
80 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
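/*
 * Worked example (illustrative, assuming a 256-entry ring): with
 * next_to_use = 250 and next_to_clean = 10 the indices have wrapped,
 * so 256 + 10 - 250 - 1 = 15 descriptors are free.  One slot is always
 * left unused so that next_to_use == next_to_clean unambiguously means
 * "empty" rather than "full".
 */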
84 * e1000_receive_skb - helper function to handle Rx indications
85 * @adapter: board private structure
86 * @status: descriptor status field as written by hardware
87 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
88 * @skb: pointer to sk_buff to be indicated to stack
90 static void e1000_receive_skb(struct e1000_adapter *adapter,
91 struct net_device *netdev,
93 u8 status, __le16 vlan)
95 skb->protocol = eth_type_trans(skb, netdev);
97 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
98 vlan_gro_receive(&adapter->napi, adapter->vlgrp,
99 le16_to_cpu(vlan), skb);
101 napi_gro_receive(&adapter->napi, skb);
105 * e1000_rx_checksum - Receive Checksum Offload
106 * @adapter: board private structure
107 * @status_err: receive descriptor status and error fields
108 * @csum: receive descriptor csum field
109 * @sk_buff: socket buffer with received data
111 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
112 u32 csum, struct sk_buff *skb)
114 u16 status = (u16)status_err;
115 u8 errors = (u8)(status_err >> 24);
116 skb->ip_summed = CHECKSUM_NONE;
118 /* Ignore Checksum bit is set */
119 if (status & E1000_RXD_STAT_IXSM)
121 /* TCP/UDP checksum error bit is set */
122 if (errors & E1000_RXD_ERR_TCPE) {
123 /* let the stack verify checksum errors */
124 adapter->hw_csum_err++;
128 /* TCP/UDP Checksum has not been calculated */
129 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
132 /* It must be a TCP or UDP packet with a valid checksum */
133 if (status & E1000_RXD_STAT_TCPCS) {
134 /* TCP checksum is good */
135 skb->ip_summed = CHECKSUM_UNNECESSARY;
138 * IP fragment with UDP payload
139 * Hardware complements the payload checksum, so we undo it
140 * and then put the value in host order for further stack use.
142 __sum16 sum = (__force __sum16)htons(csum);
143 skb->csum = csum_unfold(~sum);
144 skb->ip_summed = CHECKSUM_COMPLETE;
146 adapter->hw_csum_good++;
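/*
 * Note on the two ip_summed values set above: CHECKSUM_UNNECESSARY
 * tells the stack the hardware already verified the TCP checksum,
 * while CHECKSUM_COMPLETE hands the stack the unfolded payload
 * checksum in skb->csum so it can finish verification itself, which
 * is what the UDP-over-IP-fragment case needs.
 */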
150 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
151 * @adapter: address of board private structure
153 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
156 struct net_device *netdev = adapter->netdev;
157 struct pci_dev *pdev = adapter->pdev;
158 struct e1000_ring *rx_ring = adapter->rx_ring;
159 struct e1000_rx_desc *rx_desc;
160 struct e1000_buffer *buffer_info;
163 unsigned int bufsz = adapter->rx_buffer_len;
165 i = rx_ring->next_to_use;
166 buffer_info = &rx_ring->buffer_info[i];
168 while (cleaned_count--) {
169 skb = buffer_info->skb;
175 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
177 /* Better luck next round */
178 adapter->alloc_rx_buff_failed++;
182 buffer_info->skb = skb;
184 buffer_info->dma = pci_map_single(pdev, skb->data,
185 adapter->rx_buffer_len,
187 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
188 dev_err(&pdev->dev, "RX DMA map failed\n");
189 adapter->rx_dma_failed++;
193 rx_desc = E1000_RX_DESC(*rx_ring, i);
194 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
197 if (i == rx_ring->count)
199 buffer_info = &rx_ring->buffer_info[i];
202 if (rx_ring->next_to_use != i) {
203 rx_ring->next_to_use = i;
205 i = (rx_ring->count - 1);
208 * Force memory writes to complete before letting h/w
209 * know there are new descriptors to fetch. (Only
210 * applicable for weak-ordered memory model archs, such as IA-64).
214 writel(i, adapter->hw.hw_addr + rx_ring->tail);
219 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
220 * @adapter: address of board private structure
222 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
225 struct net_device *netdev = adapter->netdev;
226 struct pci_dev *pdev = adapter->pdev;
227 union e1000_rx_desc_packet_split *rx_desc;
228 struct e1000_ring *rx_ring = adapter->rx_ring;
229 struct e1000_buffer *buffer_info;
230 struct e1000_ps_page *ps_page;
234 i = rx_ring->next_to_use;
235 buffer_info = &rx_ring->buffer_info[i];
237 while (cleaned_count--) {
238 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
240 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
241 ps_page = &buffer_info->ps_pages[j];
242 if (j >= adapter->rx_ps_pages) {
243 /* all unused desc entries get hw null ptr */
244 rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
247 if (!ps_page->page) {
248 ps_page->page = alloc_page(GFP_ATOMIC);
249 if (!ps_page->page) {
250 adapter->alloc_rx_buff_failed++;
253 ps_page->dma = pci_map_page(pdev,
257 if (pci_dma_mapping_error(pdev, ps_page->dma)) {
258 dev_err(&adapter->pdev->dev,
259 "RX DMA page map failed\n");
260 adapter->rx_dma_failed++;
265 * Refresh the desc even if buffer_addrs
266 * didn't change because each write-back erases this info.
269 rx_desc->read.buffer_addr[j+1] =
270 cpu_to_le64(ps_page->dma);
273 skb = netdev_alloc_skb_ip_align(netdev,
274 adapter->rx_ps_bsize0);
277 adapter->alloc_rx_buff_failed++;
281 buffer_info->skb = skb;
282 buffer_info->dma = pci_map_single(pdev, skb->data,
283 adapter->rx_ps_bsize0,
285 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
286 dev_err(&pdev->dev, "RX DMA map failed\n");
287 adapter->rx_dma_failed++;
289 dev_kfree_skb_any(skb);
290 buffer_info->skb = NULL;
294 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
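/*
 * A packet-split descriptor carries one header buffer address in
 * buffer_addr[0] (the small skb mapped just above) plus up to
 * PS_PAGE_BUFFERS page addresses in buffer_addr[1..]; that is why the
 * page loop above writes buffer_addr[j+1] and fills unused slots with
 * the all-ones "hw null" pointer.
 */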
297 if (i == rx_ring->count)
299 buffer_info = &rx_ring->buffer_info[i];
303 if (rx_ring->next_to_use != i) {
304 rx_ring->next_to_use = i;
307 i = (rx_ring->count - 1);
310 * Force memory writes to complete before letting h/w
311 * know there are new descriptors to fetch. (Only
312 * applicable for weak-ordered memory model archs, such as IA-64).
317 * Hardware increments by 16 bytes, but packet split
318 * descriptors are 32 bytes...so we increment tail twice as much.
321 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
326 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
327 * @adapter: address of board private structure
328 * @cleaned_count: number of buffers to allocate this pass
331 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
334 struct net_device *netdev = adapter->netdev;
335 struct pci_dev *pdev = adapter->pdev;
336 struct e1000_rx_desc *rx_desc;
337 struct e1000_ring *rx_ring = adapter->rx_ring;
338 struct e1000_buffer *buffer_info;
341 unsigned int bufsz = 256 - 16 /* for skb_reserve */;
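/*
 * In the jumbo path only a small (256 bytes minus headroom) skb is
 * allocated for the packet head; the frame data itself lands in the
 * full pages mapped below, which the jumbo clean routine later
 * attaches to an skb as page fragments.
 */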
343 i = rx_ring->next_to_use;
344 buffer_info = &rx_ring->buffer_info[i];
346 while (cleaned_count--) {
347 skb = buffer_info->skb;
353 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
354 if (unlikely(!skb)) {
355 /* Better luck next round */
356 adapter->alloc_rx_buff_failed++;
360 buffer_info->skb = skb;
362 /* allocate a new page if necessary */
363 if (!buffer_info->page) {
364 buffer_info->page = alloc_page(GFP_ATOMIC);
365 if (unlikely(!buffer_info->page)) {
366 adapter->alloc_rx_buff_failed++;
371 if (!buffer_info->dma)
372 buffer_info->dma = pci_map_page(pdev,
373 buffer_info->page, 0,
377 rx_desc = E1000_RX_DESC(*rx_ring, i);
378 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
380 if (unlikely(++i == rx_ring->count))
382 buffer_info = &rx_ring->buffer_info[i];
385 if (likely(rx_ring->next_to_use != i)) {
386 rx_ring->next_to_use = i;
387 if (unlikely(i-- == 0))
388 i = (rx_ring->count - 1);
390 /* Force memory writes to complete before letting h/w
391 * know there are new descriptors to fetch. (Only
392 * applicable for weak-ordered memory model archs, such as IA-64).
395 writel(i, adapter->hw.hw_addr + rx_ring->tail);
400 * e1000_clean_rx_irq - Send received data up the network stack; legacy
401 * @adapter: board private structure
403 * the return value indicates whether actual cleaning was done; there
404 * is no guarantee that everything was cleaned
406 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
407 int *work_done, int work_to_do)
409 struct net_device *netdev = adapter->netdev;
410 struct pci_dev *pdev = adapter->pdev;
411 struct e1000_hw *hw = &adapter->hw;
412 struct e1000_ring *rx_ring = adapter->rx_ring;
413 struct e1000_rx_desc *rx_desc, *next_rxd;
414 struct e1000_buffer *buffer_info, *next_buffer;
417 int cleaned_count = 0;
419 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
421 i = rx_ring->next_to_clean;
422 rx_desc = E1000_RX_DESC(*rx_ring, i);
423 buffer_info = &rx_ring->buffer_info[i];
425 while (rx_desc->status & E1000_RXD_STAT_DD) {
429 if (*work_done >= work_to_do)
433 status = rx_desc->status;
434 skb = buffer_info->skb;
435 buffer_info->skb = NULL;
437 prefetch(skb->data - NET_IP_ALIGN);
440 if (i == rx_ring->count)
442 next_rxd = E1000_RX_DESC(*rx_ring, i);
445 next_buffer = &rx_ring->buffer_info[i];
449 pci_unmap_single(pdev,
451 adapter->rx_buffer_len,
453 buffer_info->dma = 0;
455 length = le16_to_cpu(rx_desc->length);
458 * !EOP means multiple descriptors were used to store a single
459 * packet, if that's the case we need to toss it. In fact, we
460 * need to toss every packet with the EOP bit clear and the
461 * next frame that _does_ have the EOP bit set, as it is by
462 * definition only a frame fragment
464 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
465 adapter->flags2 |= FLAG2_IS_DISCARDING;
467 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
468 /* All receives must fit into a single buffer */
469 e_dbg("Receive packet consumed multiple buffers\n");
471 buffer_info->skb = skb;
472 if (status & E1000_RXD_STAT_EOP)
473 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
477 if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
479 buffer_info->skb = skb;
483 /* adjust length to remove Ethernet CRC */
484 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
487 total_rx_bytes += length;
491 * code added for copybreak, this should improve
492 * performance for small packets with large amounts
493 * of reassembly being done in the stack
495 if (length < copybreak) {
496 struct sk_buff *new_skb =
497 netdev_alloc_skb_ip_align(netdev, length);
499 skb_copy_to_linear_data_offset(new_skb,
505 /* save the skb in buffer_info as good */
506 buffer_info->skb = skb;
509 /* else just continue with the old one */
511 /* end copybreak code */
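/*
 * copybreak is a module parameter (256 bytes unless overridden):
 * frames shorter than it are copied into a freshly allocated skb
 * sized to the packet so the original full-sized receive buffer can
 * be handed straight back to the hardware.
 */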
512 skb_put(skb, length);
514 /* Receive Checksum Offload */
515 e1000_rx_checksum(adapter,
517 ((u32)(rx_desc->errors) << 24),
518 le16_to_cpu(rx_desc->csum), skb);
520 e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);
525 /* return some buffers to hardware, one at a time is too slow */
526 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
527 adapter->alloc_rx_buf(adapter, cleaned_count);
531 /* use prefetched values */
533 buffer_info = next_buffer;
535 rx_ring->next_to_clean = i;
537 cleaned_count = e1000_desc_unused(rx_ring);
539 adapter->alloc_rx_buf(adapter, cleaned_count);
541 adapter->total_rx_bytes += total_rx_bytes;
542 adapter->total_rx_packets += total_rx_packets;
543 netdev->stats.rx_bytes += total_rx_bytes;
544 netdev->stats.rx_packets += total_rx_packets;
548 static void e1000_put_txbuf(struct e1000_adapter *adapter,
549 struct e1000_buffer *buffer_info)
551 if (buffer_info->dma) {
552 if (buffer_info->mapped_as_page)
553 pci_unmap_page(adapter->pdev, buffer_info->dma,
554 buffer_info->length, PCI_DMA_TODEVICE);
556 pci_unmap_single(adapter->pdev, buffer_info->dma,
559 buffer_info->dma = 0;
561 if (buffer_info->skb) {
562 dev_kfree_skb_any(buffer_info->skb);
563 buffer_info->skb = NULL;
565 buffer_info->time_stamp = 0;
568 static void e1000_print_hw_hang(struct work_struct *work)
570 struct e1000_adapter *adapter = container_of(work,
571 struct e1000_adapter,
573 struct e1000_ring *tx_ring = adapter->tx_ring;
574 unsigned int i = tx_ring->next_to_clean;
575 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
576 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
577 struct e1000_hw *hw = &adapter->hw;
578 u16 phy_status, phy_1000t_status, phy_ext_status;
581 e1e_rphy(hw, PHY_STATUS, &phy_status);
582 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
583 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
585 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
587 /* detected Hardware unit hang */
588 e_err("Detected Hardware Unit Hang:\n"
591 " next_to_use <%x>\n"
592 " next_to_clean <%x>\n"
593 "buffer_info[next_to_clean]:\n"
594 " time_stamp <%lx>\n"
595 " next_to_watch <%x>\n"
597 " next_to_watch.status <%x>\n"
600 "PHY 1000BASE-T Status <%x>\n"
601 "PHY Extended Status <%x>\n"
603 readl(adapter->hw.hw_addr + tx_ring->head),
604 readl(adapter->hw.hw_addr + tx_ring->tail),
605 tx_ring->next_to_use,
606 tx_ring->next_to_clean,
607 tx_ring->buffer_info[eop].time_stamp,
610 eop_desc->upper.fields.status,
619 * e1000_clean_tx_irq - Reclaim resources after transmit completes
620 * @adapter: board private structure
622 * the return value indicates whether actual cleaning was done; there
623 * is no guarantee that everything was cleaned
625 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
627 struct net_device *netdev = adapter->netdev;
628 struct e1000_hw *hw = &adapter->hw;
629 struct e1000_ring *tx_ring = adapter->tx_ring;
630 struct e1000_tx_desc *tx_desc, *eop_desc;
631 struct e1000_buffer *buffer_info;
633 unsigned int count = 0;
634 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
636 i = tx_ring->next_to_clean;
637 eop = tx_ring->buffer_info[i].next_to_watch;
638 eop_desc = E1000_TX_DESC(*tx_ring, eop);
640 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
641 (count < tx_ring->count)) {
642 bool cleaned = false;
643 for (; !cleaned; count++) {
644 tx_desc = E1000_TX_DESC(*tx_ring, i);
645 buffer_info = &tx_ring->buffer_info[i];
646 cleaned = (i == eop);
649 struct sk_buff *skb = buffer_info->skb;
650 unsigned int segs, bytecount;
651 segs = skb_shinfo(skb)->gso_segs ?: 1;
652 /* multiply data chunks by size of headers */
653 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
655 total_tx_packets += segs;
656 total_tx_bytes += bytecount;
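/*
 * For a TSO skb, gso_segs is the number of on-wire segments; each
 * segment after the first repeats the protocol headers (skb_headlen
 * bytes), so the true wire byte count is
 * (segs - 1) * skb_headlen(skb) + skb->len.
 */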
659 e1000_put_txbuf(adapter, buffer_info);
660 tx_desc->upper.data = 0;
663 if (i == tx_ring->count)
667 eop = tx_ring->buffer_info[i].next_to_watch;
668 eop_desc = E1000_TX_DESC(*tx_ring, eop);
671 tx_ring->next_to_clean = i;
673 #define TX_WAKE_THRESHOLD 32
674 if (count && netif_carrier_ok(netdev) &&
675 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
676 /* Make sure that anybody stopping the queue after this
677 * sees the new next_to_clean.
681 if (netif_queue_stopped(netdev) &&
682 !(test_bit(__E1000_DOWN, &adapter->state))) {
683 netif_wake_queue(netdev);
684 ++adapter->restart_queue;
688 if (adapter->detect_tx_hung) {
690 * Detect a transmit hang in hardware, this serializes the
691 * check with the clearing of time_stamp and movement of i
693 adapter->detect_tx_hung = 0;
694 if (tx_ring->buffer_info[i].time_stamp &&
695 time_after(jiffies, tx_ring->buffer_info[i].time_stamp
696 + (adapter->tx_timeout_factor * HZ)) &&
697 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
698 schedule_work(&adapter->print_hang_task);
699 netif_stop_queue(netdev);
702 adapter->total_tx_bytes += total_tx_bytes;
703 adapter->total_tx_packets += total_tx_packets;
704 netdev->stats.tx_bytes += total_tx_bytes;
705 netdev->stats.tx_packets += total_tx_packets;
706 return (count < tx_ring->count);
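/*
 * Returning false means the loop stopped at its count == tx_ring->count
 * limit, i.e. there may still be completed descriptors left to reclaim;
 * e1000_clean() and the Tx MSI-X handler use this to keep polling or to
 * fire another interrupt.
 */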
710 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
711 * @adapter: board private structure
713 * the return value indicates whether actual cleaning was done; there
714 * is no guarantee that everything was cleaned
716 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
717 int *work_done, int work_to_do)
719 struct e1000_hw *hw = &adapter->hw;
720 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
721 struct net_device *netdev = adapter->netdev;
722 struct pci_dev *pdev = adapter->pdev;
723 struct e1000_ring *rx_ring = adapter->rx_ring;
724 struct e1000_buffer *buffer_info, *next_buffer;
725 struct e1000_ps_page *ps_page;
729 int cleaned_count = 0;
731 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
733 i = rx_ring->next_to_clean;
734 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
735 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
736 buffer_info = &rx_ring->buffer_info[i];
738 while (staterr & E1000_RXD_STAT_DD) {
739 if (*work_done >= work_to_do)
742 skb = buffer_info->skb;
744 /* in the packet split case this is header only */
745 prefetch(skb->data - NET_IP_ALIGN);
748 if (i == rx_ring->count)
750 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
753 next_buffer = &rx_ring->buffer_info[i];
757 pci_unmap_single(pdev, buffer_info->dma,
758 adapter->rx_ps_bsize0,
760 buffer_info->dma = 0;
762 /* see !EOP comment in other rx routine */
763 if (!(staterr & E1000_RXD_STAT_EOP))
764 adapter->flags2 |= FLAG2_IS_DISCARDING;
766 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
767 e_dbg("Packet Split buffers didn't pick up the full packet\n");
769 dev_kfree_skb_irq(skb);
770 if (staterr & E1000_RXD_STAT_EOP)
771 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
775 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
776 dev_kfree_skb_irq(skb);
780 length = le16_to_cpu(rx_desc->wb.middle.length0);
783 e_dbg("Last part of the packet spanning multiple descriptors\n");
785 dev_kfree_skb_irq(skb);
790 skb_put(skb, length);
794 * this looks ugly, but it seems compiler issues make it
795 * more efficient than reusing j
797 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
800 * page alloc/put takes too long and affects small packet
801 * throughput, so unsplit small packets and save the alloc/put;
802 * kmap_* is only valid to call in softirq (napi) context
804 if (l1 && (l1 <= copybreak) &&
805 ((length + l1) <= adapter->rx_ps_bsize0)) {
808 ps_page = &buffer_info->ps_pages[0];
811 * there is no documentation about how to call
812 * kmap_atomic, so we can't hold the mapping very long
815 pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
816 PAGE_SIZE, PCI_DMA_FROMDEVICE);
817 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
818 memcpy(skb_tail_pointer(skb), vaddr, l1);
819 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
820 pci_dma_sync_single_for_device(pdev, ps_page->dma,
821 PAGE_SIZE, PCI_DMA_FROMDEVICE);
824 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
832 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
833 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
837 ps_page = &buffer_info->ps_pages[j];
838 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
841 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
842 ps_page->page = NULL;
844 skb->data_len += length;
845 skb->truesize += length;
848 /* strip the ethernet CRC; the problem is we're using pages now, so
849 * this whole operation can get a little CPU intensive
851 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
852 pskb_trim(skb, skb->len - 4);
855 total_rx_bytes += skb->len;
858 e1000_rx_checksum(adapter, staterr, le16_to_cpu(
859 rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
861 if (rx_desc->wb.upper.header_status &
862 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
863 adapter->rx_hdr_split++;
865 e1000_receive_skb(adapter, netdev, skb,
866 staterr, rx_desc->wb.middle.vlan);
869 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
870 buffer_info->skb = NULL;
872 /* return some buffers to hardware, one at a time is too slow */
873 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
874 adapter->alloc_rx_buf(adapter, cleaned_count);
878 /* use prefetched values */
880 buffer_info = next_buffer;
882 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
884 rx_ring->next_to_clean = i;
886 cleaned_count = e1000_desc_unused(rx_ring);
888 adapter->alloc_rx_buf(adapter, cleaned_count);
890 adapter->total_rx_bytes += total_rx_bytes;
891 adapter->total_rx_packets += total_rx_packets;
892 netdev->stats.rx_bytes += total_rx_bytes;
893 netdev->stats.rx_packets += total_rx_packets;
898 * e1000_consume_page - account a received page fragment against an skb
900 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
905 skb->data_len += length;
906 skb->truesize += length;
910 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
911 * @adapter: board private structure
913 * the return value indicates whether actual cleaning was done; there
914 * is no guarantee that everything was cleaned
917 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
918 int *work_done, int work_to_do)
920 struct net_device *netdev = adapter->netdev;
921 struct pci_dev *pdev = adapter->pdev;
922 struct e1000_ring *rx_ring = adapter->rx_ring;
923 struct e1000_rx_desc *rx_desc, *next_rxd;
924 struct e1000_buffer *buffer_info, *next_buffer;
927 int cleaned_count = 0;
928 bool cleaned = false;
929 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
931 i = rx_ring->next_to_clean;
932 rx_desc = E1000_RX_DESC(*rx_ring, i);
933 buffer_info = &rx_ring->buffer_info[i];
935 while (rx_desc->status & E1000_RXD_STAT_DD) {
939 if (*work_done >= work_to_do)
943 status = rx_desc->status;
944 skb = buffer_info->skb;
945 buffer_info->skb = NULL;
948 if (i == rx_ring->count)
950 next_rxd = E1000_RX_DESC(*rx_ring, i);
953 next_buffer = &rx_ring->buffer_info[i];
957 pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
959 buffer_info->dma = 0;
961 length = le16_to_cpu(rx_desc->length);
963 /* errors is only valid for DD + EOP descriptors */
964 if (unlikely((status & E1000_RXD_STAT_EOP) &&
965 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
966 /* recycle both page and skb */
967 buffer_info->skb = skb;
968 /* an error means any chain goes out the window too */
970 if (rx_ring->rx_skb_top)
971 dev_kfree_skb(rx_ring->rx_skb_top);
972 rx_ring->rx_skb_top = NULL;
976 #define rxtop rx_ring->rx_skb_top
977 if (!(status & E1000_RXD_STAT_EOP)) {
978 /* this descriptor is only the beginning (or middle) */
980 /* this is the beginning of a chain */
982 skb_fill_page_desc(rxtop, 0, buffer_info->page,
985 /* this is the middle of a chain */
986 skb_fill_page_desc(rxtop,
987 skb_shinfo(rxtop)->nr_frags,
988 buffer_info->page, 0, length);
989 /* re-use the skb, only consumed the page */
990 buffer_info->skb = skb;
992 e1000_consume_page(buffer_info, rxtop, length);
996 /* end of the chain */
997 skb_fill_page_desc(rxtop,
998 skb_shinfo(rxtop)->nr_frags,
999 buffer_info->page, 0, length);
1000 /* re-use the current skb, we only consumed the page */
1002 buffer_info->skb = skb;
1005 e1000_consume_page(buffer_info, skb, length);
1007 /* no chain, got EOP, this buf holds the whole packet;
1008 * use copybreak to save the put_page/alloc_page */
1009 if (length <= copybreak &&
1010 skb_tailroom(skb) >= length) {
1012 vaddr = kmap_atomic(buffer_info->page,
1013 KM_SKB_DATA_SOFTIRQ);
1014 memcpy(skb_tail_pointer(skb), vaddr,
1016 kunmap_atomic(vaddr,
1017 KM_SKB_DATA_SOFTIRQ);
1018 /* re-use the page, so don't erase
1019 * buffer_info->page */
1020 skb_put(skb, length);
1022 skb_fill_page_desc(skb, 0,
1023 buffer_info->page, 0,
1025 e1000_consume_page(buffer_info, skb,
1031 /* Receive Checksum Offload XXX recompute due to CRC strip? */
1032 e1000_rx_checksum(adapter,
1034 ((u32)(rx_desc->errors) << 24),
1035 le16_to_cpu(rx_desc->csum), skb);
1037 /* probably a little skewed due to removing CRC */
1038 total_rx_bytes += skb->len;
1041 /* eth type trans needs skb->data to point to something */
1042 if (!pskb_may_pull(skb, ETH_HLEN)) {
1043 e_err("pskb_may_pull failed.\n");
1048 e1000_receive_skb(adapter, netdev, skb, status,
1052 rx_desc->status = 0;
1054 /* return some buffers to hardware, one at a time is too slow */
1055 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1056 adapter->alloc_rx_buf(adapter, cleaned_count);
1060 /* use prefetched values */
1062 buffer_info = next_buffer;
1064 rx_ring->next_to_clean = i;
1066 cleaned_count = e1000_desc_unused(rx_ring);
1068 adapter->alloc_rx_buf(adapter, cleaned_count);
1070 adapter->total_rx_bytes += total_rx_bytes;
1071 adapter->total_rx_packets += total_rx_packets;
1072 netdev->stats.rx_bytes += total_rx_bytes;
1073 netdev->stats.rx_packets += total_rx_packets;
1078 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1079 * @adapter: board private structure
1081 static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1083 struct e1000_ring *rx_ring = adapter->rx_ring;
1084 struct e1000_buffer *buffer_info;
1085 struct e1000_ps_page *ps_page;
1086 struct pci_dev *pdev = adapter->pdev;
1089 /* Free all the Rx ring sk_buffs */
1090 for (i = 0; i < rx_ring->count; i++) {
1091 buffer_info = &rx_ring->buffer_info[i];
1092 if (buffer_info->dma) {
1093 if (adapter->clean_rx == e1000_clean_rx_irq)
1094 pci_unmap_single(pdev, buffer_info->dma,
1095 adapter->rx_buffer_len,
1096 PCI_DMA_FROMDEVICE);
1097 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1098 pci_unmap_page(pdev, buffer_info->dma,
1100 PCI_DMA_FROMDEVICE);
1101 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1102 pci_unmap_single(pdev, buffer_info->dma,
1103 adapter->rx_ps_bsize0,
1104 PCI_DMA_FROMDEVICE);
1105 buffer_info->dma = 0;
1108 if (buffer_info->page) {
1109 put_page(buffer_info->page);
1110 buffer_info->page = NULL;
1113 if (buffer_info->skb) {
1114 dev_kfree_skb(buffer_info->skb);
1115 buffer_info->skb = NULL;
1118 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1119 ps_page = &buffer_info->ps_pages[j];
1122 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
1123 PCI_DMA_FROMDEVICE);
1125 put_page(ps_page->page);
1126 ps_page->page = NULL;
1130 /* there also may be some cached data from a chained receive */
1131 if (rx_ring->rx_skb_top) {
1132 dev_kfree_skb(rx_ring->rx_skb_top);
1133 rx_ring->rx_skb_top = NULL;
1136 /* Zero out the descriptor ring */
1137 memset(rx_ring->desc, 0, rx_ring->size);
1139 rx_ring->next_to_clean = 0;
1140 rx_ring->next_to_use = 0;
1141 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1143 writel(0, adapter->hw.hw_addr + rx_ring->head);
1144 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1147 static void e1000e_downshift_workaround(struct work_struct *work)
1149 struct e1000_adapter *adapter = container_of(work,
1150 struct e1000_adapter, downshift_task);
1152 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1156 * e1000_intr_msi - Interrupt Handler
1157 * @irq: interrupt number
1158 * @data: pointer to a network interface device structure
1160 static irqreturn_t e1000_intr_msi(int irq, void *data)
1162 struct net_device *netdev = data;
1163 struct e1000_adapter *adapter = netdev_priv(netdev);
1164 struct e1000_hw *hw = &adapter->hw;
1165 u32 icr = er32(ICR);
1168 * read ICR disables interrupts using IAM
1171 if (icr & E1000_ICR_LSC) {
1172 hw->mac.get_link_status = 1;
1174 * ICH8 workaround-- Call gig speed drop workaround on cable
1175 * disconnect (LSC) before accessing any PHY registers
1177 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1178 (!(er32(STATUS) & E1000_STATUS_LU)))
1179 schedule_work(&adapter->downshift_task);
1182 * 80003ES2LAN workaround-- For packet buffer work-around on
1183 * link down event; disable receives here in the ISR and reset
1184 * adapter in watchdog
1186 if (netif_carrier_ok(netdev) &&
1187 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1188 /* disable receives */
1189 u32 rctl = er32(RCTL);
1190 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1191 adapter->flags |= FLAG_RX_RESTART_NOW;
1193 /* guard against interrupt when we're going down */
1194 if (!test_bit(__E1000_DOWN, &adapter->state))
1195 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1198 if (napi_schedule_prep(&adapter->napi)) {
1199 adapter->total_tx_bytes = 0;
1200 adapter->total_tx_packets = 0;
1201 adapter->total_rx_bytes = 0;
1202 adapter->total_rx_packets = 0;
1203 __napi_schedule(&adapter->napi);
1210 * e1000_intr - Interrupt Handler
1211 * @irq: interrupt number
1212 * @data: pointer to a network interface device structure
1214 static irqreturn_t e1000_intr(int irq, void *data)
1216 struct net_device *netdev = data;
1217 struct e1000_adapter *adapter = netdev_priv(netdev);
1218 struct e1000_hw *hw = &adapter->hw;
1219 u32 rctl, icr = er32(ICR);
1221 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1222 return IRQ_NONE; /* Not our interrupt */
1225 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1226 * not set, then the adapter didn't send an interrupt
1228 if (!(icr & E1000_ICR_INT_ASSERTED))
1232 * Interrupt Auto-Mask...upon reading ICR,
1233 * interrupts are masked. No need for the IMC write
1237 if (icr & E1000_ICR_LSC) {
1238 hw->mac.get_link_status = 1;
1240 * ICH8 workaround-- Call gig speed drop workaround on cable
1241 * disconnect (LSC) before accessing any PHY registers
1243 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1244 (!(er32(STATUS) & E1000_STATUS_LU)))
1245 schedule_work(&adapter->downshift_task);
1248 * 80003ES2LAN workaround--
1249 * For packet buffer work-around on link down event;
1250 * disable receives here in the ISR and
1251 * reset adapter in watchdog
1253 if (netif_carrier_ok(netdev) &&
1254 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1255 /* disable receives */
1257 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1258 adapter->flags |= FLAG_RX_RESTART_NOW;
1260 /* guard against interrupt when we're going down */
1261 if (!test_bit(__E1000_DOWN, &adapter->state))
1262 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1265 if (napi_schedule_prep(&adapter->napi)) {
1266 adapter->total_tx_bytes = 0;
1267 adapter->total_tx_packets = 0;
1268 adapter->total_rx_bytes = 0;
1269 adapter->total_rx_packets = 0;
1270 __napi_schedule(&adapter->napi);
1276 static irqreturn_t e1000_msix_other(int irq, void *data)
1278 struct net_device *netdev = data;
1279 struct e1000_adapter *adapter = netdev_priv(netdev);
1280 struct e1000_hw *hw = &adapter->hw;
1281 u32 icr = er32(ICR);
1283 if (!(icr & E1000_ICR_INT_ASSERTED)) {
1284 if (!test_bit(__E1000_DOWN, &adapter->state))
1285 ew32(IMS, E1000_IMS_OTHER);
1289 if (icr & adapter->eiac_mask)
1290 ew32(ICS, (icr & adapter->eiac_mask));
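/*
 * Writing the queue-cause bits back into ICS (Interrupt Cause Set)
 * re-asserts them so the Rx/Tx MSI-X vectors receive their own
 * interrupts; this "other" vector only handles link-change and
 * similar causes directly.
 */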
1292 if (icr & E1000_ICR_OTHER) {
1293 if (!(icr & E1000_ICR_LSC))
1294 goto no_link_interrupt;
1295 hw->mac.get_link_status = 1;
1296 /* guard against interrupt when we're going down */
1297 if (!test_bit(__E1000_DOWN, &adapter->state))
1298 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1302 if (!test_bit(__E1000_DOWN, &adapter->state))
1303 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1309 static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1311 struct net_device *netdev = data;
1312 struct e1000_adapter *adapter = netdev_priv(netdev);
1313 struct e1000_hw *hw = &adapter->hw;
1314 struct e1000_ring *tx_ring = adapter->tx_ring;
1317 adapter->total_tx_bytes = 0;
1318 adapter->total_tx_packets = 0;
1320 if (!e1000_clean_tx_irq(adapter))
1321 /* Ring was not completely cleaned, so fire another interrupt */
1322 ew32(ICS, tx_ring->ims_val);
1327 static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1329 struct net_device *netdev = data;
1330 struct e1000_adapter *adapter = netdev_priv(netdev);
1332 /* Write the ITR value calculated at the end of the
1333 * previous interrupt.
1335 if (adapter->rx_ring->set_itr) {
1336 writel(1000000000 / (adapter->rx_ring->itr_val * 256),
1337 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
1338 adapter->rx_ring->set_itr = 0;
1341 if (napi_schedule_prep(&adapter->napi)) {
1342 adapter->total_rx_bytes = 0;
1343 adapter->total_rx_packets = 0;
1344 __napi_schedule(&adapter->napi);
1350 * e1000_configure_msix - Configure MSI-X hardware
1352 * e1000_configure_msix sets up the hardware to properly
1353 * generate MSI-X interrupts.
1355 static void e1000_configure_msix(struct e1000_adapter *adapter)
1357 struct e1000_hw *hw = &adapter->hw;
1358 struct e1000_ring *rx_ring = adapter->rx_ring;
1359 struct e1000_ring *tx_ring = adapter->tx_ring;
1361 u32 ctrl_ext, ivar = 0;
1363 adapter->eiac_mask = 0;
1365 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1366 if (hw->mac.type == e1000_82574) {
1367 u32 rfctl = er32(RFCTL);
1368 rfctl |= E1000_RFCTL_ACK_DIS;
1372 #define E1000_IVAR_INT_ALLOC_VALID 0x8
1373 /* Configure Rx vector */
1374 rx_ring->ims_val = E1000_IMS_RXQ0;
1375 adapter->eiac_mask |= rx_ring->ims_val;
1376 if (rx_ring->itr_val)
1377 writel(1000000000 / (rx_ring->itr_val * 256),
1378 hw->hw_addr + rx_ring->itr_register);
1380 writel(1, hw->hw_addr + rx_ring->itr_register);
1381 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1383 /* Configure Tx vector */
1384 tx_ring->ims_val = E1000_IMS_TXQ0;
1386 if (tx_ring->itr_val)
1387 writel(1000000000 / (tx_ring->itr_val * 256),
1388 hw->hw_addr + tx_ring->itr_register);
1390 writel(1, hw->hw_addr + tx_ring->itr_register);
1391 adapter->eiac_mask |= tx_ring->ims_val;
1392 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1394 /* set vector for Other Causes, e.g. link changes */
1396 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1397 if (rx_ring->itr_val)
1398 writel(1000000000 / (rx_ring->itr_val * 256),
1399 hw->hw_addr + E1000_EITR_82574(vector));
1401 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1403 /* Cause Tx interrupts on every write back */
1408 /* enable MSI-X PBA support */
1409 ctrl_ext = er32(CTRL_EXT);
1410 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1412 /* Auto-Mask Other interrupts upon ICR read */
1413 #define E1000_EIAC_MASK_82574 0x01F00000
1414 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1415 ctrl_ext |= E1000_CTRL_EXT_EIAME;
1416 ew32(CTRL_EXT, ctrl_ext);
1420 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1422 if (adapter->msix_entries) {
1423 pci_disable_msix(adapter->pdev);
1424 kfree(adapter->msix_entries);
1425 adapter->msix_entries = NULL;
1426 } else if (adapter->flags & FLAG_MSI_ENABLED) {
1427 pci_disable_msi(adapter->pdev);
1428 adapter->flags &= ~FLAG_MSI_ENABLED;
1435 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1437 * Attempt to configure interrupts using the best available
1438 * capabilities of the hardware and kernel.
1440 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1446 switch (adapter->int_mode) {
1447 case E1000E_INT_MODE_MSIX:
1448 if (adapter->flags & FLAG_HAS_MSIX) {
1449 numvecs = 3; /* RxQ0, TxQ0 and other */
1450 adapter->msix_entries = kcalloc(numvecs,
1451 sizeof(struct msix_entry),
1453 if (adapter->msix_entries) {
1454 for (i = 0; i < numvecs; i++)
1455 adapter->msix_entries[i].entry = i;
1457 err = pci_enable_msix(adapter->pdev,
1458 adapter->msix_entries,
1463 /* MSI-X failed, so fall through and try MSI */
1464 e_err("Failed to initialize MSI-X interrupts. "
1465 "Falling back to MSI interrupts.\n");
1466 e1000e_reset_interrupt_capability(adapter);
1468 adapter->int_mode = E1000E_INT_MODE_MSI;
1470 case E1000E_INT_MODE_MSI:
1471 if (!pci_enable_msi(adapter->pdev)) {
1472 adapter->flags |= FLAG_MSI_ENABLED;
1474 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1475 e_err("Failed to initialize MSI interrupts. Falling "
1476 "back to legacy interrupts.\n");
1479 case E1000E_INT_MODE_LEGACY:
1480 /* Don't do anything; this is the system default */
1488 * e1000_request_msix - Initialize MSI-X interrupts
1490 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the kernel.
1493 static int e1000_request_msix(struct e1000_adapter *adapter)
1495 struct net_device *netdev = adapter->netdev;
1496 int err = 0, vector = 0;
1498 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1499 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1501 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1502 err = request_irq(adapter->msix_entries[vector].vector,
1503 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
1507 adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
1508 adapter->rx_ring->itr_val = adapter->itr;
1511 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1512 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1514 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1515 err = request_irq(adapter->msix_entries[vector].vector,
1516 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
1520 adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
1521 adapter->tx_ring->itr_val = adapter->itr;
1524 err = request_irq(adapter->msix_entries[vector].vector,
1525 e1000_msix_other, 0, netdev->name, netdev);
1529 e1000_configure_msix(adapter);
1536 * e1000_request_irq - initialize interrupts
1538 * Attempts to configure interrupts using the best available
1539 * capabilities of the hardware and kernel.
1541 static int e1000_request_irq(struct e1000_adapter *adapter)
1543 struct net_device *netdev = adapter->netdev;
1546 if (adapter->msix_entries) {
1547 err = e1000_request_msix(adapter);
1550 /* fall back to MSI */
1551 e1000e_reset_interrupt_capability(adapter);
1552 adapter->int_mode = E1000E_INT_MODE_MSI;
1553 e1000e_set_interrupt_capability(adapter);
1555 if (adapter->flags & FLAG_MSI_ENABLED) {
1556 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
1557 netdev->name, netdev);
1561 /* fall back to legacy interrupt */
1562 e1000e_reset_interrupt_capability(adapter);
1563 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1566 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
1567 netdev->name, netdev);
1569 e_err("Unable to allocate interrupt, Error: %d\n", err);
1574 static void e1000_free_irq(struct e1000_adapter *adapter)
1576 struct net_device *netdev = adapter->netdev;
1578 if (adapter->msix_entries) {
1581 free_irq(adapter->msix_entries[vector].vector, netdev);
1584 free_irq(adapter->msix_entries[vector].vector, netdev);
1587 /* Other Causes interrupt vector */
1588 free_irq(adapter->msix_entries[vector].vector, netdev);
1592 free_irq(adapter->pdev->irq, netdev);
1596 * e1000_irq_disable - Mask off interrupt generation on the NIC
1598 static void e1000_irq_disable(struct e1000_adapter *adapter)
1600 struct e1000_hw *hw = &adapter->hw;
1603 if (adapter->msix_entries)
1604 ew32(EIAC_82574, 0);
1606 synchronize_irq(adapter->pdev->irq);
1610 * e1000_irq_enable - Enable default interrupt generation settings
1612 static void e1000_irq_enable(struct e1000_adapter *adapter)
1614 struct e1000_hw *hw = &adapter->hw;
1616 if (adapter->msix_entries) {
1617 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
1618 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
1620 ew32(IMS, IMS_ENABLE_MASK);
1626 * e1000_get_hw_control - get control of the h/w from f/w
1627 * @adapter: address of board private structure
1629 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1630 * For ASF and Pass Through versions of f/w this means that
1631 * the driver is loaded. For AMT version (only with 82573)
1632 * of the f/w this means that the network i/f is open.
1634 static void e1000_get_hw_control(struct e1000_adapter *adapter)
1636 struct e1000_hw *hw = &adapter->hw;
1640 /* Let firmware know the driver has taken over */
1641 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
1643 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
1644 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1645 ctrl_ext = er32(CTRL_EXT);
1646 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1651 * e1000_release_hw_control - release control of the h/w to f/w
1652 * @adapter: address of board private structure
1654 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1655 * For ASF and Pass Through versions of f/w this means that the
1656 * driver is no longer loaded. For AMT version (only with 82573)
1657 * of the f/w this means that the network i/f is closed.
1660 static void e1000_release_hw_control(struct e1000_adapter *adapter)
1662 struct e1000_hw *hw = &adapter->hw;
1666 /* Let firmware take over control of h/w */
1667 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
1669 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
1670 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1671 ctrl_ext = er32(CTRL_EXT);
1672 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1677 * e1000_alloc_ring_dma - allocate DMA-coherent memory for a descriptor ring
1679 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
1680 struct e1000_ring *ring)
1682 struct pci_dev *pdev = adapter->pdev;
1684 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
1693 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
1694 * @adapter: board private structure
1696 * Return 0 on success, negative on failure
1698 int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1700 struct e1000_ring *tx_ring = adapter->tx_ring;
1701 int err = -ENOMEM, size;
1703 size = sizeof(struct e1000_buffer) * tx_ring->count;
1704 tx_ring->buffer_info = vmalloc(size);
1705 if (!tx_ring->buffer_info)
1707 memset(tx_ring->buffer_info, 0, size);
1709 /* round up to nearest 4K */
1710 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1711 tx_ring->size = ALIGN(tx_ring->size, 4096);
1713 err = e1000_alloc_ring_dma(adapter, tx_ring);
1717 tx_ring->next_to_use = 0;
1718 tx_ring->next_to_clean = 0;
1722 vfree(tx_ring->buffer_info);
1723 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1728 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
1729 * @adapter: board private structure
1731 * Returns 0 on success, negative on failure
1733 int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
1735 struct e1000_ring *rx_ring = adapter->rx_ring;
1736 struct e1000_buffer *buffer_info;
1737 int i, size, desc_len, err = -ENOMEM;
1739 size = sizeof(struct e1000_buffer) * rx_ring->count;
1740 rx_ring->buffer_info = vmalloc(size);
1741 if (!rx_ring->buffer_info)
1743 memset(rx_ring->buffer_info, 0, size);
1745 for (i = 0; i < rx_ring->count; i++) {
1746 buffer_info = &rx_ring->buffer_info[i];
1747 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
1748 sizeof(struct e1000_ps_page),
1750 if (!buffer_info->ps_pages)
1754 desc_len = sizeof(union e1000_rx_desc_packet_split);
1756 /* Round up to nearest 4K */
1757 rx_ring->size = rx_ring->count * desc_len;
1758 rx_ring->size = ALIGN(rx_ring->size, 4096);
1760 err = e1000_alloc_ring_dma(adapter, rx_ring);
1764 rx_ring->next_to_clean = 0;
1765 rx_ring->next_to_use = 0;
1766 rx_ring->rx_skb_top = NULL;
1771 for (i = 0; i < rx_ring->count; i++) {
1772 buffer_info = &rx_ring->buffer_info[i];
1773 kfree(buffer_info->ps_pages);
1776 vfree(rx_ring->buffer_info);
1777 e_err("Unable to allocate memory for the receive descriptor ring\n");
1782 * e1000_clean_tx_ring - Free Tx Buffers
1783 * @adapter: board private structure
1785 static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
1787 struct e1000_ring *tx_ring = adapter->tx_ring;
1788 struct e1000_buffer *buffer_info;
1792 for (i = 0; i < tx_ring->count; i++) {
1793 buffer_info = &tx_ring->buffer_info[i];
1794 e1000_put_txbuf(adapter, buffer_info);
1797 size = sizeof(struct e1000_buffer) * tx_ring->count;
1798 memset(tx_ring->buffer_info, 0, size);
1800 memset(tx_ring->desc, 0, tx_ring->size);
1802 tx_ring->next_to_use = 0;
1803 tx_ring->next_to_clean = 0;
1805 writel(0, adapter->hw.hw_addr + tx_ring->head);
1806 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1810 * e1000e_free_tx_resources - Free Tx Resources per Queue
1811 * @adapter: board private structure
1813 * Free all transmit software resources
1815 void e1000e_free_tx_resources(struct e1000_adapter *adapter)
1817 struct pci_dev *pdev = adapter->pdev;
1818 struct e1000_ring *tx_ring = adapter->tx_ring;
1820 e1000_clean_tx_ring(adapter);
1822 vfree(tx_ring->buffer_info);
1823 tx_ring->buffer_info = NULL;
1825 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1827 tx_ring->desc = NULL;
1831 * e1000e_free_rx_resources - Free Rx Resources
1832 * @adapter: board private structure
1834 * Free all receive software resources
1837 void e1000e_free_rx_resources(struct e1000_adapter *adapter)
1839 struct pci_dev *pdev = adapter->pdev;
1840 struct e1000_ring *rx_ring = adapter->rx_ring;
1843 e1000_clean_rx_ring(adapter);
1845 for (i = 0; i < rx_ring->count; i++) {
1846 kfree(rx_ring->buffer_info[i].ps_pages);
1849 vfree(rx_ring->buffer_info);
1850 rx_ring->buffer_info = NULL;
1852 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1854 rx_ring->desc = NULL;
1858 * e1000_update_itr - update the dynamic ITR value based on statistics
1859 * @adapter: pointer to adapter
1860 * @itr_setting: current adapter->itr
1861 * @packets: the number of packets during this measurement interval
1862 * @bytes: the number of bytes during this measurement interval
1864 * Stores a new ITR value based on packets and byte
1865 * counts during the last interrupt. The advantage of per interrupt
1866 * computation is faster updates and more accurate ITR for the current
1867 * traffic pattern. Constants in this function were computed
1868 * based on theoretical maximum wire speed and thresholds were set based
1869 * on testing data as well as attempting to minimize response time
1870 * while increasing bulk throughput. This functionality is controlled
1871 * by the InterruptThrottleRate module parameter.
1873 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
1874 u16 itr_setting, int packets,
1877 unsigned int retval = itr_setting;
1880 goto update_itr_done;
1882 switch (itr_setting) {
1883 case lowest_latency:
1884 /* handle TSO and jumbo frames */
1885 if (bytes/packets > 8000)
1886 retval = bulk_latency;
1887 else if ((packets < 5) && (bytes > 512)) {
1888 retval = low_latency;
1891 case low_latency: /* 50 usec aka 20000 ints/s */
1892 if (bytes > 10000) {
1893 /* this if handles the TSO accounting */
1894 if (bytes/packets > 8000) {
1895 retval = bulk_latency;
1896 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
1897 retval = bulk_latency;
1898 } else if ((packets > 35)) {
1899 retval = lowest_latency;
1901 } else if (bytes/packets > 2000) {
1902 retval = bulk_latency;
1903 } else if (packets <= 2 && bytes < 512) {
1904 retval = lowest_latency;
1907 case bulk_latency: /* 250 usec aka 4000 ints/s */
1908 if (bytes > 25000) {
1910 retval = low_latency;
1912 } else if (bytes < 6000) {
1913 retval = low_latency;
1922 static void e1000_set_itr(struct e1000_adapter *adapter)
1924 struct e1000_hw *hw = &adapter->hw;
1926 u32 new_itr = adapter->itr;
1928 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
1929 if (adapter->link_speed != SPEED_1000) {
1935 adapter->tx_itr = e1000_update_itr(adapter,
1937 adapter->total_tx_packets,
1938 adapter->total_tx_bytes);
1939 /* conservative mode (itr 3) eliminates the lowest_latency setting */
1940 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
1941 adapter->tx_itr = low_latency;
1943 adapter->rx_itr = e1000_update_itr(adapter,
1945 adapter->total_rx_packets,
1946 adapter->total_rx_bytes);
1947 /* conservative mode (itr 3) eliminates the lowest_latency setting */
1948 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
1949 adapter->rx_itr = low_latency;
1951 current_itr = max(adapter->rx_itr, adapter->tx_itr);
1953 switch (current_itr) {
1954 /* counts and packets in update_itr are dependent on these numbers */
1955 case lowest_latency:
1959 new_itr = 20000; /* aka hwitr = ~200 */
1969 if (new_itr != adapter->itr) {
1971 * this attempts to bias the interrupt rate towards Bulk
1972 * by adding intermediate steps when interrupt rate is increasing
1975 new_itr = new_itr > adapter->itr ?
1976 min(adapter->itr + (new_itr >> 2), new_itr) :
1978 adapter->itr = new_itr;
1979 adapter->rx_ring->itr_val = new_itr;
1980 if (adapter->msix_entries)
1981 adapter->rx_ring->set_itr = 1;
1983 ew32(ITR, 1000000000 / (new_itr * 256));
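/*
 * The ITR register counts in 256 ns units, so 1000000000 / (rate * 256)
 * converts an interrupts-per-second target into a register value;
 * e.g. 20000 ints/s -> ~195, i.e. an interval of roughly 50 us.
 */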
1988 * e1000_alloc_queues - Allocate memory for all rings
1989 * @adapter: board private structure to initialize
1991 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1993 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
1994 if (!adapter->tx_ring)
1997 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
1998 if (!adapter->rx_ring)
2003 e_err("Unable to allocate memory for queues\n");
2004 kfree(adapter->rx_ring);
2005 kfree(adapter->tx_ring);
2010 * e1000_clean - NAPI Rx polling callback
2011 * @napi: struct associated with this polling callback
2012 * @budget: number of packets the driver is allowed to process this poll
2014 static int e1000_clean(struct napi_struct *napi, int budget)
2016 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
2017 struct e1000_hw *hw = &adapter->hw;
2018 struct net_device *poll_dev = adapter->netdev;
2019 int tx_cleaned = 1, work_done = 0;
2021 adapter = netdev_priv(poll_dev);
2023 if (adapter->msix_entries &&
2024 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2027 tx_cleaned = e1000_clean_tx_irq(adapter);
2030 adapter->clean_rx(adapter, &work_done, budget);
2035 /* If budget not fully consumed, exit the polling mode */
2036 if (work_done < budget) {
2037 if (adapter->itr_setting & 3)
2038 e1000_set_itr(adapter);
2039 napi_complete(napi);
2040 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2041 if (adapter->msix_entries)
2042 ew32(IMS, adapter->rx_ring->ims_val);
2044 e1000_irq_enable(adapter);
2051 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2053 struct e1000_adapter *adapter = netdev_priv(netdev);
2054 struct e1000_hw *hw = &adapter->hw;
2057 /* don't update vlan cookie if already programmed */
2058 if ((adapter->hw.mng_cookie.status &
2059 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2060 (vid == adapter->mng_vlan_id))
2063 /* add VID to filter table */
2064 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2065 index = (vid >> 5) & 0x7F;
2066 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2067 vfta |= (1 << (vid & 0x1F));
2068 hw->mac.ops.write_vfta(hw, index, vfta);
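/*
 * The VLAN filter table (VFTA) is 128 32-bit registers, one bit per
 * VLAN ID (128 * 32 = 4096): bits 5-11 of the VID select the register
 * and bits 0-4 select the bit within it, e.g. VID 100 lands in
 * register 3, bit 4.
 */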
2072 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2074 struct e1000_adapter *adapter = netdev_priv(netdev);
2075 struct e1000_hw *hw = &adapter->hw;
2078 if (!test_bit(__E1000_DOWN, &adapter->state))
2079 e1000_irq_disable(adapter);
2080 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2082 if (!test_bit(__E1000_DOWN, &adapter->state))
2083 e1000_irq_enable(adapter);
2085 if ((adapter->hw.mng_cookie.status &
2086 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2087 (vid == adapter->mng_vlan_id)) {
2088 /* release control to f/w */
2089 e1000_release_hw_control(adapter);
2093 /* remove VID from filter table */
2094 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2095 index = (vid >> 5) & 0x7F;
2096 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2097 vfta &= ~(1 << (vid & 0x1F));
2098 hw->mac.ops.write_vfta(hw, index, vfta);
2102 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2104 struct net_device *netdev = adapter->netdev;
2105 u16 vid = adapter->hw.mng_cookie.vlan_id;
2106 u16 old_vid = adapter->mng_vlan_id;
2108 if (!adapter->vlgrp)
2111 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
2112 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2113 if (adapter->hw.mng_cookie.status &
2114 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2115 e1000_vlan_rx_add_vid(netdev, vid);
2116 adapter->mng_vlan_id = vid;
2119 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
2121 !vlan_group_get_device(adapter->vlgrp, old_vid))
2122 e1000_vlan_rx_kill_vid(netdev, old_vid);
2124 adapter->mng_vlan_id = vid;
2129 static void e1000_vlan_rx_register(struct net_device *netdev,
2130 struct vlan_group *grp)
2132 struct e1000_adapter *adapter = netdev_priv(netdev);
2133 struct e1000_hw *hw = &adapter->hw;
2136 if (!test_bit(__E1000_DOWN, &adapter->state))
2137 e1000_irq_disable(adapter);
2138 adapter->vlgrp = grp;
2141 /* enable VLAN tag insert/strip */
2143 ctrl |= E1000_CTRL_VME;
2146 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2147 /* enable VLAN receive filtering */
2149 rctl &= ~E1000_RCTL_CFIEN;
2151 e1000_update_mng_vlan(adapter);
2154 /* disable VLAN tag insert/strip */
2156 ctrl &= ~E1000_CTRL_VME;
2159 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2160 if (adapter->mng_vlan_id !=
2161 (u16)E1000_MNG_VLAN_NONE) {
2162 e1000_vlan_rx_kill_vid(netdev,
2163 adapter->mng_vlan_id);
2164 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2169 if (!test_bit(__E1000_DOWN, &adapter->state))
2170 e1000_irq_enable(adapter);
2173 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2177 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2179 if (!adapter->vlgrp)
2182 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2183 if (!vlan_group_get_device(adapter->vlgrp, vid))
2185 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2189 static void e1000_init_manageability(struct e1000_adapter *adapter)
2191 struct e1000_hw *hw = &adapter->hw;
2194 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2200 * enable receiving management packets to the host. this will probably
2201 * generate destination unreachable messages from the host OS, but
2202 * the packets will be handled on SMBUS
2204 manc |= E1000_MANC_EN_MNG2HOST;
2205 manc2h = er32(MANC2H);
2206 #define E1000_MNG2HOST_PORT_623 (1 << 5)
2207 #define E1000_MNG2HOST_PORT_664 (1 << 6)
2208 manc2h |= E1000_MNG2HOST_PORT_623;
2209 manc2h |= E1000_MNG2HOST_PORT_664;
2210 ew32(MANC2H, manc2h);
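/*
 * Ports 623 and 664 are the standard RMCP/ASF remote-management ports;
 * setting their bits in MANC2H forwards traffic for those ports to the
 * host as well as to the manageability firmware.
 */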
2215 * e1000_configure_tx - Configure Transmit Unit after Reset
2216 * @adapter: board private structure
2218 * Configure the Tx unit of the MAC after a reset.
2220 static void e1000_configure_tx(struct e1000_adapter *adapter)
2222 struct e1000_hw *hw = &adapter->hw;
2223 struct e1000_ring *tx_ring = adapter->tx_ring;
2225 u32 tdlen, tctl, tipg, tarc;
2228 /* Setup the HW Tx Head and Tail descriptor pointers */
2229 tdba = tx_ring->dma;
2230 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2231 ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
2232 ew32(TDBAH, (tdba >> 32));
2236 tx_ring->head = E1000_TDH;
2237 tx_ring->tail = E1000_TDT;
2239 /* Set the default values for the Tx Inter Packet Gap timer */
2240 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2241 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2242 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2244 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2245 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2247 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2248 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
2251 /* Set the Tx Interrupt Delay register */
2252 ew32(TIDV, adapter->tx_int_delay);
2253 /* Tx irq moderation */
2254 ew32(TADV, adapter->tx_abs_int_delay);
2256 /* Program the Transmit Control Register */
2258 tctl &= ~E1000_TCTL_CT;
2259 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2260 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2262 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2263 tarc = er32(TARC(0));
2265 * set the speed mode bit, we'll clear it if we're not at
2266 * gigabit link later
2268 #define SPEED_MODE_BIT (1 << 21)
2269 tarc |= SPEED_MODE_BIT;
2270 ew32(TARC(0), tarc);
2273 /* errata: program both queues to unweighted RR */
2274 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2275 tarc = er32(TARC(0));
2277 ew32(TARC(0), tarc);
2278 tarc = er32(TARC(1));
2280 ew32(TARC(1), tarc);
2283 /* Setup Transmit Descriptor Settings for eop descriptor */
2284 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2286 /* only set IDE if we are delaying interrupts using the timers */
2287 if (adapter->tx_int_delay)
2288 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2290 /* enable Report Status bit */
2291 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2295 e1000e_config_collision_dist(hw);
2299 * e1000_setup_rctl - configure the receive control registers
2300 * @adapter: board private structure
2302 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2303 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
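/*
 * PAGE_USE_COUNT() is simply a round-up division by the page size: with
 * 4 KiB pages, PAGE_USE_COUNT(1500) = 1 and PAGE_USE_COUNT(9000) =
 * (9000 >> 12) + 1 = 3, which is the value compared against the 3-page
 * packet-split limit below.
 */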
2304 static void e1000_setup_rctl(struct e1000_adapter *adapter)
2306 struct e1000_hw *hw = &adapter->hw;
2311 /* Program MC offset vector base */
2313 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2314 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2315 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2316 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2318 /* Do not Store bad packets */
2319 rctl &= ~E1000_RCTL_SBP;
2321 /* Enable Long Packet receive */
2322 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2323 rctl &= ~E1000_RCTL_LPE;
2325 rctl |= E1000_RCTL_LPE;
2327 /* Some systems expect that the CRC is included in SMBUS traffic. The
2328 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2329 * host memory when this is enabled
2331 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2332 rctl |= E1000_RCTL_SECRC;
2334 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2335 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2338 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2340 phy_data |= (1 << 2);
2341 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2343 e1e_rphy(hw, 22, &phy_data);
2345 phy_data |= (1 << 14);
2346 e1e_wphy(hw, 0x10, 0x2823);
2347 e1e_wphy(hw, 0x11, 0x0003);
2348 e1e_wphy(hw, 22, phy_data);
2351 /* Setup buffer sizes */
2352 rctl &= ~E1000_RCTL_SZ_4096;
2353 rctl |= E1000_RCTL_BSEX;
2354 switch (adapter->rx_buffer_len) {
2357 rctl |= E1000_RCTL_SZ_2048;
2358 rctl &= ~E1000_RCTL_BSEX;
2361 rctl |= E1000_RCTL_SZ_4096;
2364 rctl |= E1000_RCTL_SZ_8192;
2367 rctl |= E1000_RCTL_SZ_16384;
2372 * 82571 and greater support packet-split where the protocol
2373 * header is placed in skb->data and the packet data is
2374 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2375 * In the case of a non-split, skb->data is linearly filled,
2376 * followed by the page buffers. Therefore, skb->data is
2377 * sized to hold the largest protocol header.
2379 * allocations using alloc_page take too long for regular MTU
2380 * so only enable packet split for jumbo frames
2382 * Using pages when the page size is greater than 16k wastes
2383 * a lot of memory, since we allocate 3 pages at all times
2386 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2387 if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
2388 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2389 adapter->rx_ps_pages = pages;
2391 adapter->rx_ps_pages = 0;
2393 if (adapter->rx_ps_pages) {
2394 /* Configure extra packet-split registers */
2395 rfctl = er32(RFCTL);
2396 rfctl |= E1000_RFCTL_EXTEN;
2398 * disable packet split support for IPv6 extension headers,
2399 * because some malformed IPv6 headers can hang the Rx
2401 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2402 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2406 /* Enable Packet split descriptors */
2407 rctl |= E1000_RCTL_DTYP_PS;
2409 psrctl |= adapter->rx_ps_bsize0 >>
2410 E1000_PSRCTL_BSIZE0_SHIFT;
2412 switch (adapter->rx_ps_pages) {
2414 psrctl |= PAGE_SIZE <<
2415 E1000_PSRCTL_BSIZE3_SHIFT;
2417 psrctl |= PAGE_SIZE <<
2418 E1000_PSRCTL_BSIZE2_SHIFT;
2420 psrctl |= PAGE_SIZE >>
2421 E1000_PSRCTL_BSIZE1_SHIFT;
2425 ew32(PSRCTL, psrctl);
2429 /* just started the receive unit, no need to restart */
2430 adapter->flags &= ~FLAG_RX_RESTART_NOW;
2434 * e1000_configure_rx - Configure Receive Unit after Reset
2435 * @adapter: board private structure
2437 * Configure the Rx unit of the MAC after a reset.
2439 static void e1000_configure_rx(struct e1000_adapter *adapter)
2441 struct e1000_hw *hw = &adapter->hw;
2442 struct e1000_ring *rx_ring = adapter->rx_ring;
2444 u32 rdlen, rctl, rxcsum, ctrl_ext;
2446 if (adapter->rx_ps_pages) {
2447 /* this is a 32 byte descriptor */
2448 rdlen = rx_ring->count *
2449 sizeof(union e1000_rx_desc_packet_split);
2450 adapter->clean_rx = e1000_clean_rx_irq_ps;
2451 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2452 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
2453 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2454 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
2455 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
2457 rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
2458 adapter->clean_rx = e1000_clean_rx_irq;
2459 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2462 /* disable receives while setting up the descriptors */
2464 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2468 /* set the Receive Delay Timer Register */
2469 ew32(RDTR, adapter->rx_int_delay);
2471 /* irq moderation */
2472 ew32(RADV, adapter->rx_abs_int_delay);
2473 if (adapter->itr_setting != 0)
2474 ew32(ITR, 1000000000 / (adapter->itr * 256));
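/*
 * The ITR register counts in 256 ns units and adapter->itr holds the target
 * rate in interrupts/sec, so the write above is roughly 10^9 / (itr * 256).
 * For example, itr = 20000 gives ITR ~= 195, i.e. about 50 us (1/20000 s)
 * of minimum spacing between interrupts.
 */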
2476 ctrl_ext = er32(CTRL_EXT);
2477 /* Auto-Mask interrupts upon ICR access */
2478 ctrl_ext |= E1000_CTRL_EXT_IAME;
2479 ew32(IAM, 0xffffffff);
2480 ew32(CTRL_EXT, ctrl_ext);
2484 * Setup the HW Rx Head and Tail Descriptor Pointers and
2485 * the Base and Length of the Rx Descriptor Ring
2487 rdba = rx_ring->dma;
2488 ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
2489 ew32(RDBAH, (rdba >> 32));
2493 rx_ring->head = E1000_RDH;
2494 rx_ring->tail = E1000_RDT;
2496 /* Enable Receive Checksum Offload for TCP and UDP */
2497 rxcsum = er32(RXCSUM);
2498 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
2499 rxcsum |= E1000_RXCSUM_TUOFL;
2502 * IPv4 payload checksum for UDP fragments must be
2503 * used in conjunction with packet-split.
2505 if (adapter->rx_ps_pages)
2506 rxcsum |= E1000_RXCSUM_IPPCSE;
2508 rxcsum &= ~E1000_RXCSUM_TUOFL;
2509 /* no need to clear IPPCSE as it defaults to 0 */
2511 ew32(RXCSUM, rxcsum);
2514 * Enable early receives on supported devices, only takes effect when
2515 * packet size is equal to or larger than the specified value (in 8 byte
2516 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
2518 if (adapter->flags & FLAG_HAS_ERT) {
2519 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2520 u32 rxdctl = er32(RXDCTL(0));
2521 ew32(RXDCTL(0), rxdctl | 0x3);
2522 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2524 * With jumbo frames and early-receive enabled,
2525 * excessive C-state transition latencies result in
2526 * dropped transactions.
2528 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2529 adapter->netdev->name, 55);
2531 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2532 adapter->netdev->name,
2533 PM_QOS_DEFAULT_VALUE);
2537 /* Enable Receives */
2542 * e1000_update_mc_addr_list - Update Multicast addresses
2543 * @hw: pointer to the HW structure
2544 * @mc_addr_list: array of multicast addresses to program
2545 * @mc_addr_count: number of multicast addresses to program
2547 * Updates the Multicast Table Array.
2548 * The caller must have a packed mc_addr_list of multicast addresses.
2550 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2553 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
2557 * e1000_set_multi - Multicast and Promiscuous mode set
2558 * @netdev: network interface device structure
2560 * The set_multi entry point is called whenever the multicast address
2561 * list or the network interface flags are updated. This routine is
2562 * responsible for configuring the hardware for proper multicast,
2563 * promiscuous mode, and all-multi behavior.
2565 static void e1000_set_multi(struct net_device *netdev)
2567 struct e1000_adapter *adapter = netdev_priv(netdev);
2568 struct e1000_hw *hw = &adapter->hw;
2569 struct netdev_hw_addr *ha;
2574 /* Check for Promiscuous and All Multicast modes */
2578 if (netdev->flags & IFF_PROMISC) {
2579 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2580 rctl &= ~E1000_RCTL_VFE;
2582 if (netdev->flags & IFF_ALLMULTI) {
2583 rctl |= E1000_RCTL_MPE;
2584 rctl &= ~E1000_RCTL_UPE;
2586 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2588 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
2589 rctl |= E1000_RCTL_VFE;
2594 if (!netdev_mc_empty(netdev)) {
2595 mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
2599 /* prepare a packed array of only addresses. */
2601 netdev_for_each_mc_addr(ha, netdev)
2602 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
2604 e1000_update_mc_addr_list(hw, mta_list, i);
2608 * if we're called from probe, we might not have
2609 * anything to do here, so clear out the list
2611 e1000_update_mc_addr_list(hw, NULL, 0);
2616 * e1000_configure - configure the hardware for Rx and Tx
2617 * @adapter: private board structure
2619 static void e1000_configure(struct e1000_adapter *adapter)
2621 e1000_set_multi(adapter->netdev);
2623 e1000_restore_vlan(adapter);
2624 e1000_init_manageability(adapter);
2626 e1000_configure_tx(adapter);
2627 e1000_setup_rctl(adapter);
2628 e1000_configure_rx(adapter);
2629 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
2633 * e1000e_power_up_phy - restore link in case the phy was powered down
2634 * @adapter: address of board private structure
2636 * The phy may be powered down to save power and turn off link when the
2637 * driver is unloaded and wake on lan is not enabled (among others)
2638 * *** this routine MUST be followed by a call to e1000e_reset ***
2640 void e1000e_power_up_phy(struct e1000_adapter *adapter)
2642 if (adapter->hw.phy.ops.power_up)
2643 adapter->hw.phy.ops.power_up(&adapter->hw);
2645 adapter->hw.mac.ops.setup_link(&adapter->hw);
2649 * e1000_power_down_phy - Power down the PHY
2651 * Power down the PHY so no link is implied when interface is down.
2652 * The PHY cannot be powered down if management or WoL is active.
2654 static void e1000_power_down_phy(struct e1000_adapter *adapter)
2656 /* WoL is enabled */
2660 if (adapter->hw.phy.ops.power_down)
2661 adapter->hw.phy.ops.power_down(&adapter->hw);
2665 * e1000e_reset - bring the hardware into a known good state
2667 * This function boots the hardware and enables some settings that
2668 * require a configuration cycle of the hardware - those cannot be
2669 * set/changed during runtime. After reset the device needs to be
2670 * properly configured for Rx, Tx etc.
2672 void e1000e_reset(struct e1000_adapter *adapter)
2674 struct e1000_mac_info *mac = &adapter->hw.mac;
2675 struct e1000_fc_info *fc = &adapter->hw.fc;
2676 struct e1000_hw *hw = &adapter->hw;
2677 u32 tx_space, min_tx_space, min_rx_space;
2678 u32 pba = adapter->pba;
2681 /* reset Packet Buffer Allocation to default */
2684 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
2686 * To maintain wire speed transmits, the Tx FIFO should be
2687 * large enough to accommodate two full transmit packets,
2688 * rounded up to the next 1KB and expressed in KB. Likewise,
2689 * the Rx FIFO should be large enough to accommodate at least
2690 * one full receive packet and is similarly rounded up and
2694 /* upper 16 bits hold the Tx packet buffer allocation size in KB */
2695 tx_space = pba >> 16;
2696 /* lower 16 bits hold the Rx packet buffer allocation size in KB */
2699 * the Tx FIFO also stores 16 bytes of information about the Tx packet,
2700 * but the FCS is not included here because hardware appends it
2702 min_tx_space = (adapter->max_frame_size +
2703 sizeof(struct e1000_tx_desc) -
2705 min_tx_space = ALIGN(min_tx_space, 1024);
2706 min_tx_space >>= 10;
2707 /* software strips receive CRC, so leave room for it */
2708 min_rx_space = adapter->max_frame_size;
2709 min_rx_space = ALIGN(min_rx_space, 1024);
2710 min_rx_space >>= 10;
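/*
 * For example, with a 9000-byte MTU (max_frame_size = 9018), min_rx_space
 * works out to ALIGN(9018, 1024) >> 10 = 9 (KB).
 */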
2713 * If current Tx allocation is less than the min Tx FIFO size,
2714 * and the min Tx FIFO size is less than the current Rx FIFO
2715 * allocation, take space away from current Rx allocation
2717 if ((tx_space < min_tx_space) &&
2718 ((min_tx_space - tx_space) < pba)) {
2719 pba -= min_tx_space - tx_space;
2722 * if short on Rx space, Rx wins and must trump tx
2723 * adjustment or use Early Receive if available
2725 if ((pba < min_rx_space) &&
2726 (!(adapter->flags & FLAG_HAS_ERT)))
2727 /* ERT enabled in e1000_configure_rx */
2736 * flow control settings
2738 * The high water mark must be low enough to fit one full frame
2739 * (or the size used for early receive) above it in the Rx FIFO.
2740 * Set it to the lower of:
2741 * - 90% of the Rx FIFO size, and
2742 * - the full Rx FIFO size minus the early receive size (for parts
2743 * with ERT support assuming ERT set to E1000_ERT_2048), or
2744 * - the full Rx FIFO size minus one full frame
2746 if (hw->mac.type == e1000_pchlan) {
2748 * Workaround PCH LOM adapter hangs with certain network
2749 * loads. If hangs persist, try disabling Tx flow control.
2751 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2752 fc->high_water = 0x3500;
2753 fc->low_water = 0x1500;
2755 fc->high_water = 0x5000;
2756 fc->low_water = 0x3000;
2759 if ((adapter->flags & FLAG_HAS_ERT) &&
2760 (adapter->netdev->mtu > ETH_DATA_LEN))
2761 hwm = min(((pba << 10) * 9 / 10),
2762 ((pba << 10) - (E1000_ERT_2048 << 3)));
2764 hwm = min(((pba << 10) * 9 / 10),
2765 ((pba << 10) - adapter->max_frame_size));
2767 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
2768 fc->low_water = fc->high_water - 8;
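/*
 * As a rough example (non-ERT case): with a hypothetical 20 KB Rx
 * allocation (pba = 20) and a standard 1522-byte max frame, hwm =
 * min(20480 * 9 / 10, 20480 - 1522) = 18432, so high_water = 18432 and
 * low_water = 18424, both in bytes with 8-byte granularity.
 */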
2771 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
2772 fc->pause_time = 0xFFFF;
2774 fc->pause_time = E1000_FC_PAUSE_TIME;
2776 fc->current_mode = fc->requested_mode;
2778 /* Allow time for pending master requests to run */
2779 mac->ops.reset_hw(hw);
2782 * For parts with AMT enabled, let the firmware know
2783 * that the network interface is in control
2785 if (adapter->flags & FLAG_HAS_AMT)
2786 e1000_get_hw_control(adapter);
2789 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
2790 e1e_wphy(&adapter->hw, BM_WUC, 0);
2792 if (mac->ops.init_hw(hw))
2793 e_err("Hardware Error\n");
2795 /* additional part of the flow-control workaround above */
2796 if (hw->mac.type == e1000_pchlan)
2797 ew32(FCRTV_PCH, 0x1000);
2799 e1000_update_mng_vlan(adapter);
2801 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2802 ew32(VET, ETH_P_8021Q);
2804 e1000e_reset_adaptive(hw);
2805 e1000_get_phy_info(hw);
2807 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
2808 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
2811 * speed up time to link by disabling smart power down, ignore
2812 * the return value of this function because there is nothing
2813 * different we would do if it failed
2815 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
2816 phy_data &= ~IGP02E1000_PM_SPD;
2817 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
2821 int e1000e_up(struct e1000_adapter *adapter)
2823 struct e1000_hw *hw = &adapter->hw;
2825 /* DMA latency requirement to workaround early-receive/jumbo issue */
2826 if (adapter->flags & FLAG_HAS_ERT)
2827 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
2828 adapter->netdev->name,
2829 PM_QOS_DEFAULT_VALUE);
2831 /* hardware has been reset, we need to reload some things */
2832 e1000_configure(adapter);
2834 clear_bit(__E1000_DOWN, &adapter->state);
2836 napi_enable(&adapter->napi);
2837 if (adapter->msix_entries)
2838 e1000_configure_msix(adapter);
2839 e1000_irq_enable(adapter);
2841 netif_wake_queue(adapter->netdev);
2843 /* fire a link change interrupt to start the watchdog */
2844 ew32(ICS, E1000_ICS_LSC);
2848 void e1000e_down(struct e1000_adapter *adapter)
2850 struct net_device *netdev = adapter->netdev;
2851 struct e1000_hw *hw = &adapter->hw;
2855 * signal that we're down so the interrupt handler does not
2856 * reschedule our watchdog timer
2858 set_bit(__E1000_DOWN, &adapter->state);
2860 /* disable receives in the hardware */
2862 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2863 /* flush and sleep below */
2865 netif_stop_queue(netdev);
2867 /* disable transmits in the hardware */
2869 tctl &= ~E1000_TCTL_EN;
2871 /* flush both disables and wait for them to finish */
2875 napi_disable(&adapter->napi);
2876 e1000_irq_disable(adapter);
2878 del_timer_sync(&adapter->watchdog_timer);
2879 del_timer_sync(&adapter->phy_info_timer);
2881 netif_carrier_off(netdev);
2882 adapter->link_speed = 0;
2883 adapter->link_duplex = 0;
2885 if (!pci_channel_offline(adapter->pdev))
2886 e1000e_reset(adapter);
2887 e1000_clean_tx_ring(adapter);
2888 e1000_clean_rx_ring(adapter);
2890 if (adapter->flags & FLAG_HAS_ERT)
2891 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
2892 adapter->netdev->name);
2895 * TODO: for power management, we could drop the link and
2896 * pci_disable_device here.
2900 void e1000e_reinit_locked(struct e1000_adapter *adapter)
2903 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
2905 e1000e_down(adapter);
2907 clear_bit(__E1000_RESETTING, &adapter->state);
2911 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
2912 * @adapter: board private structure to initialize
2914 * e1000_sw_init initializes the Adapter private data structure.
2915 * Fields are initialized based on PCI device information and
2916 * OS network device settings (MTU size).
2918 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2920 struct net_device *netdev = adapter->netdev;
2922 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
2923 adapter->rx_ps_bsize0 = 128;
2924 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2925 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
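/*
 * With the default 1500-byte MTU these work out to: rx_buffer_len =
 * 1514 + 4 + 4 = 1522 bytes, max_frame_size = 1500 + 14 + 4 = 1518 bytes,
 * and min_frame_size = 60 + 4 = 64 bytes.
 */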
2927 e1000e_set_interrupt_capability(adapter);
2929 if (e1000_alloc_queues(adapter))
2932 /* Explicitly disable IRQ since the NIC can be in any state. */
2933 e1000_irq_disable(adapter);
2935 set_bit(__E1000_DOWN, &adapter->state);
2940 * e1000_intr_msi_test - Interrupt Handler
2941 * @irq: interrupt number
2942 * @data: pointer to a network interface device structure
2944 static irqreturn_t e1000_intr_msi_test(int irq, void *data)
2946 struct net_device *netdev = data;
2947 struct e1000_adapter *adapter = netdev_priv(netdev);
2948 struct e1000_hw *hw = &adapter->hw;
2949 u32 icr = er32(ICR);
2951 e_dbg("icr is %08X\n", icr);
2952 if (icr & E1000_ICR_RXSEQ) {
2953 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
2961 * e1000_test_msi_interrupt - Returns 0 for successful test
2962 * @adapter: board private struct
2964 * code flow taken from tg3.c
2966 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2968 struct net_device *netdev = adapter->netdev;
2969 struct e1000_hw *hw = &adapter->hw;
2972 /* poll_enable hasn't been called yet, so don't need disable */
2973 /* clear any pending events */
2976 /* free the real vector and request a test handler */
2977 e1000_free_irq(adapter);
2978 e1000e_reset_interrupt_capability(adapter);
2980 /* Assume that the test fails; if it succeeds, the test
2981 * MSI irq handler will unset this flag */
2982 adapter->flags |= FLAG_MSI_TEST_FAILED;
2984 err = pci_enable_msi(adapter->pdev);
2986 goto msi_test_failed;
2988 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
2989 netdev->name, netdev);
2991 pci_disable_msi(adapter->pdev);
2992 goto msi_test_failed;
2997 e1000_irq_enable(adapter);
2999 /* fire an unusual interrupt on the test handler */
3000 ew32(ICS, E1000_ICS_RXSEQ);
3004 e1000_irq_disable(adapter);
3008 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3009 adapter->int_mode = E1000E_INT_MODE_LEGACY;
3011 e_info("MSI interrupt test failed!\n");
3014 free_irq(adapter->pdev->irq, netdev);
3015 pci_disable_msi(adapter->pdev);
3018 goto msi_test_failed;
3020 /* okay so the test worked, restore settings */
3021 e_dbg("MSI interrupt test succeeded!\n");
3023 e1000e_set_interrupt_capability(adapter);
3024 e1000_request_irq(adapter);
3029 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3030 * @adapter: board private struct
3032 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3034 static int e1000_test_msi(struct e1000_adapter *adapter)
3039 if (!(adapter->flags & FLAG_MSI_ENABLED))
3042 /* disable SERR in case the MSI write causes a master abort */
3043 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3044 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3045 pci_cmd & ~PCI_COMMAND_SERR);
3047 err = e1000_test_msi_interrupt(adapter);
3049 /* restore previous setting of command word */
3050 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3056 /* EIO means MSI test failed */
3060 /* back to INTx mode */
3061 e_warn("MSI interrupt test failed, using legacy interrupt.\n");
3063 e1000_free_irq(adapter);
3065 err = e1000_request_irq(adapter);
3071 * e1000_open - Called when a network interface is made active
3072 * @netdev: network interface device structure
3074 * Returns 0 on success, negative value on failure
3076 * The open entry point is called when a network interface is made
3077 * active by the system (IFF_UP). At this point all resources needed
3078 * for transmit and receive operations are allocated, the interrupt
3079 * handler is registered with the OS, the watchdog timer is started,
3080 * and the stack is notified that the interface is ready.
3082 static int e1000_open(struct net_device *netdev)
3084 struct e1000_adapter *adapter = netdev_priv(netdev);
3085 struct e1000_hw *hw = &adapter->hw;
3086 struct pci_dev *pdev = adapter->pdev;
3089 /* disallow open during test */
3090 if (test_bit(__E1000_TESTING, &adapter->state))
3093 pm_runtime_get_sync(&pdev->dev);
3095 netif_carrier_off(netdev);
3097 /* allocate transmit descriptors */
3098 err = e1000e_setup_tx_resources(adapter);
3102 /* allocate receive descriptors */
3103 err = e1000e_setup_rx_resources(adapter);
3107 e1000e_power_up_phy(adapter);
3109 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3110 if ((adapter->hw.mng_cookie.status &
3111 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3112 e1000_update_mng_vlan(adapter);
3115 * If AMT is enabled, let the firmware know that the network
3116 * interface is now open
3118 if (adapter->flags & FLAG_HAS_AMT)
3119 e1000_get_hw_control(adapter);
3122 * before we allocate an interrupt, we must be ready to handle it.
3123 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3124 * as soon as we call pci_request_irq, so we have to setup our
3125 * clean_rx handler before we do so.
3127 e1000_configure(adapter);
3129 err = e1000_request_irq(adapter);
3134 * Work around PCIe errata with MSI interrupts causing some chipsets to
3135 * ignore e1000e MSI messages, which means we need to test our MSI interrupt now
3138 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3139 err = e1000_test_msi(adapter);
3141 e_err("Interrupt allocation failed\n");
3146 /* From here on the code is the same as e1000e_up() */
3147 clear_bit(__E1000_DOWN, &adapter->state);
3149 napi_enable(&adapter->napi);
3151 e1000_irq_enable(adapter);
3153 netif_start_queue(netdev);
3155 adapter->idle_check = true;
3156 pm_runtime_put(&pdev->dev);
3158 /* fire a link status change interrupt to start the watchdog */
3159 ew32(ICS, E1000_ICS_LSC);
3164 e1000_release_hw_control(adapter);
3165 e1000_power_down_phy(adapter);
3166 e1000e_free_rx_resources(adapter);
3168 e1000e_free_tx_resources(adapter);
3170 e1000e_reset(adapter);
3171 pm_runtime_put_sync(&pdev->dev);
3177 * e1000_close - Disables a network interface
3178 * @netdev: network interface device structure
3180 * Returns 0, this is not allowed to fail
3182 * The close entry point is called when an interface is de-activated
3183 * by the OS. The hardware is still under the drivers control, but
3184 * needs to be disabled. A global MAC reset is issued to stop the
3185 * hardware, and all transmit and receive resources are freed.
3187 static int e1000_close(struct net_device *netdev)
3189 struct e1000_adapter *adapter = netdev_priv(netdev);
3190 struct pci_dev *pdev = adapter->pdev;
3192 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3194 pm_runtime_get_sync(&pdev->dev);
3196 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3197 e1000e_down(adapter);
3198 e1000_free_irq(adapter);
3200 e1000_power_down_phy(adapter);
3202 e1000e_free_tx_resources(adapter);
3203 e1000e_free_rx_resources(adapter);
3206 * kill manageability vlan ID if supported, but not if a vlan with
3207 * the same ID is registered on the host OS (let 8021q kill it)
3209 if ((adapter->hw.mng_cookie.status &
3210 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
3212 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
3213 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3216 * If AMT is enabled, let the firmware know that the network
3217 * interface is now closed
3219 if (adapter->flags & FLAG_HAS_AMT)
3220 e1000_release_hw_control(adapter);
3222 pm_runtime_put_sync(&pdev->dev);
3227 * e1000_set_mac - Change the Ethernet Address of the NIC
3228 * @netdev: network interface device structure
3229 * @p: pointer to an address structure
3231 * Returns 0 on success, negative on failure
3233 static int e1000_set_mac(struct net_device *netdev, void *p)
3235 struct e1000_adapter *adapter = netdev_priv(netdev);
3236 struct sockaddr *addr = p;
3238 if (!is_valid_ether_addr(addr->sa_data))
3239 return -EADDRNOTAVAIL;
3241 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3242 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3244 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
3246 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
3247 /* activate the work around */
3248 e1000e_set_laa_state_82571(&adapter->hw, 1);
3251 * Hold a copy of the LAA in RAR[14] This is done so that
3252 * between the time RAR[0] gets clobbered and the time it
3253 * gets fixed (in e1000_watchdog), the actual LAA is in one
3254 * of the RARs and no incoming packets directed to this port
3255 * are dropped. Eventually the LAA will be in RAR[0] and
3258 e1000e_rar_set(&adapter->hw,
3259 adapter->hw.mac.addr,
3260 adapter->hw.mac.rar_entry_count - 1);
3267 * e1000e_update_phy_task - work thread to update phy
3268 * @work: pointer to our work struct
3270 * this worker thread exists because we must acquire a
3271 * semaphore to read the PHY; we may msleep while waiting
3272 * for it, and we cannot msleep in a timer context.
3274 static void e1000e_update_phy_task(struct work_struct *work)
3276 struct e1000_adapter *adapter = container_of(work,
3277 struct e1000_adapter, update_phy_task);
3278 e1000_get_phy_info(&adapter->hw);
3282 * Need to wait a few seconds after link up to get diagnostic information from the PHY
3285 static void e1000_update_phy_info(unsigned long data)
3287 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3288 schedule_work(&adapter->update_phy_task);
3292 * e1000e_update_stats - Update the board statistics counters
3293 * @adapter: board private structure
3295 void e1000e_update_stats(struct e1000_adapter *adapter)
3297 struct net_device *netdev = adapter->netdev;
3298 struct e1000_hw *hw = &adapter->hw;
3299 struct pci_dev *pdev = adapter->pdev;
3303 * Prevent stats update while adapter is being reset, or if the pci
3304 * connection is down.
3306 if (adapter->link_speed == 0)
3308 if (pci_channel_offline(pdev))
3311 adapter->stats.crcerrs += er32(CRCERRS);
3312 adapter->stats.gprc += er32(GPRC);
3313 adapter->stats.gorc += er32(GORCL);
3314 er32(GORCH); /* Clear gorc */
3315 adapter->stats.bprc += er32(BPRC);
3316 adapter->stats.mprc += er32(MPRC);
3317 adapter->stats.roc += er32(ROC);
3319 adapter->stats.mpc += er32(MPC);
3320 if ((hw->phy.type == e1000_phy_82578) ||
3321 (hw->phy.type == e1000_phy_82577)) {
3322 e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
3323 if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
3324 adapter->stats.scc += phy_data;
3326 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
3327 if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
3328 adapter->stats.ecol += phy_data;
3330 e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
3331 if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
3332 adapter->stats.mcc += phy_data;
3334 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
3335 if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
3336 adapter->stats.latecol += phy_data;
3338 e1e_rphy(hw, HV_DC_UPPER, &phy_data);
3339 if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
3340 adapter->stats.dc += phy_data;
3342 adapter->stats.scc += er32(SCC);
3343 adapter->stats.ecol += er32(ECOL);
3344 adapter->stats.mcc += er32(MCC);
3345 adapter->stats.latecol += er32(LATECOL);
3346 adapter->stats.dc += er32(DC);
3348 adapter->stats.xonrxc += er32(XONRXC);
3349 adapter->stats.xontxc += er32(XONTXC);
3350 adapter->stats.xoffrxc += er32(XOFFRXC);
3351 adapter->stats.xofftxc += er32(XOFFTXC);
3352 adapter->stats.gptc += er32(GPTC);
3353 adapter->stats.gotc += er32(GOTCL);
3354 er32(GOTCH); /* Clear gotc */
3355 adapter->stats.rnbc += er32(RNBC);
3356 adapter->stats.ruc += er32(RUC);
3358 adapter->stats.mptc += er32(MPTC);
3359 adapter->stats.bptc += er32(BPTC);
3361 /* used for adaptive IFS */
3363 hw->mac.tx_packet_delta = er32(TPT);
3364 adapter->stats.tpt += hw->mac.tx_packet_delta;
3365 if ((hw->phy.type == e1000_phy_82578) ||
3366 (hw->phy.type == e1000_phy_82577)) {
3367 e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
3368 if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
3369 hw->mac.collision_delta = phy_data;
3371 hw->mac.collision_delta = er32(COLC);
3373 adapter->stats.colc += hw->mac.collision_delta;
3375 adapter->stats.algnerrc += er32(ALGNERRC);
3376 adapter->stats.rxerrc += er32(RXERRC);
3377 if ((hw->phy.type == e1000_phy_82578) ||
3378 (hw->phy.type == e1000_phy_82577)) {
3379 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
3380 if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
3381 adapter->stats.tncrs += phy_data;
3383 if ((hw->mac.type != e1000_82574) &&
3384 (hw->mac.type != e1000_82583))
3385 adapter->stats.tncrs += er32(TNCRS);
3387 adapter->stats.cexterr += er32(CEXTERR);
3388 adapter->stats.tsctc += er32(TSCTC);
3389 adapter->stats.tsctfc += er32(TSCTFC);
3391 /* Fill out the OS statistics structure */
3392 netdev->stats.multicast = adapter->stats.mprc;
3393 netdev->stats.collisions = adapter->stats.colc;
3398 * RLEC on some newer hardware can be incorrect so build
3399 * our own version based on RUC and ROC
3401 netdev->stats.rx_errors = adapter->stats.rxerrc +
3402 adapter->stats.crcerrs + adapter->stats.algnerrc +
3403 adapter->stats.ruc + adapter->stats.roc +
3404 adapter->stats.cexterr;
3405 netdev->stats.rx_length_errors = adapter->stats.ruc +
3407 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3408 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3409 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3412 netdev->stats.tx_errors = adapter->stats.ecol +
3413 adapter->stats.latecol;
3414 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3415 netdev->stats.tx_window_errors = adapter->stats.latecol;
3416 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3418 /* Tx Dropped needs to be maintained elsewhere */
3420 /* Management Stats */
3421 adapter->stats.mgptc += er32(MGTPTC);
3422 adapter->stats.mgprc += er32(MGTPRC);
3423 adapter->stats.mgpdc += er32(MGTPDC);
3427 * e1000_phy_read_status - Update the PHY register status snapshot
3428 * @adapter: board private structure
3430 static void e1000_phy_read_status(struct e1000_adapter *adapter)
3432 struct e1000_hw *hw = &adapter->hw;
3433 struct e1000_phy_regs *phy = &adapter->phy_regs;
3436 if ((er32(STATUS) & E1000_STATUS_LU) &&
3437 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
3438 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
3439 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
3440 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
3441 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
3442 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
3443 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
3444 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
3445 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
3447 e_warn("Error reading PHY register\n");
3450 * Do not read PHY registers if link is not up
3451 * Set values to typical power-on defaults
3453 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
3454 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
3455 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
3457 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
3458 ADVERTISE_ALL | ADVERTISE_CSMA);
3460 phy->expansion = EXPANSION_ENABLENPAGE;
3461 phy->ctrl1000 = ADVERTISE_1000FULL;
3463 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
3467 static void e1000_print_link_info(struct e1000_adapter *adapter)
3469 struct e1000_hw *hw = &adapter->hw;
3470 u32 ctrl = er32(CTRL);
3472 /* Link status message must follow this format for user tools */
3473 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
3474 "Flow Control: %s\n",
3475 adapter->netdev->name,
3476 adapter->link_speed,
3477 (adapter->link_duplex == FULL_DUPLEX) ?
3478 "Full Duplex" : "Half Duplex",
3479 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
3481 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3482 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
3485 bool e1000e_has_link(struct e1000_adapter *adapter)
3487 struct e1000_hw *hw = &adapter->hw;
3488 bool link_active = false;
3492 * get_link_status is set on LSC (link status) interrupt or
3493 * Rx sequence error interrupt. get_link_status will stay
3494 * false until the check_for_link establishes link
3495 * for copper adapters ONLY
3497 switch (hw->phy.media_type) {
3498 case e1000_media_type_copper:
3499 if (hw->mac.get_link_status) {
3500 ret_val = hw->mac.ops.check_for_link(hw);
3501 link_active = !hw->mac.get_link_status;
3506 case e1000_media_type_fiber:
3507 ret_val = hw->mac.ops.check_for_link(hw);
3508 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
3510 case e1000_media_type_internal_serdes:
3511 ret_val = hw->mac.ops.check_for_link(hw);
3512 link_active = adapter->hw.mac.serdes_has_link;
3515 case e1000_media_type_unknown:
3519 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
3520 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
3521 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
3522 e_info("Gigabit has been disabled, downgrading speed\n");
3528 static void e1000e_enable_receives(struct e1000_adapter *adapter)
3530 /* make sure the receive unit is started */
3531 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
3532 (adapter->flags & FLAG_RX_RESTART_NOW)) {
3533 struct e1000_hw *hw = &adapter->hw;
3534 u32 rctl = er32(RCTL);
3535 ew32(RCTL, rctl | E1000_RCTL_EN);
3536 adapter->flags &= ~FLAG_RX_RESTART_NOW;
3541 * e1000_watchdog - Timer Call-back
3542 * @data: pointer to adapter cast into an unsigned long
3544 static void e1000_watchdog(unsigned long data)
3546 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3548 /* Do the rest outside of interrupt context */
3549 schedule_work(&adapter->watchdog_task);
3551 /* TODO: make this use queue_delayed_work() */
3554 static void e1000_watchdog_task(struct work_struct *work)
3556 struct e1000_adapter *adapter = container_of(work,
3557 struct e1000_adapter, watchdog_task);
3558 struct net_device *netdev = adapter->netdev;
3559 struct e1000_mac_info *mac = &adapter->hw.mac;
3560 struct e1000_phy_info *phy = &adapter->hw.phy;
3561 struct e1000_ring *tx_ring = adapter->tx_ring;
3562 struct e1000_hw *hw = &adapter->hw;
3566 link = e1000e_has_link(adapter);
3567 if ((netif_carrier_ok(netdev)) && link) {
3568 /* Cancel scheduled suspend requests. */
3569 pm_runtime_resume(netdev->dev.parent);
3571 e1000e_enable_receives(adapter);
3575 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
3576 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
3577 e1000_update_mng_vlan(adapter);
3580 if (!netif_carrier_ok(netdev)) {
3583 /* Cancel scheduled suspend requests. */
3584 pm_runtime_resume(netdev->dev.parent);
3586 /* update snapshot of PHY registers on LSC */
3587 e1000_phy_read_status(adapter);
3588 mac->ops.get_link_up_info(&adapter->hw,
3589 &adapter->link_speed,
3590 &adapter->link_duplex);
3591 e1000_print_link_info(adapter);
3593 * On supported PHYs, check for duplex mismatch only
3594 * if link has autonegotiated at 10/100 half
3596 if ((hw->phy.type == e1000_phy_igp_3 ||
3597 hw->phy.type == e1000_phy_bm) &&
3598 (hw->mac.autoneg == true) &&
3599 (adapter->link_speed == SPEED_10 ||
3600 adapter->link_speed == SPEED_100) &&
3601 (adapter->link_duplex == HALF_DUPLEX)) {
3604 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
3606 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
3607 e_info("Autonegotiated half duplex but"
3608 " link partner cannot autoneg. "
3609 " Try forcing full duplex if "
3610 "link gets many collisions.\n");
3613 /* adjust timeout factor according to speed/duplex */
3614 adapter->tx_timeout_factor = 1;
3615 switch (adapter->link_speed) {
3618 adapter->tx_timeout_factor = 16;
3622 adapter->tx_timeout_factor = 10;
3627 * workaround: re-program speed mode bit after
3630 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
3633 tarc0 = er32(TARC(0));
3634 tarc0 &= ~SPEED_MODE_BIT;
3635 ew32(TARC(0), tarc0);
3639 * disable TSO for pcie and 10/100 speeds, to avoid
3640 * some hardware issues
3642 if (!(adapter->flags & FLAG_TSO_FORCE)) {
3643 switch (adapter->link_speed) {
3646 e_info("10/100 speed: disabling TSO\n");
3647 netdev->features &= ~NETIF_F_TSO;
3648 netdev->features &= ~NETIF_F_TSO6;
3651 netdev->features |= NETIF_F_TSO;
3652 netdev->features |= NETIF_F_TSO6;
3661 * enable transmits in the hardware, need to do this
3662 * after setting TARC(0)
3665 tctl |= E1000_TCTL_EN;
3669 * Perform any post-link-up configuration before
3670 * reporting link up.
3672 if (phy->ops.cfg_on_link_up)
3673 phy->ops.cfg_on_link_up(hw);
3675 netif_carrier_on(netdev);
3677 if (!test_bit(__E1000_DOWN, &adapter->state))
3678 mod_timer(&adapter->phy_info_timer,
3679 round_jiffies(jiffies + 2 * HZ));
3682 if (netif_carrier_ok(netdev)) {
3683 adapter->link_speed = 0;
3684 adapter->link_duplex = 0;
3685 /* Link status message must follow this format */
3686 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
3687 adapter->netdev->name);
3688 netif_carrier_off(netdev);
3689 if (!test_bit(__E1000_DOWN, &adapter->state))
3690 mod_timer(&adapter->phy_info_timer,
3691 round_jiffies(jiffies + 2 * HZ));
3693 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
3694 schedule_work(&adapter->reset_task);
3696 pm_schedule_suspend(netdev->dev.parent,
3702 e1000e_update_stats(adapter);
3704 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
3705 adapter->tpt_old = adapter->stats.tpt;
3706 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
3707 adapter->colc_old = adapter->stats.colc;
3709 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
3710 adapter->gorc_old = adapter->stats.gorc;
3711 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
3712 adapter->gotc_old = adapter->stats.gotc;
3714 e1000e_update_adaptive(&adapter->hw);
3716 if (!netif_carrier_ok(netdev)) {
3717 tx_pending = (e1000_desc_unused(tx_ring) + 1 <
3721 * We've lost link, so the controller stops DMA,
3722 * but we've got queued Tx work that's never going
3723 * to get done, so reset controller to flush Tx.
3724 * (Do the reset outside of interrupt context).
3726 adapter->tx_timeout_count++;
3727 schedule_work(&adapter->reset_task);
3728 /* return immediately since reset is imminent */
3733 /* Cause software interrupt to ensure Rx ring is cleaned */
3734 if (adapter->msix_entries)
3735 ew32(ICS, adapter->rx_ring->ims_val);
3737 ew32(ICS, E1000_ICS_RXDMT0);
3739 /* Force detection of hung controller every watchdog period */
3740 adapter->detect_tx_hung = 1;
3743 * With 82571 controllers, LAA may be overwritten due to controller
3744 * reset from the other port. Set the appropriate LAA in RAR[0]
3746 if (e1000e_get_laa_state_82571(hw))
3747 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
3749 /* Reset the timer */
3750 if (!test_bit(__E1000_DOWN, &adapter->state))
3751 mod_timer(&adapter->watchdog_timer,
3752 round_jiffies(jiffies + 2 * HZ));
3755 #define E1000_TX_FLAGS_CSUM 0x00000001
3756 #define E1000_TX_FLAGS_VLAN 0x00000002
3757 #define E1000_TX_FLAGS_TSO 0x00000004
3758 #define E1000_TX_FLAGS_IPV4 0x00000008
3759 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
3760 #define E1000_TX_FLAGS_VLAN_SHIFT 16
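/*
 * The 802.1Q tag for a transmit frame is carried in the upper 16 bits of
 * tx_flags (see E1000_TX_FLAGS_VLAN_MASK/_SHIFT): e.g. VLAN ID 100 is stored
 * as 100 << 16 and later copied into the descriptor's upper VLAN field.
 */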
3762 static int e1000_tso(struct e1000_adapter *adapter,
3763 struct sk_buff *skb)
3765 struct e1000_ring *tx_ring = adapter->tx_ring;
3766 struct e1000_context_desc *context_desc;
3767 struct e1000_buffer *buffer_info;
3770 u16 ipcse = 0, tucse, mss;
3771 u8 ipcss, ipcso, tucss, tucso, hdr_len;
3774 if (!skb_is_gso(skb))
3777 if (skb_header_cloned(skb)) {
3778 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3783 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3784 mss = skb_shinfo(skb)->gso_size;
3785 if (skb->protocol == htons(ETH_P_IP)) {
3786 struct iphdr *iph = ip_hdr(skb);
3789 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
3791 cmd_length = E1000_TXD_CMD_IP;
3792 ipcse = skb_transport_offset(skb) - 1;
3793 } else if (skb_is_gso_v6(skb)) {
3794 ipv6_hdr(skb)->payload_len = 0;
3795 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3796 &ipv6_hdr(skb)->daddr,
3800 ipcss = skb_network_offset(skb);
3801 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
3802 tucss = skb_transport_offset(skb);
3803 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
3806 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
3807 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
3809 i = tx_ring->next_to_use;
3810 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3811 buffer_info = &tx_ring->buffer_info[i];
3813 context_desc->lower_setup.ip_fields.ipcss = ipcss;
3814 context_desc->lower_setup.ip_fields.ipcso = ipcso;
3815 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
3816 context_desc->upper_setup.tcp_fields.tucss = tucss;
3817 context_desc->upper_setup.tcp_fields.tucso = tucso;
3818 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
3819 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
3820 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
3821 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
3823 buffer_info->time_stamp = jiffies;
3824 buffer_info->next_to_watch = i;
3827 if (i == tx_ring->count)
3829 tx_ring->next_to_use = i;
3834 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
3836 struct e1000_ring *tx_ring = adapter->tx_ring;
3837 struct e1000_context_desc *context_desc;
3838 struct e1000_buffer *buffer_info;
3841 u32 cmd_len = E1000_TXD_CMD_DEXT;
3844 if (skb->ip_summed != CHECKSUM_PARTIAL)
3847 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
3848 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
3850 protocol = skb->protocol;
3853 case cpu_to_be16(ETH_P_IP):
3854 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3855 cmd_len |= E1000_TXD_CMD_TCP;
3857 case cpu_to_be16(ETH_P_IPV6):
3858 /* XXX not handling all IPV6 headers */
3859 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3860 cmd_len |= E1000_TXD_CMD_TCP;
3863 if (unlikely(net_ratelimit()))
3864 e_warn("checksum_partial proto=%x!\n",
3865 be16_to_cpu(protocol));
3869 css = skb_transport_offset(skb);
3871 i = tx_ring->next_to_use;
3872 buffer_info = &tx_ring->buffer_info[i];
3873 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3875 context_desc->lower_setup.ip_config = 0;
3876 context_desc->upper_setup.tcp_fields.tucss = css;
3877 context_desc->upper_setup.tcp_fields.tucso =
3878 css + skb->csum_offset;
3879 context_desc->upper_setup.tcp_fields.tucse = 0;
3880 context_desc->tcp_seg_setup.data = 0;
3881 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
3883 buffer_info->time_stamp = jiffies;
3884 buffer_info->next_to_watch = i;
3887 if (i == tx_ring->count)
3889 tx_ring->next_to_use = i;
3894 #define E1000_MAX_PER_TXD 8192
3895 #define E1000_MAX_TXD_PWR 12
3897 static int e1000_tx_map(struct e1000_adapter *adapter,
3898 struct sk_buff *skb, unsigned int first,
3899 unsigned int max_per_txd, unsigned int nr_frags,
3902 struct e1000_ring *tx_ring = adapter->tx_ring;
3903 struct pci_dev *pdev = adapter->pdev;
3904 struct e1000_buffer *buffer_info;
3905 unsigned int len = skb_headlen(skb);
3906 unsigned int offset = 0, size, count = 0, i;
3909 i = tx_ring->next_to_use;
3912 buffer_info = &tx_ring->buffer_info[i];
3913 size = min(len, max_per_txd);
3915 buffer_info->length = size;
3916 buffer_info->time_stamp = jiffies;
3917 buffer_info->next_to_watch = i;
3918 buffer_info->dma = pci_map_single(pdev, skb->data + offset,
3919 size, PCI_DMA_TODEVICE);
3920 buffer_info->mapped_as_page = false;
3921 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3930 if (i == tx_ring->count)
3935 for (f = 0; f < nr_frags; f++) {
3936 struct skb_frag_struct *frag;
3938 frag = &skb_shinfo(skb)->frags[f];
3940 offset = frag->page_offset;
3944 if (i == tx_ring->count)
3947 buffer_info = &tx_ring->buffer_info[i];
3948 size = min(len, max_per_txd);
3950 buffer_info->length = size;
3951 buffer_info->time_stamp = jiffies;
3952 buffer_info->next_to_watch = i;
3953 buffer_info->dma = pci_map_page(pdev, frag->page,
3956 buffer_info->mapped_as_page = true;
3957 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3966 tx_ring->buffer_info[i].skb = skb;
3967 tx_ring->buffer_info[first].next_to_watch = i;
3972 dev_err(&pdev->dev, "TX DMA map failed\n");
3973 buffer_info->dma = 0;
3979 i += tx_ring->count;
3981 buffer_info = &tx_ring->buffer_info[i];
3982 e1000_put_txbuf(adapter, buffer_info);
3988 static void e1000_tx_queue(struct e1000_adapter *adapter,
3989 int tx_flags, int count)
3991 struct e1000_ring *tx_ring = adapter->tx_ring;
3992 struct e1000_tx_desc *tx_desc = NULL;
3993 struct e1000_buffer *buffer_info;
3994 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3997 if (tx_flags & E1000_TX_FLAGS_TSO) {
3998 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
4000 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4002 if (tx_flags & E1000_TX_FLAGS_IPV4)
4003 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
4006 if (tx_flags & E1000_TX_FLAGS_CSUM) {
4007 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
4008 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4011 if (tx_flags & E1000_TX_FLAGS_VLAN) {
4012 txd_lower |= E1000_TXD_CMD_VLE;
4013 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
4016 i = tx_ring->next_to_use;
4019 buffer_info = &tx_ring->buffer_info[i];
4020 tx_desc = E1000_TX_DESC(*tx_ring, i);
4021 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4022 tx_desc->lower.data =
4023 cpu_to_le32(txd_lower | buffer_info->length);
4024 tx_desc->upper.data = cpu_to_le32(txd_upper);
4027 if (i == tx_ring->count)
4031 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4034 * Force memory writes to complete before letting h/w
4035 * know there are new descriptors to fetch. (Only
4036 * applicable for weak-ordered memory model archs,
4041 tx_ring->next_to_use = i;
4042 writel(i, adapter->hw.hw_addr + tx_ring->tail);
4044 * we need this if more than one processor can write to our tail
4045 * at a time; it synchronizes IO on IA64/Altix systems
4050 #define MINIMUM_DHCP_PACKET_SIZE 282
4051 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4052 struct sk_buff *skb)
4054 struct e1000_hw *hw = &adapter->hw;
4057 if (vlan_tx_tag_present(skb)) {
4058 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4059 (adapter->hw.mng_cookie.status &
4060 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4064 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4067 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
4071 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
4074 if (ip->protocol != IPPROTO_UDP)
4077 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
4078 if (ntohs(udp->dest) != 67)
4081 offset = (u8 *)udp + 8 - skb->data;
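/* the 8 above is the fixed UDP header length, so offset/length describe
 * just the DHCP payload that gets handed to the manageability firmware */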
4082 length = skb->len - offset;
4083 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
4089 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
4091 struct e1000_adapter *adapter = netdev_priv(netdev);
4093 netif_stop_queue(netdev);
4095 * Herbert's original patch had:
4096 * smp_mb__after_netif_stop_queue();
4097 * but since that doesn't exist yet, just open code it.
4102 * We need to check again in a case another CPU has just
4103 * made room available.
4105 if (e1000_desc_unused(adapter->tx_ring) < size)
4109 netif_start_queue(netdev);
4110 ++adapter->restart_queue;
4114 static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
4116 struct e1000_adapter *adapter = netdev_priv(netdev);
4118 if (e1000_desc_unused(adapter->tx_ring) >= size)
4120 return __e1000_maybe_stop_tx(netdev, size);
4123 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
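/*
 * TXD_USE_COUNT() is a conservative upper bound on descriptors per buffer,
 * counting in 2^X-byte chunks and always adding one: e.g.
 * TXD_USE_COUNT(1514, 12) = 1 and TXD_USE_COUNT(9000, 12) = 3.  Exact
 * multiples of the chunk size over-count by one, which only means the queue
 * is stopped slightly early.
 */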
4124 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4125 struct net_device *netdev)
4127 struct e1000_adapter *adapter = netdev_priv(netdev);
4128 struct e1000_ring *tx_ring = adapter->tx_ring;
4130 unsigned int max_per_txd = E1000_MAX_PER_TXD;
4131 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4132 unsigned int tx_flags = 0;
4133 unsigned int len = skb->len - skb->data_len;
4134 unsigned int nr_frags;
4140 if (test_bit(__E1000_DOWN, &adapter->state)) {
4141 dev_kfree_skb_any(skb);
4142 return NETDEV_TX_OK;
4145 if (skb->len <= 0) {
4146 dev_kfree_skb_any(skb);
4147 return NETDEV_TX_OK;
4150 mss = skb_shinfo(skb)->gso_size;
4152 * The controller does a simple calculation to
4153 * make sure there is enough room in the FIFO before
4154 * initiating the DMA for each buffer. The calc is:
4155 * 4 = ceil(buffer len/mss). To make sure we don't
4156 * overrun the FIFO, adjust the max buffer len if mss
4161 max_per_txd = min(mss << 2, max_per_txd);
4162 max_txd_pwr = fls(max_per_txd) - 1;
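/*
 * For example, a TCP mss of 536 caps max_per_txd at min(536 << 2, 8192) =
 * 2144 bytes and max_txd_pwr at fls(2144) - 1 = 11, so the descriptor-count
 * estimate below works in 2 KB chunks instead of 4 KB ones.
 */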
4165 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
4166 * points to just header, pull a few bytes of payload from
4167 * frags into skb->data
4169 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4171 * we do this workaround for ES2LAN, but it is unnecessary;
4172 * avoiding it could save a lot of cycles
4174 if (skb->data_len && (hdr_len == len)) {
4175 unsigned int pull_size;
4177 pull_size = min((unsigned int)4, skb->data_len);
4178 if (!__pskb_pull_tail(skb, pull_size)) {
4179 e_err("__pskb_pull_tail failed.\n");
4180 dev_kfree_skb_any(skb);
4181 return NETDEV_TX_OK;
4183 len = skb->len - skb->data_len;
4187 /* reserve a descriptor for the offload context */
4188 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
4192 count += TXD_USE_COUNT(len, max_txd_pwr);
4194 nr_frags = skb_shinfo(skb)->nr_frags;
4195 for (f = 0; f < nr_frags; f++)
4196 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
4199 if (adapter->hw.mac.tx_pkt_filtering)
4200 e1000_transfer_dhcp_info(adapter, skb);
4203 * need: count + 2 desc gap to keep tail from touching
4204 * head, otherwise try next time
4206 if (e1000_maybe_stop_tx(netdev, count + 2))
4207 return NETDEV_TX_BUSY;
4209 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
4210 tx_flags |= E1000_TX_FLAGS_VLAN;
4211 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
4214 first = tx_ring->next_to_use;
4216 tso = e1000_tso(adapter, skb);
4218 dev_kfree_skb_any(skb);
4219 return NETDEV_TX_OK;
4223 tx_flags |= E1000_TX_FLAGS_TSO;
4224 else if (e1000_tx_csum(adapter, skb))
4225 tx_flags |= E1000_TX_FLAGS_CSUM;
4228 * The old method was to assume an IPv4 packet by default if TSO was enabled.
4229 * 82571 hardware supports TSO capabilities for IPv6 as well, so we can
4230 * no longer assume; we must check.
4232 if (skb->protocol == htons(ETH_P_IP))
4233 tx_flags |= E1000_TX_FLAGS_IPV4;
4235 /* if count is 0 then mapping error has occurred */
4236 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
4238 e1000_tx_queue(adapter, tx_flags, count);
4239 /* Make sure there is space in the ring for the next send. */
4240 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
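/*
 * Re-checking here (rather than when the next frame arrives) stops the
 * queue while there may not be room for a worst-case frame: roughly one
 * descriptor per possible fragment plus a context descriptor and the
 * ring gap.
 */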
4243 dev_kfree_skb_any(skb);
4244 tx_ring->buffer_info[first].time_stamp = 0;
4245 tx_ring->next_to_use = first;
4248 return NETDEV_TX_OK;
4252 * e1000_tx_timeout - Respond to a Tx Hang
4253 * @netdev: network interface device structure
4255 static void e1000_tx_timeout(struct net_device *netdev)
4257 struct e1000_adapter *adapter = netdev_priv(netdev);
4259 /* Do the reset outside of interrupt context */
4260 adapter->tx_timeout_count++;
4261 schedule_work(&adapter->reset_task);
4264 static void e1000_reset_task(struct work_struct *work)
4266 struct e1000_adapter *adapter;
4267 adapter = container_of(work, struct e1000_adapter, reset_task);
4269 e1000e_reinit_locked(adapter);
4273 * e1000_get_stats - Get System Network Statistics
4274 * @netdev: network interface device structure
4276 * Returns the address of the device statistics structure.
4277 * The statistics are actually updated from the timer callback.
4279 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
4281 /* only return the current stats */
4282 return &netdev->stats;
4286 * e1000_change_mtu - Change the Maximum Transfer Unit
4287 * @netdev: network interface device structure
4288 * @new_mtu: new value for maximum frame size
4290 * Returns 0 on success, negative on failure
4292 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4294 struct e1000_adapter *adapter = netdev_priv(netdev);
4295 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4297 /* Jumbo frame support */
4298 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
4299 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
4300 e_err("Jumbo Frames not supported.\n");
4304 /* Supported frame sizes */
4305 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
4306 (max_frame > adapter->max_hw_frame_size)) {
4307 e_err("Unsupported MTU setting\n");
4311 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4313 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
4314 adapter->max_frame_size = max_frame;
4315 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4316 netdev->mtu = new_mtu;
4317 if (netif_running(netdev))
4318 e1000e_down(adapter);
4321 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
4322 * means we reserve 2 more; this pushes us to allocate from the next larger slab,
4324 * i.e. RXBUFFER_2048 --> size-4096 slab
4325 * However with the new *_jumbo_rx* routines, jumbo receives will use
4329 if (max_frame <= 2048)
4330 adapter->rx_buffer_len = 2048;
4332 adapter->rx_buffer_len = 4096;
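/*
 * So jumbo-capable buffer lengths are either 2048 or 4096 bytes here; the
 * standard-MTU case is trimmed back to an exact 1522-byte buffer just
 * below, when long-packet-enable is not needed.
 */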
4334 /* adjust allocation if LPE protects us, and we aren't using SBP */
4335 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
4336 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
4337 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
4340 if (netif_running(netdev))
4343 e1000e_reset(adapter);
4345 clear_bit(__E1000_RESETTING, &adapter->state);
4350 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4353 struct e1000_adapter *adapter = netdev_priv(netdev);
4354 struct mii_ioctl_data *data = if_mii(ifr);
4356 if (adapter->hw.phy.media_type != e1000_media_type_copper)
4361 data->phy_id = adapter->hw.phy.addr;
4364 e1000_phy_read_status(adapter);
4366 switch (data->reg_num & 0x1F) {
4368 data->val_out = adapter->phy_regs.bmcr;
4371 data->val_out = adapter->phy_regs.bmsr;
4374 data->val_out = (adapter->hw.phy.id >> 16);
4377 data->val_out = (adapter->hw.phy.id & 0xFFFF);
4380 data->val_out = adapter->phy_regs.advertise;
4383 data->val_out = adapter->phy_regs.lpa;
4386 data->val_out = adapter->phy_regs.expansion;
4389 data->val_out = adapter->phy_regs.ctrl1000;
4392 data->val_out = adapter->phy_regs.stat1000;
4395 data->val_out = adapter->phy_regs.estatus;
4408 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4414 return e1000_mii_ioctl(netdev, ifr, cmd);
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, mac_reg;
	u16 phy_reg;
	int retval = 0;

	/* copy MAC RARs to PHY RARs */
	for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		mac_reg = er32(RAL(i));
		e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
		mac_reg = er32(RAH(i));
		e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
		e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
	mac_reg = er32(RCTL);
	if (mac_reg & E1000_RCTL_UPE)
		phy_reg |= BM_RCTL_UPE;
	if (mac_reg & E1000_RCTL_MPE)
		phy_reg |= BM_RCTL_MPE;
	phy_reg &= ~(BM_RCTL_MO_MASK);
	if (mac_reg & E1000_RCTL_MO_3)
		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
				<< BM_RCTL_MO_SHIFT);
	if (mac_reg & E1000_RCTL_BAM)
		phy_reg |= BM_RCTL_BAM;
	if (mac_reg & E1000_RCTL_PMCF)
		phy_reg |= BM_RCTL_PMCF;
	mac_reg = er32(CTRL);
	if (mac_reg & E1000_CTRL_RFCE)
		phy_reg |= BM_RCTL_RFCE;
	e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);

	/* enable PHY wakeup in MAC register */
	ew32(WUFC, wufc);
	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);

	/* configure and enable PHY wakeup in PHY registers */
	e1e_wphy(&adapter->hw, BM_WUFC, wufc);
	e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	retval = hw->phy.ops.acquire(hw);
	if (retval) {
		e_err("Could not acquire PHY\n");
		return retval;
	}
	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
				  (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
	if (retval) {
		e_err("Could not read PHY page 769\n");
		goto out;
	}
	phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
	if (retval)
		e_err("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return retval;
}
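
/*
 * Editor's note with a small illustrative helper (hypothetical name, unused
 * by the driver): the RAR and MTA copy loops above just split each 32-bit
 * MAC CSR value into the two 16-bit halves the BM PHY wakeup register space
 * expects, low word first, high word second.
 */
static inline void e1000e_example_split_mac_reg(u32 mac_reg, u16 *lo, u16 *hi)
{
	*lo = (u16)(mac_reg & 0xFFFF);		/* e.g. written to BM_RAR_L(i) */
	*hi = (u16)((mac_reg >> 16) & 0xFFFF);	/* e.g. written to BM_RAR_M(i) */
}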
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
			    bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	/* Runtime suspend should only enable wakeup for link changes */
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000e_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (adapter->flags & FLAG_IS_ICH)
			e1000e_disable_gig_wol_ich8lan(&adapter->hw);

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
			/* enable wakeup by the PHY */
			retval = e1000_init_phy_wakeup(adapter, wufc);
			if (retval)
				return retval;
		} else {
			/* enable wakeup by the MAC */
			ew32(WUFC, wufc);
			ew32(WUC, E1000_WUC_PME_EN);
		}
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		*enable_wake = true;

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
{
	if (sleep && wake) {
		pci_prepare_to_sleep(pdev);
		return;
	}

	pci_wake_from_d3(pdev, wake);
	pci_set_power_state(pdev, PCI_D3hot);
}

static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
				    bool wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3.  To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
		u16 devctl;

		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
				      (devctl & ~PCI_EXP_DEVCTL_CERE));

		e1000_power_off(pdev, sleep, wake);

		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
	} else {
		e1000_power_off(pdev, sleep, wake);
	}
}
static void e1000e_disable_l1aspm(struct pci_dev *pdev)
{
	int pos;
	u16 val;

	/*
	 * 82573 workaround - disable L1 ASPM on mobile chipsets
	 *
	 * L1 ASPM on various mobile (ich7) chipsets do not behave properly
	 * resulting in lost data or garbage information on the pci-e link
	 * level. This could result in (false) bad EEPROM checksum errors,
	 * long ping times (up to 2s) or even a system freeze/hang.
	 *
	 * Unfortunately this feature saves about 1W power consumption when
	 * active.
	 */
	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
	if (val & 0x2) {
		dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
		val &= ~0x2;
		pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
	}
}
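
/*
 * Editor's note with an illustrative helper (hypothetical name, unused by
 * the driver): in the PCIe Link Control register the two low bits select
 * the ASPM policy, bit 0 for L0s and bit 1 for L1, so the workaround above
 * clears only bit 1 and leaves the L0s setting untouched.
 */
static inline bool e1000e_example_lnkctl_l1_enabled(u16 lnkctl)
{
	return (lnkctl & 0x2) != 0;	/* LNKCTL bit 1 == L1 ASPM enabled */
}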
#ifdef CONFIG_PM_OPS
static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
	return !!adapter->tx_ring->buffer_info;
}

static int __e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	e1000e_disable_l1aspm(pdev);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000e_power_up_phy(adapter);

	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data) {
			e_info("PHY Wakeup cause - %s\n",
				phy_data & E1000_WUS_EX ? "Unicast Packet" :
				phy_data & E1000_WUS_MC ? "Multicast Packet" :
				phy_data & E1000_WUS_BC ? "Broadcast Packet" :
				phy_data & E1000_WUS_MAG ? "Magic Packet" :
				phy_data & E1000_WUS_LNKC ? "Link Status Change" :
				"other");
		}
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);
		if (wus) {
			e_info("MAC Wakeup cause - %s\n",
				wus & E1000_WUS_EX ? "Unicast Packet" :
				wus & E1000_WUS_MC ? "Multicast Packet" :
				wus & E1000_WUS_BC ? "Broadcast Packet" :
				wus & E1000_WUS_MAG ? "Magic Packet" :
				wus & E1000_WUS_LNKC ? "Link Status Change" :
				"other");
		}
		ew32(WUS, ~0);
	}

	e1000e_reset(adapter);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int e1000_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake, false);
	if (!retval)
		e1000_complete_shutdown(pdev, true, wake);

	return retval;
}

static int e1000_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter))
		adapter->idle_check = true;

	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter)) {
		bool wake;

		__e1000_shutdown(pdev, &wake, true);
	}

	return 0;
}

static int e1000_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return 0;

	if (adapter->idle_check) {
		adapter->idle_check = false;
		if (!e1000e_has_link(adapter))
			pm_schedule_suspend(dev, MSEC_PER_SEC);
	}

	return -EBUSY;
}

static int e1000_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return 0;

	adapter->idle_check = !dev->power.runtime_auto;
	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM_OPS */
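
/*
 * Editor's illustrative note, not part of the original driver: the runtime
 * PM callbacks above are driven by the device's usage count.  Code that
 * needs the NIC awake brackets the access with a get/put pair; once the
 * count drops to zero the PM core calls ->runtime_idle (e1000_idle here),
 * which may then schedule ->runtime_suspend.  A minimal, hypothetical
 * caller would look like:
 *
 *	pm_runtime_get_sync(&pdev->dev);	// resume the NIC if suspended
 *	... touch the hardware ...
 *	pm_runtime_put(&pdev->dev);		// allow idle/suspend again
 */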
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake = false;

	__e1000_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF)
		e1000_complete_shutdown(pdev, false, wake);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);

	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 **/
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	pci_ers_result_t result;

	e1000e_disable_l1aspm(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pdev->state_saved = true;
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 **/
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 pba_num;

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GB/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	e1000e_read_pba_num(hw, &pba_num);
	e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
	       hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
}

static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}

	ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
	if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
		/* ASPM enable */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected ASPM enabled in EEPROM\n");
	}
}
static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_multicast_list	= e1000_set_multi,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
};
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;

	static int cards_found;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	e1000e_disable_l1aspm(pdev);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
					pci_select_bars(pdev, IORESOURCE_MEM),
					e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;
	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = 1;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;
	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if (eeprom_data & E1000_WUC_PHY_WAKE)
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	if (pci_dev_run_wake(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}
	pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);

	return 0;
err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool down = test_bit(__E1000_DOWN, &adapter->state);

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);
	flush_scheduled_work();

	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);

	/* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev)) {
		pm_runtime_disable(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
	}
	pm_runtime_put_noidle(&pdev->dev);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
#ifdef CONFIG_PM_OPS
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
			   e1000_runtime_resume, e1000_idle)
};
#endif

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM_OPS
	.driver.pm = &e1000_pm_ops,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
		e1000e_driver_version);
	pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);