// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/suspend.h>

char e1000e_driver_name[] = "e1000e";

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
	[board_pch_lpt]		= &e1000_pch_lpt_info,
	[board_pch_spt]		= &e1000_pch_spt_info,
	[board_pch_cnp]		= &e1000_pch_cnp_info,
	[board_pch_tgp]		= &e1000_pch_tgp_info,
	[board_pch_adp]		= &e1000_pch_adp_info,
};
struct e1000_reg_info {
	u32 ofs;
	char *name;
};

static const struct e1000_reg_info e1000_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};
/**
 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
 * @hw: pointer to the HW structure
 *
 * When updating the MAC CSR registers, the Manageability Engine (ME) could
 * be accessing the registers at the same time.  Normally, this is handled in
 * h/w by an arbiter but on some parts there is a bug that acknowledges Host
 * accesses later than it should which could result in the register having an
 * incorrect value.  Workaround this by checking the FWSM register which has
 * bit 24 set while ME is accessing MAC CSR registers, wait if it is set and
 * try again a number of times.
 **/
static void __ew32_prepare(struct e1000_hw *hw)
{
	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;

	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
		udelay(50);
}

void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
		__ew32_prepare(hw);

	writel(val, hw->hw_addr + reg);
}

/**
 * e1000_regdump - register printout routine
 * @hw: pointer to the HW structure
 * @reginfo: pointer to the register info table
 **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}

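/**
 * e1000e_dump_ps_pages - hex-dump the contents of packet-split pages
 * @adapter: board private structure
 * @bi: buffer_info entry whose ps_pages are dumped to the log
 **/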
static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
				 struct e1000_buffer *bi)
{
	int i;
	struct e1000_ps_page *ps_page;

	for (i = 0; i < adapter->rx_ps_pages; i++) {
		ps_page = &bi->ps_pages[i];

		if (ps_page->page) {
			pr_info("packet dump for ps_page %d:\n", i);
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, page_address(ps_page->page),
				       PAGE_SIZE, true);
		}
	}
}

/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 * @adapter: board private structure
 **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;
	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start\n");
		pr_info("%-15s %016lX %016lX\n", netdev->name,
			netdev->state, dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}
	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(unsigned long long)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS    | Status |  CMD  |  CSO  |    Length    |
	 *   +--------------------------------------------------------------+
	 *   63       48 47       36 35    32 31   24 23   16 15            0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47     40 39 36 35 32 31  24 23  20 19               0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->skb)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, buffer_info->skb->data,
				       buffer_info->skb->len, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]  %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]  %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);
			}

			if (netif_msg_pktdata(adapter))
				e1000e_dump_ps_pages(adapter,
						     buffer_info);
		}
		break;
	default:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |              RSS Hash                 |        |     |
		 * 0 +-------------------+  Rsvd   |   Reserved   | MRQ RSS |
		 *   | Packet   | IP     |         |              |  Type   |
		 *   | Checksum | Ident  |         |              |         |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]  %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]  %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);
			}

			if (netif_msg_pktdata(adapter) &&
			    buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS, 16,
					       1,
					       buffer_info->skb->data,
					       adapter->rx_buffer_len,
					       true);
		}
	}
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 * @ring: pointer to ring struct to perform calculation on
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

/**
 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
 * @adapter: board private structure
 * @hwtstamps: time stamp structure to update
 * @systim: unsigned 64bit system time value.
 *
 * Convert the system time value stored in the RX/TXSTMP registers into a
 * hwtstamp which can be used by the upper level time stamping functions.
 *
 * The 'systim_lock' spinlock is used to protect the consistency of the
 * system time value. This is needed because reading the 64 bit time
 * value involves reading two 32 bit registers. The first read latches the
 * value.
 **/
static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
				      struct skb_shared_hwtstamps *hwtstamps,
				      u64 systim)
{
	u64 ns;
	unsigned long flags;

	spin_lock_irqsave(&adapter->systim_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, systim);
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
			       struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rxstmp;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
	    !(status & E1000_RXDEXT_STATERR_TST) ||
	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	/* The Rx time stamp registers contain the time stamp.  No other
	 * received packet will be time stamped until the Rx time stamp
	 * registers are read.  Because only one packet can be time stamped
	 * at a time, the register values must belong to this packet and
	 * therefore none of the other additional attributes need to be
	 * adjusted.
	 */
	rxstmp = (u64)er32(RXSTMPL);
	rxstmp |= (u64)er32(RXSTMPH) << 32;
	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

	adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u32 staterr, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	e1000e_rx_hwtstamp(adapter, staterr, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	if (staterr & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}

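/**
 * e1000e_update_rdt_wa - write Rx tail with ME arbiter workaround
 * @rx_ring: Rx descriptor ring
 * @i: new tail value to write
 *
 * On parts with the PCIm-to-PCI arbiter bug, only write RDT once the ME
 * engine is idle; if the written tail does not read back, ME has corrupted
 * the register, so disable receives and schedule a reset.
 **/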
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	__ew32_prepare(hw);
	writel(i, rx_ring->tail);

	if (unlikely(i != readl(rx_ring->tail))) {
		u32 rctl = er32(RCTL);

		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

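/**
 * e1000e_update_tdt_wa - write Tx tail with ME arbiter workaround
 * @tx_ring: Tx descriptor ring
 * @i: new tail value to write
 *
 * Same workaround as for RDT: if the written TDT does not read back,
 * disable transmits and schedule a reset.
 **/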
static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	__ew32_prepare(hw);
	writel(i, tx_ring->tail);

	if (unlikely(i != readl(tx_ring->tail))) {
		u32 tctl = er32(TCTL);

		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number to reallocate
 * @gfp: flags for allocation
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number to reallocate
 * @gfp: flags for allocation
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/* Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
						  gfp);
		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags for allocation
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16;	/* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}

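/**
 * e1000_rx_hash - record the RSS hash reported by the Rx descriptor
 * @netdev: pointer to netdev struct
 * @rss: RSS hash field as written by hardware
 * @skb: socket buffer to stamp with the hash
 **/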
static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter for indicating completed work
 * @work_to_do: how many packets we can clean
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_buffer_len, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			/* If configured to store CRC, don't subtract FCS,
			 * but keep the FCS bytes out of the total_rx_bytes
			 * counter
			 */
			if (netdev->features & NETIF_F_RXFCS)
				total_rx_bytes -= 4;
			else
				length -= 4;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    napi_alloc_skb(&adapter->napi, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

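/**
 * e1000_put_txbuf - unmap and free an already-transmitted buffer
 * @tx_ring: Tx descriptor ring
 * @buffer_info: buffer to clean up
 * @drop: true when the skb is dropped rather than consumed
 **/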
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info,
			    bool drop)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		if (drop)
			dev_kfree_skb_any(buffer_info->skb);
		else
			dev_consume_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

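/**
 * e1000_print_hw_hang - check and report a suspected Tx unit hang
 * @work: pointer to work struct
 *
 * First flush any pending Tx descriptor writebacks to rule out a false
 * positive, then dump ring, PHY and PCI state to the log and stop the
 * queue if the hang is real.
 **/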
static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* May be block on write-back, flush and detect again
		 * flush pending descriptor writebacks to memory
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		/* Due to rare timing issues, write to TIDV again to ensure
		 * the write is successful
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	adapter->tx_hang_recheck = false;

	if (er32(TDH(0)) == er32(TDT(0))) {
		e_dbg("false hang detected, ignoring\n");
		return;
	}

	/* Real hang detected */
	netif_stop_queue(netdev);

	e1e_rphy(hw, MII_BMSR, &phy_status);
	e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
	e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
	      tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
	      eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
	      phy_status, phy_1000t_status, phy_ext_status, pci_status);

	e1000e_dump(adapter);

	/* Suggest workaround for known h/w issue */
	if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
		e_err("Try turning off Tx pause (flow control) via ethtool\n");
}

/**
 * e1000e_tx_hwtstamp_work - check for Tx time stamp
 * @work: pointer to work struct
 *
 * This work function polls the TSYNCTXCTL valid bit to determine when a
 * timestamp has been taken for the current stored skb.  The timestamp must
 * be for this skb because only one such packet is allowed in the queue.
 **/
static void e1000e_tx_hwtstamp_work(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
						     tx_hwtstamp_work);
	struct e1000_hw *hw = &adapter->hw;

	if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
		struct sk_buff *skb = adapter->tx_hwtstamp_skb;
		struct skb_shared_hwtstamps shhwtstamps;
		u64 txstmp;

		txstmp = er32(TXSTMPL);
		txstmp |= (u64)er32(TXSTMPH) << 32;

		e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);

		/* Clear the global tx_hwtstamp_skb pointer and force writes
		 * prior to notifying the stack of a Tx timestamp.
		 */
		adapter->tx_hwtstamp_skb = NULL;
		wmb();		/* force write prior to skb_tstamp_tx */

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_consume_skb_any(skb);
	} else if (time_after(jiffies, adapter->tx_hwtstamp_start
			      + adapter->tx_timeout_factor * HZ)) {
		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
		adapter->tx_hwtstamp_skb = NULL;
		adapter->tx_hwtstamp_timeouts++;
		e_warn("clearing Tx timestamp hang\n");
	} else {
		/* reschedule to check later */
		schedule_work(&adapter->tx_hwtstamp_work);
	}
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;

		dma_rmb();	/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}

			e1000_put_txbuf(tx_ring, buffer_info, false);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter for indicating completed work
 * @work_to_do: how many packets we can clean
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
				  int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		dma_rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/* this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/* page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/* there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
					if (!(netdev->features & NETIF_F_RXFCS))
						l1 -= 4;
				}

				skb_put(skb, l1);
				goto copydone;
			}	/* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			if (!(netdev->features & NETIF_F_RXFCS))
				pskb_trim(skb, skb->len - 4);
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

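/**
 * e1000_consume_page - take ownership of a page used for receive data
 * @bi: buffer that owned the page
 * @skb: skb the page's data was added to
 * @length: number of bytes of packet data in the page
 **/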
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter for indicating completed work
 * @work_to_do: how many packets we can clean
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
				     int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct skb_shared_info *shinfo;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			      !(netdev->features & NETIF_F_RXALL)))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}
#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				shinfo = skb_shinfo(rxtop);
				skb_fill_page_desc(rxtop, shinfo->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				shinfo = skb_shinfo(rxtop);
				skb_fill_page_desc(rxtop, shinfo->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE, DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
}

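/**
 * e1000e_downshift_workaround - gig speed drop workaround task
 * @work: pointer to work struct
 *
 * Runs the ICH8 gigabit downshift workaround outside of interrupt
 * context; does nothing if the interface is already going down.
 **/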
static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/* read ICR disables interrupts using IAM */
	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/* 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);

			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* Reset on uncorrectable ECC error */
	if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
		u32 pbeccsts = er32(PBECCSTS);

		adapter->corr_errors +=
		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
		adapter->uncorr_errors +=
		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

		/* Do the reset outside of interrupt context */
		schedule_work(&adapter->reset_task);

		/* return immediately since reset is imminent */
		return IRQ_HANDLED;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;	/* Not our interrupt */

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/* Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/* 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* Reset on uncorrectable ECC error */
	if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
		u32 pbeccsts = er32(PBECCSTS);

		adapter->corr_errors +=
		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
		adapter->uncorr_errors +=
		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

		/* Do the reset outside of interrupt context */
		schedule_work(&adapter->reset_task);

		/* return immediately since reset is imminent */
		return IRQ_HANDLED;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

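/**
 * e1000_msix_other - MSI-X "other causes" interrupt handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/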
static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);

	return IRQ_HANDLED;
}

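/**
 * e1000_intr_msix_tx - MSI-X Tx queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/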
static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, adapter->tx_ring->ims_val);

	return IRQ_HANDLED;
}

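/**
 * e1000_intr_msix_rx - MSI-X Rx queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/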
static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *rx_ring = adapter->rx_ring;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (rx_ring->set_itr) {
		u32 itr = rx_ring->itr_val ?
			  1000000000 / (rx_ring->itr_val * 256) : 0;

		writel(itr, rx_ring->itr_register);
		rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

/**
 * e1000_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);

		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
	else
		writel(1, rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       tx_ring->itr_register);
	else
		writel(1, tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= BIT(31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME;
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}

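/**
 * e1000e_reset_interrupt_capability - release MSI-X/MSI resources
 * @adapter: board private structure
 **/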
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}

/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
							sizeof(struct
							       msix_entry),
							GFP_KERNEL);
			if (adapter->msix_entries) {
				struct e1000_adapter *a = adapter;

				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix_range(a->pdev,
							    a->msix_entries,
							    a->num_vectors,
							    a->num_vectors);
				if (err > 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		fallthrough;
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
		}
		fallthrough;
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}

2104 * e1000_request_msix - Initialize MSI-X interrupts
2105 * @adapter: board private structure
2107 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2108 * kernel.
2110 static int e1000_request_msix(struct e1000_adapter *adapter)
2112 struct net_device *netdev = adapter->netdev;
2113 int err = 0, vector = 0;
2115 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2116 snprintf(adapter->rx_ring->name,
2117 sizeof(adapter->rx_ring->name) - 1,
2118 "%.14s-rx-0", netdev->name);
2119 else
2120 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2121 err = request_irq(adapter->msix_entries[vector].vector,
2122 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
2126 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
2127 E1000_EITR_82574(vector);
2128 adapter->rx_ring->itr_val = adapter->itr;
2129 vector++;
2131 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2132 snprintf(adapter->tx_ring->name,
2133 sizeof(adapter->tx_ring->name) - 1,
2134 "%.14s-tx-0", netdev->name);
2135 else
2136 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2137 err = request_irq(adapter->msix_entries[vector].vector,
2138 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2142 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2143 E1000_EITR_82574(vector);
2144 adapter->tx_ring->itr_val = adapter->itr;
2145 vector++;
2147 err = request_irq(adapter->msix_entries[vector].vector,
2148 e1000_msix_other, 0, netdev->name, netdev);
2152 e1000_configure_msix(adapter);
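/* Example of the vector naming above: for a netdev named "eth0" the
 * three lines in /proc/interrupts read "eth0-rx-0", "eth0-tx-0" and
 * plain "eth0" for the "other causes" vector. The IFNAMSIZ - 5 check
 * keeps the "-rx-0"/"-tx-0" suffixes from truncating; names that are
 * too long fall back to the bare interface name for all vectors.
 */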
2158 * e1000_request_irq - initialize interrupts
2159 * @adapter: board private structure
2161 * Attempts to configure interrupts using the best available
2162 * capabilities of the hardware and kernel.
2164 static int e1000_request_irq(struct e1000_adapter *adapter)
2166 struct net_device *netdev = adapter->netdev;
2169 if (adapter->msix_entries) {
2170 err = e1000_request_msix(adapter);
2173 /* fall back to MSI */
2174 e1000e_reset_interrupt_capability(adapter);
2175 adapter->int_mode = E1000E_INT_MODE_MSI;
2176 e1000e_set_interrupt_capability(adapter);
2178 if (adapter->flags & FLAG_MSI_ENABLED) {
2179 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
2180 netdev->name, netdev);
2184 /* fall back to legacy interrupt */
2185 e1000e_reset_interrupt_capability(adapter);
2186 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2189 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
2190 netdev->name, netdev);
2192 e_err("Unable to allocate interrupt. Error: %d\n", err);
2197 static void e1000_free_irq(struct e1000_adapter *adapter)
2199 struct net_device *netdev = adapter->netdev;
2201 if (adapter->msix_entries) {
2202 int vector = 0;
2204 free_irq(adapter->msix_entries[vector].vector, netdev);
2205 vector++;
2207 free_irq(adapter->msix_entries[vector].vector, netdev);
2208 vector++;
2210 /* Other Causes interrupt vector */
2211 free_irq(adapter->msix_entries[vector].vector, netdev);
2215 free_irq(adapter->pdev->irq, netdev);
2219 * e1000_irq_disable - Mask off interrupt generation on the NIC
2220 * @adapter: board private structure
2222 static void e1000_irq_disable(struct e1000_adapter *adapter)
2224 struct e1000_hw *hw = &adapter->hw;
2227 if (adapter->msix_entries)
2228 ew32(EIAC_82574, 0);
2231 if (adapter->msix_entries) {
2234 for (i = 0; i < adapter->num_vectors; i++)
2235 synchronize_irq(adapter->msix_entries[i].vector);
2237 synchronize_irq(adapter->pdev->irq);
2242 * e1000_irq_enable - Enable default interrupt generation settings
2243 * @adapter: board private structure
2245 static void e1000_irq_enable(struct e1000_adapter *adapter)
2247 struct e1000_hw *hw = &adapter->hw;
2249 if (adapter->msix_entries) {
2250 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2251 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
2253 } else if (hw->mac.type >= e1000_pch_lpt) {
2254 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
2256 ew32(IMS, IMS_ENABLE_MASK);
2262 * e1000e_get_hw_control - get control of the h/w from f/w
2263 * @adapter: address of board private structure
2265 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2266 * For ASF and Pass Through versions of f/w this means that
2267 * the driver is loaded. For AMT version (only with 82573)
2268 * of the f/w this means that the network i/f is open.
2270 void e1000e_get_hw_control(struct e1000_adapter *adapter)
2272 struct e1000_hw *hw = &adapter->hw;
2276 /* Let firmware know the driver has taken over */
2277 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2279 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2280 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2281 ctrl_ext = er32(CTRL_EXT);
2282 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2287 * e1000e_release_hw_control - release control of the h/w to f/w
2288 * @adapter: address of board private structure
2290 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2291 * For ASF and Pass Through versions of f/w this means that the
2292 * driver is no longer loaded. For AMT version (only with 82573)
2293 * of the f/w this means that the network i/f is closed.
2296 void e1000e_release_hw_control(struct e1000_adapter *adapter)
2298 struct e1000_hw *hw = &adapter->hw;
2302 /* Let firmware take over control of h/w */
2303 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2305 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2306 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2307 ctrl_ext = er32(CTRL_EXT);
2308 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2313 * e1000_alloc_ring_dma - allocate memory for a ring structure
2314 * @adapter: board private structure
2315 * @ring: ring struct for which to allocate dma
2317 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2318 struct e1000_ring *ring)
2320 struct pci_dev *pdev = adapter->pdev;
2322 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2331 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2332 * @tx_ring: Tx descriptor ring
2334 * Return 0 on success, negative on failure
2336 int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2338 struct e1000_adapter *adapter = tx_ring->adapter;
2339 int err = -ENOMEM, size;
2341 size = sizeof(struct e1000_buffer) * tx_ring->count;
2342 tx_ring->buffer_info = vzalloc(size);
2343 if (!tx_ring->buffer_info)
2346 /* round up to nearest 4K */
2347 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2348 tx_ring->size = ALIGN(tx_ring->size, 4096);
2350 err = e1000_alloc_ring_dma(adapter, tx_ring);
2354 tx_ring->next_to_use = 0;
2355 tx_ring->next_to_clean = 0;
2359 vfree(tx_ring->buffer_info);
2360 e_err("Unable to allocate memory for the transmit descriptor ring\n");
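/* Worked example of the sizing above: with the default 256 descriptors
 * and the 16-byte legacy Tx descriptor, tx_ring->size is 256 * 16 =
 * 4096 bytes, already 4K aligned; the 4096-descriptor maximum needs
 * 64KB of coherent DMA memory.
 */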
2365 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2366 * @rx_ring: Rx descriptor ring
2368 * Returns 0 on success, negative on failure
2370 int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2372 struct e1000_adapter *adapter = rx_ring->adapter;
2373 struct e1000_buffer *buffer_info;
2374 int i, size, desc_len, err = -ENOMEM;
2376 size = sizeof(struct e1000_buffer) * rx_ring->count;
2377 rx_ring->buffer_info = vzalloc(size);
2378 if (!rx_ring->buffer_info)
2381 for (i = 0; i < rx_ring->count; i++) {
2382 buffer_info = &rx_ring->buffer_info[i];
2383 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2384 sizeof(struct e1000_ps_page),
2386 if (!buffer_info->ps_pages)
2390 desc_len = sizeof(union e1000_rx_desc_packet_split);
2392 /* Round up to nearest 4K */
2393 rx_ring->size = rx_ring->count * desc_len;
2394 rx_ring->size = ALIGN(rx_ring->size, 4096);
2396 err = e1000_alloc_ring_dma(adapter, rx_ring);
2400 rx_ring->next_to_clean = 0;
2401 rx_ring->next_to_use = 0;
2402 rx_ring->rx_skb_top = NULL;
2407 for (i = 0; i < rx_ring->count; i++) {
2408 buffer_info = &rx_ring->buffer_info[i];
2409 kfree(buffer_info->ps_pages);
2412 vfree(rx_ring->buffer_info);
2413 e_err("Unable to allocate memory for the receive descriptor ring\n");
2418 * e1000_clean_tx_ring - Free Tx Buffers
2419 * @tx_ring: Tx descriptor ring
2421 static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2423 struct e1000_adapter *adapter = tx_ring->adapter;
2424 struct e1000_buffer *buffer_info;
2428 for (i = 0; i < tx_ring->count; i++) {
2429 buffer_info = &tx_ring->buffer_info[i];
2430 e1000_put_txbuf(tx_ring, buffer_info, false);
2433 netdev_reset_queue(adapter->netdev);
2434 size = sizeof(struct e1000_buffer) * tx_ring->count;
2435 memset(tx_ring->buffer_info, 0, size);
2437 memset(tx_ring->desc, 0, tx_ring->size);
2439 tx_ring->next_to_use = 0;
2440 tx_ring->next_to_clean = 0;
2444 * e1000e_free_tx_resources - Free Tx Resources per Queue
2445 * @tx_ring: Tx descriptor ring
2447 * Free all transmit software resources
2449 void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2451 struct e1000_adapter *adapter = tx_ring->adapter;
2452 struct pci_dev *pdev = adapter->pdev;
2454 e1000_clean_tx_ring(tx_ring);
2456 vfree(tx_ring->buffer_info);
2457 tx_ring->buffer_info = NULL;
2459 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2461 tx_ring->desc = NULL;
2465 * e1000e_free_rx_resources - Free Rx Resources
2466 * @rx_ring: Rx descriptor ring
2468 * Free all receive software resources
2470 void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2472 struct e1000_adapter *adapter = rx_ring->adapter;
2473 struct pci_dev *pdev = adapter->pdev;
2476 e1000_clean_rx_ring(rx_ring);
2478 for (i = 0; i < rx_ring->count; i++)
2479 kfree(rx_ring->buffer_info[i].ps_pages);
2481 vfree(rx_ring->buffer_info);
2482 rx_ring->buffer_info = NULL;
2484 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2486 rx_ring->desc = NULL;
2490 * e1000_update_itr - update the dynamic ITR value based on statistics
2491 * @itr_setting: current adapter->itr
2492 * @packets: the number of packets during this measurement interval
2493 * @bytes: the number of bytes during this measurement interval
2495 * Stores a new ITR value based on packets and byte
2496 * counts during the last interrupt. The advantage of per interrupt
2497 * computation is faster updates and more accurate ITR for the current
2498 * traffic pattern. Constants in this function were computed
2499 * based on theoretical maximum wire speed, and thresholds were set based
2500 * on testing data as well as on attempts to minimize response time
2501 * while increasing bulk throughput. This functionality is controlled
2502 * by the InterruptThrottleRate module parameter.
2504 static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2506 unsigned int retval = itr_setting;
2511 switch (itr_setting) {
2512 case lowest_latency:
2513 /* handle TSO and jumbo frames */
2514 if (bytes / packets > 8000)
2515 retval = bulk_latency;
2516 else if ((packets < 5) && (bytes > 512))
2517 retval = low_latency;
2519 case low_latency: /* 50 usec aka 20000 ints/s */
2520 if (bytes > 10000) {
2521 /* this if handles the TSO accounting */
2522 if (bytes / packets > 8000)
2523 retval = bulk_latency;
2524 else if ((packets < 10) || ((bytes / packets) > 1200))
2525 retval = bulk_latency;
2526 else if (packets > 35)
2527 retval = lowest_latency;
2528 } else if (bytes / packets > 2000) {
2529 retval = bulk_latency;
2530 } else if (packets <= 2 && bytes < 512) {
2531 retval = lowest_latency;
2534 case bulk_latency: /* 250 usec aka 4000 ints/s */
2535 if (bytes > 25000) {
2537 retval = low_latency;
2538 } else if (bytes < 6000) {
2539 retval = low_latency;
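/* Two worked classifications for the heuristic above, assuming a
 * single measurement interval in the low_latency state:
 * - 40 packets / 30000 bytes: bytes > 10000, bytes/packets = 750 and
 *   packets > 35, so the state rises to lowest_latency.
 * - 4 packets / 24000 bytes: bytes/packets = 6000, i.e. large frames
 *   and few of them, so the state drops to bulk_latency.
 */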
2547 static void e1000_set_itr(struct e1000_adapter *adapter)
2550 u32 new_itr = adapter->itr;
2552 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2553 if (adapter->link_speed != SPEED_1000) {
2559 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2564 adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
2565 adapter->total_tx_packets,
2566 adapter->total_tx_bytes);
2567 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2568 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2569 adapter->tx_itr = low_latency;
2571 adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
2572 adapter->total_rx_packets,
2573 adapter->total_rx_bytes);
2574 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2575 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2576 adapter->rx_itr = low_latency;
2578 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2580 /* counts and packets in update_itr are dependent on these numbers */
2581 switch (current_itr) {
2582 case lowest_latency:
2586 new_itr = 20000; /* aka hwitr = ~200 */
2596 if (new_itr != adapter->itr) {
2597 /* this attempts to bias the interrupt rate towards Bulk
2598 * by adding intermediate steps when the interrupt rate is
2599 * increasing
2600 */
2601 new_itr = new_itr > adapter->itr ?
2602 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
2603 adapter->itr = new_itr;
2604 adapter->rx_ring->itr_val = new_itr;
2605 if (adapter->msix_entries)
2606 adapter->rx_ring->set_itr = 1;
2608 e1000e_write_itr(adapter, new_itr);
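/* Example of the damped step above: moving from adapter->itr = 4000 to
 * a target of 20000 ints/s programs min(4000 + (20000 >> 2), 20000) =
 * 9000 on this pass, so the rate climbs toward the target over several
 * intervals; decreases take effect immediately.
 */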
2613 * e1000e_write_itr - write the ITR value to the appropriate registers
2614 * @adapter: address of board private structure
2615 * @itr: new ITR value to program
2617 * e1000e_write_itr determines if the adapter is in MSI-X mode
2618 * and, if so, writes the EITR registers with the ITR value.
2619 * Otherwise, it writes the ITR value into the ITR register.
2621 void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2623 struct e1000_hw *hw = &adapter->hw;
2624 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2626 if (adapter->msix_entries) {
2629 for (vector = 0; vector < adapter->num_vectors; vector++)
2630 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
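/* The 1000000000 / (itr * 256) conversion reflects the EITR interval
 * granularity of 256 ns: e.g. itr = 20000 ints/s becomes
 * 1000000000 / (20000 * 256) = ~195 register units, while itr = 0
 * writes 0 and disables throttling altogether.
 */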
2637 * e1000_alloc_queues - Allocate memory for all rings
2638 * @adapter: board private structure to initialize
2640 static int e1000_alloc_queues(struct e1000_adapter *adapter)
2642 int size = sizeof(struct e1000_ring);
2644 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2645 if (!adapter->tx_ring)
2647 adapter->tx_ring->count = adapter->tx_ring_count;
2648 adapter->tx_ring->adapter = adapter;
2650 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2651 if (!adapter->rx_ring)
2653 adapter->rx_ring->count = adapter->rx_ring_count;
2654 adapter->rx_ring->adapter = adapter;
2658 e_err("Unable to allocate memory for queues\n");
2659 kfree(adapter->rx_ring);
2660 kfree(adapter->tx_ring);
2665 * e1000e_poll - NAPI Rx polling callback
2666 * @napi: struct associated with this polling callback
2667 * @budget: number of packets driver is allowed to process this poll
2669 static int e1000e_poll(struct napi_struct *napi, int budget)
2671 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2672 napi);
2673 struct e1000_hw *hw = &adapter->hw;
2674 struct net_device *poll_dev = adapter->netdev;
2675 int tx_cleaned = 1, work_done = 0;
2677 adapter = netdev_priv(poll_dev);
2679 if (!adapter->msix_entries ||
2680 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2681 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2683 adapter->clean_rx(adapter->rx_ring, &work_done, budget);
2685 if (!tx_cleaned || work_done == budget)
2686 return budget;
2688 /* Exit the polling mode, but don't re-enable interrupts if stack might
2689 * poll us due to busy-polling
2691 if (likely(napi_complete_done(napi, work_done))) {
2692 if (adapter->itr_setting & 3)
2693 e1000_set_itr(adapter);
2694 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2695 if (adapter->msix_entries)
2696 ew32(IMS, adapter->rx_ring->ims_val);
2698 e1000_irq_enable(adapter);
2705 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2706 __always_unused __be16 proto, u16 vid)
2708 struct e1000_adapter *adapter = netdev_priv(netdev);
2709 struct e1000_hw *hw = &adapter->hw;
2712 /* don't update vlan cookie if already programmed */
2713 if ((adapter->hw.mng_cookie.status &
2714 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2715 (vid == adapter->mng_vlan_id))
2718 /* add VID to filter table */
2719 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2720 index = (vid >> 5) & 0x7F;
2721 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2722 vfta |= BIT((vid & 0x1F));
2723 hw->mac.ops.write_vfta(hw, index, vfta);
2726 set_bit(vid, adapter->active_vlans);
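/* VFTA indexing example: the 4096 possible VIDs map onto 128 32-bit
 * filter registers. For vid = 100, index = (100 >> 5) & 0x7F = 3 and
 * the bit is 100 & 0x1F = 4, so the code above sets BIT(4) of VFTA[3].
 */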
2731 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
2732 __always_unused __be16 proto, u16 vid)
2734 struct e1000_adapter *adapter = netdev_priv(netdev);
2735 struct e1000_hw *hw = &adapter->hw;
2738 if ((adapter->hw.mng_cookie.status &
2739 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2740 (vid == adapter->mng_vlan_id)) {
2741 /* release control to f/w */
2742 e1000e_release_hw_control(adapter);
2746 /* remove VID from filter table */
2747 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2748 index = (vid >> 5) & 0x7F;
2749 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2750 vfta &= ~BIT((vid & 0x1F));
2751 hw->mac.ops.write_vfta(hw, index, vfta);
2754 clear_bit(vid, adapter->active_vlans);
2760 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2761 * @adapter: board private structure to initialize
2763 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2765 struct net_device *netdev = adapter->netdev;
2766 struct e1000_hw *hw = &adapter->hw;
2769 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2770 /* disable VLAN receive filtering */
2771 rctl = er32(RCTL);
2772 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2773 ew32(RCTL, rctl);
2775 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2776 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
2777 adapter->mng_vlan_id);
2778 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2784 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2785 * @adapter: board private structure to initialize
2787 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2789 struct e1000_hw *hw = &adapter->hw;
2792 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2793 /* enable VLAN receive filtering */
2794 rctl = er32(RCTL);
2795 rctl |= E1000_RCTL_VFE;
2796 rctl &= ~E1000_RCTL_CFIEN;
2797 ew32(RCTL, rctl);
2802 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2803 * @adapter: board private structure to initialize
2805 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2807 struct e1000_hw *hw = &adapter->hw;
2810 /* disable VLAN tag insert/strip */
2811 ctrl = er32(CTRL);
2812 ctrl &= ~E1000_CTRL_VME;
2813 ew32(CTRL, ctrl);
2817 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2818 * @adapter: board private structure to initialize
2820 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2822 struct e1000_hw *hw = &adapter->hw;
2825 /* enable VLAN tag insert/strip */
2826 ctrl = er32(CTRL);
2827 ctrl |= E1000_CTRL_VME;
2828 ew32(CTRL, ctrl);
2831 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2833 struct net_device *netdev = adapter->netdev;
2834 u16 vid = adapter->hw.mng_cookie.vlan_id;
2835 u16 old_vid = adapter->mng_vlan_id;
2837 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2838 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
2839 adapter->mng_vlan_id = vid;
2842 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2843 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
2846 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2850 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
2852 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2853 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2856 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2858 struct e1000_hw *hw = &adapter->hw;
2859 u32 manc, manc2h, mdef, i, j;
2861 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2866 /* enable receiving management packets to the host. This will probably
2867 * generate destination unreachable messages from the host OS, but
2868 * the packets will be handled on SMBUS
2870 manc |= E1000_MANC_EN_MNG2HOST;
2871 manc2h = er32(MANC2H);
2873 switch (hw->mac.type) {
2875 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2879 /* Check if IPMI pass-through decision filter already exists;
2880 * if so, enable it.
2881 */
2882 for (i = 0, j = 0; i < 8; i++) {
2883 mdef = er32(MDEF(i));
2885 /* Ignore filters with anything other than IPMI ports */
2886 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2889 /* Enable this decision filter in MANC2H */
2896 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2899 /* Create new decision filter in an empty filter */
2900 for (i = 0, j = 0; i < 8; i++)
2901 if (er32(MDEF(i)) == 0) {
2902 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2903 E1000_MDEF_PORT_664));
2910 e_warn("Unable to create IPMI pass-through filter\n");
2914 ew32(MANC2H, manc2h);
2919 * e1000_configure_tx - Configure Transmit Unit after Reset
2920 * @adapter: board private structure
2922 * Configure the Tx unit of the MAC after a reset.
2924 static void e1000_configure_tx(struct e1000_adapter *adapter)
2926 struct e1000_hw *hw = &adapter->hw;
2927 struct e1000_ring *tx_ring = adapter->tx_ring;
2929 u32 tdlen, tctl, tarc;
2931 /* Setup the HW Tx Head and Tail descriptor pointers */
2932 tdba = tx_ring->dma;
2933 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2934 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2935 ew32(TDBAH(0), (tdba >> 32));
2936 ew32(TDLEN(0), tdlen);
2939 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2940 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2942 writel(0, tx_ring->head);
2943 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2944 e1000e_update_tdt_wa(tx_ring, 0);
2946 writel(0, tx_ring->tail);
2948 /* Set the Tx Interrupt Delay register */
2949 ew32(TIDV, adapter->tx_int_delay);
2950 /* Tx irq moderation */
2951 ew32(TADV, adapter->tx_abs_int_delay);
2953 if (adapter->flags2 & FLAG2_DMA_BURST) {
2954 u32 txdctl = er32(TXDCTL(0));
2956 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2957 E1000_TXDCTL_WTHRESH);
2958 /* set up some performance related parameters to encourage the
2959 * hardware to use the bus more efficiently in bursts; depends
2960 * on tx_int_delay being enabled:
2961 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2962 * hthresh = 1 ==> prefetch when one or more available
2963 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2964 * BEWARE: this seems to work but should be considered first if
2965 * there are Tx hangs or other Tx related bugs
2967 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2968 ew32(TXDCTL(0), txdctl);
2970 /* erratum work around: set txdctl the same for both queues */
2971 ew32(TXDCTL(1), er32(TXDCTL(0)));
2973 /* Program the Transmit Control Register */
2975 tctl &= ~E1000_TCTL_CT;
2976 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2977 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2979 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2980 tarc = er32(TARC(0));
2981 /* set the speed mode bit, we'll clear it if we're not at
2982 * gigabit link later
2984 #define SPEED_MODE_BIT BIT(21)
2985 tarc |= SPEED_MODE_BIT;
2986 ew32(TARC(0), tarc);
2989 /* errata: program both queues to unweighted RR */
2990 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2991 tarc = er32(TARC(0));
2993 ew32(TARC(0), tarc);
2994 tarc = er32(TARC(1));
2996 ew32(TARC(1), tarc);
2999 /* Setup Transmit Descriptor Settings for eop descriptor */
3000 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
3002 /* only set IDE if we are delaying interrupts using the timers */
3003 if (adapter->tx_int_delay)
3004 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3006 /* enable Report Status bit */
3007 adapter->txd_cmd |= E1000_TXD_CMD_RS;
3011 hw->mac.ops.config_collision_dist(hw);
3013 /* SPT and KBL Si errata workaround to avoid data corruption */
3014 if (hw->mac.type == e1000_pch_spt) {
3017 reg_val = er32(IOSFPC);
3018 reg_val |= E1000_RCTL_RDMTS_HEX;
3019 ew32(IOSFPC, reg_val);
3021 reg_val = er32(TARC(0));
3022 /* SPT and KBL Si errata workaround to avoid Tx hang.
3023 * Dropping the number of outstanding requests from
3024 * 3 to 2 in order to avoid a buffer overrun.
3026 reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
3027 reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
3028 ew32(TARC(0), reg_val);
3032 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
3033 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
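/* PAGE_USE_COUNT is a round-up division by PAGE_SIZE: with 4K pages an
 * MTU of 9000 needs (9000 >> 12) + 1 = 3 pages, which is exactly the
 * "pages <= 3" bound used below to decide whether packet split is
 * worthwhile.
 */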
3036 * e1000_setup_rctl - configure the receive control registers
3037 * @adapter: Board private structure
3039 static void e1000_setup_rctl(struct e1000_adapter *adapter)
3041 struct e1000_hw *hw = &adapter->hw;
3045 /* Workaround Si errata on PCHx - configure jumbo frame flow.
3046 * If jumbo frames are not enabled, program the related MAC/PHY
3047 * registers back to their h/w defaults.
3048 */
3049 if (hw->mac.type >= e1000_pch2lan) {
3052 if (adapter->netdev->mtu > ETH_DATA_LEN)
3053 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
3055 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
3058 e_dbg("failed to enable|disable jumbo frame workaround mode\n");
3061 /* Program MC offset vector base */
3062 rctl = er32(RCTL);
3063 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3064 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
3065 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3066 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3068 /* Do not Store bad packets */
3069 rctl &= ~E1000_RCTL_SBP;
3071 /* Enable Long Packet receive */
3072 if (adapter->netdev->mtu <= ETH_DATA_LEN)
3073 rctl &= ~E1000_RCTL_LPE;
3074 else
3075 rctl |= E1000_RCTL_LPE;
3077 /* Some systems expect that the CRC is included in SMBUS traffic. The
3078 * hardware strips the CRC before sending to both SMBUS (BMC) and to
3079 * host memory when this is enabled
3081 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
3082 rctl |= E1000_RCTL_SECRC;
3084 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
3085 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
3088 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
3091 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
3093 e1e_rphy(hw, 22, &phy_data);
3095 phy_data |= BIT(14);
3096 e1e_wphy(hw, 0x10, 0x2823);
3097 e1e_wphy(hw, 0x11, 0x0003);
3098 e1e_wphy(hw, 22, phy_data);
3101 /* Setup buffer sizes */
3102 rctl &= ~E1000_RCTL_SZ_4096;
3103 rctl |= E1000_RCTL_BSEX;
3104 switch (adapter->rx_buffer_len) {
3107 rctl |= E1000_RCTL_SZ_2048;
3108 rctl &= ~E1000_RCTL_BSEX;
3111 rctl |= E1000_RCTL_SZ_4096;
3114 rctl |= E1000_RCTL_SZ_8192;
3117 rctl |= E1000_RCTL_SZ_16384;
3121 /* Enable Extended Status in all Receive Descriptors */
3122 rfctl = er32(RFCTL);
3123 rfctl |= E1000_RFCTL_EXTEN;
3126 /* 82571 and greater support packet-split where the protocol
3127 * header is placed in skb->data and the packet data is
3128 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
3129 * In the case of a non-split, skb->data is linearly filled,
3130 * followed by the page buffers. Therefore, skb->data is
3131 * sized to hold the largest protocol header.
3133 * allocations using alloc_page take too long for regular MTU
3134 * so only enable packet split for jumbo frames
3136 * Using pages when the page size is greater than 16k wastes
3137 * a lot of memory, since we allocate 3 pages at all times
3140 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
3141 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
3142 adapter->rx_ps_pages = pages;
3143 else
3144 adapter->rx_ps_pages = 0;
3146 if (adapter->rx_ps_pages) {
3149 /* Enable Packet split descriptors */
3150 rctl |= E1000_RCTL_DTYP_PS;
3152 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
3154 switch (adapter->rx_ps_pages) {
3156 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
3159 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
3162 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
3166 ew32(PSRCTL, psrctl);
3169 /* This is useful for sniffing bad packets. */
3170 if (adapter->netdev->features & NETIF_F_RXALL) {
3171 /* UPE and MPE will be handled by normal PROMISC logic
3172 * in e1000e_set_rx_mode
3174 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3175 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3176 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3178 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3179 E1000_RCTL_DPF | /* Allow filtered pause */
3180 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3181 /* Do not mess with E1000_CTRL_VME; it affects transmit as well,
3182 * and that breaks VLANs.
3186 ew32(RCTL, rctl);
3187 /* just started the receive unit, no need to restart */
3188 adapter->flags &= ~FLAG_RESTART_NOW;
3192 * e1000_configure_rx - Configure Receive Unit after Reset
3193 * @adapter: board private structure
3195 * Configure the Rx unit of the MAC after a reset.
3197 static void e1000_configure_rx(struct e1000_adapter *adapter)
3199 struct e1000_hw *hw = &adapter->hw;
3200 struct e1000_ring *rx_ring = adapter->rx_ring;
3202 u32 rdlen, rctl, rxcsum, ctrl_ext;
3204 if (adapter->rx_ps_pages) {
3205 /* this is a 32 byte descriptor */
3206 rdlen = rx_ring->count *
3207 sizeof(union e1000_rx_desc_packet_split);
3208 adapter->clean_rx = e1000_clean_rx_irq_ps;
3209 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3210 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3211 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3212 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3213 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3214 } else {
3215 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3216 adapter->clean_rx = e1000_clean_rx_irq;
3217 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3220 /* disable receives while setting up the descriptors */
3221 rctl = er32(RCTL);
3222 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3223 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3225 usleep_range(10000, 11000);
3227 if (adapter->flags2 & FLAG2_DMA_BURST) {
3228 /* set the writeback threshold (only takes effect if the RDTR
3229 * is set). set GRAN=1 and write back up to 0x4 worth, and
3230 * enable prefetching of 0x20 Rx descriptors
3236 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3237 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3240 /* set the Receive Delay Timer Register */
3241 ew32(RDTR, adapter->rx_int_delay);
3243 /* irq moderation */
3244 ew32(RADV, adapter->rx_abs_int_delay);
3245 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3246 e1000e_write_itr(adapter, adapter->itr);
3248 ctrl_ext = er32(CTRL_EXT);
3249 /* Auto-Mask interrupts upon ICR access */
3250 ctrl_ext |= E1000_CTRL_EXT_IAME;
3251 ew32(IAM, 0xffffffff);
3252 ew32(CTRL_EXT, ctrl_ext);
3255 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3256 * the Base and Length of the Rx Descriptor Ring
3258 rdba = rx_ring->dma;
3259 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3260 ew32(RDBAH(0), (rdba >> 32));
3261 ew32(RDLEN(0), rdlen);
3264 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3265 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3267 writel(0, rx_ring->head);
3268 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
3269 e1000e_update_rdt_wa(rx_ring, 0);
3271 writel(0, rx_ring->tail);
3273 /* Enable Receive Checksum Offload for TCP and UDP */
3274 rxcsum = er32(RXCSUM);
3275 if (adapter->netdev->features & NETIF_F_RXCSUM)
3276 rxcsum |= E1000_RXCSUM_TUOFL;
3277 else
3278 rxcsum &= ~E1000_RXCSUM_TUOFL;
3279 ew32(RXCSUM, rxcsum);
3281 /* With jumbo frames, excessive C-state transition latencies result
3282 * in dropped transactions.
3284 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3286 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
3287 adapter->max_frame_size) * 8 / 1000;
3289 if (adapter->flags & FLAG_IS_ICH) {
3290 u32 rxdctl = er32(RXDCTL(0));
3292 ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8));
3295 dev_info(&adapter->pdev->dev,
3296 "Some CPU C-states have been disabled in order to enable jumbo frames\n");
3297 cpu_latency_qos_update_request(&adapter->pm_qos_req, lat);
3298 } else {
3299 cpu_latency_qos_update_request(&adapter->pm_qos_req,
3300 PM_QOS_DEFAULT_VALUE);
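/* Worked example of the latency bound above: assuming a 32KB Rx packet
 * buffer allocation and a 9018-byte max frame,
 * (32 * 1024 - 9018) * 8 / 1000 = 190 us, roughly the time needed to
 * drain the remaining FIFO space at 1Gb/s; that becomes the CPU
 * C-state exit latency ceiling requested from PM QoS.
 */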
3303 /* Enable Receives */
3308 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3309 * @netdev: network interface device structure
3311 * Writes multicast address list to the MTA hash table.
3312 * Returns: -ENOMEM on failure
3313 * 0 on no addresses written
3314 * X on writing X addresses to MTA
3316 static int e1000e_write_mc_addr_list(struct net_device *netdev)
3318 struct e1000_adapter *adapter = netdev_priv(netdev);
3319 struct e1000_hw *hw = &adapter->hw;
3320 struct netdev_hw_addr *ha;
3324 if (netdev_mc_empty(netdev)) {
3325 /* nothing to program, so clear mc list */
3326 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3330 mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
3334 /* update_mc_addr_list expects a packed array of only addresses. */
3336 netdev_for_each_mc_addr(ha, netdev)
3337 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3339 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3342 return netdev_mc_count(netdev);
3346 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3347 * @netdev: network interface device structure
3349 * Writes unicast address list to the RAR table.
3350 * Returns: -ENOMEM on failure/insufficient address space
3351 * 0 on no addresses written
3352 * X on writing X addresses to the RAR table
3354 static int e1000e_write_uc_addr_list(struct net_device *netdev)
3356 struct e1000_adapter *adapter = netdev_priv(netdev);
3357 struct e1000_hw *hw = &adapter->hw;
3358 unsigned int rar_entries;
3361 rar_entries = hw->mac.ops.rar_get_count(hw);
3363 /* save a rar entry for our hardware address */
3366 /* save a rar entry for the LAA workaround */
3367 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3370 /* return ENOMEM indicating insufficient memory for addresses */
3371 if (netdev_uc_count(netdev) > rar_entries)
3374 if (!netdev_uc_empty(netdev) && rar_entries) {
3375 struct netdev_hw_addr *ha;
3377 /* write the addresses in reverse order to avoid write
3378 * combining
3379 */
3380 netdev_for_each_uc_addr(ha, netdev) {
3385 ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3392 /* zero out the remaining RAR entries not used above */
3393 for (; rar_entries > 0; rar_entries--) {
3394 ew32(RAH(rar_entries), 0);
3395 ew32(RAL(rar_entries), 0);
3403 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3404 * @netdev: network interface device structure
3406 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3407 * address list or the network interface flags are updated. This routine is
3408 * responsible for configuring the hardware for proper unicast, multicast,
3409 * promiscuous mode, and all-multi behavior.
3411 static void e1000e_set_rx_mode(struct net_device *netdev)
3413 struct e1000_adapter *adapter = netdev_priv(netdev);
3414 struct e1000_hw *hw = &adapter->hw;
3417 if (pm_runtime_suspended(netdev->dev.parent))
3420 /* Check for Promiscuous and All Multicast modes */
3423 /* clear the affected bits */
3424 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3426 if (netdev->flags & IFF_PROMISC) {
3427 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3428 /* Do not hardware filter VLANs in promisc mode */
3429 e1000e_vlan_filter_disable(adapter);
3433 if (netdev->flags & IFF_ALLMULTI) {
3434 rctl |= E1000_RCTL_MPE;
3436 /* Write addresses to the MTA; if the attempt fails
3437 * then we should just turn on promiscuous mode so
3438 * that we can at least receive multicast traffic
3440 count = e1000e_write_mc_addr_list(netdev);
3442 rctl |= E1000_RCTL_MPE;
3444 e1000e_vlan_filter_enable(adapter);
3445 /* Write addresses to available RAR registers, if there is not
3446 * sufficient space to store all the addresses then enable
3447 * unicast promiscuous mode
3449 count = e1000e_write_uc_addr_list(netdev);
3451 rctl |= E1000_RCTL_UPE;
3456 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3457 e1000e_vlan_strip_enable(adapter);
3459 e1000e_vlan_strip_disable(adapter);
3462 static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3464 struct e1000_hw *hw = &adapter->hw;
3469 netdev_rss_key_fill(rss_key, sizeof(rss_key));
3470 for (i = 0; i < 10; i++)
3471 ew32(RSSRK(i), rss_key[i]);
3473 /* Direct all traffic to queue 0 */
3474 for (i = 0; i < 32; i++)
3475 ew32(RETA(i), 0);
3477 /* Disable raw packet checksumming so that RSS hash is placed in
3478 * descriptor on writeback.
3480 rxcsum = er32(RXCSUM);
3481 rxcsum |= E1000_RXCSUM_PCSD;
3483 ew32(RXCSUM, rxcsum);
3485 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3486 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3487 E1000_MRQC_RSS_FIELD_IPV6 |
3488 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3489 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3495 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3496 * @adapter: board private structure
3497 * @timinca: pointer to returned time increment attributes
3499 * Get attributes for incrementing the System Time Register SYSTIML/H at
3500 * the default base frequency, and set the cyclecounter shift value.
3502 s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
3504 struct e1000_hw *hw = &adapter->hw;
3505 u32 incvalue, incperiod, shift;
3507 /* Make sure clock is enabled on I217/I218/I219 before checking
3510 if ((hw->mac.type >= e1000_pch_lpt) &&
3511 !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
3512 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
3513 u32 fextnvm7 = er32(FEXTNVM7);
3515 if (!(fextnvm7 & BIT(0))) {
3516 ew32(FEXTNVM7, fextnvm7 | BIT(0));
3521 switch (hw->mac.type) {
3523 /* Stable 96MHz frequency */
3524 incperiod = INCPERIOD_96MHZ;
3525 incvalue = INCVALUE_96MHZ;
3526 shift = INCVALUE_SHIFT_96MHZ;
3527 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
3530 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
3531 /* Stable 96MHz frequency */
3532 incperiod = INCPERIOD_96MHZ;
3533 incvalue = INCVALUE_96MHZ;
3534 shift = INCVALUE_SHIFT_96MHZ;
3535 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
3537 /* Stable 25MHz frequency */
3538 incperiod = INCPERIOD_25MHZ;
3539 incvalue = INCVALUE_25MHZ;
3540 shift = INCVALUE_SHIFT_25MHZ;
3541 adapter->cc.shift = shift;
3545 /* Stable 24MHz frequency */
3546 incperiod = INCPERIOD_24MHZ;
3547 incvalue = INCVALUE_24MHZ;
3548 shift = INCVALUE_SHIFT_24MHZ;
3549 adapter->cc.shift = shift;
3556 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
3557 /* Stable 24MHz frequency */
3558 incperiod = INCPERIOD_24MHZ;
3559 incvalue = INCVALUE_24MHZ;
3560 shift = INCVALUE_SHIFT_24MHZ;
3561 adapter->cc.shift = shift;
3563 /* Stable 38400KHz frequency */
3564 incperiod = INCPERIOD_38400KHZ;
3565 incvalue = INCVALUE_38400KHZ;
3566 shift = INCVALUE_SHIFT_38400KHZ;
3567 adapter->cc.shift = shift;
3572 /* Stable 25MHz frequency */
3573 incperiod = INCPERIOD_25MHZ;
3574 incvalue = INCVALUE_25MHZ;
3575 shift = INCVALUE_SHIFT_25MHZ;
3576 adapter->cc.shift = shift;
3582 *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
3583 ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
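/* Packing example: in the stable-25MHz case a SYSTIM tick is 40 ns, so
 * incvalue = 40 is shifted left by the per-MAC INCVALUE shift (leaving
 * fractional headroom for ptp frequency adjustment) and incperiod = 1
 * goes into the INCPERIOD field at E1000_TIMINCA_INCPERIOD_SHIFT; the
 * matching cc.shift set above undoes the scaling when converting
 * cycle-counter deltas back to nanoseconds.
 */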
3589 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3590 * @adapter: board private structure
3591 * @config: timestamp configuration
3593 * Outgoing time stamping can be enabled and disabled. Play nice and
3594 * disable it when requested, although it shouldn't cause any overhead
3595 * when no packet needs it. At most one packet in the queue may be
3596 * marked for time stamping, otherwise it would be impossible to tell
3597 * for sure to which packet the hardware time stamp belongs.
3599 * Incoming time stamping has to be configured via the hardware filters.
3600 * Not all combinations are supported, in particular event type has to be
3601 * specified. Matching the kind of event packet is not supported, with the
3602 * exception of "all V2 events regardless of level 2 or 4".
3604 static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
3605 struct hwtstamp_config *config)
3607 struct e1000_hw *hw = &adapter->hw;
3608 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
3609 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
3616 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3619 /* flags reserved for future extensions - must be zero */
3623 switch (config->tx_type) {
3624 case HWTSTAMP_TX_OFF:
3627 case HWTSTAMP_TX_ON:
3633 switch (config->rx_filter) {
3634 case HWTSTAMP_FILTER_NONE:
3637 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3638 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3639 rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
3642 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3643 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3644 rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
3647 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3648 /* Also time stamps V2 L2 Path Delay Request/Response */
3649 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3650 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3653 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3654 /* Also time stamps V2 L2 Path Delay Request/Response. */
3655 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3656 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3659 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3660 /* Hardware cannot filter just V2 L4 Sync messages */
3662 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3663 /* Also time stamps V2 Path Delay Request/Response. */
3664 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3665 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3669 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3670 /* Hardware cannot filter just V2 L4 Delay Request messages */
3672 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3673 /* Also time stamps V2 Path Delay Request/Response. */
3674 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3675 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3679 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3680 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3681 /* Hardware cannot filter just V2 L4 or L2 Event messages */
3683 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3684 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
3685 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3689 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3690 /* For V1, the hardware can only filter Sync messages or
3691 * Delay Request messages but not both so fall-through to
3692 * time stamp all packets.
3695 case HWTSTAMP_FILTER_NTP_ALL:
3696 case HWTSTAMP_FILTER_ALL:
3699 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
3700 config->rx_filter = HWTSTAMP_FILTER_ALL;
3706 adapter->hwtstamp_config = *config;
3708 /* enable/disable Tx h/w time stamping */
3709 regval = er32(TSYNCTXCTL);
3710 regval &= ~E1000_TSYNCTXCTL_ENABLED;
3711 regval |= tsync_tx_ctl;
3712 ew32(TSYNCTXCTL, regval);
3713 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
3714 (regval & E1000_TSYNCTXCTL_ENABLED)) {
3715 e_err("Timesync Tx Control register not set as expected\n");
3719 /* enable/disable Rx h/w time stamping */
3720 regval = er32(TSYNCRXCTL);
3721 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
3722 regval |= tsync_rx_ctl;
3723 ew32(TSYNCRXCTL, regval);
3724 if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
3725 E1000_TSYNCRXCTL_TYPE_MASK)) !=
3726 (regval & (E1000_TSYNCRXCTL_ENABLED |
3727 E1000_TSYNCRXCTL_TYPE_MASK))) {
3728 e_err("Timesync Rx Control register not set as expected\n");
3732 /* L2: define ethertype filter for time stamped packets */
3734 rxmtrl |= ETH_P_1588;
3736 /* define which PTP packets get time stamped */
3737 ew32(RXMTRL, rxmtrl);
3739 /* Filter by destination port */
3741 rxudp = PTP_EV_PORT;
3742 cpu_to_be16s(&rxudp);
3748 /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
3756 * e1000_configure - configure the hardware for Rx and Tx
3757 * @adapter: private board structure
3759 static void e1000_configure(struct e1000_adapter *adapter)
3761 struct e1000_ring *rx_ring = adapter->rx_ring;
3763 e1000e_set_rx_mode(adapter->netdev);
3765 e1000_restore_vlan(adapter);
3766 e1000_init_manageability_pt(adapter);
3768 e1000_configure_tx(adapter);
3770 if (adapter->netdev->features & NETIF_F_RXHASH)
3771 e1000e_setup_rss_hash(adapter);
3772 e1000_setup_rctl(adapter);
3773 e1000_configure_rx(adapter);
3774 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3778 * e1000e_power_up_phy - restore link in case the phy was powered down
3779 * @adapter: address of board private structure
3781 * The phy may be powered down to save power and turn off link when the
3782 * driver is unloaded and wake on lan is not enabled (among others)
3783 * *** this routine MUST be followed by a call to e1000e_reset ***
3785 void e1000e_power_up_phy(struct e1000_adapter *adapter)
3787 if (adapter->hw.phy.ops.power_up)
3788 adapter->hw.phy.ops.power_up(&adapter->hw);
3790 adapter->hw.mac.ops.setup_link(&adapter->hw);
3794 * e1000_power_down_phy - Power down the PHY
3795 * @adapter: board private structure
3797 * Power down the PHY so no link is implied when interface is down.
3798 * The PHY cannot be powered down if management or WoL is active.
3800 static void e1000_power_down_phy(struct e1000_adapter *adapter)
3802 if (adapter->hw.phy.ops.power_down)
3803 adapter->hw.phy.ops.power_down(&adapter->hw);
3807 * e1000_flush_tx_ring - remove all descriptors from the tx_ring
3808 * @adapter: board private structure
3810 * We want to clear all pending descriptors from the TX ring.
3811 * zeroing happens when the HW reads the regs. We assign the ring itself as
3812 * the data of the next descriptor; the data itself does not matter
3813 * because we are about to reset the HW anyway.
3815 static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
3817 struct e1000_hw *hw = &adapter->hw;
3818 struct e1000_ring *tx_ring = adapter->tx_ring;
3819 struct e1000_tx_desc *tx_desc = NULL;
3820 u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
3821 u16 size = 512;
3823 tctl = er32(TCTL);
3824 ew32(TCTL, tctl | E1000_TCTL_EN);
3825 tdt = er32(TDT(0));
3826 BUG_ON(tdt != tx_ring->next_to_use);
3827 tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
3828 tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);
3830 tx_desc->lower.data = cpu_to_le32(txd_lower | size);
3831 tx_desc->upper.data = 0;
3832 /* flush descriptors to memory before notifying the HW */
3834 tx_ring->next_to_use++;
3835 if (tx_ring->next_to_use == tx_ring->count)
3836 tx_ring->next_to_use = 0;
3837 ew32(TDT(0), tx_ring->next_to_use);
3838 usleep_range(200, 250);
3842 * e1000_flush_rx_ring - remove all descriptors from the rx_ring
3843 * @adapter: board private structure
3845 * Mark all descriptors in the RX ring as consumed and disable the rx ring
3847 static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
3849 u32 rctl, rxdctl;
3850 struct e1000_hw *hw = &adapter->hw;
3852 rctl = er32(RCTL);
3853 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3855 usleep_range(100, 150);
3857 rxdctl = er32(RXDCTL(0));
3858 /* zero the lower 14 bits (prefetch and host thresholds) */
3859 rxdctl &= 0xffffc000;
3861 /* update thresholds: prefetch threshold to 31, host threshold to 1
3862 * and make sure the granularity is "descriptors" and not "cache lines"
3864 rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);
3866 ew32(RXDCTL(0), rxdctl);
3867 /* momentarily enable the RX ring for the changes to take effect */
3868 ew32(RCTL, rctl | E1000_RCTL_EN);
3870 usleep_range(100, 150);
3871 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3875 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
3876 * @adapter: board private structure
3878 * In i219, the descriptor rings must be emptied before resetting the HW
3879 * or before changing the device state to D3 during runtime (runtime PM).
3881 * Failure to do this will cause the HW to enter a unit hang state which can
3882 * only be released by a PCI reset on the device.
3886 static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
3889 u32 fext_nvm11, tdlen;
3890 struct e1000_hw *hw = &adapter->hw;
3892 /* First, disable MULR fix in FEXTNVM11 */
3893 fext_nvm11 = er32(FEXTNVM11);
3894 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
3895 ew32(FEXTNVM11, fext_nvm11);
3896 /* do nothing if we're not in faulty state, or if the queue is empty */
3897 tdlen = er32(TDLEN(0));
3898 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
3900 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
3902 e1000_flush_tx_ring(adapter);
3903 /* recheck, maybe the fault is caused by the rx ring */
3904 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
3906 if (hang_state & FLUSH_DESC_REQUIRED)
3907 e1000_flush_rx_ring(adapter);
3911 * e1000e_systim_reset - reset the timesync registers after a hardware reset
3912 * @adapter: board private structure
3914 * When the MAC is reset, all hardware bits for timesync will be reset to the
3915 * default values. This function will restore the settings that were last in place.
3916 * Since the clock SYSTIME registers are reset, we will simply restore the
3917 * cyclecounter to the kernel real clock time.
3919 static void e1000e_systim_reset(struct e1000_adapter *adapter)
3921 struct ptp_clock_info *info = &adapter->ptp_clock_info;
3922 struct e1000_hw *hw = &adapter->hw;
3923 unsigned long flags;
3927 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3930 if (info->adjfreq) {
3931 /* restore the previous ptp frequency delta */
3932 ret_val = info->adjfreq(info, adapter->ptp_delta);
3933 } else {
3934 /* set the default base frequency if no adjustment possible */
3935 ret_val = e1000e_get_base_timinca(adapter, &timinca);
3936 if (!ret_val)
3937 ew32(TIMINCA, timinca);
3941 dev_warn(&adapter->pdev->dev,
3942 "Failed to restore TIMINCA clock rate delta: %d\n",
3947 /* reset the systim ns time counter */
3948 spin_lock_irqsave(&adapter->systim_lock, flags);
3949 timecounter_init(&adapter->tc, &adapter->cc,
3950 ktime_to_ns(ktime_get_real()));
3951 spin_unlock_irqrestore(&adapter->systim_lock, flags);
3953 /* restore the previous hwtstamp configuration settings */
3954 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
3958 * e1000e_reset - bring the hardware into a known good state
3959 * @adapter: board private structure
3961 * This function boots the hardware and enables some settings that
3962 * require a configuration cycle of the hardware - those cannot be
3963 * set/changed during runtime. After reset the device needs to be
3964 * properly configured for Rx, Tx etc.
3966 void e1000e_reset(struct e1000_adapter *adapter)
3968 struct e1000_mac_info *mac = &adapter->hw.mac;
3969 struct e1000_fc_info *fc = &adapter->hw.fc;
3970 struct e1000_hw *hw = &adapter->hw;
3971 u32 tx_space, min_tx_space, min_rx_space;
3972 u32 pba = adapter->pba;
3975 /* reset Packet Buffer Allocation to default */
3978 if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) {
3979 /* To maintain wire speed transmits, the Tx FIFO should be
3980 * large enough to accommodate two full transmit packets,
3981 * rounded up to the next 1KB and expressed in KB. Likewise,
3982 * the Rx FIFO should be large enough to accommodate at least
3983 * one full receive packet and is similarly rounded up and
3987 /* upper 16 bits has Tx packet buffer allocation size in KB */
3988 tx_space = pba >> 16;
3989 /* lower 16 bits has Rx packet buffer allocation size in KB */
3991 /* the Tx fifo also stores 16 bytes of information about the Tx
3992 * but don't include ethernet FCS because hardware appends it
3994 min_tx_space = (adapter->max_frame_size +
3995 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
3996 min_tx_space = ALIGN(min_tx_space, 1024);
3997 min_tx_space >>= 10;
3998 /* software strips receive CRC, so leave room for it */
3999 min_rx_space = adapter->max_frame_size;
4000 min_rx_space = ALIGN(min_rx_space, 1024);
4001 min_rx_space >>= 10;
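/* E.g. for a standard 1522-byte max frame: min_tx_space =
 * ALIGN((1522 + 16 - 4) * 2, 1024) >> 10 = 3 (KB), two full frames
 * plus descriptor overhead minus the FCS the hardware appends, and
 * min_rx_space = ALIGN(1522, 1024) >> 10 = 2 (KB).
 */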
4003 /* If current Tx allocation is less than the min Tx FIFO size,
4004 * and the min Tx FIFO size is less than the current Rx FIFO
4005 * allocation, take space away from current Rx allocation
4007 if ((tx_space < min_tx_space) &&
4008 ((min_tx_space - tx_space) < pba)) {
4009 pba -= min_tx_space - tx_space;
4011 /* if short on Rx space, Rx wins and must trump Tx
4012 * adjustment
4013 */
4014 if (pba < min_rx_space)
4021 /* flow control settings
4023 * The high water mark must be low enough to fit one full frame
4024 * (or the size used for early receive) above it in the Rx FIFO.
4025 * Set it to the lower of:
4026 * - 90% of the Rx FIFO size, and
4027 * - the full Rx FIFO size minus one full frame
4029 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
4030 fc->pause_time = 0xFFFF;
4031 else
4032 fc->pause_time = E1000_FC_PAUSE_TIME;
4033 fc->send_xon = true;
4034 fc->current_mode = fc->requested_mode;
4036 switch (hw->mac.type) {
4038 case e1000_ich10lan:
4039 if (adapter->netdev->mtu > ETH_DATA_LEN) {
4042 fc->high_water = 0x2800;
4043 fc->low_water = fc->high_water - 8;
4048 hwm = min(((pba << 10) * 9 / 10),
4049 ((pba << 10) - adapter->max_frame_size));
4051 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
4052 fc->low_water = fc->high_water - 8;
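/* Worked example: with pba = 26 (KB) and a 1522-byte max frame,
 * hwm = min(26624 * 9 / 10, 26624 - 1522) = min(23961, 25102) = 23961,
 * so after masking to 8-byte granularity XOFF is sent once less than
 * about 2.6KB of Rx FIFO headroom remains, and XON resumes 8 bytes
 * lower.
 */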
4055 /* Workaround PCH LOM adapter hangs with certain network
4056 * loads. If hangs persist, try disabling Tx flow control.
4058 if (adapter->netdev->mtu > ETH_DATA_LEN) {
4059 fc->high_water = 0x3500;
4060 fc->low_water = 0x1500;
4062 fc->high_water = 0x5000;
4063 fc->low_water = 0x3000;
4065 fc->refresh_time = 0x1000;
4075 fc->refresh_time = 0xFFFF;
4076 fc->pause_time = 0xFFFF;
4078 if (adapter->netdev->mtu <= ETH_DATA_LEN) {
4079 fc->high_water = 0x05C20;
4080 fc->low_water = 0x05048;
4081 fc->pause_time = 0x0650;
4082 break;
4084 pba = 14;
4085 ew32(PBA, pba);
4086 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
4087 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
4091 /* Alignment of Tx data is on an arbitrary byte boundary with the
4092 * maximum size per Tx descriptor limited only to the transmit
4093 * allocation of the packet buffer minus 96 bytes with an upper
4094 * limit of 24KB due to receive synchronization limitations.
4096 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
4099 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
4100 * fit in receive buffer.
4102 if (adapter->itr_setting & 0x3) {
4103 if ((adapter->max_frame_size * 2) > (pba << 10)) {
4104 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
4105 dev_info(&adapter->pdev->dev,
4106 "Interrupt Throttle Rate off\n");
4107 adapter->flags2 |= FLAG2_DISABLE_AIM;
4108 e1000e_write_itr(adapter, 0);
4110 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
4111 dev_info(&adapter->pdev->dev,
4112 "Interrupt Throttle Rate on\n");
4113 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
4114 adapter->itr = 20000;
4115 e1000e_write_itr(adapter, adapter->itr);
4119 if (hw->mac.type >= e1000_pch_spt)
4120 e1000_flush_desc_rings(adapter);
4121 /* Allow time for pending master requests to run */
4122 mac->ops.reset_hw(hw);
4124 /* For parts with AMT enabled, let the firmware know
4125 * that the network interface is in control
4127 if (adapter->flags & FLAG_HAS_AMT)
4128 e1000e_get_hw_control(adapter);
4132 if (mac->ops.init_hw(hw))
4133 e_err("Hardware Error\n");
4135 e1000_update_mng_vlan(adapter);
4137 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
4138 ew32(VET, ETH_P_8021Q);
4140 e1000e_reset_adaptive(hw);
4142 /* restore systim and hwtstamp settings */
4143 e1000e_systim_reset(adapter);
4145 /* Set EEE advertisement as appropriate */
4146 if (adapter->flags2 & FLAG2_HAS_EEE) {
4150 switch (hw->phy.type) {
4151 case e1000_phy_82579:
4152 adv_addr = I82579_EEE_ADVERTISEMENT;
4154 case e1000_phy_i217:
4155 adv_addr = I217_EEE_ADVERTISEMENT;
4158 dev_err(&adapter->pdev->dev,
4159 "Invalid PHY type setting EEE advertisement\n");
4163 ret_val = hw->phy.ops.acquire(hw);
4165 dev_err(&adapter->pdev->dev,
4166 "EEE advertisement - unable to acquire PHY\n");
4170 e1000_write_emi_reg_locked(hw, adv_addr,
4171 hw->dev_spec.ich8lan.eee_disable ?
4172 0 : adapter->eee_advert);
4174 hw->phy.ops.release(hw);
4177 if (!netif_running(adapter->netdev) &&
4178 !test_bit(__E1000_TESTING, &adapter->state))
4179 e1000_power_down_phy(adapter);
4181 e1000_get_phy_info(hw);
4183 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
4184 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
4186 /* speed up time to link by disabling smart power down; ignore
4187 * the return value of this function because there is nothing
4188 * different we would do if it failed
4190 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
4191 phy_data &= ~IGP02E1000_PM_SPD;
4192 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
4194 if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) {
4197 /* Fextnvm7 @ 0xe4[2] = 1 */
4198 reg = er32(FEXTNVM7);
4199 reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
4200 ew32(FEXTNVM7, reg);
4201 /* Fextnvm9 @ 0x5bb4[13:12] = 11 */
4202 reg = er32(FEXTNVM9);
4203 reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
4204 E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
4205 ew32(FEXTNVM9, reg);
4211 * e1000e_trigger_lsc - trigger an LSC interrupt
4214 * Fire a link status change interrupt to start the watchdog.
4216 static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
4218 struct e1000_hw *hw = &adapter->hw;
4220 if (adapter->msix_entries)
4221 ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
4222 else
4223 ew32(ICS, E1000_ICS_LSC);
4226 void e1000e_up(struct e1000_adapter *adapter)
4228 /* hardware has been reset, we need to reload some things */
4229 e1000_configure(adapter);
4231 clear_bit(__E1000_DOWN, &adapter->state);
4233 if (adapter->msix_entries)
4234 e1000_configure_msix(adapter);
4235 e1000_irq_enable(adapter);
4237 /* Tx queue started by watchdog timer when link is up */
4239 e1000e_trigger_lsc(adapter);
4242 static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
4244 struct e1000_hw *hw = &adapter->hw;
4246 if (!(adapter->flags2 & FLAG2_DMA_BURST))
4247 return;
4249 /* flush pending descriptor writebacks to memory */
4250 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4251 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4253 /* execute the writes immediately */
4254 e1e_flush();
4256 /* due to rare timing issues, write to TIDV/RDTR again to ensure the
4257 * write is successful
4258 */
4259 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4260 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4262 /* execute the writes immediately */
4263 e1e_flush();
4264 }
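/* A note on the idiom above (assumption): the FPD bit OR'd into
 * TIDV/RDTR is a "flush partial descriptors" trigger rather than part
 * of the delay value itself, so the delay fields are rewritten
 * unchanged with only that bit added, then flushed to the hardware.
 */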
4266 static void e1000e_update_stats(struct e1000_adapter *adapter);
4269 * e1000e_down - quiesce the device and optionally reset the hardware
4270 * @adapter: board private structure
4271 * @reset: boolean flag to reset the hardware or not
4273 void e1000e_down(struct e1000_adapter *adapter, bool reset)
4275 struct net_device *netdev = adapter->netdev;
4276 struct e1000_hw *hw = &adapter->hw;
4277 u32 tctl, rctl;
4279 /* signal that we're down so the interrupt handler does not
4280 * reschedule our watchdog timer
4282 set_bit(__E1000_DOWN, &adapter->state);
4284 netif_carrier_off(netdev);
4286 /* disable receives in the hardware */
4287 rctl = er32(RCTL);
4288 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
4289 ew32(RCTL, rctl & ~E1000_RCTL_EN);
4290 /* flush and sleep below */
4292 netif_stop_queue(netdev);
4294 /* disable transmits in the hardware */
4295 tctl = er32(TCTL);
4296 tctl &= ~E1000_TCTL_EN;
4297 ew32(TCTL, tctl);
4299 /* flush both disables and wait for them to finish */
4300 e1e_flush();
4301 usleep_range(10000, 11000);
4303 e1000_irq_disable(adapter);
4305 napi_synchronize(&adapter->napi);
4307 del_timer_sync(&adapter->watchdog_timer);
4308 del_timer_sync(&adapter->phy_info_timer);
4310 spin_lock(&adapter->stats64_lock);
4311 e1000e_update_stats(adapter);
4312 spin_unlock(&adapter->stats64_lock);
4314 e1000e_flush_descriptors(adapter);
4316 adapter->link_speed = 0;
4317 adapter->link_duplex = 0;
4319 /* Disable Si errata workaround on PCHx for jumbo frame flow */
4320 if ((hw->mac.type >= e1000_pch2lan) &&
4321 (adapter->netdev->mtu > ETH_DATA_LEN) &&
4322 e1000_lv_jumbo_workaround_ich8lan(hw, false))
4323 e_dbg("failed to disable jumbo frame workaround mode\n");
4325 if (!pci_channel_offline(adapter->pdev)) {
4326 if (reset)
4327 e1000e_reset(adapter);
4328 else if (hw->mac.type >= e1000_pch_spt)
4329 e1000_flush_desc_rings(adapter);
4330 }
4331 e1000_clean_tx_ring(adapter->tx_ring);
4332 e1000_clean_rx_ring(adapter->rx_ring);
4335 void e1000e_reinit_locked(struct e1000_adapter *adapter)
4338 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4339 usleep_range(1000, 1100);
4340 e1000e_down(adapter, true);
4341 e1000e_up(adapter);
4342 clear_bit(__E1000_RESETTING, &adapter->state);
4343 }
4346 * e1000e_sanitize_systim - sanitize raw cycle counter reads
4347 * @hw: pointer to the HW structure
4348 * @systim: PHC time value read, sanitized and returned
4349 * @sts: structure to hold system time before and after reading SYSTIML,
4350 * may be NULL
4352 * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
4353 * check to see that the time is incrementing at a reasonable
4354 * rate and is a multiple of incvalue.
4356 static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim,
4357 struct ptp_system_timestamp *sts)
4359 u64 time_delta, rem, temp;
4360 u64 systim_next;
4361 u32 incvalue;
4362 int i;
4364 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4365 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4366 /* latch SYSTIMH on read of SYSTIML */
4367 ptp_read_system_prets(sts);
4368 systim_next = (u64)er32(SYSTIML);
4369 ptp_read_system_postts(sts);
4370 systim_next |= (u64)er32(SYSTIMH) << 32;
4372 time_delta = systim_next - systim;
4373 temp = time_delta;
4374 /* VMWare users have seen incvalue of zero, don't div / 0 */
4375 rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
4377 systim = systim_next;
4379 if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
4380 break;
4381 }
4383 return systim;
4384 }
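/* Example of the check above: with incvalue = 24, consecutive good
 * reads differ by an exact multiple of 24 (rem == 0) and by less than
 * E1000_82574_SYSTIM_EPSILON. A flipped high-order bit trips the
 * epsilon bound; a flipped low-order bit breaks the divisibility
 * test. Either bad read is discarded and SYSTIM is read again.
 */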
4387 * e1000e_read_systim - read SYSTIM register
4388 * @adapter: board private structure
4389 * @sts: structure which will contain system time before and after reading
4390 * SYSTIML, may be NULL
4392 u64 e1000e_read_systim(struct e1000_adapter *adapter,
4393 struct ptp_system_timestamp *sts)
4395 struct e1000_hw *hw = &adapter->hw;
4396 u32 systimel, systimel_2, systimeh;
4397 u64 systim;
4398 /* SYSTIMH latching upon SYSTIML read does not work well.
4399 * This means that if SYSTIML overflows after we read it but before
4400 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
4401 * will experience a huge non-linear increment in the systime value.
4402 * To fix that we test for overflow and if true, we re-read systime.
4403 */
4404 ptp_read_system_prets(sts);
4405 systimel = er32(SYSTIML);
4406 ptp_read_system_postts(sts);
4407 systimeh = er32(SYSTIMH);
4408 /* Is systimel so large that overflow is possible? */
4409 if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
4410 ptp_read_system_prets(sts);
4411 systimel_2 = er32(SYSTIML);
4412 ptp_read_system_postts(sts);
4413 if (systimel > systimel_2) {
4414 /* There was an overflow, read again SYSTIMH, and use
4415 * systimel_2
4416 */
4417 systimeh = er32(SYSTIMH);
4418 systimel = systimel_2;
4419 }
4420 }
4421 systim = (u64)systimel;
4422 systim |= (u64)systimeh << 32;
4424 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
4425 systim = e1000e_sanitize_systim(hw, systim, sts);
4427 return systim;
4428 }
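/* Illustration of the race handled above: if SYSTIML reads
 * 0xfffffff0 and wraps before SYSTIMH is read, pairing the stale low
 * word with the incremented high word would jump systime forward by
 * almost 2^32 counts. The second SYSTIML read detects the wrap
 * (systimel > systimel_2) and the high word is re-read instead.
 */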
4431 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4432 * @cc: cyclecounter structure
4434 static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
4436 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4437 cc);
4439 return e1000e_read_systim(adapter, NULL);
4440 }
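/* Usage sketch (assumption): adapter->cc is registered with a struct
 * timecounter by the PTP setup code, so a timecounter_read() on that
 * clock lands here via cc->read() with no system-timestamp capture
 * (sts == NULL).
 */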
4443 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4444 * @adapter: board private structure to initialize
4446 * e1000_sw_init initializes the Adapter private data structure.
4447 * Fields are initialized based on PCI device information and
4448 * OS network device settings (MTU size).
4450 static int e1000_sw_init(struct e1000_adapter *adapter)
4452 struct net_device *netdev = adapter->netdev;
4454 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
4455 adapter->rx_ps_bsize0 = 128;
4456 adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
4457 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4458 adapter->tx_ring_count = E1000_DEFAULT_TXD;
4459 adapter->rx_ring_count = E1000_DEFAULT_RXD;
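/* Worked example with the default 1500-byte MTU: max_frame_size is
 * 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522 bytes, while
 * min_frame_size is ETH_ZLEN (60) + 4 = 64 bytes, the minimum legal
 * Ethernet frame on the wire.
 */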
4461 spin_lock_init(&adapter->stats64_lock);
4463 e1000e_set_interrupt_capability(adapter);
4465 if (e1000_alloc_queues(adapter))
4466 return -ENOMEM;
4468 /* Setup hardware time stamping cyclecounter */
4469 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4470 adapter->cc.read = e1000e_cyclecounter_read;
4471 adapter->cc.mask = CYCLECOUNTER_MASK(64);
4472 adapter->cc.mult = 1;
4473 /* cc.shift set in e1000e_get_base_timinca() */
4475 spin_lock_init(&adapter->systim_lock);
4476 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
4479 /* Explicitly disable IRQ since the NIC can be in any state. */
4480 e1000_irq_disable(adapter);
4482 set_bit(__E1000_DOWN, &adapter->state);
4484 return 0;
4485 }
4487 * e1000_intr_msi_test - Interrupt Handler
4488 * @irq: interrupt number
4489 * @data: pointer to a network interface device structure
4491 static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
4493 struct net_device *netdev = data;
4494 struct e1000_adapter *adapter = netdev_priv(netdev);
4495 struct e1000_hw *hw = &adapter->hw;
4496 u32 icr = er32(ICR);
4498 e_dbg("icr is %08X\n", icr);
4499 if (icr & E1000_ICR_RXSEQ) {
4500 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
4501 /* Force memory writes to complete before acknowledging the
4502 * interrupt is handled.
4503 */
4504 wmb();
4505 }
4507 return IRQ_HANDLED;
4508 }
4511 * e1000_test_msi_interrupt - Returns 0 for successful test
4512 * @adapter: board private struct
4514 * code flow taken from tg3.c
4516 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
4518 struct net_device *netdev = adapter->netdev;
4519 struct e1000_hw *hw = &adapter->hw;
4520 int err;
4522 /* poll_enable hasn't been called yet, so don't need disable */
4523 /* clear any pending events */
4524 er32(ICR);
4526 /* free the real vector and request a test handler */
4527 e1000_free_irq(adapter);
4528 e1000e_reset_interrupt_capability(adapter);
4530 /* Assume that the test fails, if it succeeds then the test
4531 * MSI irq handler will unset this flag
4532 */
4533 adapter->flags |= FLAG_MSI_TEST_FAILED;
4535 err = pci_enable_msi(adapter->pdev);
4536 if (err)
4537 goto msi_test_failed;
4539 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
4540 netdev->name, netdev);
4541 if (err) {
4542 pci_disable_msi(adapter->pdev);
4543 goto msi_test_failed;
4544 }
4546 /* Force memory writes to complete before enabling and firing an
4547 * interrupt.
4548 */
4549 wmb();
4551 e1000_irq_enable(adapter);
4553 /* fire an unusual interrupt on the test handler */
4554 ew32(ICS, E1000_ICS_RXSEQ);
4555 e1e_flush();
4556 msleep(100);
4558 e1000_irq_disable(adapter);
4560 rmb(); /* read flags after interrupt has been fired */
4562 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4563 adapter->int_mode = E1000E_INT_MODE_LEGACY;
4564 e_info("MSI interrupt test failed, using legacy interrupt.\n");
4566 e_dbg("MSI interrupt test succeeded!\n");
4569 free_irq(adapter->pdev->irq, netdev);
4570 pci_disable_msi(adapter->pdev);
4572 msi_test_failed:
4573 e1000e_set_interrupt_capability(adapter);
4574 return e1000_request_irq(adapter);
4575 }
4578 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4579 * @adapter: board private struct
4581 * code flow taken from tg3.c, called with e1000 interrupts disabled.
4583 static int e1000_test_msi(struct e1000_adapter *adapter)
4584 {
4585 int err;
4586 u16 pci_cmd;
4588 if (!(adapter->flags & FLAG_MSI_ENABLED))
4589 return 0;
4591 /* disable SERR in case the MSI write causes a master abort */
4592 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4593 if (pci_cmd & PCI_COMMAND_SERR)
4594 pci_write_config_word(adapter->pdev, PCI_COMMAND,
4595 pci_cmd & ~PCI_COMMAND_SERR);
4597 err = e1000_test_msi_interrupt(adapter);
4599 /* re-enable SERR */
4600 if (pci_cmd & PCI_COMMAND_SERR) {
4601 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4602 pci_cmd |= PCI_COMMAND_SERR;
4603 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
4604 }
4606 return err;
4607 }
4610 * e1000e_open - Called when a network interface is made active
4611 * @netdev: network interface device structure
4613 * Returns 0 on success, negative value on failure
4615 * The open entry point is called when a network interface is made
4616 * active by the system (IFF_UP). At this point all resources needed
4617 * for transmit and receive operations are allocated, the interrupt
4618 * handler is registered with the OS, the watchdog timer is started,
4619 * and the stack is notified that the interface is ready.
4621 int e1000e_open(struct net_device *netdev)
4623 struct e1000_adapter *adapter = netdev_priv(netdev);
4624 struct e1000_hw *hw = &adapter->hw;
4625 struct pci_dev *pdev = adapter->pdev;
4628 /* disallow open during test */
4629 if (test_bit(__E1000_TESTING, &adapter->state))
4630 return -EBUSY;
4632 pm_runtime_get_sync(&pdev->dev);
4634 netif_carrier_off(netdev);
4635 netif_stop_queue(netdev);
4637 /* allocate transmit descriptors */
4638 err = e1000e_setup_tx_resources(adapter->tx_ring);
4639 if (err)
4640 goto err_setup_tx;
4642 /* allocate receive descriptors */
4643 err = e1000e_setup_rx_resources(adapter->rx_ring);
4644 if (err)
4645 goto err_setup_rx;
4647 /* If AMT is enabled, let the firmware know that the network
4648 * interface is now open and reset the part to a known state.
4650 if (adapter->flags & FLAG_HAS_AMT) {
4651 e1000e_get_hw_control(adapter);
4652 e1000e_reset(adapter);
4653 }
4655 e1000e_power_up_phy(adapter);
4657 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4658 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4659 e1000_update_mng_vlan(adapter);
4661 /* DMA latency requirement to workaround jumbo issue */
4662 cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE);
4664 /* before we allocate an interrupt, we must be ready to handle it.
4665 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4666 * as soon as we call pci_request_irq, so we have to setup our
4667 * clean_rx handler before we do so.
4669 e1000_configure(adapter);
4671 err = e1000_request_irq(adapter);
4672 if (err)
4673 goto err_req_irq;
4675 /* Work around PCIe errata with MSI interrupts causing some chipsets to
4676 * ignore e1000e MSI messages, which means we need to test our MSI
4677 * interrupt now
4678 */
4679 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
4680 err = e1000_test_msi(adapter);
4681 if (err) {
4682 e_err("Interrupt allocation failed\n");
4683 goto err_req_irq;
4684 }
4685 }
4687 /* From here on the code is the same as e1000e_up() */
4688 clear_bit(__E1000_DOWN, &adapter->state);
4690 napi_enable(&adapter->napi);
4692 e1000_irq_enable(adapter);
4694 adapter->tx_hang_recheck = false;
4696 hw->mac.get_link_status = true;
4697 pm_runtime_put(&pdev->dev);
4699 e1000e_trigger_lsc(adapter);
4701 return 0;
4703 err_req_irq:
4704 cpu_latency_qos_remove_request(&adapter->pm_qos_req);
4705 e1000e_release_hw_control(adapter);
4706 e1000_power_down_phy(adapter);
4707 e1000e_free_rx_resources(adapter->rx_ring);
4708 err_setup_rx:
4709 e1000e_free_tx_resources(adapter->tx_ring);
4710 err_setup_tx:
4711 e1000e_reset(adapter);
4712 pm_runtime_put_sync(&pdev->dev);
4714 return err;
4715 }
4718 * e1000e_close - Disables a network interface
4719 * @netdev: network interface device structure
4721 * Returns 0, this is not allowed to fail
4723 * The close entry point is called when an interface is de-activated
4724 * by the OS. The hardware is still under the drivers control, but
4725 * needs to be disabled. A global MAC reset is issued to stop the
4726 * hardware, and all transmit and receive resources are freed.
4728 int e1000e_close(struct net_device *netdev)
4730 struct e1000_adapter *adapter = netdev_priv(netdev);
4731 struct pci_dev *pdev = adapter->pdev;
4732 int count = E1000_CHECK_RESET_COUNT;
4734 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
4735 usleep_range(10000, 11000);
4737 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
4739 pm_runtime_get_sync(&pdev->dev);
4741 if (netif_device_present(netdev)) {
4742 e1000e_down(adapter, true);
4743 e1000_free_irq(adapter);
4745 /* Link status message must follow this format */
4746 netdev_info(netdev, "NIC Link is Down\n");
4747 }
4749 napi_disable(&adapter->napi);
4751 e1000e_free_tx_resources(adapter->tx_ring);
4752 e1000e_free_rx_resources(adapter->rx_ring);
4754 /* kill manageability vlan ID if supported, but not if a vlan with
4755 * the same ID is registered on the host OS (let 8021q kill it)
4757 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4758 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
4759 adapter->mng_vlan_id);
4761 /* If AMT is enabled, let the firmware know that the network
4762 * interface is now closed
4764 if ((adapter->flags & FLAG_HAS_AMT) &&
4765 !test_bit(__E1000_TESTING, &adapter->state))
4766 e1000e_release_hw_control(adapter);
4768 cpu_latency_qos_remove_request(&adapter->pm_qos_req);
4770 pm_runtime_put_sync(&pdev->dev);
4772 return 0;
4773 }
4776 * e1000_set_mac - Change the Ethernet Address of the NIC
4777 * @netdev: network interface device structure
4778 * @p: pointer to an address structure
4780 * Returns 0 on success, negative on failure
4782 static int e1000_set_mac(struct net_device *netdev, void *p)
4784 struct e1000_adapter *adapter = netdev_priv(netdev);
4785 struct e1000_hw *hw = &adapter->hw;
4786 struct sockaddr *addr = p;
4788 if (!is_valid_ether_addr(addr->sa_data))
4789 return -EADDRNOTAVAIL;
4791 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4792 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4794 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4796 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4797 /* activate the work around */
4798 e1000e_set_laa_state_82571(&adapter->hw, 1);
4800 /* Hold a copy of the LAA in RAR[14] This is done so that
4801 * between the time RAR[0] gets clobbered and the time it
4802 * gets fixed (in e1000_watchdog), the actual LAA is in one
4803 * of the RARs and no incoming packets directed to this port
4804 * are dropped. Eventually the LAA will be in RAR[0] and
4805 * RAR[14]
4806 */
4807 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4808 adapter->hw.mac.rar_entry_count - 1);
4809 }
4811 return 0;
4812 }
4815 * e1000e_update_phy_task - work thread to update phy
4816 * @work: pointer to our work struct
4818 * this worker thread exists because we must acquire a
4819 * semaphore to read the phy, which we could msleep while
4820 * waiting for it, and we can't msleep in a timer.
4822 static void e1000e_update_phy_task(struct work_struct *work)
4824 struct e1000_adapter *adapter = container_of(work,
4825 struct e1000_adapter,
4826 update_phy_task);
4827 struct e1000_hw *hw = &adapter->hw;
4829 if (test_bit(__E1000_DOWN, &adapter->state))
4830 return;
4832 e1000_get_phy_info(hw);
4834 /* Enable EEE on 82579 after link up */
4835 if (hw->phy.type >= e1000_phy_82579)
4836 e1000_set_eee_pchlan(hw);
4837 }
4840 * e1000_update_phy_info - timer call-back to update PHY info
4841 * @t: pointer to timer_list containing private info adapter
4843 * Need to wait a few seconds after link up to get diagnostic information from
4844 * the phy
4845 **/
4846 static void e1000_update_phy_info(struct timer_list *t)
4848 struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
4850 if (test_bit(__E1000_DOWN, &adapter->state))
4851 return;
4853 schedule_work(&adapter->update_phy_task);
4854 }
4857 * e1000e_update_phy_stats - Update the PHY statistics counters
4858 * @adapter: board private structure
4860 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4862 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4864 struct e1000_hw *hw = &adapter->hw;
4865 s32 ret_val;
4866 u16 phy_data;
4868 ret_val = hw->phy.ops.acquire(hw);
4869 if (ret_val)
4870 return;
4872 /* A page set is expensive so check if already on desired page.
4873 * If not, set to the page with the PHY status registers.
4874 */
4875 hw->phy.addr = 1;
4876 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4877 &phy_data);
4878 if (ret_val)
4879 goto release;
4880 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4881 ret_val = hw->phy.ops.set_page(hw,
4882 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4883 if (ret_val)
4884 goto release;
4885 }
4887 /* Single Collision Count */
4888 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4889 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4890 if (!ret_val)
4891 adapter->stats.scc += phy_data;
4893 /* Excessive Collision Count */
4894 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4895 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4896 if (!ret_val)
4897 adapter->stats.ecol += phy_data;
4899 /* Multiple Collision Count */
4900 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4901 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4902 if (!ret_val)
4903 adapter->stats.mcc += phy_data;
4905 /* Late Collision Count */
4906 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4907 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4908 if (!ret_val)
4909 adapter->stats.latecol += phy_data;
4911 /* Collision Count - also used for adaptive IFS */
4912 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4913 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4914 if (!ret_val)
4915 hw->mac.collision_delta = phy_data;
4917 /* Defer Count */
4918 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4919 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4920 if (!ret_val)
4921 adapter->stats.dc += phy_data;
4923 /* Transmit with no CRS */
4924 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4925 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4926 if (!ret_val)
4927 adapter->stats.tncrs += phy_data;
4929 release:
4930 hw->phy.ops.release(hw);
4931 }
4934 * e1000e_update_stats - Update the board statistics counters
4935 * @adapter: board private structure
4937 static void e1000e_update_stats(struct e1000_adapter *adapter)
4939 struct net_device *netdev = adapter->netdev;
4940 struct e1000_hw *hw = &adapter->hw;
4941 struct pci_dev *pdev = adapter->pdev;
4943 /* Prevent stats update while adapter is being reset, or if the pci
4944 * connection is down.
4945 */
4946 if (adapter->link_speed == 0)
4947 return;
4948 if (pci_channel_offline(pdev))
4949 return;
4951 adapter->stats.crcerrs += er32(CRCERRS);
4952 adapter->stats.gprc += er32(GPRC);
4953 adapter->stats.gorc += er32(GORCL);
4954 er32(GORCH); /* Clear gorc */
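/* Assumption about the pattern above: GORCL/GORCH form a read-to-clear
 * pair, so reading GORCL accumulates the low 32 bits and the dummy
 * GORCH read discards the upper half while clearing the hardware
 * counter; GOTCL/GOTCH below use the same idiom.
 */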
4955 adapter->stats.bprc += er32(BPRC);
4956 adapter->stats.mprc += er32(MPRC);
4957 adapter->stats.roc += er32(ROC);
4959 adapter->stats.mpc += er32(MPC);
4961 /* Half-duplex statistics */
4962 if (adapter->link_duplex == HALF_DUPLEX) {
4963 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4964 e1000e_update_phy_stats(adapter);
4965 } else {
4966 adapter->stats.scc += er32(SCC);
4967 adapter->stats.ecol += er32(ECOL);
4968 adapter->stats.mcc += er32(MCC);
4969 adapter->stats.latecol += er32(LATECOL);
4970 adapter->stats.dc += er32(DC);
4972 hw->mac.collision_delta = er32(COLC);
4974 if ((hw->mac.type != e1000_82574) &&
4975 (hw->mac.type != e1000_82583))
4976 adapter->stats.tncrs += er32(TNCRS);
4977 }
4978 adapter->stats.colc += hw->mac.collision_delta;
4979 }
4981 adapter->stats.xonrxc += er32(XONRXC);
4982 adapter->stats.xontxc += er32(XONTXC);
4983 adapter->stats.xoffrxc += er32(XOFFRXC);
4984 adapter->stats.xofftxc += er32(XOFFTXC);
4985 adapter->stats.gptc += er32(GPTC);
4986 adapter->stats.gotc += er32(GOTCL);
4987 er32(GOTCH); /* Clear gotc */
4988 adapter->stats.rnbc += er32(RNBC);
4989 adapter->stats.ruc += er32(RUC);
4991 adapter->stats.mptc += er32(MPTC);
4992 adapter->stats.bptc += er32(BPTC);
4994 /* used for adaptive IFS */
4996 hw->mac.tx_packet_delta = er32(TPT);
4997 adapter->stats.tpt += hw->mac.tx_packet_delta;
4999 adapter->stats.algnerrc += er32(ALGNERRC);
5000 adapter->stats.rxerrc += er32(RXERRC);
5001 adapter->stats.cexterr += er32(CEXTERR);
5002 adapter->stats.tsctc += er32(TSCTC);
5003 adapter->stats.tsctfc += er32(TSCTFC);
5005 /* Fill out the OS statistics structure */
5006 netdev->stats.multicast = adapter->stats.mprc;
5007 netdev->stats.collisions = adapter->stats.colc;
5011 /* RLEC on some newer hardware can be incorrect so build
5012 * our own version based on RUC and ROC
5013 */
5014 netdev->stats.rx_errors = adapter->stats.rxerrc +
5015 adapter->stats.crcerrs + adapter->stats.algnerrc +
5016 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5017 netdev->stats.rx_length_errors = adapter->stats.ruc +
5018 adapter->stats.roc;
5019 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
5020 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
5021 netdev->stats.rx_missed_errors = adapter->stats.mpc;
5024 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
5025 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
5026 netdev->stats.tx_window_errors = adapter->stats.latecol;
5027 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
5029 /* Tx Dropped needs to be maintained elsewhere */
5031 /* Management Stats */
5032 adapter->stats.mgptc += er32(MGTPTC);
5033 adapter->stats.mgprc += er32(MGTPRC);
5034 adapter->stats.mgpdc += er32(MGTPDC);
5036 /* Correctable ECC Errors */
5037 if (hw->mac.type >= e1000_pch_lpt) {
5038 u32 pbeccsts = er32(PBECCSTS);
5040 adapter->corr_errors +=
5041 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
5042 adapter->uncorr_errors +=
5043 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
5044 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
5045 }
5046 }
5049 * e1000_phy_read_status - Update the PHY register status snapshot
5050 * @adapter: board private structure
5052 static void e1000_phy_read_status(struct e1000_adapter *adapter)
5054 struct e1000_hw *hw = &adapter->hw;
5055 struct e1000_phy_regs *phy = &adapter->phy_regs;
5057 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
5058 (er32(STATUS) & E1000_STATUS_LU) &&
5059 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
5060 int ret_val;
5062 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
5063 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
5064 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
5065 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
5066 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
5067 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
5068 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
5069 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
5071 e_warn("Error reading PHY register\n");
5073 /* Do not read PHY registers if link is not up
5074 * Set values to typical power-on defaults
5076 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
5077 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
5078 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
5079 BMSR_ERCAP);
5080 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
5081 ADVERTISE_ALL | ADVERTISE_CSMA);
5082 phy->lpa = 0;
5083 phy->expansion = EXPANSION_ENABLENPAGE;
5084 phy->ctrl1000 = ADVERTISE_1000FULL;
5085 phy->stat1000 = 0;
5086 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
5087 }
5088 }
5090 static void e1000_print_link_info(struct e1000_adapter *adapter)
5092 struct e1000_hw *hw = &adapter->hw;
5093 u32 ctrl = er32(CTRL);
5095 /* Link status message must follow this format for user tools */
5096 netdev_info(adapter->netdev,
5097 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5098 adapter->link_speed,
5099 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
5100 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
5101 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
5102 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
5103 }
5105 static bool e1000e_has_link(struct e1000_adapter *adapter)
5107 struct e1000_hw *hw = &adapter->hw;
5108 bool link_active = false;
5109 s32 ret_val = 0;
5111 /* get_link_status is set on LSC (link status) interrupt or
5112 * Rx sequence error interrupt. get_link_status will stay
5113 * true until the check_for_link establishes link
5114 * for copper adapters ONLY
5116 switch (hw->phy.media_type) {
5117 case e1000_media_type_copper:
5118 if (hw->mac.get_link_status) {
5119 ret_val = hw->mac.ops.check_for_link(hw);
5120 link_active = !hw->mac.get_link_status;
5121 } else {
5122 link_active = true;
5123 }
5124 break;
5125 case e1000_media_type_fiber:
5126 ret_val = hw->mac.ops.check_for_link(hw);
5127 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
5128 break;
5129 case e1000_media_type_internal_serdes:
5130 ret_val = hw->mac.ops.check_for_link(hw);
5131 link_active = hw->mac.serdes_has_link;
5132 break;
5133 default:
5134 case e1000_media_type_unknown:
5135 break;
5136 }
5138 if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
5139 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
5140 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
5141 e_info("Gigabit has been disabled, downgrading speed\n");
5147 static void e1000e_enable_receives(struct e1000_adapter *adapter)
5149 /* make sure the receive unit is started */
5150 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
5151 (adapter->flags & FLAG_RESTART_NOW)) {
5152 struct e1000_hw *hw = &adapter->hw;
5153 u32 rctl = er32(RCTL);
5155 ew32(RCTL, rctl | E1000_RCTL_EN);
5156 adapter->flags &= ~FLAG_RESTART_NOW;
5157 }
5158 }
5160 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
5162 struct e1000_hw *hw = &adapter->hw;
5164 /* With 82574 controllers, PHY needs to be checked periodically
5165 * for hung state and reset, if two calls return true
5166 */
5167 if (e1000_check_phy_82574(hw))
5168 adapter->phy_hang_count++;
5169 else
5170 adapter->phy_hang_count = 0;
5172 if (adapter->phy_hang_count > 1) {
5173 adapter->phy_hang_count = 0;
5174 e_dbg("PHY appears hung - resetting\n");
5175 schedule_work(&adapter->reset_task);
5176 }
5177 }
5180 * e1000_watchdog - Timer Call-back
5181 * @t: pointer to timer_list containing private info adapter
5183 static void e1000_watchdog(struct timer_list *t)
5185 struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5187 /* Do the rest outside of interrupt context */
5188 schedule_work(&adapter->watchdog_task);
5190 /* TODO: make this use queue_delayed_work() */
5191 }
5193 static void e1000_watchdog_task(struct work_struct *work)
5195 struct e1000_adapter *adapter = container_of(work,
5196 struct e1000_adapter,
5198 struct net_device *netdev = adapter->netdev;
5199 struct e1000_mac_info *mac = &adapter->hw.mac;
5200 struct e1000_phy_info *phy = &adapter->hw.phy;
5201 struct e1000_ring *tx_ring = adapter->tx_ring;
5202 u32 dmoff_exit_timeout = 100, tries = 0;
5203 struct e1000_hw *hw = &adapter->hw;
5204 u32 link, tctl, pcim_state;
5206 if (test_bit(__E1000_DOWN, &adapter->state))
5207 return;
5209 link = e1000e_has_link(adapter);
5210 if ((netif_carrier_ok(netdev)) && link) {
5211 /* Cancel scheduled suspend requests. */
5212 pm_runtime_resume(netdev->dev.parent);
5214 e1000e_enable_receives(adapter);
5215 goto link_up;
5216 }
5218 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
5219 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
5220 e1000_update_mng_vlan(adapter);
5222 if (link) {
5223 if (!netif_carrier_ok(netdev)) {
5224 bool txb2b = true;
5226 /* Cancel scheduled suspend requests. */
5227 pm_runtime_resume(netdev->dev.parent);
5229 /* Checking if MAC is in DMoff state*/
5230 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
5231 pcim_state = er32(STATUS);
5232 while (pcim_state & E1000_STATUS_PCIM_STATE) {
5233 if (tries++ == dmoff_exit_timeout) {
5234 e_dbg("Error in exiting dmoff\n");
5237 usleep_range(10000, 20000);
5238 pcim_state = er32(STATUS);
5239 }
5240 /* Checking if MAC exited DMoff state */
5241 if (!(pcim_state & E1000_STATUS_PCIM_STATE))
5242 e1000_phy_hw_reset(&adapter->hw);
5243 }
5246 /* update snapshot of PHY registers on LSC */
5247 e1000_phy_read_status(adapter);
5248 mac->ops.get_link_up_info(&adapter->hw,
5249 &adapter->link_speed,
5250 &adapter->link_duplex);
5251 e1000_print_link_info(adapter);
5253 /* check if SmartSpeed worked */
5254 e1000e_check_downshift(hw);
5255 if (phy->speed_downgraded)
5257 "Link Speed was downgraded by SmartSpeed\n");
5259 /* On supported PHYs, check for duplex mismatch only
5260 * if link has autonegotiated at 10/100 half
5262 if ((hw->phy.type == e1000_phy_igp_3 ||
5263 hw->phy.type == e1000_phy_bm) &&
5264 hw->mac.autoneg &&
5265 (adapter->link_speed == SPEED_10 ||
5266 adapter->link_speed == SPEED_100) &&
5267 (adapter->link_duplex == HALF_DUPLEX)) {
5268 u16 autoneg_exp;
5270 e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
5272 if (!(autoneg_exp & EXPANSION_NWAY))
5273 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
5276 /* adjust timeout factor according to speed/duplex */
5277 adapter->tx_timeout_factor = 1;
5278 switch (adapter->link_speed) {
5279 case SPEED_10:
5280 txb2b = false;
5281 adapter->tx_timeout_factor = 16;
5282 break;
5283 case SPEED_100:
5284 txb2b = false;
5285 adapter->tx_timeout_factor = 10;
5286 break;
5287 }
5289 /* workaround: re-program speed mode bit after
5290 * link-up event
5291 */
5292 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
5293 !txb2b) {
5294 u32 tarc0;
5296 tarc0 = er32(TARC(0));
5297 tarc0 &= ~SPEED_MODE_BIT;
5298 ew32(TARC(0), tarc0);
5299 }
5301 /* disable TSO for pcie and 10/100 speeds, to avoid
5302 * some hardware issues
5303 */
5304 if (!(adapter->flags & FLAG_TSO_FORCE)) {
5305 switch (adapter->link_speed) {
5306 case SPEED_10:
5307 case SPEED_100:
5308 e_info("10/100 speed: disabling TSO\n");
5309 netdev->features &= ~NETIF_F_TSO;
5310 netdev->features &= ~NETIF_F_TSO6;
5311 break;
5312 case SPEED_1000:
5313 netdev->features |= NETIF_F_TSO;
5314 netdev->features |= NETIF_F_TSO6;
5315 break;
5316 default:
5317 /* oops */
5318 break;
5319 }
5320 if (hw->mac.type == e1000_pch_spt) {
5321 netdev->features &= ~NETIF_F_TSO;
5322 netdev->features &= ~NETIF_F_TSO6;
5323 }
5324 }
5326 /* enable transmits in the hardware, need to do this
5327 * after setting TARC(0)
5328 */
5329 tctl = er32(TCTL);
5330 tctl |= E1000_TCTL_EN;
5331 ew32(TCTL, tctl);
5333 /* Perform any post-link-up configuration before
5334 * reporting link up.
5335 */
5336 if (phy->ops.cfg_on_link_up)
5337 phy->ops.cfg_on_link_up(hw);
5339 netif_wake_queue(netdev);
5340 netif_carrier_on(netdev);
5342 if (!test_bit(__E1000_DOWN, &adapter->state))
5343 mod_timer(&adapter->phy_info_timer,
5344 round_jiffies(jiffies + 2 * HZ));
5345 }
5346 } else {
5347 if (netif_carrier_ok(netdev)) {
5348 adapter->link_speed = 0;
5349 adapter->link_duplex = 0;
5350 /* Link status message must follow this format */
5351 netdev_info(netdev, "NIC Link is Down\n");
5352 netif_carrier_off(netdev);
5353 netif_stop_queue(netdev);
5354 if (!test_bit(__E1000_DOWN, &adapter->state))
5355 mod_timer(&adapter->phy_info_timer,
5356 round_jiffies(jiffies + 2 * HZ));
5358 /* 80003ES2LAN requires a Rx packet buffer work-around
5359 * on link down event; reset the controller to flush
5360 * the Rx packet buffer.
5361 */
5362 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
5363 adapter->flags |= FLAG_RESTART_NOW;
5364 else
5365 pm_schedule_suspend(netdev->dev.parent,
5366 LINK_TIMEOUT);
5367 }
5368 }
5370 link_up:
5371 spin_lock(&adapter->stats64_lock);
5372 e1000e_update_stats(adapter);
5374 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
5375 adapter->tpt_old = adapter->stats.tpt;
5376 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
5377 adapter->colc_old = adapter->stats.colc;
5379 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
5380 adapter->gorc_old = adapter->stats.gorc;
5381 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
5382 adapter->gotc_old = adapter->stats.gotc;
5383 spin_unlock(&adapter->stats64_lock);
5385 /* If the link is lost the controller stops DMA, but
5386 * if there is queued Tx work it cannot be done. So
5387 * reset the controller to flush the Tx packet buffers.
5388 */
5389 if (!netif_carrier_ok(netdev) &&
5390 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
5391 adapter->flags |= FLAG_RESTART_NOW;
5393 /* If reset is necessary, do it outside of interrupt context. */
5394 if (adapter->flags & FLAG_RESTART_NOW) {
5395 schedule_work(&adapter->reset_task);
5396 /* return immediately since reset is imminent */
5397 return;
5398 }
5400 e1000e_update_adaptive(&adapter->hw);
5402 /* Simple mode for Interrupt Throttle Rate (ITR) */
5403 if (adapter->itr_setting == 4) {
5404 /* Symmetric Tx/Rx gets a reduced ITR=2000;
5405 * Total asymmetrical Tx or Rx gets ITR=8000;
5406 * everyone else is between 2000-8000.
5407 */
5408 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
5409 u32 dif = (adapter->gotc > adapter->gorc ?
5410 adapter->gotc - adapter->gorc :
5411 adapter->gorc - adapter->gotc) / 10000;
5412 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
5414 e1000e_write_itr(adapter, itr);
5415 }
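/* Worked example: gotc = 30 MB and gorc = 10 MB in the last interval
 * give goc = 4000 and dif = 2000 (in 10000-byte units), so
 * itr = 2000 * 6000 / 4000 + 2000 = 5000 -- between the symmetric
 * floor of 2000 and the fully asymmetric value of 8000.
 */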
5417 /* Cause software interrupt to ensure Rx ring is cleaned */
5418 if (adapter->msix_entries)
5419 ew32(ICS, adapter->rx_ring->ims_val);
5420 else
5421 ew32(ICS, E1000_ICS_RXDMT0);
5423 /* flush pending descriptors to memory before detecting Tx hang */
5424 e1000e_flush_descriptors(adapter);
5426 /* Force detection of hung controller every watchdog period */
5427 adapter->detect_tx_hung = true;
5429 /* With 82571 controllers, LAA may be overwritten due to controller
5430 * reset from the other port. Set the appropriate LAA in RAR[0]
5431 */
5432 if (e1000e_get_laa_state_82571(hw))
5433 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
5435 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
5436 e1000e_check_82574_phy_workaround(adapter);
5438 /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
5439 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
5440 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
5441 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
5442 er32(RXSTMPH);
5443 adapter->rx_hwtstamp_cleared++;
5444 } else {
5445 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
5446 }
5447 }
5449 /* Reset the timer */
5450 if (!test_bit(__E1000_DOWN, &adapter->state))
5451 mod_timer(&adapter->watchdog_timer,
5452 round_jiffies(jiffies + 2 * HZ));
5453 }
5455 #define E1000_TX_FLAGS_CSUM 0x00000001
5456 #define E1000_TX_FLAGS_VLAN 0x00000002
5457 #define E1000_TX_FLAGS_TSO 0x00000004
5458 #define E1000_TX_FLAGS_IPV4 0x00000008
5459 #define E1000_TX_FLAGS_NO_FCS 0x00000010
5460 #define E1000_TX_FLAGS_HWTSTAMP 0x00000020
5461 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
5462 #define E1000_TX_FLAGS_VLAN_SHIFT 16
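/* Packing example: a VLAN tag of 0x0123 travels in the upper 16 bits
 * of tx_flags (0x0123 << E1000_TX_FLAGS_VLAN_SHIFT) and is recovered
 * with E1000_TX_FLAGS_VLAN_MASK when the descriptor is built, leaving
 * the low bits free for the boolean flags above.
 */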
5464 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
5465 __be16 protocol)
5467 struct e1000_context_desc *context_desc;
5468 struct e1000_buffer *buffer_info;
5469 unsigned int i;
5470 u32 cmd_length = 0;
5471 u16 ipcse = 0, tucse, mss;
5472 u8 ipcss, ipcso, tucss, tucso, hdr_len;
5473 int err;
5475 if (!skb_is_gso(skb))
5476 return 0;
5478 err = skb_cow_head(skb, 0);
5479 if (err < 0)
5480 return err;
5482 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5483 mss = skb_shinfo(skb)->gso_size;
5484 if (protocol == htons(ETH_P_IP)) {
5485 struct iphdr *iph = ip_hdr(skb);
5486 iph->tot_len = 0;
5487 iph->check = 0;
5488 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
5489 0, IPPROTO_TCP, 0);
5490 cmd_length = E1000_TXD_CMD_IP;
5491 ipcse = skb_transport_offset(skb) - 1;
5492 } else if (skb_is_gso_v6(skb)) {
5493 tcp_v6_gso_csum_prep(skb);
5494 ipcse = 0;
5495 }
5496 ipcss = skb_network_offset(skb);
5497 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
5498 tucss = skb_transport_offset(skb);
5499 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
5501 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
5502 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
5504 i = tx_ring->next_to_use;
5505 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5506 buffer_info = &tx_ring->buffer_info[i];
5508 context_desc->lower_setup.ip_fields.ipcss = ipcss;
5509 context_desc->lower_setup.ip_fields.ipcso = ipcso;
5510 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
5511 context_desc->upper_setup.tcp_fields.tucss = tucss;
5512 context_desc->upper_setup.tcp_fields.tucso = tucso;
5513 context_desc->upper_setup.tcp_fields.tucse = 0;
5514 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
5515 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5516 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5518 buffer_info->time_stamp = jiffies;
5519 buffer_info->next_to_watch = i;
5521 i++;
5522 if (i == tx_ring->count)
5523 i = 0;
5524 tx_ring->next_to_use = i;
5526 return 1;
5527 }
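/* Offset sketch for a typical untagged IPv4/TCP frame (assumed
 * layout): ipcss = 14 (start of the IP header), ipcso = 24 (the IP
 * checksum field), tucss = 34 (start of TCP) and tucso = 50 (the TCP
 * checksum field), letting the hardware patch both checksums in each
 * segment it carves out of the payload.
 */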
5529 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
5530 __be16 protocol)
5532 struct e1000_adapter *adapter = tx_ring->adapter;
5533 struct e1000_context_desc *context_desc;
5534 struct e1000_buffer *buffer_info;
5535 unsigned int i;
5536 u8 css;
5537 u32 cmd_len = E1000_TXD_CMD_DEXT;
5539 if (skb->ip_summed != CHECKSUM_PARTIAL)
5540 return false;
5542 switch (protocol) {
5543 case cpu_to_be16(ETH_P_IP):
5544 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5545 cmd_len |= E1000_TXD_CMD_TCP;
5546 break;
5547 case cpu_to_be16(ETH_P_IPV6):
5548 /* XXX not handling all IPV6 headers */
5549 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5550 cmd_len |= E1000_TXD_CMD_TCP;
5551 break;
5552 default:
5553 if (unlikely(net_ratelimit()))
5554 e_warn("checksum_partial proto=%x!\n",
5555 be16_to_cpu(protocol));
5556 break;
5557 }
5559 css = skb_checksum_start_offset(skb);
5561 i = tx_ring->next_to_use;
5562 buffer_info = &tx_ring->buffer_info[i];
5563 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5565 context_desc->lower_setup.ip_config = 0;
5566 context_desc->upper_setup.tcp_fields.tucss = css;
5567 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
5568 context_desc->upper_setup.tcp_fields.tucse = 0;
5569 context_desc->tcp_seg_setup.data = 0;
5570 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
5572 buffer_info->time_stamp = jiffies;
5573 buffer_info->next_to_watch = i;
5575 i++;
5576 if (i == tx_ring->count)
5577 i = 0;
5578 tx_ring->next_to_use = i;
5580 return true;
5581 }
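/* Example (assumed IPv4/TCP, no VLAN): skb_checksum_start_offset()
 * returns 34 and skb->csum_offset is 16 for TCP, so tucso = 50 -- the
 * hardware inserts the checksum it computes from byte 34 onward at
 * byte 50 of the outgoing frame.
 */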
5583 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5584 unsigned int first, unsigned int max_per_txd,
5585 unsigned int nr_frags)
5587 struct e1000_adapter *adapter = tx_ring->adapter;
5588 struct pci_dev *pdev = adapter->pdev;
5589 struct e1000_buffer *buffer_info;
5590 unsigned int len = skb_headlen(skb);
5591 unsigned int offset = 0, size, count = 0, i;
5592 unsigned int f, bytecount, segs;
5594 i = tx_ring->next_to_use;
5596 while (len) {
5597 buffer_info = &tx_ring->buffer_info[i];
5598 size = min(len, max_per_txd);
5600 buffer_info->length = size;
5601 buffer_info->time_stamp = jiffies;
5602 buffer_info->next_to_watch = i;
5603 buffer_info->dma = dma_map_single(&pdev->dev,
5604 skb->data + offset,
5605 size, DMA_TO_DEVICE);
5606 buffer_info->mapped_as_page = false;
5607 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5608 goto dma_error;
5610 len -= size;
5611 offset += size;
5612 count++;
5614 if (len) {
5615 i++;
5616 if (i == tx_ring->count)
5617 i = 0;
5618 }
5619 }
5621 for (f = 0; f < nr_frags; f++) {
5622 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
5624 len = skb_frag_size(frag);
5625 offset = 0;
5627 while (len) {
5628 i++;
5629 if (i == tx_ring->count)
5630 i = 0;
5632 buffer_info = &tx_ring->buffer_info[i];
5633 size = min(len, max_per_txd);
5635 buffer_info->length = size;
5636 buffer_info->time_stamp = jiffies;
5637 buffer_info->next_to_watch = i;
5638 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
5639 offset, size,
5640 DMA_TO_DEVICE);
5641 buffer_info->mapped_as_page = true;
5642 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5643 goto dma_error;
5645 len -= size;
5646 offset += size;
5647 count++;
5648 }
5649 }
5651 segs = skb_shinfo(skb)->gso_segs ? : 1;
5652 /* multiply data chunks by size of headers */
5653 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
5655 tx_ring->buffer_info[i].skb = skb;
5656 tx_ring->buffer_info[i].segs = segs;
5657 tx_ring->buffer_info[i].bytecount = bytecount;
5658 tx_ring->buffer_info[first].next_to_watch = i;
5660 return count;
5662 dma_error:
5663 dev_err(&pdev->dev, "Tx DMA map failed\n");
5664 buffer_info->dma = 0;
5665 if (count)
5666 count--;
5668 while (count--) {
5669 if (i == 0)
5670 i += tx_ring->count;
5671 i--;
5672 buffer_info = &tx_ring->buffer_info[i];
5673 e1000_put_txbuf(tx_ring, buffer_info, true);
5674 }
5676 return 0;
5677 }
5679 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5681 struct e1000_adapter *adapter = tx_ring->adapter;
5682 struct e1000_tx_desc *tx_desc = NULL;
5683 struct e1000_buffer *buffer_info;
5684 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5685 unsigned int i;
5687 if (tx_flags & E1000_TX_FLAGS_TSO) {
5688 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
5689 E1000_TXD_CMD_TSE;
5690 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5692 if (tx_flags & E1000_TX_FLAGS_IPV4)
5693 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
5696 if (tx_flags & E1000_TX_FLAGS_CSUM) {
5697 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5698 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5701 if (tx_flags & E1000_TX_FLAGS_VLAN) {
5702 txd_lower |= E1000_TXD_CMD_VLE;
5703 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5706 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5707 txd_lower &= ~(E1000_TXD_CMD_IFCS);
5709 if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
5710 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5711 txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
5714 i = tx_ring->next_to_use;
5716 do {
5717 buffer_info = &tx_ring->buffer_info[i];
5718 tx_desc = E1000_TX_DESC(*tx_ring, i);
5719 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5720 tx_desc->lower.data = cpu_to_le32(txd_lower |
5721 buffer_info->length);
5722 tx_desc->upper.data = cpu_to_le32(txd_upper);
5724 i++;
5725 if (i == tx_ring->count)
5726 i = 0;
5727 } while (--count > 0);
5729 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5731 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
5732 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5733 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
5735 /* Force memory writes to complete before letting h/w
5736 * know there are new descriptors to fetch. (Only
5737 * applicable for weak-ordered memory model archs,
5738 * such as IA-64).
5739 */
5740 wmb();
5742 tx_ring->next_to_use = i;
5743 }
5745 #define MINIMUM_DHCP_PACKET_SIZE 282
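/* Likely breakdown of the 282-byte floor (assumption): 14 bytes
 * Ethernet + 20 IP + 8 UDP + 240 fixed BOOTP header up to the DHCP
 * magic cookie; anything shorter cannot hold DHCP options worth
 * forwarding to the firmware.
 */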
5746 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5747 struct sk_buff *skb)
5749 struct e1000_hw *hw = &adapter->hw;
5750 u16 length, offset;
5752 if (skb_vlan_tag_present(skb) &&
5753 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5754 (adapter->hw.mng_cookie.status &
5755 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5756 return 0;
5758 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5759 return 0;
5761 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
5762 return 0;
5764 {
5765 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
5766 struct udphdr *udp;
5768 if (ip->protocol != IPPROTO_UDP)
5769 return 0;
5771 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
5772 if (ntohs(udp->dest) != 67)
5773 return 0;
5775 offset = (u8 *)udp + 8 - skb->data;
5776 length = skb->len - offset;
5777 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
5778 }
5780 return 0;
5781 }
5783 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5785 struct e1000_adapter *adapter = tx_ring->adapter;
5787 netif_stop_queue(adapter->netdev);
5788 /* Herbert's original patch had:
5789 * smp_mb__after_netif_stop_queue();
5790 * but since that doesn't exist yet, just open code it.
5791 */
5792 smp_mb();
5794 /* We need to check again in a case another CPU has just
5795 * made room available.
5796 */
5797 if (e1000_desc_unused(tx_ring) < size)
5798 return -EBUSY;
5800 /* A reprieve! */
5801 netif_start_queue(adapter->netdev);
5802 ++adapter->restart_queue;
5803 return 0;
5804 }
5806 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5808 BUG_ON(size > tx_ring->count);
5810 if (e1000_desc_unused(tx_ring) >= size)
5811 return 0;
5812 return __e1000_maybe_stop_tx(tx_ring, size);
5813 }
5815 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5816 struct net_device *netdev)
5818 struct e1000_adapter *adapter = netdev_priv(netdev);
5819 struct e1000_ring *tx_ring = adapter->tx_ring;
5820 unsigned int first;
5821 unsigned int tx_flags = 0;
5822 unsigned int len = skb_headlen(skb);
5823 unsigned int nr_frags;
5824 unsigned int mss;
5825 int count = 0;
5826 int tso;
5827 unsigned int f;
5828 __be16 protocol = vlan_get_protocol(skb);
5830 if (test_bit(__E1000_DOWN, &adapter->state)) {
5831 dev_kfree_skb_any(skb);
5832 return NETDEV_TX_OK;
5833 }
5835 if (skb->len <= 0) {
5836 dev_kfree_skb_any(skb);
5837 return NETDEV_TX_OK;
5838 }
5840 /* The minimum packet size with TCTL.PSP set is 17 bytes so
5841 * pad skb in order to meet this minimum size requirement
5842 */
5843 if (skb_put_padto(skb, 17))
5844 return NETDEV_TX_OK;
5846 mss = skb_shinfo(skb)->gso_size;
5847 if (mss) {
5848 u8 hdr_len;
5850 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
5851 * points to just header, pull a few bytes of payload from
5852 * frags into skb->data
5853 */
5854 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5855 /* we do this workaround for ES2LAN, but it is unnecessary,
5856 * avoiding it could save a lot of cycles
5857 */
5858 if (skb->data_len && (hdr_len == len)) {
5859 unsigned int pull_size;
5861 pull_size = min_t(unsigned int, 4, skb->data_len);
5862 if (!__pskb_pull_tail(skb, pull_size)) {
5863 e_err("__pskb_pull_tail failed.\n");
5864 dev_kfree_skb_any(skb);
5865 return NETDEV_TX_OK;
5866 }
5867 len = skb_headlen(skb);
5868 }
5869 }
5871 /* reserve a descriptor for the offload context */
5872 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5873 count++;
5874 count++;
5876 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5878 nr_frags = skb_shinfo(skb)->nr_frags;
5879 for (f = 0; f < nr_frags; f++)
5880 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5881 adapter->tx_fifo_limit);
5883 if (adapter->hw.mac.tx_pkt_filtering)
5884 e1000_transfer_dhcp_info(adapter, skb);
5886 /* need: count + 2 desc gap to keep tail from touching
5887 * head, otherwise try next time
5888 */
5889 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5890 return NETDEV_TX_BUSY;
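/* A recap of the budget: count holds one descriptor per
 * tx_fifo_limit-sized chunk of linear and frag data, plus one for an
 * optional offload context and one spare; the extra 2 requested here
 * keep the tail pointer from ever touching the head.
 */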
5892 if (skb_vlan_tag_present(skb)) {
5893 tx_flags |= E1000_TX_FLAGS_VLAN;
5894 tx_flags |= (skb_vlan_tag_get(skb) <<
5895 E1000_TX_FLAGS_VLAN_SHIFT);
5896 }
5898 first = tx_ring->next_to_use;
5900 tso = e1000_tso(tx_ring, skb, protocol);
5901 if (tso < 0) {
5902 dev_kfree_skb_any(skb);
5903 return NETDEV_TX_OK;
5904 }
5906 if (tso)
5907 tx_flags |= E1000_TX_FLAGS_TSO;
5908 else if (e1000_tx_csum(tx_ring, skb, protocol))
5909 tx_flags |= E1000_TX_FLAGS_CSUM;
5911 /* Old method was to assume IPv4 packet by default if TSO was enabled.
5912 * 82571 hardware supports TSO capabilities for IPv6 as well...
5913 * no longer assume, we must.
5914 */
5915 if (protocol == htons(ETH_P_IP))
5916 tx_flags |= E1000_TX_FLAGS_IPV4;
5918 if (unlikely(skb->no_fcs))
5919 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5921 /* if count is 0 then mapping error has occurred */
5922 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5923 nr_frags);
5924 if (count) {
5925 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5926 (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) {
5927 if (!adapter->tx_hwtstamp_skb) {
5928 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5929 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5930 adapter->tx_hwtstamp_skb = skb_get(skb);
5931 adapter->tx_hwtstamp_start = jiffies;
5932 schedule_work(&adapter->tx_hwtstamp_work);
5933 } else {
5934 adapter->tx_hwtstamp_skipped++;
5935 }
5936 }
5938 skb_tx_timestamp(skb);
5940 netdev_sent_queue(netdev, skb->len);
5941 e1000_tx_queue(tx_ring, tx_flags, count);
5942 /* Make sure there is space in the ring for the next send. */
5943 e1000_maybe_stop_tx(tx_ring,
5944 (MAX_SKB_FRAGS *
5945 DIV_ROUND_UP(PAGE_SIZE,
5946 adapter->tx_fifo_limit) + 2));
5948 if (!netdev_xmit_more() ||
5949 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
5950 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5951 e1000e_update_tdt_wa(tx_ring,
5952 tx_ring->next_to_use);
5953 else
5954 writel(tx_ring->next_to_use, tx_ring->tail);
5955 }
5956 } else {
5957 dev_kfree_skb_any(skb);
5958 tx_ring->buffer_info[first].time_stamp = 0;
5959 tx_ring->next_to_use = first;
5960 }
5962 return NETDEV_TX_OK;
5963 }
5966 * e1000_tx_timeout - Respond to a Tx Hang
5967 * @netdev: network interface device structure
5968 * @txqueue: index of the hung queue (unused)
5970 static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
5972 struct e1000_adapter *adapter = netdev_priv(netdev);
5974 /* Do the reset outside of interrupt context */
5975 adapter->tx_timeout_count++;
5976 schedule_work(&adapter->reset_task);
5977 }
5979 static void e1000_reset_task(struct work_struct *work)
5981 struct e1000_adapter *adapter;
5982 adapter = container_of(work, struct e1000_adapter, reset_task);
5984 rtnl_lock();
5985 /* don't run the task if already down */
5986 if (test_bit(__E1000_DOWN, &adapter->state)) {
5987 rtnl_unlock();
5988 return;
5989 }
5991 if (!(adapter->flags & FLAG_RESTART_NOW)) {
5992 e1000e_dump(adapter);
5993 e_err("Reset adapter unexpectedly\n");
5995 e1000e_reinit_locked(adapter);
6000 * e1000e_get_stats64 - Get System Network Statistics
6001 * @netdev: network interface device structure
6002 * @stats: rtnl_link_stats64 pointer
6004 * Returns the address of the device statistics structure.
6006 void e1000e_get_stats64(struct net_device *netdev,
6007 struct rtnl_link_stats64 *stats)
6009 struct e1000_adapter *adapter = netdev_priv(netdev);
6011 spin_lock(&adapter->stats64_lock);
6012 e1000e_update_stats(adapter);
6013 /* Fill out the OS statistics structure */
6014 stats->rx_bytes = adapter->stats.gorc;
6015 stats->rx_packets = adapter->stats.gprc;
6016 stats->tx_bytes = adapter->stats.gotc;
6017 stats->tx_packets = adapter->stats.gptc;
6018 stats->multicast = adapter->stats.mprc;
6019 stats->collisions = adapter->stats.colc;
6023 /* RLEC on some newer hardware can be incorrect so build
6024 * our own version based on RUC and ROC
6025 */
6026 stats->rx_errors = adapter->stats.rxerrc +
6027 adapter->stats.crcerrs + adapter->stats.algnerrc +
6028 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
6029 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
6030 stats->rx_crc_errors = adapter->stats.crcerrs;
6031 stats->rx_frame_errors = adapter->stats.algnerrc;
6032 stats->rx_missed_errors = adapter->stats.mpc;
6035 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
6036 stats->tx_aborted_errors = adapter->stats.ecol;
6037 stats->tx_window_errors = adapter->stats.latecol;
6038 stats->tx_carrier_errors = adapter->stats.tncrs;
6040 /* Tx Dropped needs to be maintained elsewhere */
6042 spin_unlock(&adapter->stats64_lock);
6043 }
6046 * e1000_change_mtu - Change the Maximum Transfer Unit
6047 * @netdev: network interface device structure
6048 * @new_mtu: new value for maximum frame size
6050 * Returns 0 on success, negative on failure
6052 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
6054 struct e1000_adapter *adapter = netdev_priv(netdev);
6055 int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
6057 /* Jumbo frame support */
6058 if ((new_mtu > ETH_DATA_LEN) &&
6059 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
6060 e_err("Jumbo Frames not supported.\n");
6064 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
6065 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
6066 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
6067 (new_mtu > ETH_DATA_LEN)) {
6068 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
6072 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
6073 usleep_range(1000, 1100);
6074 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
6075 adapter->max_frame_size = max_frame;
6076 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6077 netdev->mtu, new_mtu);
6078 netdev->mtu = new_mtu;
6080 pm_runtime_get_sync(netdev->dev.parent);
6082 if (netif_running(netdev))
6083 e1000e_down(adapter, true);
6085 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
6086 * means we reserve 2 more, this pushes us to allocate from the next
6087 * larger slab size.
6088 * i.e. RXBUFFER_2048 --> size-4096 slab
6089 * However with the new *_jumbo_rx* routines, jumbo receives will use
6090 * fragmented skbs
6091 */
6093 if (max_frame <= 2048)
6094 adapter->rx_buffer_len = 2048;
6095 else
6096 adapter->rx_buffer_len = 4096;
6098 /* adjust allocation if LPE protects us, and we aren't using SBP */
6099 if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
6100 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
6102 if (netif_running(netdev))
6103 e1000e_up(adapter);
6104 else
6105 e1000e_reset(adapter);
6107 pm_runtime_put_sync(netdev->dev.parent);
6109 clear_bit(__E1000_RESETTING, &adapter->state);
6111 return 0;
6112 }
6114 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
6117 struct e1000_adapter *adapter = netdev_priv(netdev);
6118 struct mii_ioctl_data *data = if_mii(ifr);
6120 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6121 return -EOPNOTSUPP;
6123 switch (cmd) {
6124 case SIOCGMIIPHY:
6125 data->phy_id = adapter->hw.phy.addr;
6126 break;
6127 case SIOCGMIIREG:
6128 e1000_phy_read_status(adapter);
6130 switch (data->reg_num & 0x1F) {
6131 case MII_BMCR:
6132 data->val_out = adapter->phy_regs.bmcr;
6133 break;
6134 case MII_BMSR:
6135 data->val_out = adapter->phy_regs.bmsr;
6136 break;
6137 case MII_PHYSID1:
6138 data->val_out = (adapter->hw.phy.id >> 16);
6139 break;
6140 case MII_PHYSID2:
6141 data->val_out = (adapter->hw.phy.id & 0xFFFF);
6142 break;
6143 case MII_ADVERTISE:
6144 data->val_out = adapter->phy_regs.advertise;
6145 break;
6146 case MII_LPA:
6147 data->val_out = adapter->phy_regs.lpa;
6148 break;
6149 case MII_EXPANSION:
6150 data->val_out = adapter->phy_regs.expansion;
6151 break;
6152 case MII_CTRL1000:
6153 data->val_out = adapter->phy_regs.ctrl1000;
6154 break;
6155 case MII_STAT1000:
6156 data->val_out = adapter->phy_regs.stat1000;
6157 break;
6158 case MII_ESTATUS:
6159 data->val_out = adapter->phy_regs.estatus;
6160 break;
6161 default:
6162 return -EIO;
6163 }
6164 break;
6165 case SIOCSMIIREG:
6166 default:
6167 return -EOPNOTSUPP;
6168 }
6169 return 0;
6170 }
6173 * e1000e_hwtstamp_set - control hardware time stamping
6174 * @netdev: network interface device structure
6175 * @ifr: interface request
6177 * Outgoing time stamping can be enabled and disabled. Play nice and
6178 * disable it when requested, although it shouldn't cause any overhead
6179 * when no packet needs it. At most one packet in the queue may be
6180 * marked for time stamping, otherwise it would be impossible to tell
6181 * for sure to which packet the hardware time stamp belongs.
6183 * Incoming time stamping has to be configured via the hardware filters.
6184 * Not all combinations are supported, in particular event type has to be
6185 * specified. Matching the kind of event packet is not supported, with the
6186 * exception of "all V2 events regardless of level 2 or 4".
6188 static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
6190 struct e1000_adapter *adapter = netdev_priv(netdev);
6191 struct hwtstamp_config config;
6192 int ret_val;
6194 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6195 return -EFAULT;
6197 ret_val = e1000e_config_hwtstamp(adapter, &config);
6198 if (ret_val)
6199 return ret_val;
6201 switch (config.rx_filter) {
6202 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6203 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6204 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6205 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6206 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6207 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
6208 /* With V2 type filters which specify a Sync or Delay Request,
6209 * Path Delay Request/Response messages are also time stamped
6210 * by hardware so notify the caller the requested packets plus
6211 * some others are time stamped.
6213 config.rx_filter = HWTSTAMP_FILTER_SOME;
6214 break;
6215 default:
6216 break;
6217 }
6219 return copy_to_user(ifr->ifr_data, &config,
6220 sizeof(config)) ? -EFAULT : 0;
6221 }
6223 static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
6225 struct e1000_adapter *adapter = netdev_priv(netdev);
6227 return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
6228 sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
6231 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6232 {
6233 switch (cmd) {
6234 case SIOCGMIIPHY:
6235 case SIOCGMIIREG:
6236 case SIOCSMIIREG:
6237 return e1000_mii_ioctl(netdev, ifr, cmd);
6238 case SIOCSHWTSTAMP:
6239 return e1000e_hwtstamp_set(netdev, ifr);
6240 case SIOCGHWTSTAMP:
6241 return e1000e_hwtstamp_get(netdev, ifr);
6242 default:
6243 return -EOPNOTSUPP;
6244 }
6245 }
6247 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
6249 struct e1000_hw *hw = &adapter->hw;
6250 u32 i, mac_reg, wuc;
6251 u16 phy_reg, wuc_enable;
6252 int retval;
6254 /* copy MAC RARs to PHY RARs */
6255 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
6257 retval = hw->phy.ops.acquire(hw);
6258 if (retval) {
6259 e_err("Could not acquire PHY\n");
6260 return retval;
6261 }
6263 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
6264 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
6265 if (retval)
6266 goto release;
6268 /* copy MAC MTA to PHY MTA - only needed for pchlan */
6269 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
6270 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
6271 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
6272 (u16)(mac_reg & 0xFFFF));
6273 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
6274 (u16)((mac_reg >> 16) & 0xFFFF));
6275 }
6277 /* configure PHY Rx Control register */
6278 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
6279 mac_reg = er32(RCTL);
6280 if (mac_reg & E1000_RCTL_UPE)
6281 phy_reg |= BM_RCTL_UPE;
6282 if (mac_reg & E1000_RCTL_MPE)
6283 phy_reg |= BM_RCTL_MPE;
6284 phy_reg &= ~(BM_RCTL_MO_MASK);
6285 if (mac_reg & E1000_RCTL_MO_3)
6286 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
6287 << BM_RCTL_MO_SHIFT);
6288 if (mac_reg & E1000_RCTL_BAM)
6289 phy_reg |= BM_RCTL_BAM;
6290 if (mac_reg & E1000_RCTL_PMCF)
6291 phy_reg |= BM_RCTL_PMCF;
6292 mac_reg = er32(CTRL);
6293 if (mac_reg & E1000_CTRL_RFCE)
6294 phy_reg |= BM_RCTL_RFCE;
6295 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
6297 wuc = E1000_WUC_PME_EN;
6298 if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
6299 wuc |= E1000_WUC_APME;
6301 /* enable PHY wakeup in MAC register */
6302 ew32(WUFC, wufc);
6303 ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
6304 E1000_WUC_PME_STATUS | wuc));
6306 /* configure and enable PHY wakeup in PHY registers */
6307 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
6308 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
6310 /* activate PHY wakeup */
6311 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
6312 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
6313 if (retval)
6314 e_err("Could not set PHY Host Wakeup bit\n");
6315 release:
6316 hw->phy.ops.release(hw);
6318 return retval;
6319 }
6321 static void e1000e_flush_lpic(struct pci_dev *pdev)
6323 struct net_device *netdev = pci_get_drvdata(pdev);
6324 struct e1000_adapter *adapter = netdev_priv(netdev);
6325 struct e1000_hw *hw = &adapter->hw;
6326 u32 ret_val;
6328 pm_runtime_get_sync(netdev->dev.parent);
6330 ret_val = hw->phy.ops.acquire(hw);
6331 if (ret_val)
6332 goto fl_out;
6334 pr_info("EEE TX LPI TIMER: %08X\n",
6335 er32(LPIC) >> E1000_LPIC_LPIET_SHIFT);
6337 hw->phy.ops.release(hw);
6339 fl_out:
6340 pm_runtime_put_sync(netdev->dev.parent);
6341 }
6343 /* S0ix implementation */
6344 static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
6345 {
6346 struct e1000_hw *hw = &adapter->hw;
6347 u32 mac_data;
6348 u16 phy_data;
6350 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
6351 hw->mac.type >= e1000_pch_adp) {
6352 /* Request ME configure the device for S0ix */
6353 mac_data = er32(H2ME);
6354 mac_data |= E1000_H2ME_START_DPG;
6355 mac_data &= ~E1000_H2ME_EXIT_DPG;
6356 ew32(H2ME, mac_data);
6357 } else {
6358 /* Request driver configure the device to S0ix */
6359 /* Disable the periodic inband message,
6360 * don't request PCIe clock in K1 page770_17[10:9] = 10b
6361 */
6362 e1e_rphy(hw, HV_PM_CTRL, &phy_data);
6363 phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
6364 phy_data |= BIT(10);
6365 e1e_wphy(hw, HV_PM_CTRL, phy_data);
6367 /* Make sure we don't exit K1 every time a new packet arrives
6368 * 772_29[5] = 1 CS_Mode_Stay_In_K1
6369 */
6370 e1e_rphy(hw, I217_CGFREG, &phy_data);
6371 phy_data |= BIT(5);
6372 e1e_wphy(hw, I217_CGFREG, phy_data);
6374 /* Change the MAC/PHY interface to SMBus
6375 * Force the SMBus in PHY page769_23[0] = 1
6376 * Force the SMBus in MAC CTRL_EXT[11] = 1
6377 */
6378 e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
6379 phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
6380 e1e_wphy(hw, CV_SMB_CTRL, phy_data);
6381 mac_data = er32(CTRL_EXT);
6382 mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
6383 ew32(CTRL_EXT, mac_data);
6385 /* DFT control: PHY bit: page769_20[0] = 1
6386 * page769_20[7] - PHY PLL stop
6387 * page769_20[8] - PHY go to the electrical idle
6388 * page769_20[9] - PHY serdes disable
6389 * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
6390 */
6391 e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
6392 phy_data |= BIT(0);
6393 phy_data |= BIT(7);
6394 phy_data |= BIT(8);
6395 phy_data |= BIT(9);
6396 e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
6398 mac_data = er32(EXTCNF_CTRL);
6399 mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
6400 ew32(EXTCNF_CTRL, mac_data);
6402 /* Enable the Dynamic Power Gating in the MAC */
6403 mac_data = er32(FEXTNVM7);
6404 mac_data |= BIT(22);
6405 ew32(FEXTNVM7, mac_data);
6407 /* Disable disconnected cable conditioning for Power Gating */
6408 mac_data = er32(DPGFR);
6409 mac_data |= BIT(2);
6410 ew32(DPGFR, mac_data);
6412 /* Don't wake from dynamic Power Gating with clock request */
6413 mac_data = er32(FEXTNVM12);
6414 mac_data |= BIT(12);
6415 ew32(FEXTNVM12, mac_data);
6417 /* Ungate PGCB clock */
6418 mac_data = er32(FEXTNVM9);
6419 mac_data &= ~BIT(28);
6420 ew32(FEXTNVM9, mac_data);
6422 /* Enable K1 off to enable mPHY Power Gating */
6423 mac_data = er32(FEXTNVM6);
6424 mac_data |= BIT(31);
6425 ew32(FEXTNVM6, mac_data);
6427 /* Enable mPHY power gating for any link and speed */
6428 mac_data = er32(FEXTNVM8);
6429 mac_data |= BIT(9);
6430 ew32(FEXTNVM8, mac_data);
6432 /* Enable the Dynamic Clock Gating in the DMA and MAC */
6433 mac_data = er32(CTRL_EXT);
6434 mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
6435 ew32(CTRL_EXT, mac_data);
6437 /* No MAC DPG gating SLP_S0 in modern standby
6438 * Switch the logic of the lanphypc to use PMC counter
6439 */
6440 mac_data = er32(FEXTNVM5);
6441 mac_data |= BIT(7);
6442 ew32(FEXTNVM5, mac_data);
6443 }
6445 /* Disable the time synchronization clock */
6446 mac_data = er32(FEXTNVM7);
6447 mac_data |= BIT(31);
6448 mac_data &= ~BIT(0);
6449 ew32(FEXTNVM7, mac_data);
6451 /* Dynamic Power Gating Enable */
6452 mac_data = er32(CTRL_EXT);
6453 mac_data |= BIT(3);
6454 ew32(CTRL_EXT, mac_data);
6456 /* Check MAC Tx/Rx packet buffer pointers.
6457 * Reset MAC Tx/Rx packet buffer pointers to suppress any
6458 * pending traffic indication that would prevent power gating.
6459 */
6460 mac_data = er32(TDFH);
6461 if (mac_data)
6462 ew32(TDFH, 0);
6463 mac_data = er32(TDFT);
6464 if (mac_data)
6465 ew32(TDFT, 0);
6466 mac_data = er32(TDFHS);
6467 if (mac_data)
6468 ew32(TDFHS, 0);
6469 mac_data = er32(TDFTS);
6470 if (mac_data)
6471 ew32(TDFTS, 0);
6472 mac_data = er32(TDFPC);
6473 if (mac_data)
6474 ew32(TDFPC, 0);
6475 mac_data = er32(RDFH);
6476 if (mac_data)
6477 ew32(RDFH, 0);
6478 mac_data = er32(RDFT);
6479 if (mac_data)
6480 ew32(RDFT, 0);
6481 mac_data = er32(RDFHS);
6482 if (mac_data)
6483 ew32(RDFHS, 0);
6484 mac_data = er32(RDFTS);
6485 if (mac_data)
6486 ew32(RDFTS, 0);
6487 mac_data = er32(RDFPC);
6488 if (mac_data)
6489 ew32(RDFPC, 0);
6490 }
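/* The entry flow above is a chain of read-modify-write accesses. As an
 * illustration only (no such helper exists in this driver), every step has
 * the shape:
 *
 *	u32 v = er32(FEXTNVM7);		// read the MAC CSR
 *	v |= BIT(22);			// flip the documented bit(s)
 *	ew32(FEXTNVM7, v);		// write the new value back
 *
 * with the ME arbitration workaround handled transparently inside ew32().
 */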
6492 static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
6493 {
6494 struct e1000_hw *hw = &adapter->hw;
6495 bool firmware_bug = false;
6496 u32 mac_data;
6497 u16 phy_data;
6498 int i = 0;
6500 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
6501 hw->mac.type >= e1000_pch_adp) {
6502 /* Keep the GPT clock enabled for CSME */
6503 mac_data = er32(FEXTNVM);
6504 mac_data |= BIT(3);
6505 ew32(FEXTNVM, mac_data);
6506 /* Request ME unconfigure the device from S0ix */
6507 mac_data = er32(H2ME);
6508 mac_data &= ~E1000_H2ME_START_DPG;
6509 mac_data |= E1000_H2ME_EXIT_DPG;
6510 ew32(H2ME, mac_data);
6512 /* Poll up to 2.5 seconds for ME to unconfigure DPG.
6513 * If this takes more than 1 second, show a warning indicating a
6514 * firmware bug
6515 */
6516 while (!(er32(EXFWSM) & E1000_EXFWSM_DPG_EXIT_DONE)) {
6517 if (i > 100 && !firmware_bug)
6518 firmware_bug = true;
6520 if (i++ == 250) {
6521 e_dbg("Timeout (firmware bug): %d msec\n",
6522 i * 10);
6523 break;
6524 }
6526 usleep_range(10000, 11000);
6527 }
6528 if (firmware_bug)
6529 e_warn("DPG_EXIT_DONE took %d msec. This is a firmware bug\n",
6530 i * 10);
6531 else
6532 e_dbg("DPG_EXIT_DONE cleared after %d msec\n", i * 10);
6533 } else {
6534 /* Request driver unconfigure the device from S0ix */
6536 /* Disable the Dynamic Power Gating in the MAC */
6537 mac_data = er32(FEXTNVM7);
6538 mac_data &= 0xFFBFFFFF;
6539 ew32(FEXTNVM7, mac_data);
6541 /* Disable mPHY power gating for any link and speed */
6542 mac_data = er32(FEXTNVM8);
6543 mac_data &= ~BIT(9);
6544 ew32(FEXTNVM8, mac_data);
6546 /* Disable K1 off */
6547 mac_data = er32(FEXTNVM6);
6548 mac_data &= ~BIT(31);
6549 ew32(FEXTNVM6, mac_data);
6551 /* Disable Ungate PGCB clock */
6552 mac_data = er32(FEXTNVM9);
6553 mac_data |= BIT(28);
6554 ew32(FEXTNVM9, mac_data);
6556 /* Cancel not waking from dynamic
6557 * Power Gating with clock request
6559 mac_data = er32(FEXTNVM12);
6560 mac_data &= ~BIT(12);
6561 ew32(FEXTNVM12, mac_data);
6563 /* Cancel disable disconnected cable conditioning
6564 * for Power Gating
6565 */
6566 mac_data = er32(DPGFR);
6567 mac_data &= ~BIT(2);
6568 ew32(DPGFR, mac_data);
6570 /* Disable the Dynamic Clock Gating in the DMA and MAC */
6571 mac_data = er32(CTRL_EXT);
6572 mac_data &= 0xFFF7FFFF;
6573 ew32(CTRL_EXT, mac_data);
6575 /* Revert the lanphypc logic to use the internal Gbe counter
6576 * and not the PMC counter
6578 mac_data = er32(FEXTNVM5);
6579 mac_data &= 0xFFFFFF7F;
6580 ew32(FEXTNVM5, mac_data);
6582 /* Enable the periodic inband message,
6583 * Request PCIe clock in K1 page770_17[10:9] = 01b
6584 */
6585 e1e_rphy(hw, HV_PM_CTRL, &phy_data);
6586 phy_data &= ~BIT(10);
6587 phy_data |= HV_PM_CTRL_K1_CLK_REQ;
6588 e1e_wphy(hw, HV_PM_CTRL, phy_data);
6590 /* Return back configuration
6591 * 772_29[5] = 0 CS_Mode_Stay_In_K1
6592 */
6593 e1e_rphy(hw, I217_CGFREG, &phy_data);
6594 phy_data &= ~BIT(5);
6595 e1e_wphy(hw, I217_CGFREG, phy_data);
6597 /* Change the MAC/PHY interface to Kumeran
6598 * Unforce the SMBus in PHY page769_23[0] = 0
6599 * Unforce the SMBus in MAC CTRL_EXT[11] = 0
6600 */
6601 e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
6602 phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
6603 e1e_wphy(hw, CV_SMB_CTRL, phy_data);
6604 mac_data = er32(CTRL_EXT);
6605 mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
6606 ew32(CTRL_EXT, mac_data);
6607 }
6609 /* Disable Dynamic Power Gating */
6610 mac_data = er32(CTRL_EXT);
6611 mac_data &= 0xFFFFFFF7;
6612 ew32(CTRL_EXT, mac_data);
6614 /* Enable the time synchronization clock */
6615 mac_data = er32(FEXTNVM7);
6616 mac_data &= ~BIT(31);
6617 mac_data |= BIT(0);
6618 ew32(FEXTNVM7, mac_data);
6619 }
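/* Entry/exit symmetry (summary of the two flows above, for reference):
 *
 *	FEXTNVM7 bit 22   dynamic power gating   set on entry / cleared on exit
 *	FEXTNVM8 bit 9    mPHY power gating      set on entry / cleared on exit
 *	FEXTNVM6 bit 31   K1 off                 set on entry / cleared on exit
 *	FEXTNVM9 bit 28   PGCB clock gate        cleared on entry / set on exit
 *	CTRL_EXT bit 11   force SMBus interface  set on entry / cleared on exit
 */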
6621 static int e1000e_pm_freeze(struct device *dev)
6622 {
6623 struct net_device *netdev = dev_get_drvdata(dev);
6624 struct e1000_adapter *adapter = netdev_priv(netdev);
6625 bool present;
6627 rtnl_lock();
6629 present = netif_device_present(netdev);
6630 netif_device_detach(netdev);
6632 if (present && netif_running(netdev)) {
6633 int count = E1000_CHECK_RESET_COUNT;
6635 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
6636 usleep_range(10000, 11000);
6638 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
6640 /* Quiesce the device without resetting the hardware */
6641 e1000e_down(adapter, false);
6642 e1000_free_irq(adapter);
6643 }
6644 rtnl_unlock();
6646 e1000e_reset_interrupt_capability(adapter);
6648 /* Allow time for pending master requests to run */
6649 e1000e_disable_pcie_master(&adapter->hw);
6651 return 0;
6652 }
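/* The E1000_CHECK_RESET_COUNT loop above bounds how long freeze waits for a
 * concurrent reset to finish. Assuming a count of 25 (its value in e1000.h
 * at the time of writing; treat that as an assumption here), the budget is
 * roughly:
 *
 *	25 iterations * usleep_range(10000, 11000) us  ~=  250-275 ms
 */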
6654 static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
6655 {
6656 struct net_device *netdev = pci_get_drvdata(pdev);
6657 struct e1000_adapter *adapter = netdev_priv(netdev);
6658 struct e1000_hw *hw = &adapter->hw;
6659 u32 ctrl, ctrl_ext, rctl, status, wufc;
6660 int retval = 0;
6662 /* Runtime suspend should only enable wakeup for link changes */
6663 if (runtime)
6664 wufc = E1000_WUFC_LNKC;
6665 else if (device_may_wakeup(&pdev->dev))
6666 wufc = adapter->wol;
6667 else
6668 wufc = 0;
6670 status = er32(STATUS);
6671 if (status & E1000_STATUS_LU)
6672 wufc &= ~E1000_WUFC_LNKC;
6674 if (wufc) {
6675 e1000_setup_rctl(adapter);
6676 e1000e_set_rx_mode(netdev);
6678 /* turn on all-multi mode if wake on multicast is enabled */
6679 if (wufc & E1000_WUFC_MC) {
6680 rctl = er32(RCTL);
6681 rctl |= E1000_RCTL_MPE;
6682 ew32(RCTL, rctl);
6683 }
6685 ctrl = er32(CTRL);
6686 ctrl |= E1000_CTRL_ADVD3WUC;
6687 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
6688 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
6689 ew32(CTRL, ctrl);
6691 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
6692 adapter->hw.phy.media_type ==
6693 e1000_media_type_internal_serdes) {
6694 /* keep the laser running in D3 */
6695 ctrl_ext = er32(CTRL_EXT);
6696 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
6697 ew32(CTRL_EXT, ctrl_ext);
6698 }
6700 if (!runtime)
6701 e1000e_power_up_phy(adapter);
6703 if (adapter->flags & FLAG_IS_ICH)
6704 e1000_suspend_workarounds_ich8lan(&adapter->hw);
6706 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6707 /* enable wakeup by the PHY */
6708 retval = e1000_init_phy_wakeup(adapter, wufc);
6709 if (retval)
6710 return retval;
6711 } else {
6712 /* enable wakeup by the MAC */
6713 ew32(WUFC, wufc);
6714 ew32(WUC, E1000_WUC_PME_EN);
6715 }
6716 } else {
6717 ew32(WUC, 0);
6718 ew32(WUFC, 0);
6720 e1000_power_down_phy(adapter);
6721 }
6723 if (adapter->hw.phy.type == e1000_phy_igp_3) {
6724 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
6725 } else if (hw->mac.type >= e1000_pch_lpt) {
6726 if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
6727 /* ULP does not support wake from unicast, multicast
6728 * or broadcast.
6729 */
6730 retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
6731 if (retval)
6732 return retval;
6733 }
6736 /* Ensure that the appropriate bits are set in LPI_CTRL
6737 * for EEE in Sx
6738 */
6739 if ((hw->phy.type >= e1000_phy_i217) &&
6740 adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
6741 u16 lpi_ctrl = 0;
6743 retval = hw->phy.ops.acquire(hw);
6744 if (!retval) {
6745 retval = e1e_rphy_locked(hw, I82579_LPI_CTRL,
6746 &lpi_ctrl);
6748 if (adapter->eee_advert &
6749 hw->dev_spec.ich8lan.eee_lp_ability &
6750 I82579_EEE_100_SUPPORTED)
6751 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
6752 if (adapter->eee_advert &
6753 hw->dev_spec.ich8lan.eee_lp_ability &
6754 I82579_EEE_1000_SUPPORTED)
6755 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
6757 retval = e1e_wphy_locked(hw, I82579_LPI_CTRL,
6758 lpi_ctrl);
6759 }
6760 }
6761 hw->phy.ops.release(hw);
6762 }
6764 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6765 * would have already happened in close and is redundant.
6766 */
6767 e1000e_release_hw_control(adapter);
6769 pci_clear_master(pdev);
6771 /* The pci-e switch on some quad port adapters will report a
6772 * correctable error when the MAC transitions from D0 to D3. To
6773 * prevent this we need to mask off the correctable errors on the
6774 * downstream port of the pci-e switch.
6775 *
6776 * We don't have the associated upstream bridge while assigning
6777 * the PCI device into guest. For example, the KVM on power is
6778 * one of the cases.
6779 */
6780 if (adapter->flags & FLAG_IS_QUAD_PORT) {
6781 struct pci_dev *us_dev = pdev->bus->self;
6782 u16 devctl;
6784 if (!us_dev)
6785 return 0;
6787 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
6788 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6789 (devctl & ~PCI_EXP_DEVCTL_CERE));
6789 (devctl & ~PCI_EXP_DEVCTL_CERE));
6791 pci_save_state(pdev);
6792 pci_prepare_to_sleep(pdev);
6794 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
6795 }
6797 return 0;
6798 }
6800 /**
6801 * __e1000e_disable_aspm - Disable ASPM states
6802 * @pdev: pointer to PCI device struct
6803 * @state: bit-mask of ASPM states to disable
6804 * @locked: indication if this context holds pci_bus_sem locked.
6805 *
6806 * Some devices *must* have certain ASPM states disabled per hardware errata.
6807 */
6808 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked)
6809 {
6810 struct pci_dev *parent = pdev->bus->self;
6811 u16 aspm_dis_mask = 0;
6812 u16 pdev_aspmc, parent_aspmc;
6814 switch (state) {
6815 case PCIE_LINK_STATE_L0S:
6816 case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
6817 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
6818 fallthrough; /* can't have L1 without L0s */
6819 case PCIE_LINK_STATE_L1:
6820 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
6821 break;
6822 default:
6823 return;
6824 }
6826 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6827 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6829 if (parent) {
6830 pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
6831 &parent_aspmc);
6832 parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6833 }
6835 /* Nothing to do if the ASPM states to be disabled already are */
6836 if (!(pdev_aspmc & aspm_dis_mask) &&
6837 (!parent || !(parent_aspmc & aspm_dis_mask)))
6838 return;
6840 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6841 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
6842 "L0s" : "",
6843 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
6844 "L1" : "");
6846 #ifdef CONFIG_PCIEASPM
6847 if (locked)
6848 pci_disable_link_state_locked(pdev, state);
6849 else
6850 pci_disable_link_state(pdev, state);
6852 /* Double-check ASPM control. If not disabled by the above, the
6853 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
6854 * not enabled); override by writing PCI config space directly.
6855 */
6856 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6857 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6859 if (!(aspm_dis_mask & pdev_aspmc))
6860 return;
6861 #endif /* CONFIG_PCIEASPM */
6863 /* Both device and parent should have the same ASPM setting.
6864 * Disable ASPM in downstream component first and then upstream.
6865 */
6866 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
6868 if (parent)
6869 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
6870 aspm_dis_mask);
6871 }
6873 /**
6874 * e1000e_disable_aspm - Disable ASPM states.
6875 * @pdev: pointer to PCI device struct
6876 * @state: bit-mask of ASPM states to disable
6877 *
6878 * This function acquires the pci_bus_sem!
6879 * Some devices *must* have certain ASPM states disabled per hardware errata.
6880 */
6881 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6882 {
6883 __e1000e_disable_aspm(pdev, state, 0);
6884 }
6886 /**
6887 * e1000e_disable_aspm_locked - Disable ASPM states.
6888 * @pdev: pointer to PCI device struct
6889 * @state: bit-mask of ASPM states to disable
6890 *
6891 * This function must be called with pci_bus_sem acquired!
6892 * Some devices *must* have certain ASPM states disabled per hardware errata.
6893 */
6894 static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
6895 {
6896 __e1000e_disable_aspm(pdev, state, 1);
6897 }
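/* Illustrative call sites: the PCI error handlers below run with pci_bus_sem
 * already held, so e1000_io_slot_reset() uses the locked variant:
 *
 *	e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
 *
 * while probe and resume paths, which do not hold the semaphore, call
 * e1000e_disable_aspm() and let the PCI core take it internally.
 */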
6899 static int e1000e_pm_thaw(struct device *dev)
6900 {
6901 struct net_device *netdev = dev_get_drvdata(dev);
6902 struct e1000_adapter *adapter = netdev_priv(netdev);
6903 int rc = 0;
6905 e1000e_set_interrupt_capability(adapter);
6907 rtnl_lock();
6908 if (netif_running(netdev)) {
6909 rc = e1000_request_irq(adapter);
6910 if (rc)
6911 goto err_irq;
6913 e1000e_up(adapter);
6914 }
6916 netif_device_attach(netdev);
6917 err_irq:
6918 rtnl_unlock();
6920 return rc;
6921 }
6923 static int __e1000_resume(struct pci_dev *pdev)
6924 {
6925 struct net_device *netdev = pci_get_drvdata(pdev);
6926 struct e1000_adapter *adapter = netdev_priv(netdev);
6927 struct e1000_hw *hw = &adapter->hw;
6928 u16 aspm_disable_flag = 0;
6930 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6931 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6932 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6933 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6934 if (aspm_disable_flag)
6935 e1000e_disable_aspm(pdev, aspm_disable_flag);
6937 pci_set_master(pdev);
6939 if (hw->mac.type >= e1000_pch2lan)
6940 e1000_resume_workarounds_pchlan(&adapter->hw);
6942 e1000e_power_up_phy(adapter);
6944 /* report the system wakeup cause from S3/S4 */
6945 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6946 u16 phy_data;
6948 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6949 if (phy_data) {
6950 e_info("PHY Wakeup cause - %s\n",
6951 phy_data & E1000_WUS_EX ? "Unicast Packet" :
6952 phy_data & E1000_WUS_MC ? "Multicast Packet" :
6953 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6954 phy_data & E1000_WUS_MAG ? "Magic Packet" :
6955 phy_data & E1000_WUS_LNKC ?
6956 "Link Status Change" : "other");
6957 }
6958 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6959 } else {
6960 u32 wus = er32(WUS);
6962 if (wus) {
6963 e_info("MAC Wakeup cause - %s\n",
6964 wus & E1000_WUS_EX ? "Unicast Packet" :
6965 wus & E1000_WUS_MC ? "Multicast Packet" :
6966 wus & E1000_WUS_BC ? "Broadcast Packet" :
6967 wus & E1000_WUS_MAG ? "Magic Packet" :
6968 wus & E1000_WUS_LNKC ? "Link Status Change" :
6969 "other");
6970 ew32(WUS, ~0);
6971 }
6972 }
6974 e1000e_reset(adapter);
6976 e1000_init_manageability_pt(adapter);
6978 /* If the controller has AMT, do not set DRV_LOAD until the interface
6979 * is up. For all other cases, let the f/w know that the h/w is now
6980 * under the control of the driver.
6981 */
6982 if (!(adapter->flags & FLAG_HAS_AMT))
6983 e1000e_get_hw_control(adapter);
6985 return 0;
6986 }
6988 static __maybe_unused int e1000e_pm_prepare(struct device *dev)
6989 {
6990 return pm_runtime_suspended(dev) &&
6991 pm_suspend_via_firmware();
6992 }
6994 static __maybe_unused int e1000e_pm_suspend(struct device *dev)
6995 {
6996 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
6997 struct e1000_adapter *adapter = netdev_priv(netdev);
6998 struct pci_dev *pdev = to_pci_dev(dev);
6999 struct e1000_hw *hw = &adapter->hw;
7000 u16 phy_data;
7001 int rc;
7003 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
7004 hw->mac.type >= e1000_pch_adp) {
7005 /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */
7006 e1e_rphy(hw, I217_MEMPWR, &phy_data);
7007 phy_data |= I217_MEMPWR_MOEM;
7008 e1e_wphy(hw, I217_MEMPWR, phy_data);
7010 /* Disable LCD reset */
7011 hw->phy.reset_disable = true;
7012 }
7014 e1000e_flush_lpic(pdev);
7016 e1000e_pm_freeze(dev);
7018 rc = __e1000_shutdown(pdev, false);
7019 if (rc) {
7020 e1000e_pm_thaw(dev);
7021 } else {
7022 /* Introduce S0ix implementation */
7023 if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
7024 e1000e_s0ix_entry_flow(adapter);
7025 }
7027 return rc;
7028 }
7030 static __maybe_unused int e1000e_pm_resume(struct device *dev)
7031 {
7032 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
7033 struct e1000_adapter *adapter = netdev_priv(netdev);
7034 struct pci_dev *pdev = to_pci_dev(dev);
7035 struct e1000_hw *hw = &adapter->hw;
7036 u16 phy_data;
7037 int rc;
7039 /* Introduce S0ix implementation */
7040 if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
7041 e1000e_s0ix_exit_flow(adapter);
7043 rc = __e1000_resume(pdev);
7044 if (rc)
7045 return rc;
7047 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
7048 hw->mac.type >= e1000_pch_adp) {
7049 /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */
7050 e1e_rphy(hw, I217_MEMPWR, &phy_data);
7051 phy_data &= ~I217_MEMPWR_MOEM;
7052 e1e_wphy(hw, I217_MEMPWR, phy_data);
7054 /* Enable LCD reset */
7055 hw->phy.reset_disable = false;
7056 }
7058 return e1000e_pm_thaw(dev);
7059 }
7061 static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
7062 {
7063 struct net_device *netdev = dev_get_drvdata(dev);
7064 struct e1000_adapter *adapter = netdev_priv(netdev);
7065 u16 eee_lp;
7067 eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;
7069 if (!e1000e_has_link(adapter)) {
7070 adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
7071 pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
7072 }
7074 return -EBUSY;
7075 }
7077 static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
7078 {
7079 struct pci_dev *pdev = to_pci_dev(dev);
7080 struct net_device *netdev = pci_get_drvdata(pdev);
7081 struct e1000_adapter *adapter = netdev_priv(netdev);
7082 int rc;
7084 rc = __e1000_resume(pdev);
7085 if (rc)
7086 return rc;
7088 if (netdev->flags & IFF_UP)
7089 e1000e_up(adapter);
7091 return rc;
7092 }
7094 static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
7095 {
7096 struct pci_dev *pdev = to_pci_dev(dev);
7097 struct net_device *netdev = pci_get_drvdata(pdev);
7098 struct e1000_adapter *adapter = netdev_priv(netdev);
7100 if (netdev->flags & IFF_UP) {
7101 int count = E1000_CHECK_RESET_COUNT;
7103 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
7104 usleep_range(10000, 11000);
7106 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
7108 /* Down the device without resetting the hardware */
7109 e1000e_down(adapter, false);
7110 }
7112 if (__e1000_shutdown(pdev, true)) {
7113 e1000e_pm_runtime_resume(dev);
7114 return -EBUSY;
7115 }
7117 return 0;
7118 }
7120 static void e1000_shutdown(struct pci_dev *pdev)
7121 {
7122 e1000e_flush_lpic(pdev);
7124 e1000e_pm_freeze(&pdev->dev);
7126 __e1000_shutdown(pdev, false);
7127 }
7129 #ifdef CONFIG_NET_POLL_CONTROLLER
7131 static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
7132 {
7133 struct net_device *netdev = data;
7134 struct e1000_adapter *adapter = netdev_priv(netdev);
7136 if (adapter->msix_entries) {
7137 int vector, msix_irq;
7139 vector = 0;
7140 msix_irq = adapter->msix_entries[vector].vector;
7141 if (disable_hardirq(msix_irq))
7142 e1000_intr_msix_rx(msix_irq, netdev);
7143 enable_irq(msix_irq);
7145 vector++;
7146 msix_irq = adapter->msix_entries[vector].vector;
7147 if (disable_hardirq(msix_irq))
7148 e1000_intr_msix_tx(msix_irq, netdev);
7149 enable_irq(msix_irq);
7151 vector++;
7152 msix_irq = adapter->msix_entries[vector].vector;
7153 if (disable_hardirq(msix_irq))
7154 e1000_msix_other(msix_irq, netdev);
7155 enable_irq(msix_irq);
7156 }
7158 return IRQ_HANDLED;
7159 }
7161 /**
7162 * e1000_netpoll
7163 * @netdev: network interface device structure
7164 *
7165 * Polling 'interrupt' - used by things like netconsole to send skbs
7166 * without having to re-enable interrupts. It's not called while
7167 * the interrupt routine is executing.
7168 */
7169 static void e1000_netpoll(struct net_device *netdev)
7170 {
7171 struct e1000_adapter *adapter = netdev_priv(netdev);
7173 switch (adapter->int_mode) {
7174 case E1000E_INT_MODE_MSIX:
7175 e1000_intr_msix(adapter->pdev->irq, netdev);
7176 break;
7177 case E1000E_INT_MODE_MSI:
7178 if (disable_hardirq(adapter->pdev->irq))
7179 e1000_intr_msi(adapter->pdev->irq, netdev);
7180 enable_irq(adapter->pdev->irq);
7181 break;
7182 default: /* E1000E_INT_MODE_LEGACY */
7183 if (disable_hardirq(adapter->pdev->irq))
7184 e1000_intr(adapter->pdev->irq, netdev);
7185 enable_irq(adapter->pdev->irq);
7186 break;
7187 }
7188 }
7189 #endif /* CONFIG_NET_POLL_CONTROLLER */
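/* Illustrative only: the netpoll path above is what netconsole rides on.
 * A typical (placeholder addresses) configuration that ends up exercising
 * e1000_netpoll() is a kernel command line of the form:
 *
 *	netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/aa:bb:cc:dd:ee:ff
 *
 * i.e. src-port@src-ip/dev,dst-port@dst-ip/dst-mac.
 */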
7191 /**
7192 * e1000_io_error_detected - called when PCI error is detected
7193 * @pdev: Pointer to PCI device
7194 * @state: The current pci connection state
7196 * This function is called after a PCI bus error affecting
7197 * this device has been detected.
7198 */
7199 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
7200 pci_channel_state_t state)
7201 {
7202 e1000e_pm_freeze(&pdev->dev);
7204 if (state == pci_channel_io_perm_failure)
7205 return PCI_ERS_RESULT_DISCONNECT;
7207 pci_disable_device(pdev);
7209 /* Request a slot reset. */
7210 return PCI_ERS_RESULT_NEED_RESET;
7211 }
7213 /**
7214 * e1000_io_slot_reset - called after the pci bus has been reset.
7215 * @pdev: Pointer to PCI device
7216 *
7217 * Restart the card from scratch, as if from a cold-boot. Implementation
7218 * resembles the first-half of the e1000e_pm_resume routine.
7219 */
7220 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
7221 {
7222 struct net_device *netdev = pci_get_drvdata(pdev);
7223 struct e1000_adapter *adapter = netdev_priv(netdev);
7224 struct e1000_hw *hw = &adapter->hw;
7225 u16 aspm_disable_flag = 0;
7226 int err;
7227 pci_ers_result_t result;
7229 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
7230 aspm_disable_flag = PCIE_LINK_STATE_L0S;
7231 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
7232 aspm_disable_flag |= PCIE_LINK_STATE_L1;
7233 if (aspm_disable_flag)
7234 e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
7236 err = pci_enable_device_mem(pdev);
7237 if (err) {
7238 dev_err(&pdev->dev,
7239 "Cannot re-enable PCI device after reset.\n");
7240 result = PCI_ERS_RESULT_DISCONNECT;
7241 } else {
7242 pdev->state_saved = true;
7243 pci_restore_state(pdev);
7244 pci_set_master(pdev);
7246 pci_enable_wake(pdev, PCI_D3hot, 0);
7247 pci_enable_wake(pdev, PCI_D3cold, 0);
7249 e1000e_reset(adapter);
7250 ew32(WUS, ~0);
7251 result = PCI_ERS_RESULT_RECOVERED;
7252 }
7254 return result;
7255 }
7257 /**
7258 * e1000_io_resume - called when traffic can start flowing again.
7259 * @pdev: Pointer to PCI device
7261 * This callback is called when the error recovery driver tells us that
7262 * it's OK to resume normal operation. Implementation resembles the
7263 * second-half of the e1000e_pm_resume routine.
7264 */
7265 static void e1000_io_resume(struct pci_dev *pdev)
7266 {
7267 struct net_device *netdev = pci_get_drvdata(pdev);
7268 struct e1000_adapter *adapter = netdev_priv(netdev);
7270 e1000_init_manageability_pt(adapter);
7272 e1000e_pm_thaw(&pdev->dev);
7274 /* If the controller has AMT, do not set DRV_LOAD until the interface
7275 * is up. For all other cases, let the f/w know that the h/w is now
7276 * under the control of the driver.
7277 */
7278 if (!(adapter->flags & FLAG_HAS_AMT))
7279 e1000e_get_hw_control(adapter);
7280 }
7282 static void e1000_print_device_info(struct e1000_adapter *adapter)
7283 {
7284 struct e1000_hw *hw = &adapter->hw;
7285 struct net_device *netdev = adapter->netdev;
7286 u32 ret_val;
7287 u8 pba_str[E1000_PBANUM_LENGTH];
7289 /* print bus type/speed/width info */
7290 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
7292 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
7293 "Width x1"),
7294 /* MAC address */
7295 netdev->dev_addr);
7296 e_info("Intel(R) PRO/%s Network Connection\n",
7297 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
7298 ret_val = e1000_read_pba_string_generic(hw, pba_str,
7299 E1000_PBANUM_LENGTH);
7300 if (ret_val)
7301 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
7302 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
7303 hw->mac.type, hw->phy.type, pba_str);
7304 }
7306 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
7307 {
7308 struct e1000_hw *hw = &adapter->hw;
7309 int ret_val;
7310 u16 buf = 0;
7312 if (hw->mac.type != e1000_82573)
7313 return;
7315 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
7316 le16_to_cpus(&buf);
7317 if (!ret_val && (!(buf & BIT(0)))) {
7318 /* Deep Smart Power Down (DSPD) */
7319 dev_warn(&adapter->pdev->dev,
7320 "Warning: detected DSPD enabled in EEPROM\n");
7321 }
7322 }
7324 static netdev_features_t e1000_fix_features(struct net_device *netdev,
7325 netdev_features_t features)
7326 {
7327 struct e1000_adapter *adapter = netdev_priv(netdev);
7328 struct e1000_hw *hw = &adapter->hw;
7330 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
7331 if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
7332 features &= ~NETIF_F_RXFCS;
7334 /* Since there is no support for separate Rx/Tx vlan accel
7335 * enable/disable make sure Tx flag is always in same state as Rx.
7337 if (features & NETIF_F_HW_VLAN_CTAG_RX)
7338 features |= NETIF_F_HW_VLAN_CTAG_TX;
7339 else
7340 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
7342 return features;
7343 }
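/* Illustrative only: with an MTU above ETH_DATA_LEN on an 82579 or newer
 * part, a request such as `ethtool -K eth0 rx-fcs on` is silently corrected
 * by this hook, and VLAN CTAG Tx always tracks Rx:
 *
 *	requested: NETIF_F_RXFCS | NETIF_F_HW_VLAN_CTAG_RX
 *	fixed up:  NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX
 */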
7345 static int e1000_set_features(struct net_device *netdev,
7346 netdev_features_t features)
7347 {
7348 struct e1000_adapter *adapter = netdev_priv(netdev);
7349 netdev_features_t changed = features ^ netdev->features;
7351 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
7352 adapter->flags |= FLAG_TSO_FORCE;
7354 if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
7355 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
7356 NETIF_F_RXALL)))
7357 return 0;
7359 if (changed & NETIF_F_RXFCS) {
7360 if (features & NETIF_F_RXFCS) {
7361 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
7362 } else {
7363 /* We need to take it back to defaults, which might mean
7364 * stripping is still disabled at the adapter level.
7365 */
7366 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
7367 adapter->flags2 |= FLAG2_CRC_STRIPPING;
7368 else
7369 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
7370 }
7371 }
7373 netdev->features = features;
7375 if (netif_running(netdev))
7376 e1000e_reinit_locked(adapter);
7378 e1000e_reset(adapter);
7383 static const struct net_device_ops e1000e_netdev_ops = {
7384 .ndo_open = e1000e_open,
7385 .ndo_stop = e1000e_close,
7386 .ndo_start_xmit = e1000_xmit_frame,
7387 .ndo_get_stats64 = e1000e_get_stats64,
7388 .ndo_set_rx_mode = e1000e_set_rx_mode,
7389 .ndo_set_mac_address = e1000_set_mac,
7390 .ndo_change_mtu = e1000_change_mtu,
7391 .ndo_eth_ioctl = e1000_ioctl,
7392 .ndo_tx_timeout = e1000_tx_timeout,
7393 .ndo_validate_addr = eth_validate_addr,
7395 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
7396 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
7397 #ifdef CONFIG_NET_POLL_CONTROLLER
7398 .ndo_poll_controller = e1000_netpoll,
7399 #endif
7400 .ndo_set_features = e1000_set_features,
7401 .ndo_fix_features = e1000_fix_features,
7402 .ndo_features_check = passthru_features_check,
7403 };
7405 /**
7406 * e1000_probe - Device Initialization Routine
7407 * @pdev: PCI device information struct
7408 * @ent: entry in e1000_pci_tbl
7409 *
7410 * Returns 0 on success, negative on failure
7411 *
7412 * e1000_probe initializes an adapter identified by a pci_dev structure.
7413 * The OS initialization, configuring of the adapter private structure,
7414 * and a hardware reset occur.
7415 */
7416 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7417 {
7418 struct net_device *netdev;
7419 struct e1000_adapter *adapter;
7420 struct e1000_hw *hw;
7421 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
7422 resource_size_t mmio_start, mmio_len;
7423 resource_size_t flash_start, flash_len;
7424 static int cards_found;
7425 u16 aspm_disable_flag = 0;
7426 int bars, i, err, pci_using_dac;
7427 u16 eeprom_data = 0;
7428 u16 eeprom_apme_mask = E1000_EEPROM_APME;
7429 s32 ret_val = 0;
7431 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
7432 aspm_disable_flag = PCIE_LINK_STATE_L0S;
7433 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
7434 aspm_disable_flag |= PCIE_LINK_STATE_L1;
7435 if (aspm_disable_flag)
7436 e1000e_disable_aspm(pdev, aspm_disable_flag);
7438 err = pci_enable_device_mem(pdev);
7439 if (err)
7440 return err;
7442 pci_using_dac = 0;
7443 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7444 if (!err) {
7445 pci_using_dac = 1;
7446 } else {
7447 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7448 if (err) {
7449 dev_err(&pdev->dev,
7450 "No usable DMA configuration, aborting\n");
7451 goto err_dma;
7452 }
7453 }
7455 bars = pci_select_bars(pdev, IORESOURCE_MEM);
7456 err = pci_request_selected_regions_exclusive(pdev, bars,
7457 e1000e_driver_name);
7458 if (err)
7459 goto err_pci_reg;
7461 /* AER (Advanced Error Reporting) hooks */
7462 pci_enable_pcie_error_reporting(pdev);
7464 pci_set_master(pdev);
7465 /* PCI config space info */
7466 err = pci_save_state(pdev);
7467 if (err)
7468 goto err_alloc_etherdev;
7470 err = -ENOMEM;
7471 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
7472 if (!netdev)
7473 goto err_alloc_etherdev;
7475 SET_NETDEV_DEV(netdev, &pdev->dev);
7477 netdev->irq = pdev->irq;
7479 pci_set_drvdata(pdev, netdev);
7480 adapter = netdev_priv(netdev);
7481 hw = &adapter->hw;
7482 adapter->netdev = netdev;
7483 adapter->pdev = pdev;
7484 adapter->ei = ei;
7485 adapter->pba = ei->pba;
7486 adapter->flags = ei->flags;
7487 adapter->flags2 = ei->flags2;
7488 adapter->hw.adapter = adapter;
7489 adapter->hw.mac.type = ei->mac;
7490 adapter->max_hw_frame_size = ei->max_hw_frame_size;
7491 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
7493 mmio_start = pci_resource_start(pdev, 0);
7494 mmio_len = pci_resource_len(pdev, 0);
7496 err = -EIO;
7497 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
7498 if (!adapter->hw.hw_addr)
7499 goto err_ioremap;
7501 if ((adapter->flags & FLAG_HAS_FLASH) &&
7502 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
7503 (hw->mac.type < e1000_pch_spt)) {
7504 flash_start = pci_resource_start(pdev, 1);
7505 flash_len = pci_resource_len(pdev, 1);
7506 adapter->hw.flash_address = ioremap(flash_start, flash_len);
7507 if (!adapter->hw.flash_address)
7508 goto err_flashmap;
7509 }
7511 /* Set default EEE advertisement */
7512 if (adapter->flags2 & FLAG2_HAS_EEE)
7513 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
7515 /* construct the net_device struct */
7516 netdev->netdev_ops = &e1000e_netdev_ops;
7517 e1000e_set_ethtool_ops(netdev);
7518 netdev->watchdog_timeo = 5 * HZ;
7519 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
7520 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
7522 netdev->mem_start = mmio_start;
7523 netdev->mem_end = mmio_start + mmio_len;
7525 adapter->bd_number = cards_found++;
7527 e1000e_check_options(adapter);
7529 /* setup adapter struct */
7530 err = e1000_sw_init(adapter);
7531 if (err)
7532 goto err_sw_init;
7534 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
7535 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
7536 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
7538 err = ei->get_variants(adapter);
7539 if (err)
7540 goto err_hw_init;
7542 if ((adapter->flags & FLAG_IS_ICH) &&
7543 (adapter->flags & FLAG_READ_ONLY_NVM) &&
7544 (hw->mac.type < e1000_pch_spt))
7545 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
7547 hw->mac.ops.get_bus_info(&adapter->hw);
7549 adapter->hw.phy.autoneg_wait_to_complete = 0;
7551 /* Copper options */
7552 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
7553 adapter->hw.phy.mdix = AUTO_ALL_MODES;
7554 adapter->hw.phy.disable_polarity_correction = 0;
7555 adapter->hw.phy.ms_type = e1000_ms_hw_default;
7556 }
7558 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
7559 dev_info(&pdev->dev,
7560 "PHY reset is blocked due to SOL/IDER session.\n");
7562 /* Set initial default active device features */
7563 netdev->features = (NETIF_F_SG |
7564 NETIF_F_HW_VLAN_CTAG_RX |
7565 NETIF_F_HW_VLAN_CTAG_TX |
7566 NETIF_F_TSO |
7567 NETIF_F_TSO6 |
7568 NETIF_F_RXHASH |
7569 NETIF_F_RXCSUM |
7570 NETIF_F_HW_CSUM);
7572 /* Set user-changeable features (subset of all device features) */
7573 netdev->hw_features = netdev->features;
7574 netdev->hw_features |= NETIF_F_RXFCS;
7575 netdev->priv_flags |= IFF_SUPP_NOFCS;
7576 netdev->hw_features |= NETIF_F_RXALL;
7578 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
7579 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7581 netdev->vlan_features |= (NETIF_F_SG |
7582 NETIF_F_TSO |
7583 NETIF_F_TSO6 |
7584 NETIF_F_HW_CSUM);
7586 netdev->priv_flags |= IFF_UNICAST_FLT;
7588 if (pci_using_dac) {
7589 netdev->features |= NETIF_F_HIGHDMA;
7590 netdev->vlan_features |= NETIF_F_HIGHDMA;
7591 }
7593 /* MTU range: 68 - max_hw_frame_size */
7594 netdev->min_mtu = ETH_MIN_MTU;
7595 netdev->max_mtu = adapter->max_hw_frame_size -
7596 (VLAN_ETH_HLEN + ETH_FCS_LEN);
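/* Worked example (values illustrative): for a part reporting a
 * max_hw_frame_size of 9018 bytes, the advertised MTU ceiling becomes
 *
 *	9018 - (VLAN_ETH_HLEN + ETH_FCS_LEN) = 9018 - (18 + 4) = 8996
 *
 * since the hardware limit counts the VLAN-tagged Ethernet header and FCS.
 */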
7598 if (e1000e_enable_mng_pass_thru(&adapter->hw))
7599 adapter->flags |= FLAG_MNG_PT_ENABLED;
7601 /* before reading the NVM, reset the controller to
7602 * put the device in a known good starting state
7604 adapter->hw.mac.ops.reset_hw(&adapter->hw);
7606 /* systems with ASPM and others may see the checksum fail on the first
7607 * attempt. Let's give it a few tries
7608 */
7609 for (i = 0;; i++) {
7610 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
7611 break;
7612 if (i == 2) {
7613 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
7614 err = -EIO;
7615 goto err_eeprom;
7616 }
7617 }
7619 e1000_eeprom_checks(adapter);
7621 /* copy the MAC address */
7622 if (e1000e_read_mac_addr(&adapter->hw))
7623 dev_err(&pdev->dev,
7624 "NVM Read Error while reading MAC address\n");
7626 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
7628 if (!is_valid_ether_addr(netdev->dev_addr)) {
7629 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
7630 netdev->dev_addr);
7631 err = -EIO;
7632 goto err_eeprom;
7633 }
7635 timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
7636 timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
7638 INIT_WORK(&adapter->reset_task, e1000_reset_task);
7639 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
7640 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
7641 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
7642 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
7644 /* Initialize link parameters. User can change them with ethtool */
7645 adapter->hw.mac.autoneg = 1;
7646 adapter->fc_autoneg = true;
7647 adapter->hw.fc.requested_mode = e1000_fc_default;
7648 adapter->hw.fc.current_mode = e1000_fc_default;
7649 adapter->hw.phy.autoneg_advertised = 0x2f;
7651 /* Initial Wake on LAN setting - If APM wake is enabled in
7652 * the EEPROM, enable the ACPI Magic Packet filter
7654 if (adapter->flags & FLAG_APME_IN_WUC) {
7655 /* APME bit in EEPROM is mapped to WUC.APME */
7656 eeprom_data = er32(WUC);
7657 eeprom_apme_mask = E1000_WUC_APME;
7658 if ((hw->mac.type > e1000_ich10lan) &&
7659 (eeprom_data & E1000_WUC_PHY_WAKE))
7660 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
7661 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
7662 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
7663 (adapter->hw.bus.func == 1))
7664 ret_val = e1000_read_nvm(&adapter->hw,
7665 NVM_INIT_CONTROL3_PORT_B,
7666 1, &eeprom_data);
7667 else
7668 ret_val = e1000_read_nvm(&adapter->hw,
7669 NVM_INIT_CONTROL3_PORT_A,
7670 1, &eeprom_data);
7671 }
7673 /* fetch WoL from EEPROM */
7674 if (ret_val)
7675 e_dbg("NVM read error getting WoL initial values: %d\n", ret_val);
7676 else if (eeprom_data & eeprom_apme_mask)
7677 adapter->eeprom_wol |= E1000_WUFC_MAG;
7679 /* now that we have the eeprom settings, apply the special cases
7680 * where the eeprom may be wrong or the board simply won't support
7681 * wake on lan on a particular port
7682 */
7683 if (!(adapter->flags & FLAG_HAS_WOL))
7684 adapter->eeprom_wol = 0;
7686 /* initialize the wol settings based on the eeprom settings */
7687 adapter->wol = adapter->eeprom_wol;
7689 /* make sure adapter isn't asleep if manageability is enabled */
7690 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
7691 (hw->mac.ops.check_mng_mode(hw)))
7692 device_wakeup_enable(&pdev->dev);
7694 /* save off EEPROM version number */
7695 ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
7696 if (ret_val) {
7697 e_dbg("NVM read error getting EEPROM version: %d\n",
7698 ret_val);
7699 adapter->eeprom_vers = 0;
7700 }
7702 /* init PTP hardware clock */
7703 e1000e_ptp_init(adapter);
7705 /* reset the hardware with the new settings */
7706 e1000e_reset(adapter);
7708 /* If the controller has AMT, do not set DRV_LOAD until the interface
7709 * is up. For all other cases, let the f/w know that the h/w is now
7710 * under the control of the driver.
7711 */
7712 if (!(adapter->flags & FLAG_HAS_AMT))
7713 e1000e_get_hw_control(adapter);
7715 if (hw->mac.type >= e1000_pch_cnp)
7716 adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
7718 strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
7719 err = register_netdev(netdev);
7720 if (err)
7721 goto err_register;
7723 /* carrier off reporting is important to ethtool even BEFORE open */
7724 netif_carrier_off(netdev);
7726 e1000_print_device_info(adapter);
7728 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
7730 if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp)
7731 pm_runtime_put_noidle(&pdev->dev);
7733 return 0;
7735 err_register:
7736 if (!(adapter->flags & FLAG_HAS_AMT))
7737 e1000e_release_hw_control(adapter);
7738 err_eeprom:
7739 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
7740 e1000_phy_hw_reset(&adapter->hw);
7741 err_hw_init:
7742 kfree(adapter->tx_ring);
7743 kfree(adapter->rx_ring);
7744 err_sw_init:
7745 if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
7746 iounmap(adapter->hw.flash_address);
7747 e1000e_reset_interrupt_capability(adapter);
7748 err_flashmap:
7749 iounmap(adapter->hw.hw_addr);
7750 err_ioremap:
7751 free_netdev(netdev);
7752 err_alloc_etherdev:
7753 pci_disable_pcie_error_reporting(pdev);
7754 pci_release_mem_regions(pdev);
7755 err_pci_reg:
7756 err_dma:
7757 pci_disable_device(pdev);
7759 return err;
7760 }
7761 /**
7762 * e1000_remove - Device Removal Routine
7763 * @pdev: PCI device information struct
7765 * e1000_remove is called by the PCI subsystem to alert the driver
7766 * that it should release a PCI device. This could be caused by a
7767 * Hot-Plug event, or because the driver is going to be removed from
7768 * memory.
7769 */
7770 static void e1000_remove(struct pci_dev *pdev)
7771 {
7772 struct net_device *netdev = pci_get_drvdata(pdev);
7773 struct e1000_adapter *adapter = netdev_priv(netdev);
7775 e1000e_ptp_remove(adapter);
7777 /* The timers may be rescheduled, so explicitly disable them
7778 * from being rescheduled.
7779 */
7780 set_bit(__E1000_DOWN, &adapter->state);
7781 del_timer_sync(&adapter->watchdog_timer);
7782 del_timer_sync(&adapter->phy_info_timer);
7784 cancel_work_sync(&adapter->reset_task);
7785 cancel_work_sync(&adapter->watchdog_task);
7786 cancel_work_sync(&adapter->downshift_task);
7787 cancel_work_sync(&adapter->update_phy_task);
7788 cancel_work_sync(&adapter->print_hang_task);
7790 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
7791 cancel_work_sync(&adapter->tx_hwtstamp_work);
7792 if (adapter->tx_hwtstamp_skb) {
7793 dev_consume_skb_any(adapter->tx_hwtstamp_skb);
7794 adapter->tx_hwtstamp_skb = NULL;
7795 }
7796 }
7798 unregister_netdev(netdev);
7800 if (pci_dev_run_wake(pdev))
7801 pm_runtime_get_noresume(&pdev->dev);
7803 /* Release control of h/w to f/w. If f/w is AMT enabled, this
7804 * would have already happened in close and is redundant.
7805 */
7806 e1000e_release_hw_control(adapter);
7808 e1000e_reset_interrupt_capability(adapter);
7809 kfree(adapter->tx_ring);
7810 kfree(adapter->rx_ring);
7812 iounmap(adapter->hw.hw_addr);
7813 if ((adapter->hw.flash_address) &&
7814 (adapter->hw.mac.type < e1000_pch_spt))
7815 iounmap(adapter->hw.flash_address);
7816 pci_release_mem_regions(pdev);
7818 free_netdev(netdev);
7821 pci_disable_pcie_error_reporting(pdev);
7823 pci_disable_device(pdev);
7824 }
7826 /* PCI Error Recovery (ERS) */
7827 static const struct pci_error_handlers e1000_err_handler = {
7828 .error_detected = e1000_io_error_detected,
7829 .slot_reset = e1000_io_slot_reset,
7830 .resume = e1000_io_resume,
7831 };
7833 static const struct pci_device_id e1000_pci_tbl[] = {
7834 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
7835 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
7836 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
7837 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
7838 board_82571 },
7839 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
7840 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
7841 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
7842 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
7843 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
7845 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
7846 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
7847 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
7848 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
7850 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
7851 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
7852 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
7854 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
7855 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
7856 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
7858 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
7859 board_80003es2lan },
7860 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
7861 board_80003es2lan },
7862 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
7863 board_80003es2lan },
7864 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
7865 board_80003es2lan },
7867 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
7868 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
7869 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
7870 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
7871 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
7872 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
7873 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
7874 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
7876 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
7877 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
7878 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
7879 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
7880 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
7881 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
7882 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
7883 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
7884 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
7886 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
7887 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
7888 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
7890 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
7891 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
7892 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
7894 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
7895 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
7896 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
7897 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
7899 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
7900 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
7902 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
7903 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
7904 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
7905 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
7906 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
7907 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
7908 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
7909 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
7910 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt },
7911 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
7912 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
7913 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
7914 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt },
7915 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM4), board_pch_spt },
7916 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt },
7917 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt },
7918 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt },
7919 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp },
7920 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp },
7921 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp },
7922 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp },
7923 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM8), board_pch_cnp },
7924 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp },
7925 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
7926 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },
7927 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp },
7928 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp },
7929 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp },
7930 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
7931 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
7932 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
7933 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
7934 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
7935 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
7936 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
7937 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
7938 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
7939 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp },
7940 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp },
7941 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp },
7942 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp },
7943 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp },
7944 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
7945 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
7946 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
7947 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp },
7948 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp },
7949 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp },
7950 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp },
7951 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp },
7952 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp },
7953 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp },
7954 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp },
7956 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
7957 };
7958 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
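/* Illustrative only: exporting the table with MODULE_DEVICE_TABLE() lets
 * udev autoload this module from a PCI modalias of the form
 *
 *	pci:v00008086d0000XXXXsv*sd*bc*sc*i*
 *
 * where 8086 is Intel's vendor ID and XXXX is one of the device IDs above.
 */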
7960 static const struct dev_pm_ops e1000_pm_ops = {
7961 #ifdef CONFIG_PM_SLEEP
7962 .prepare = e1000e_pm_prepare,
7963 .suspend = e1000e_pm_suspend,
7964 .resume = e1000e_pm_resume,
7965 .freeze = e1000e_pm_freeze,
7966 .thaw = e1000e_pm_thaw,
7967 .poweroff = e1000e_pm_suspend,
7968 .restore = e1000e_pm_resume,
7970 SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
7971 e1000e_pm_runtime_idle)
7974 /* PCI Device API Driver */
7975 static struct pci_driver e1000_driver = {
7976 .name = e1000e_driver_name,
7977 .id_table = e1000_pci_tbl,
7978 .probe = e1000_probe,
7979 .remove = e1000_remove,
7980 .driver = {
7981 .pm = &e1000_pm_ops,
7982 },
7983 .shutdown = e1000_shutdown,
7984 .err_handler = &e1000_err_handler
7985 };
7987 /**
7988 * e1000_init_module - Driver Registration Routine
7989 *
7990 * e1000_init_module is the first routine called when the driver is
7991 * loaded. All it does is register with the PCI subsystem.
7992 */
7993 static int __init e1000_init_module(void)
7994 {
7995 pr_info("Intel(R) PRO/1000 Network Driver\n");
7996 pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
7998 return pci_register_driver(&e1000_driver);
7999 }
8000 module_init(e1000_init_module);
8002 /**
8003 * e1000_exit_module - Driver Exit Cleanup Routine
8004 *
8005 * e1000_exit_module is called just before the driver is removed
8006 * from memory.
8007 */
8008 static void __exit e1000_exit_module(void)
8009 {
8010 pci_unregister_driver(&e1000_driver);
8011 }
8012 module_exit(e1000_exit_module);
8014 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
8015 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
8016 MODULE_LICENSE("GPL v2");