// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>

#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"
#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
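
/* Example usage: "modprobe igc debug=16" enables a wide set of netif
 * message types; the default of -1 is assumed to fall back to
 * DEFAULT_MSG_ENABLE via netif_msg_init() at probe time.
 */
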
char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame.  As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;
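
	/* Worked example with illustrative values: if pba = 34 (KB units),
	 * max_frame_size = 1522 and MAX_JUMBO_FRAME_SIZE = 9216, then
	 *   hwm        = (34 << 10) - (1522 + 9216) = 24078
	 *   high_water = 24078 & 0xFFFFFFF0        = 24064
	 *   low_water  = 24064 - 16                = 24048
	 * so XOFF is sent while roughly one jumbo frame of FIFO headroom
	 * still remains.
	 */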

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
	wr32(IGC_VET, ETH_P_8021Q);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_reset(adapter);

	igc_get_phy_info(hw);
}
/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	if (!pci_device_is_present(adapter->pdev))
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);

	dma_unmap_len_set(buf, len, 0);
}
/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
	u32 xsk_frames = 0;

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			dev_kfree_skb_any(tx_buffer->skb);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		tx_buffer->next_to_watch = NULL;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	if (tx_ring->xsk_pool && xsk_frames)
		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* Zero out the buffer ring */
	memset(tx_ring->tx_buffer_info, 0,
	       sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_disable_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}
/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}
}

static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
{
	struct igc_rx_buffer *bi;
	u16 i;

	for (i = 0; i < ring->count; i++) {
		bi = &ring->rx_buffer_info[i];
		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *ring)
{
	if (ring->xsk_pool)
		igc_clean_rx_ring_xsk_pool(ring);
	else
		igc_clean_rx_ring_page_shared(ring);

	clear_ring_uses_large_buffer(ring);

	ring->next_to_alloc = 0;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}
/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	u8 index = rx_ring->queue_index;
	int size, desc_len, res;

	/* XDP RX-queue info */
	if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
			       rx_ring->q_vector->napi.napi_id);
	if (res < 0) {
		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
			   index);
		return res;
	}

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
					      struct igc_ring *ring)
{
	if (!igc_xdp_is_enabled(adapter) ||
	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
		return NULL;

	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}
/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;
	u32 buf_size;

	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
	if (ring->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_PAGE_SHARED,
						   NULL));
	}

	if (igc_xdp_is_enabled(adapter))
		set_ring_uses_large_buffer(ring);

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->xsk_pool)
		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	else if (ring_uses_large_buffer(ring))
		buf_size = IGC_RXBUFFER_3072;
	else
		buf_size = IGC_RXBUFFER_2048;

	srrctl = rd32(IGC_SRRCTL(reg_idx));
	srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
		    IGC_SRRCTL_DESCTYPE_MASK);
	srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
	srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}
/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
			(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igc_write_rss_indir_tbl(adapter);
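
	/* The indirection table spreads entries evenly across queues: with
	 * IGC_RETA_SIZE = 128 and, say, rss_queues = 4, entries 0-31 map to
	 * queue 0, 32-63 to queue 1, 64-95 to queue 2 and 96-127 to queue 3
	 * ((j * 4) / 128 == j / 32).
	 */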

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}
/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}
/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}
/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	   matching the filter are enqueued onto 'queue'. Otherwise, queue
 *	   assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}
/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 */
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}
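
/* A worked pass through igc_tx_launchtime() below, with illustrative
 * numbers: base_time = 0, cycle_time = 1ms and now = 10.25ms give n = 10,
 * baset_est = 10ms and end_of_cycle = 11ms.  A packet with txtime = 10.6ms
 * falls within the current cycle, so the launchtime programmed into the
 * descriptor is the offset into that cycle: 10.6ms - 10ms = 0.6ms.
 */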
static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
				bool *first_flag, bool *insert_empty)
{
	struct igc_adapter *adapter = netdev_priv(ring->netdev);
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	ktime_t now = ktime_get_clocktai();
	ktime_t baset_est, end_of_cycle;
	s32 launchtime;
	s64 n;

	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);

	baset_est = ktime_add_ns(base_time, cycle_time * (n));
	end_of_cycle = ktime_add_ns(baset_est, cycle_time);

	if (ktime_compare(txtime, end_of_cycle) >= 0) {
		if (baset_est != ring->last_ff_cycle) {
			*first_flag = true;
			ring->last_ff_cycle = baset_est;

			if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
				*insert_empty = true;
		}
	}

	/* There is a window at the end of the cycle in which a packet may
	 * not honor its launchtime.  A 5us window is used to account for
	 * the time software needs to update the tail pointer and for the
	 * packet to be DMA'ed into the packet buffer.
	 */
	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
			    txtime);

	ring->last_tx_cycle = end_of_cycle;

	launchtime = ktime_sub_ns(txtime, baset_est);
	if (launchtime > 0)
		div_s64_rem(launchtime, cycle_time, &launchtime);
	else
		launchtime = 0;

	return cpu_to_le32(launchtime);
}
static int igc_init_empty_frame(struct igc_ring *ring,
				struct igc_tx_buffer *buffer,
				struct sk_buff *skb)
{
	unsigned int size;
	dma_addr_t dma;

	size = skb_headlen(skb);

	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma)) {
		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->protocol = 0;
	buffer->bytecount = skb->len;
	buffer->gso_segs = 1;
	buffer->time_stamp = jiffies;
	dma_unmap_len_set(buffer, len, skb->len);
	dma_unmap_addr_set(buffer, dma, dma);

	return 0;
}

static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
					struct sk_buff *skb,
					struct igc_tx_buffer *first)
{
	union igc_adv_tx_desc *desc;
	u32 cmd_type, olinfo_status;
	int err;

	if (!igc_desc_unused(ring))
		return -EBUSY;

	err = igc_init_empty_frame(ring, first, skb);
	if (err)
		return err;

	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
		   first->bytecount;
	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;

	desc = IGC_TX_DESC(ring, ring->next_to_use);
	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));

	netdev_tx_sent_queue(txring_txq(ring), skb->len);

	first->next_to_watch = desc;

	ring->next_to_use++;
	if (ring->next_to_use == ring->count)
		ring->next_to_use = 0;

	return 0;
}

#define IGC_EMPTY_FRAME_SIZE	60
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    __le32 launch_time, bool first_flag,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	if (first_flag)
		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->launch_time = launch_time;
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
			__le32 launch_time, bool first_flag)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, 0);
}
static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier: make the queue stop visible before re-checking
	 * for free descriptors.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;

	return __igc_maybe_stop_tx(tx_ring, size);
}
#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ?				\
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
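
/* IGC_SET_FLAG moves a flag bit into a descriptor bit without branching.
 * Illustrative example: if _flag is BIT(1) and _result is BIT(30), then
 * for an _input with BIT(1) set:
 *   (_input & BIT(1)) * (BIT(30) / BIT(1)) == BIT(1) * BIT(29) == BIT(30)
 * and 0 otherwise; the other arm of the ternary handles _flag > _result
 * with a division instead of a multiplication.
 */
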
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
				 IGC_ADVTXD_DCMD_VLE);

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	/* insert frame checksum */
	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type;

	cmd_type = igc_tx_cmd_type(skb, tx_flags);
	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return 0;
dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}
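
/* Sketch of the context descriptor fields that igc_tso() below programs,
 * using illustrative values: for TCP with a 20-byte L4 header and
 * gso_size = 1448, mss_l4len_idx packs (20 << IGC_ADVTXD_L4LEN_SHIFT) |
 * (1448 << IGC_ADVTXD_MSS_SHIFT), while vlan_macip_lens carries the IP
 * header length in its low bits and the MAC header length at
 * IGC_ADVTXD_MACLEN_SHIFT.
 */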
static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   __le32 launch_time, bool first_flag,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
	bool first_flag = false, insert_empty = false;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	__le32 launch_time = 0;
	u32 tx_flags = 0;
	unsigned short f;
	ktime_t txtime;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 * + 2 desc gap to keep tail from touching head,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}
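
	/* Illustrative descriptor budget: a linear 1500-byte skb needs a
	 * single data descriptor, so count = 1 and the check above reserves
	 * six slots; the extra five cover the context descriptor, the
	 * two-descriptor gap and, it appears, headroom for an optional
	 * empty frame plus its context descriptor on launchtime rings.
	 */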

	if (!tx_ring->launchtime_enable)
		goto done;

	txtime = skb->tstamp;
	skb->tstamp = ktime_set(0, 0);
	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);

	if (insert_empty) {
		struct igc_tx_buffer *empty_info;
		struct sk_buff *empty;
		void *data;

		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
		if (!empty)
			goto done;

		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
		memset(data, 0, IGC_EMPTY_FRAME_SIZE);

		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);

		if (igc_init_tx_empty_descriptor(tx_ring,
						 empty,
						 empty_info) < 0)
			dev_kfree_skb_any(empty);
	}

done:
	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->type = IGC_TX_BUFFER_TYPE_SKB;
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (adapter->qbv_transition || tx_ring->oper_gate_closed)
		goto out_drop;

	if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) {
		adapter->stats.txdrop++;
		goto out_drop;
	}

	if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		unsigned long flags;

		spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
		if (!adapter->ptp_tx_skb) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}

		spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGC_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first, launch_time, first_flag);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}
static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_L4E |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}
/* Mapping HW RSS Type to enum pkt_hash_types */
static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2,
	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask   */
	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions      */
	[13] = PKT_HASH_TYPE_NONE,
	[14] = PKT_HASH_TYPE_NONE,
	[15] = PKT_HASH_TYPE_NONE,
};

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH) {
		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		u32 rss_type = igc_rss_type(rx_desc);

		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
	}
}
static void igc_rx_vlan(struct igc_ring *rx_ring,
			union igc_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	igc_rx_vlan(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl |= IGC_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~IGC_CTRL_VME;
	}
	wr32(IGC_CTRL, ctrl);
}

static void igc_restore_vlan(struct igc_adapter *adapter)
{
	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size,
					       int *rx_buffer_pgcnt)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}
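
/* With 4KB pages split into two 2KB buffers (truesize = 2048), the XOR in
 * igc_rx_buffer_flip() below ping-pongs page_offset between 0 and 2048, so
 * the two halves of a page alternate between "held by the stack" and
 * "posted to hardware".  On larger page sizes the offset advances instead.
 */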
static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
			       unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
	buffer->page_offset ^= truesize;
#else
	buffer->page_offset += truesize;
#endif
}

static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
					      unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(ring) / 2;
#else
	truesize = ring_uses_build_skb(ring) ?
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}
/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	igc_rx_buffer_flip(rx_buffer, truesize);
}

static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, size);
	if (metasize)
		skb_metadata_set(skb, metasize);

	igc_rx_buffer_flip(rx_buffer, truesize);
	return skb;
}
static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 ktime_t timestamp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	void *va = xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi,
			     IGC_RX_HDR_LEN + metasize);
	if (unlikely(!skb))
		return NULL;

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
	       ALIGN(headlen + metasize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
		igc_rx_buffer_flip(rx_buffer, truesize);
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}
/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}
static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
				  int rx_buffer_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define IGC_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
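
/* Reference-counting sketch for the scheme above: the page refcount is
 * front-loaded once (page_ref_add(page, USHRT_MAX - 1)) and references
 * handed to the stack are paid for by decrementing pagecnt_bias, avoiding
 * an atomic operation per packet.  A page remains recyclable while
 * page_count - pagecnt_bias <= 1, i.e. only the driver still owns it.
 */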
/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
			   union igc_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGC_RX_DESC(rx_ring, ntc));

	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer,
			      int rx_buffer_pgcnt)
{
	if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	struct igc_adapter *adapter = rx_ring->q_vector->adapter;

	if (ring_uses_build_skb(rx_ring))
		return IGC_SKB_PAD;
	if (igc_xdp_is_enabled(adapter))
		return XDP_PACKET_HEADROOM;

	return 0;
}
static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring
 * @cleaned_count: number of buffers to clean
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}
2234 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
2236 union igc_adv_rx_desc *desc;
2237 u16 i = ring->next_to_use;
2238 struct igc_rx_buffer *bi;
2245 XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);
2247 desc = IGC_RX_DESC(ring, i);
2248 bi = &ring->rx_buffer_info[i];
2252 bi->xdp = xsk_buff_alloc(ring->xsk_pool);
2258 dma = xsk_buff_xdp_get_dma(bi->xdp);
2259 desc->read.pkt_addr = cpu_to_le64(dma);
2265 desc = IGC_RX_DESC(ring, 0);
2266 bi = ring->rx_buffer_info;
2270 /* Clear the length for the next_to_use descriptor. */
2271 desc->wb.upper.length = 0;
2278 if (ring->next_to_use != i) {
2279 ring->next_to_use = i;
2281 /* Force memory writes to complete before letting h/w
2282 * know there are new descriptors to fetch. (Only
2283 * applicable for weak-ordered memory model archs,
2284 * such as IA-64).
2285 */
2286 wmb();
2287 writel(i, ring->tail);
2293 /* This function requires that __netif_tx_lock be held by the caller. */
2294 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
2295 struct xdp_frame *xdpf)
2297 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
2298 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
2299 u16 count, index = ring->next_to_use;
2300 struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
2301 struct igc_tx_buffer *buffer = head;
2302 union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
2303 u32 olinfo_status, len = xdpf->len, cmd_type;
2304 void *data = xdpf->data;
2307 count = TXD_USE_COUNT(len);
2308 for (i = 0; i < nr_frags; i++)
2309 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
2311 if (igc_maybe_stop_tx(ring, count + 3)) {
2312 /* this is a hard error */
2317 head->bytecount = xdp_get_frame_len(xdpf);
2318 head->type = IGC_TX_BUFFER_TYPE_XDP;
2322 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
2323 desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2328 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
2329 if (dma_mapping_error(ring->dev, dma)) {
2330 netdev_err_once(ring->netdev,
2331 "Failed to map DMA for TX\n");
2335 dma_unmap_len_set(buffer, len, len);
2336 dma_unmap_addr_set(buffer, dma, dma);
2338 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2339 IGC_ADVTXD_DCMD_IFCS | len;
2341 desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2342 desc->read.buffer_addr = cpu_to_le64(dma);
2344 buffer->protocol = 0;
2346 if (++index == ring->count)
2352 buffer = &ring->tx_buffer_info[index];
2353 desc = IGC_TX_DESC(ring, index);
2354 desc->read.olinfo_status = 0;
2356 data = skb_frag_address(&sinfo->frags[i]);
2357 len = skb_frag_size(&sinfo->frags[i]);
2360 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
2362 netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
2363 /* set the timestamp */
2364 head->time_stamp = jiffies;
2365 /* set next_to_watch value indicating a packet is present */
2366 head->next_to_watch = desc;
2367 ring->next_to_use = index;
2373 buffer = &ring->tx_buffer_info[index];
2374 if (dma_unmap_len(buffer, len))
2375 dma_unmap_page(ring->dev,
2376 dma_unmap_addr(buffer, dma),
2377 dma_unmap_len(buffer, len),
2378 DMA_TO_DEVICE);
2379 dma_unmap_len_set(buffer, len, 0);
2384 index += ring->count;
2391 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
2392 int cpu)
2394 int index = cpu;
2396 if (unlikely(index < 0))
2399 while (index >= adapter->num_tx_queues)
2400 index -= adapter->num_tx_queues;
2402 return adapter->tx_ring[index];
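/* Worked example (hypothetical numbers): with num_tx_queues == 4, the
 * wrap-around above maps CPUs 0..3 to rings 0..3 and CPU 6 to ring 2,
 * so every CPU deterministically resolves to a valid XDP_TX ring.
 */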
2405 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2407 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2408 int cpu = smp_processor_id();
2409 struct netdev_queue *nq;
2410 struct igc_ring *ring;
2413 if (unlikely(!xdpf))
2416 ring = igc_xdp_get_tx_ring(adapter, cpu);
2417 nq = txring_txq(ring);
2419 __netif_tx_lock(nq, cpu);
2420 /* Avoid transmit queue timeout since we share it with the slow path */
2421 txq_trans_cond_update(nq);
2422 res = igc_xdp_init_tx_descriptor(ring, xdpf);
2423 __netif_tx_unlock(nq);
2427 /* This function assumes rcu_read_lock() is held by the caller. */
2428 static int __igc_xdp_run_prog(struct igc_adapter *adapter,
2429 struct bpf_prog *prog,
2430 struct xdp_buff *xdp)
2432 u32 act = bpf_prog_run_xdp(prog, xdp);
2436 return IGC_XDP_PASS;
2438 if (igc_xdp_xmit_back(adapter, xdp) < 0)
2442 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
2444 return IGC_XDP_REDIRECT;
2447 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
2451 trace_xdp_exception(adapter->netdev, prog, act);
2454 return IGC_XDP_CONSUMED;
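/* Illustrative summary (not from the original source): XDP_PASS maps to
 * IGC_XDP_PASS, a successfully queued XDP_TX to IGC_XDP_TX, a successful
 * xdp_do_redirect() to IGC_XDP_REDIRECT, while XDP_DROP and any failed
 * XDP_TX/redirect attempt fall through to IGC_XDP_CONSUMED so the Rx
 * buffer is reclaimed rather than leaked.
 */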
2458 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2459 struct xdp_buff *xdp)
2461 struct bpf_prog *prog;
2464 prog = READ_ONCE(adapter->xdp_prog);
2470 res = __igc_xdp_run_prog(adapter, prog, xdp);
2473 return ERR_PTR(-res);
2476 /* This function assumes __netif_tx_lock is held by the caller. */
2477 static void igc_flush_tx_descriptors(struct igc_ring *ring)
2479 /* Once the tail pointer is updated, hardware can fetch the descriptors
2480 * at any time, so we issue a write memory barrier here to ensure all
2481 * memory writes are complete before the tail pointer is updated.
2482 */
2483 wmb();
2484 writel(ring->next_to_use, ring->tail);
2487 static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
2489 int cpu = smp_processor_id();
2490 struct netdev_queue *nq;
2491 struct igc_ring *ring;
2493 if (status & IGC_XDP_TX) {
2494 ring = igc_xdp_get_tx_ring(adapter, cpu);
2495 nq = txring_txq(ring);
2497 __netif_tx_lock(nq, cpu);
2498 igc_flush_tx_descriptors(ring);
2499 __netif_tx_unlock(nq);
2502 if (status & IGC_XDP_REDIRECT)
2506 static void igc_update_rx_stats(struct igc_q_vector *q_vector,
2507 unsigned int packets, unsigned int bytes)
2509 struct igc_ring *ring = q_vector->rx.ring;
2511 u64_stats_update_begin(&ring->rx_syncp);
2512 ring->rx_stats.packets += packets;
2513 ring->rx_stats.bytes += bytes;
2514 u64_stats_update_end(&ring->rx_syncp);
2516 q_vector->rx.total_packets += packets;
2517 q_vector->rx.total_bytes += bytes;
2520 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2522 unsigned int total_bytes = 0, total_packets = 0;
2523 struct igc_adapter *adapter = q_vector->adapter;
2524 struct igc_ring *rx_ring = q_vector->rx.ring;
2525 struct sk_buff *skb = rx_ring->skb;
2526 u16 cleaned_count = igc_desc_unused(rx_ring);
2527 int xdp_status = 0, rx_buffer_pgcnt;
2529 while (likely(total_packets < budget)) {
2530 union igc_adv_rx_desc *rx_desc;
2531 struct igc_rx_buffer *rx_buffer;
2532 unsigned int size, truesize;
2533 struct igc_xdp_buff ctx;
2534 ktime_t timestamp = 0;
2538 /* return some buffers to hardware, one at a time is too slow */
2539 if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
2540 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2544 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
2545 size = le16_to_cpu(rx_desc->wb.upper.length);
2549 /* This memory barrier is needed to keep us from reading
2550 * any other fields out of the rx_desc until we know the
2551 * descriptor has been written back
2555 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2556 truesize = igc_get_rx_frame_truesize(rx_ring, size);
2558 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
2560 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
2561 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2562 pktbuf);
2563 ctx.rx_ts = timestamp;
2564 pkt_offset = IGC_TS_HDR_LEN;
2565 size -= IGC_TS_HDR_LEN;
2569 xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
2570 xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
2571 igc_rx_offset(rx_ring) + pkt_offset,
2572 size, true);
2573 xdp_buff_clear_frags_flag(&ctx.xdp);
2574 ctx.rx_desc = rx_desc;
2576 skb = igc_xdp_run_prog(adapter, &ctx.xdp);
2580 unsigned int xdp_res = -PTR_ERR(skb);
2583 case IGC_XDP_CONSUMED:
2584 rx_buffer->pagecnt_bias++;
2587 case IGC_XDP_REDIRECT:
2588 igc_rx_buffer_flip(rx_buffer, truesize);
2589 xdp_status |= xdp_res;
2594 total_bytes += size;
2596 igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
2597 else if (ring_uses_build_skb(rx_ring))
2598 skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
2600 skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,
2601 timestamp);
2603 /* exit if we failed to retrieve a buffer */
2605 rx_ring->rx_stats.alloc_failed++;
2606 rx_buffer->pagecnt_bias++;
2610 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2613 /* fetch next buffer in frame if non-eop */
2614 if (igc_is_non_eop(rx_ring, rx_desc))
2617 /* verify the packet layout is correct */
2618 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
2623 /* probably a little skewed due to removing CRC */
2624 total_bytes += skb->len;
2626 /* populate checksum, VLAN, and protocol */
2627 igc_process_skb_fields(rx_ring, rx_desc, skb);
2629 napi_gro_receive(&q_vector->napi, skb);
2631 /* reset skb pointer */
2634 /* update budget accounting */
2639 igc_finalize_xdp(adapter, xdp_status);
2641 /* place incomplete frames back on ring for completion */
2644 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2647 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2649 return total_packets;
2652 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
2653 struct xdp_buff *xdp)
2655 unsigned int totalsize = xdp->data_end - xdp->data_meta;
2656 unsigned int metasize = xdp->data - xdp->data_meta;
2657 struct sk_buff *skb;
2659 net_prefetch(xdp->data_meta);
2661 skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
2662 GFP_ATOMIC | __GFP_NOWARN);
2666 memcpy(__skb_put(skb, totalsize), xdp->data_meta,
2667 ALIGN(totalsize, sizeof(long)));
2670 skb_metadata_set(skb, metasize);
2671 __skb_pull(skb, metasize);
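/* Worked example (hypothetical sizes): with 8 bytes of XDP metadata,
 * data_meta sits 8 bytes below data, so totalsize covers metadata plus
 * payload. After the copy, skb_metadata_set(skb, 8) records the metadata
 * length and __skb_pull(skb, 8) leaves skb->data at the start of the
 * payload with the metadata still accessible just below it.
 */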
2677 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
2678 union igc_adv_rx_desc *desc,
2679 struct xdp_buff *xdp,
2680 ktime_t timestamp)
2682 struct igc_ring *ring = q_vector->rx.ring;
2683 struct sk_buff *skb;
2685 skb = igc_construct_skb_zc(ring, xdp);
2687 ring->rx_stats.alloc_failed++;
2692 skb_hwtstamps(skb)->hwtstamp = timestamp;
2694 if (igc_cleanup_headers(ring, desc, skb))
2697 igc_process_skb_fields(ring, desc, skb);
2698 napi_gro_receive(&q_vector->napi, skb);
2701 static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp)
2703 /* The xdp_buff pointer used by the ZC code path is allocated as an
2704 * xdp_buff_xsk. igc_xdp_buff shares its layout with xdp_buff_xsk, and the
2705 * private igc_xdp_buff fields fall into xdp_buff_xsk->cb.
2706 */
2707 return (struct igc_xdp_buff *)xdp;
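/* Layout sketch (an assumption spelled out for illustration, not new
 * driver code): because igc_xdp_buff embeds a struct xdp_buff as its
 * first member, the extra fields used here (rx_desc, rx_ts) must fit
 * inside xdp_buff_xsk->cb[] for this cast to be legal;
 * XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff) in igc_alloc_rx_buffers_zc()
 * asserts exactly that at build time.
 */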
2710 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2712 struct igc_adapter *adapter = q_vector->adapter;
2713 struct igc_ring *ring = q_vector->rx.ring;
2714 u16 cleaned_count = igc_desc_unused(ring);
2715 int total_bytes = 0, total_packets = 0;
2716 u16 ntc = ring->next_to_clean;
2717 struct bpf_prog *prog;
2718 bool failure = false;
2723 prog = READ_ONCE(adapter->xdp_prog);
2725 while (likely(total_packets < budget)) {
2726 union igc_adv_rx_desc *desc;
2727 struct igc_rx_buffer *bi;
2728 struct igc_xdp_buff *ctx;
2729 ktime_t timestamp = 0;
2733 desc = IGC_RX_DESC(ring, ntc);
2734 size = le16_to_cpu(desc->wb.upper.length);
2738 /* This memory barrier is needed to keep us from reading
2739 * any other fields out of the rx_desc until we know the
2740 * descriptor has been written back
2744 bi = &ring->rx_buffer_info[ntc];
2746 ctx = xsk_buff_to_igc_ctx(bi->xdp);
2747 ctx->rx_desc = desc;
2749 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2750 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2751 bi->xdp->data);
2752 ctx->rx_ts = timestamp;
2754 bi->xdp->data += IGC_TS_HDR_LEN;
2756 /* The HW timestamp has been copied into a local variable; the
2757 * metadata length seen by the XDP program should therefore be 0.
2758 */
2759 bi->xdp->data_meta += IGC_TS_HDR_LEN;
2760 size -= IGC_TS_HDR_LEN;
2763 bi->xdp->data_end = bi->xdp->data + size;
2764 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
2766 res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
2769 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
2771 case IGC_XDP_CONSUMED:
2772 xsk_buff_free(bi->xdp);
2775 case IGC_XDP_REDIRECT:
2781 total_bytes += size;
2785 if (ntc == ring->count)
2789 ring->next_to_clean = ntc;
2792 if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2793 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
2796 igc_finalize_xdp(adapter, xdp_status);
2798 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2800 if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2801 if (failure || ring->next_to_clean == ring->next_to_use)
2802 xsk_set_rx_need_wakeup(ring->xsk_pool);
2804 xsk_clear_rx_need_wakeup(ring->xsk_pool);
2805 return total_packets;
2808 return failure ? budget : total_packets;
2811 static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2812 unsigned int packets, unsigned int bytes)
2814 struct igc_ring *ring = q_vector->tx.ring;
2816 u64_stats_update_begin(&ring->tx_syncp);
2817 ring->tx_stats.bytes += bytes;
2818 ring->tx_stats.packets += packets;
2819 u64_stats_update_end(&ring->tx_syncp);
2821 q_vector->tx.total_bytes += bytes;
2822 q_vector->tx.total_packets += packets;
2825 static void igc_xdp_xmit_zc(struct igc_ring *ring)
2827 struct xsk_buff_pool *pool = ring->xsk_pool;
2828 struct netdev_queue *nq = txring_txq(ring);
2829 union igc_adv_tx_desc *tx_desc = NULL;
2830 int cpu = smp_processor_id();
2831 u16 ntu = ring->next_to_use;
2832 struct xdp_desc xdp_desc;
2835 if (!netif_carrier_ok(ring->netdev))
2838 __netif_tx_lock(nq, cpu);
2840 /* Avoid transmit queue timeout since we share it with the slow path */
2841 txq_trans_cond_update(nq);
2843 budget = igc_desc_unused(ring);
2845 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
2846 u32 cmd_type, olinfo_status;
2847 struct igc_tx_buffer *bi;
2850 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2851 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2852 xdp_desc.len;
2853 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
2855 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2856 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
2858 tx_desc = IGC_TX_DESC(ring, ntu);
2859 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2860 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2861 tx_desc->read.buffer_addr = cpu_to_le64(dma);
2863 bi = &ring->tx_buffer_info[ntu];
2864 bi->type = IGC_TX_BUFFER_TYPE_XSK;
2866 bi->bytecount = xdp_desc.len;
2868 bi->time_stamp = jiffies;
2869 bi->next_to_watch = tx_desc;
2871 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
2874 if (ntu == ring->count)
2878 ring->next_to_use = ntu;
2880 igc_flush_tx_descriptors(ring);
2881 xsk_tx_release(pool);
2884 __netif_tx_unlock(nq);
2888 * igc_clean_tx_irq - Reclaim resources after transmit completes
2889 * @q_vector: pointer to q_vector containing needed info
2890 * @napi_budget: Used to determine if we are in netpoll
2892 * returns true if ring is completely cleaned
2894 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2896 struct igc_adapter *adapter = q_vector->adapter;
2897 unsigned int total_bytes = 0, total_packets = 0;
2898 unsigned int budget = q_vector->tx.work_limit;
2899 struct igc_ring *tx_ring = q_vector->tx.ring;
2900 unsigned int i = tx_ring->next_to_clean;
2901 struct igc_tx_buffer *tx_buffer;
2902 union igc_adv_tx_desc *tx_desc;
2905 if (test_bit(__IGC_DOWN, &adapter->state))
2908 tx_buffer = &tx_ring->tx_buffer_info[i];
2909 tx_desc = IGC_TX_DESC(tx_ring, i);
2910 i -= tx_ring->count;
2913 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2915 /* if next_to_watch is not set then there is no work pending */
2919 /* prevent any other reads prior to eop_desc */
2922 /* if DD is not set pending work has not been completed */
2923 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2926 /* clear next_to_watch to prevent false hangs */
2927 tx_buffer->next_to_watch = NULL;
2929 /* update the statistics for this packet */
2930 total_bytes += tx_buffer->bytecount;
2931 total_packets += tx_buffer->gso_segs;
2933 switch (tx_buffer->type) {
2934 case IGC_TX_BUFFER_TYPE_XSK:
2937 case IGC_TX_BUFFER_TYPE_XDP:
2938 xdp_return_frame(tx_buffer->xdpf);
2939 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2941 case IGC_TX_BUFFER_TYPE_SKB:
2942 napi_consume_skb(tx_buffer->skb, napi_budget);
2943 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2946 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
2950 /* clear last DMA location and unmap remaining buffers */
2951 while (tx_desc != eop_desc) {
2956 i -= tx_ring->count;
2957 tx_buffer = tx_ring->tx_buffer_info;
2958 tx_desc = IGC_TX_DESC(tx_ring, 0);
2961 /* unmap any remaining paged data */
2962 if (dma_unmap_len(tx_buffer, len))
2963 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2966 /* move us one more past the eop_desc for start of next pkt */
2971 i -= tx_ring->count;
2972 tx_buffer = tx_ring->tx_buffer_info;
2973 tx_desc = IGC_TX_DESC(tx_ring, 0);
2976 /* issue prefetch for next Tx descriptor */
2979 /* update budget accounting */
2981 } while (likely(budget));
2983 netdev_tx_completed_queue(txring_txq(tx_ring),
2984 total_packets, total_bytes);
2986 i += tx_ring->count;
2987 tx_ring->next_to_clean = i;
2989 igc_update_tx_stats(q_vector, total_packets, total_bytes);
2991 if (tx_ring->xsk_pool) {
2993 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
2994 if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
2995 xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
2996 igc_xdp_xmit_zc(tx_ring);
2999 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
3000 struct igc_hw *hw = &adapter->hw;
3002 /* Detect a transmit hang in hardware; this serializes the
3003 * check with the clearing of time_stamp and the movement of i
3004 */
3005 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
3006 if (tx_buffer->next_to_watch &&
3007 time_after(jiffies, tx_buffer->time_stamp +
3008 (adapter->tx_timeout_factor * HZ)) &&
3009 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
3010 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) &&
3011 !tx_ring->oper_gate_closed) {
3012 /* detected Tx unit hang */
3013 netdev_err(tx_ring->netdev,
3014 "Detected Tx Unit Hang\n"
3018 " next_to_use <%x>\n"
3019 " next_to_clean <%x>\n"
3020 "buffer_info[next_to_clean]\n"
3021 " time_stamp <%lx>\n"
3022 " next_to_watch <%p>\n"
3024 " desc.status <%x>\n",
3025 tx_ring->queue_index,
3026 rd32(IGC_TDH(tx_ring->reg_idx)),
3027 readl(tx_ring->tail),
3028 tx_ring->next_to_use,
3029 tx_ring->next_to_clean,
3030 tx_buffer->time_stamp,
3031 tx_buffer->next_to_watch,
3032 jiffies,
3033 tx_buffer->next_to_watch->wb.status);
3034 netif_stop_subqueue(tx_ring->netdev,
3035 tx_ring->queue_index);
3037 /* we are about to reset, no point in enabling stuff */
3042 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
3043 if (unlikely(total_packets &&
3044 netif_carrier_ok(tx_ring->netdev) &&
3045 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
3046 /* Make sure that anybody stopping the queue after this
3047 * sees the new next_to_clean.
3050 if (__netif_subqueue_stopped(tx_ring->netdev,
3051 tx_ring->queue_index) &&
3052 !(test_bit(__IGC_DOWN, &adapter->state))) {
3053 netif_wake_subqueue(tx_ring->netdev,
3054 tx_ring->queue_index);
3056 u64_stats_update_begin(&tx_ring->tx_syncp);
3057 tx_ring->tx_stats.restart_queue++;
3058 u64_stats_update_end(&tx_ring->tx_syncp);
3065 static int igc_find_mac_filter(struct igc_adapter *adapter,
3066 enum igc_mac_filter_type type, const u8 *addr)
3068 struct igc_hw *hw = &adapter->hw;
3069 int max_entries = hw->mac.rar_entry_count;
3073 for (i = 0; i < max_entries; i++) {
3074 ral = rd32(IGC_RAL(i));
3075 rah = rd32(IGC_RAH(i));
3077 if (!(rah & IGC_RAH_AV))
3079 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
3081 if ((rah & IGC_RAH_RAH_MASK) !=
3082 le16_to_cpup((__le16 *)(addr + 4)))
3084 if (ral != le32_to_cpup((__le32 *)(addr)))
3093 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
3095 struct igc_hw *hw = &adapter->hw;
3096 int max_entries = hw->mac.rar_entry_count;
3100 for (i = 0; i < max_entries; i++) {
3101 rah = rd32(IGC_RAH(i));
3103 if (!(rah & IGC_RAH_AV))
3111 * igc_add_mac_filter() - Add MAC address filter
3112 * @adapter: Pointer to adapter where the filter should be added
3113 * @type: MAC address filter type (source or destination)
3114 * @addr: MAC address
3115 * @queue: If non-negative, queue assignment feature is enabled and frames
3116 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3117 * assignment is disabled.
3119 * Return: 0 in case of success, negative errno code otherwise.
3121 static int igc_add_mac_filter(struct igc_adapter *adapter,
3122 enum igc_mac_filter_type type, const u8 *addr,
3125 struct net_device *dev = adapter->netdev;
3128 index = igc_find_mac_filter(adapter, type, addr);
3132 index = igc_get_avail_mac_filter_slot(adapter);
3136 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
3137 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
3138 addr, queue);
3141 igc_set_mac_filter_hw(adapter, index, type, addr, queue);
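/* Usage sketch (illustrative only; 'addr' is a caller-provided MAC):
 *
 *	// steer frames with destination MAC 'addr' to Rx queue 2
 *	err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, 2);
 *
 *	// match on source MAC with queue assignment disabled
 *	err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, addr, -1);
 */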
3146 * igc_del_mac_filter() - Delete MAC address filter
3147 * @adapter: Pointer to adapter where the filter should be deleted from
3148 * @type: MAC address filter type (source or destination)
3149 * @addr: MAC address
3151 static void igc_del_mac_filter(struct igc_adapter *adapter,
3152 enum igc_mac_filter_type type, const u8 *addr)
3154 struct net_device *dev = adapter->netdev;
3157 index = igc_find_mac_filter(adapter, type, addr);
3162 /* If this is the default filter, we don't actually delete it.
3163 * We just reset it to its default value i.e. disable queue
3164 * assignment.
3165 */
3166 netdev_dbg(dev, "Disable default MAC filter queue assignment");
3168 igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
3170 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
3171 index,
3172 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
3173 addr);
3175 igc_clear_mac_filter_hw(adapter, index);
3180 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3181 * @adapter: Pointer to adapter where the filter should be added
3182 * @prio: VLAN priority value
3183 * @queue: Queue number which matching frames are assigned to
3185 * Return: 0 in case of success, negative errno code otherwise.
3187 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
3188 int queue)
3190 struct net_device *dev = adapter->netdev;
3191 struct igc_hw *hw = &adapter->hw;
3194 vlanpqf = rd32(IGC_VLANPQF);
3196 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
3197 netdev_dbg(dev, "VLAN priority filter already in use\n");
3201 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
3202 vlanpqf |= IGC_VLANPQF_VALID(prio);
3204 wr32(IGC_VLANPQF, vlanpqf);
3206 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
3207 prio, queue);
3212 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3213 * @adapter: Pointer to adapter where the filter should be deleted from
3214 * @prio: VLAN priority value
3216 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
3218 struct igc_hw *hw = &adapter->hw;
3221 vlanpqf = rd32(IGC_VLANPQF);
3223 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
3224 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
3226 wr32(IGC_VLANPQF, vlanpqf);
3228 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
3229 prio);
3232 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
3234 struct igc_hw *hw = &adapter->hw;
3237 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3238 u32 etqf = rd32(IGC_ETQF(i));
3240 if (!(etqf & IGC_ETQF_FILTER_ENABLE))
3248 * igc_add_etype_filter() - Add ethertype filter
3249 * @adapter: Pointer to adapter where the filter should be added
3250 * @etype: Ethertype value
3251 * @queue: If non-negative, queue assignment feature is enabled and frames
3252 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3253 * assignment is disabled.
3255 * Return: 0 in case of success, negative errno code otherwise.
3257 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
3260 struct igc_hw *hw = &adapter->hw;
3264 index = igc_get_avail_etype_filter_slot(adapter);
3268 etqf = rd32(IGC_ETQF(index));
3270 etqf &= ~IGC_ETQF_ETYPE_MASK;
3274 etqf &= ~IGC_ETQF_QUEUE_MASK;
3275 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
3276 etqf |= IGC_ETQF_QUEUE_ENABLE;
3279 etqf |= IGC_ETQF_FILTER_ENABLE;
3281 wr32(IGC_ETQF(index), etqf);
3283 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
3284 etype, queue);
3288 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
3290 struct igc_hw *hw = &adapter->hw;
3293 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3294 u32 etqf = rd32(IGC_ETQF(i));
3296 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
3304 * igc_del_etype_filter() - Delete ethertype filter
3305 * @adapter: Pointer to adapter where the filter should be deleted from
3306 * @etype: Ethertype value
3308 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
3310 struct igc_hw *hw = &adapter->hw;
3313 index = igc_find_etype_filter(adapter, etype);
3317 wr32(IGC_ETQF(index), 0);
3319 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
3320 etype);
3323 static int igc_flex_filter_select(struct igc_adapter *adapter,
3324 struct igc_flex_filter *input,
3327 struct igc_hw *hw = &adapter->hw;
3331 if (input->index >= MAX_FLEX_FILTER) {
3332 dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
3336 /* Indirect table select register */
3337 fhftsl = rd32(IGC_FHFTSL);
3338 fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
3339 switch (input->index) {
3353 wr32(IGC_FHFTSL, fhftsl);
3355 /* Normalize index down to host table register */
3356 fhft_index = input->index % 8;
3358 *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
3359 IGC_FHFT_EXT(fhft_index - 4);
3364 static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
3365 struct igc_flex_filter *input)
3367 struct device *dev = &adapter->pdev->dev;
3368 struct igc_hw *hw = &adapter->hw;
3369 u8 *data = input->data;
3370 u8 *mask = input->mask;
3377 /* Length has to be aligned to 8. Otherwise the filter will fail. Bail
3378 * out early to avoid surprises later.
3380 if (input->length % 8 != 0) {
3381 dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
3385 /* Select corresponding flex filter register and get base for host table. */
3386 ret = igc_flex_filter_select(adapter, input, &fhft);
3390 /* While adding a filter, globally disable the flex filter feature, as
3391 * recommended by the datasheet.
3392 */
3393 wufc = rd32(IGC_WUFC);
3394 wufc &= ~IGC_WUFC_FLEX_HQ;
3395 wr32(IGC_WUFC, wufc);
3397 /* Configure filter */
3398 queuing = input->length & IGC_FHFT_LENGTH_MASK;
3399 queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
3400 queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
3402 if (input->immediate_irq)
3403 queuing |= IGC_FHFT_IMM_INT;
3406 queuing |= IGC_FHFT_DROP;
3408 wr32(fhft + 0xFC, queuing);
3410 /* Write data (128 bytes) and mask (128 bits) */
3411 for (i = 0; i < 16; ++i) {
3412 const size_t data_idx = i * 8;
3413 const size_t row_idx = i * 16;
3415 (data[data_idx + 0] << 0) |
3416 (data[data_idx + 1] << 8) |
3417 (data[data_idx + 2] << 16) |
3418 (data[data_idx + 3] << 24);
3420 (data[data_idx + 4] << 0) |
3421 (data[data_idx + 5] << 8) |
3422 (data[data_idx + 6] << 16) |
3423 (data[data_idx + 7] << 24);
3426 /* Write row: dw0, dw1 and mask */
3427 wr32(fhft + row_idx, dw0);
3428 wr32(fhft + row_idx + 4, dw1);
3430 /* mask is only valid for MASK(7, 0) */
3431 tmp = rd32(fhft + row_idx + 8);
3432 tmp &= ~GENMASK(7, 0);
3434 wr32(fhft + row_idx + 8, tmp);
3437 /* Enable filter. */
3438 wufc |= IGC_WUFC_FLEX_HQ;
3439 if (input->index >= 8) {
3440 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */
3441 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3443 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
3445 wr32(IGC_WUFC_EXT, wufc_ext);
3447 wufc |= (IGC_WUFC_FLX0 << input->index);
3449 wr32(IGC_WUFC, wufc);
3451 dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
3452 input->index);
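/* Register layout sketch (derived from the loop above, for illustration):
 * each of the 16 rows occupies 16 bytes of the host flex table; row i
 * holds data[i*8 .. i*8+3] at fhft + i*16, data[i*8+4 .. i*8+7] at
 * fhft + i*16 + 4, and mask[i] in the low byte of fhft + i*16 + 8, while
 * the queuing word lives at fhft + 0xFC.
 */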
3457 static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
3458 const void *src, unsigned int offset,
3459 size_t len, const void *mask)
3464 memcpy(&flex->data[offset], src, len);
3467 for (i = 0; i < len; ++i) {
3468 const unsigned int idx = i + offset;
3469 const u8 *ptr = mask;
3473 flex->mask[idx / 8] |= BIT(idx % 8);
3478 flex->mask[idx / 8] |= BIT(idx % 8);
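/* Worked example (hypothetical field): adding a 2-byte field at offset
 * 12 copies src into data[12..13] and sets mask bits 12 and 13, i.e.
 * mask[1] |= BIT(4) | BIT(5) == 0x30, so only those two bytes of the
 * 128-byte pattern take part in matching.
 */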
3482 static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
3484 struct igc_hw *hw = &adapter->hw;
3488 wufc = rd32(IGC_WUFC);
3489 wufc_ext = rd32(IGC_WUFC_EXT);
3491 for (i = 0; i < MAX_FLEX_FILTER; i++) {
3493 if (!(wufc & (IGC_WUFC_FLX0 << i)))
3496 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
3504 static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
3506 struct igc_hw *hw = &adapter->hw;
3509 wufc = rd32(IGC_WUFC);
3510 wufc_ext = rd32(IGC_WUFC_EXT);
3512 if (wufc & IGC_WUFC_FILTER_MASK)
3515 if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
3521 static int igc_add_flex_filter(struct igc_adapter *adapter,
3522 struct igc_nfc_rule *rule)
3524 struct igc_flex_filter flex = { };
3525 struct igc_nfc_filter *filter = &rule->filter;
3526 unsigned int eth_offset, user_offset;
3530 index = igc_find_avail_flex_filter_slot(adapter);
3534 /* Construct the flex filter:
3535 * -> dest_mac [6]
3536 * -> src_mac [6]
3537 * -> tpid [2]
3538 * -> vlan tci [2]
3539 * -> ether type [2]
3540 * -> user data [8]
3541 * -> = 26 bytes => 32 length
3545 flex.rx_queue = rule->action;
3547 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
3548 eth_offset = vlan ? 16 : 12;
3549 user_offset = vlan ? 18 : 14;
3551 /* Add destination MAC */
3552 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3553 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
3554 ETH_ALEN, NULL);
3556 /* Add source MAC */
3557 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3558 igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
3559 ETH_ALEN, NULL);
3561 /* Add VLAN etype */
3562 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
3563 igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
3564 sizeof(filter->vlan_etype),
3565 NULL);
3567 /* Add VLAN TCI */
3568 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
3569 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
3570 sizeof(filter->vlan_tci), NULL);
3572 /* Add Ether type */
3573 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3574 __be16 etype = cpu_to_be16(filter->etype);
3576 igc_flex_filter_add_field(&flex, &etype, eth_offset,
3577 sizeof(etype), NULL);
3581 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
3582 igc_flex_filter_add_field(&flex, &filter->user_data,
3583 user_offset,
3584 sizeof(filter->user_data),
3585 filter->user_mask);
3587 /* Add it down to the hardware and enable it. */
3588 ret = igc_write_flex_filter_ll(adapter, &flex);
3592 filter->flex_index = index;
3597 static void igc_del_flex_filter(struct igc_adapter *adapter,
3600 struct igc_hw *hw = &adapter->hw;
3603 /* Just disable the filter. The filter table itself is kept
3604 * intact. Another flex_filter_add() should then override the "old"
3605 * data.
3606 */
3607 if (reg_index >= 8) {
3608 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3610 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
3611 wr32(IGC_WUFC_EXT, wufc_ext);
3613 wufc = rd32(IGC_WUFC);
3615 wufc &= ~(IGC_WUFC_FLX0 << reg_index);
3616 wr32(IGC_WUFC, wufc);
3619 if (igc_flex_filter_in_use(adapter))
3622 /* No filters are in use, we may disable flex filters */
3623 wufc = rd32(IGC_WUFC);
3624 wufc &= ~IGC_WUFC_FLEX_HQ;
3625 wr32(IGC_WUFC, wufc);
3628 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
3629 struct igc_nfc_rule *rule)
3634 return igc_add_flex_filter(adapter, rule);
3637 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3638 err = igc_add_etype_filter(adapter, rule->filter.etype,
3639 rule->action);
3644 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
3645 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3646 rule->filter.src_addr, rule->action);
3651 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
3652 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3653 rule->filter.dst_addr, rule->action);
3658 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3659 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3660 VLAN_PRIO_SHIFT;
3662 err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
3670 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
3671 const struct igc_nfc_rule *rule)
3674 igc_del_flex_filter(adapter, rule->filter.flex_index);
3678 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
3679 igc_del_etype_filter(adapter, rule->filter.etype);
3681 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3682 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3683 VLAN_PRIO_SHIFT;
3685 igc_del_vlan_prio_filter(adapter, prio);
3688 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3689 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3690 rule->filter.src_addr);
3692 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3693 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3694 rule->filter.dst_addr);
3698 * igc_get_nfc_rule() - Get NFC rule
3699 * @adapter: Pointer to adapter
3700 * @location: Rule location
3702 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3704 * Return: Pointer to NFC rule at @location. If not found, NULL.
3706 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
3709 struct igc_nfc_rule *rule;
3711 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
3712 if (rule->location == location)
3714 if (rule->location > location)
3722 * igc_del_nfc_rule() - Delete NFC rule
3723 * @adapter: Pointer to adapter
3724 * @rule: Pointer to rule to be deleted
3726 * Disable NFC rule in hardware and delete it from adapter.
3728 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3730 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3732 igc_disable_nfc_rule(adapter, rule);
3734 list_del(&rule->list);
3735 adapter->nfc_rule_count--;
3740 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
3742 struct igc_nfc_rule *rule, *tmp;
3744 mutex_lock(&adapter->nfc_rule_lock);
3746 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
3747 igc_del_nfc_rule(adapter, rule);
3749 mutex_unlock(&adapter->nfc_rule_lock);
3753 * igc_add_nfc_rule() - Add NFC rule
3754 * @adapter: Pointer to adapter
3755 * @rule: Pointer to rule to be added
3757 * Enable NFC rule in hardware and add it to adapter.
3759 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3761 * Return: 0 on success, negative errno on failure.
3763 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3765 struct igc_nfc_rule *pred, *cur;
3768 err = igc_enable_nfc_rule(adapter, rule);
3773 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
3774 if (cur->location >= rule->location)
3779 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
3780 adapter->nfc_rule_count++;
3784 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
3786 struct igc_nfc_rule *rule;
3788 mutex_lock(&adapter->nfc_rule_lock);
3790 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
3791 igc_enable_nfc_rule(adapter, rule);
3793 mutex_unlock(&adapter->nfc_rule_lock);
3796 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
3798 struct igc_adapter *adapter = netdev_priv(netdev);
3800 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
3803 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
3805 struct igc_adapter *adapter = netdev_priv(netdev);
3807 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
3812 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3813 * @netdev: network interface device structure
3815 * The set_rx_mode entry point is called whenever the unicast or multicast
3816 * address lists or the network interface flags are updated. This routine is
3817 * responsible for configuring the hardware for proper unicast, multicast,
3818 * promiscuous mode, and all-multi behavior.
3820 static void igc_set_rx_mode(struct net_device *netdev)
3822 struct igc_adapter *adapter = netdev_priv(netdev);
3823 struct igc_hw *hw = &adapter->hw;
3824 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
3827 /* Check for Promiscuous and All Multicast modes */
3828 if (netdev->flags & IFF_PROMISC) {
3829 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
3831 if (netdev->flags & IFF_ALLMULTI) {
3832 rctl |= IGC_RCTL_MPE;
3834 /* Write addresses to the MTA, if the attempt fails
3835 * then we should just turn on promiscuous mode so
3836 * that we can at least receive multicast traffic
3838 count = igc_write_mc_addr_list(netdev);
3840 rctl |= IGC_RCTL_MPE;
3844 /* Write addresses to available RAR registers, if there is not
3845 * sufficient space to store all the addresses then enable
3846 * unicast promiscuous mode
3848 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
3849 rctl |= IGC_RCTL_UPE;
3851 /* update state of unicast and multicast */
3852 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
3853 wr32(IGC_RCTL, rctl);
3855 #if (PAGE_SIZE < 8192)
3856 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
3857 rlpml = IGC_MAX_FRAME_BUILD_SKB;
3859 wr32(IGC_RLPML, rlpml);
3863 * igc_configure - configure the hardware for RX and TX
3864 * @adapter: private board structure
3866 static void igc_configure(struct igc_adapter *adapter)
3868 struct net_device *netdev = adapter->netdev;
3871 igc_get_hw_control(adapter);
3872 igc_set_rx_mode(netdev);
3874 igc_restore_vlan(adapter);
3876 igc_setup_tctl(adapter);
3877 igc_setup_mrqc(adapter);
3878 igc_setup_rctl(adapter);
3880 igc_set_default_mac_filter(adapter);
3881 igc_restore_nfc_rules(adapter);
3883 igc_configure_tx(adapter);
3884 igc_configure_rx(adapter);
3886 igc_rx_fifo_flush_base(&adapter->hw);
3888 /* call igc_desc_unused which always leaves
3889 * at least 1 descriptor unused to make sure
3890 * next_to_use != next_to_clean
3892 for (i = 0; i < adapter->num_rx_queues; i++) {
3893 struct igc_ring *ring = adapter->rx_ring[i];
3895 if (ring->xsk_pool)
3896 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
3897 else
3898 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
3903 * igc_write_ivar - configure ivar for given MSI-X vector
3904 * @hw: pointer to the HW structure
3905 * @msix_vector: vector number we are allocating to a given ring
3906 * @index: row index of IVAR register to write within IVAR table
3907 * @offset: column offset of in IVAR, should be multiple of 8
3909 * The IVAR table consists of 2 columns,
3910 * each containing a cause allocation for an Rx and Tx ring, and a
3911 * variable number of rows depending on the number of queues supported.
3913 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
3914 int index, int offset)
3916 u32 ivar = array_rd32(IGC_IVAR0, index);
3918 /* clear any bits that are currently set */
3919 ivar &= ~((u32)0xFF << offset);
3921 /* write vector and valid bit */
3922 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
3924 array_wr32(IGC_IVAR0, index, ivar);
3927 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
3929 struct igc_adapter *adapter = q_vector->adapter;
3930 struct igc_hw *hw = &adapter->hw;
3931 int rx_queue = IGC_N0_QUEUE;
3932 int tx_queue = IGC_N0_QUEUE;
3934 if (q_vector->rx.ring)
3935 rx_queue = q_vector->rx.ring->reg_idx;
3936 if (q_vector->tx.ring)
3937 tx_queue = q_vector->tx.ring->reg_idx;
3939 switch (hw->mac.type) {
3941 if (rx_queue > IGC_N0_QUEUE)
3942 igc_write_ivar(hw, msix_vector,
3943 rx_queue >> 1,
3944 (rx_queue & 0x1) << 4);
3945 if (tx_queue > IGC_N0_QUEUE)
3946 igc_write_ivar(hw, msix_vector,
3947 tx_queue >> 1,
3948 ((tx_queue & 0x1) << 4) + 8);
3949 q_vector->eims_value = BIT(msix_vector);
3952 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
3956 /* add q_vector eims value to global eims_enable_mask */
3957 adapter->eims_enable_mask |= q_vector->eims_value;
3959 /* configure q_vector to set itr on first interrupt */
3960 q_vector->set_itr = 1;
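/* Worked example (illustrative, assuming the two-queues-per-row IVAR
 * layout used above): Rx queue 3 programs IVAR row 3 >> 1 == 1 at bit
 * offset (3 & 0x1) << 4 == 16, while Tx queue 2 programs row 1 at
 * offset ((2 & 0x1) << 4) + 8 == 8; each byte carries the vector number
 * plus IGC_IVAR_VALID.
 */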
3964 * igc_configure_msix - Configure MSI-X hardware
3965 * @adapter: Pointer to adapter structure
3967 * igc_configure_msix sets up the hardware to properly
3968 * generate MSI-X interrupts.
3970 static void igc_configure_msix(struct igc_adapter *adapter)
3972 struct igc_hw *hw = &adapter->hw;
3976 adapter->eims_enable_mask = 0;
3978 /* set vector for other causes, i.e. link changes */
3979 switch (hw->mac.type) {
3981 /* Turn on MSI-X capability first, or our settings
3982 * won't stick. And it will take days to debug.
3984 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
3985 IGC_GPIE_PBA | IGC_GPIE_EIAME |
3986 IGC_GPIE_NSICR);
3988 /* enable msix_other interrupt */
3989 adapter->eims_other = BIT(vector);
3990 tmp = (vector++ | IGC_IVAR_VALID) << 8;
3992 wr32(IGC_IVAR_MISC, tmp);
3995 /* do nothing, since nothing else supports MSI-X */
3997 } /* switch (hw->mac.type) */
3999 adapter->eims_enable_mask |= adapter->eims_other;
4001 for (i = 0; i < adapter->num_q_vectors; i++)
4002 igc_assign_vector(adapter->q_vector[i], vector++);
4008 * igc_irq_enable - Enable default interrupt generation settings
4009 * @adapter: board private structure
4011 static void igc_irq_enable(struct igc_adapter *adapter)
4013 struct igc_hw *hw = &adapter->hw;
4015 if (adapter->msix_entries) {
4016 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
4017 u32 regval = rd32(IGC_EIAC);
4019 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
4020 regval = rd32(IGC_EIAM);
4021 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
4022 wr32(IGC_EIMS, adapter->eims_enable_mask);
4025 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
4026 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
4031 * igc_irq_disable - Mask off interrupt generation on the NIC
4032 * @adapter: board private structure
4034 static void igc_irq_disable(struct igc_adapter *adapter)
4036 struct igc_hw *hw = &adapter->hw;
4038 if (adapter->msix_entries) {
4039 u32 regval = rd32(IGC_EIAM);
4041 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
4042 wr32(IGC_EIMC, adapter->eims_enable_mask);
4043 regval = rd32(IGC_EIAC);
4044 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
4051 if (adapter->msix_entries) {
4054 synchronize_irq(adapter->msix_entries[vector++].vector);
4056 for (i = 0; i < adapter->num_q_vectors; i++)
4057 synchronize_irq(adapter->msix_entries[vector++].vector);
4059 synchronize_irq(adapter->pdev->irq);
4063 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
4064 const u32 max_rss_queues)
4066 /* Determine if we need to pair queues. */
4067 /* If rss_queues > half of max_rss_queues, pair the queues in
4068 * order to conserve interrupts due to limited supply.
4070 if (adapter->rss_queues > (max_rss_queues / 2))
4071 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4073 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
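/* Example (hypothetical configuration): with max_rss_queues == 4,
 * rss_queues == 2 leaves queue pairing off (separate Tx and Rx vectors),
 * whereas rss_queues == 3 or 4 exceeds half the maximum and sets
 * IGC_FLAG_QUEUE_PAIRS so one vector services both rings of a queue pair.
 */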
4076 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
4078 return IGC_MAX_RX_QUEUES;
4081 static void igc_init_queue_configuration(struct igc_adapter *adapter)
4085 max_rss_queues = igc_get_max_rss_queues(adapter);
4086 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
4088 igc_set_flag_queue_pairs(adapter, max_rss_queues);
4092 * igc_reset_q_vector - Reset config for interrupt vector
4093 * @adapter: board private structure to initialize
4094 * @v_idx: Index of vector to be reset
4096 * If NAPI is enabled it will delete any references to the
4097 * NAPI struct. This is preparation for igc_free_q_vector.
4099 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
4101 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
4103 /* if we're coming from igc_set_interrupt_capability, the vectors are
4104 * not yet allocated
4105 */
4106 if (!q_vector)
4107 return;
4109 if (q_vector->tx.ring)
4110 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
4112 if (q_vector->rx.ring)
4113 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
4115 netif_napi_del(&q_vector->napi);
4119 * igc_free_q_vector - Free memory allocated for specific interrupt vector
4120 * @adapter: board private structure to initialize
4121 * @v_idx: Index of vector to be freed
4123 * This function frees the memory allocated to the q_vector.
4125 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
4127 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
4129 adapter->q_vector[v_idx] = NULL;
4131 /* igc_get_stats64() might access the rings on this vector,
4132 * so we must wait a grace period before freeing it.
4133 */
4135 kfree_rcu(q_vector, rcu);
4139 * igc_free_q_vectors - Free memory allocated for interrupt vectors
4140 * @adapter: board private structure to initialize
4142 * This function frees the memory allocated to the q_vectors. In addition if
4143 * NAPI is enabled it will delete any references to the NAPI struct prior
4144 * to freeing the q_vector.
4146 static void igc_free_q_vectors(struct igc_adapter *adapter)
4148 int v_idx = adapter->num_q_vectors;
4150 adapter->num_tx_queues = 0;
4151 adapter->num_rx_queues = 0;
4152 adapter->num_q_vectors = 0;
4155 igc_reset_q_vector(adapter, v_idx);
4156 igc_free_q_vector(adapter, v_idx);
4161 * igc_update_itr - update the dynamic ITR value based on statistics
4162 * @q_vector: pointer to q_vector
4163 * @ring_container: ring info to update the itr for
4165 * Stores a new ITR value based on packets and byte
4166 * counts during the last interrupt. The advantage of per interrupt
4167 * computation is faster updates and more accurate ITR for the current
4168 * traffic pattern. Constants in this function were computed
4169 * based on theoretical maximum wire speed and thresholds were set based
4170 * on testing data as well as attempting to minimize response time
4171 * while increasing bulk throughput.
4172 * NOTE: These calculations are only valid when operating in a single-
4173 * queue environment.
4175 static void igc_update_itr(struct igc_q_vector *q_vector,
4176 struct igc_ring_container *ring_container)
4178 unsigned int packets = ring_container->total_packets;
4179 unsigned int bytes = ring_container->total_bytes;
4180 u8 itrval = ring_container->itr;
4182 /* no packets, exit with status unchanged */
4187 case lowest_latency:
4188 /* handle TSO and jumbo frames */
4189 if (bytes / packets > 8000)
4190 itrval = bulk_latency;
4191 else if ((packets < 5) && (bytes > 512))
4192 itrval = low_latency;
4194 case low_latency: /* 50 usec aka 20000 ints/s */
4195 if (bytes > 10000) {
4196 /* this if handles the TSO accounting */
4197 if (bytes / packets > 8000)
4198 itrval = bulk_latency;
4199 else if ((packets < 10) || ((bytes / packets) > 1200))
4200 itrval = bulk_latency;
4201 else if ((packets > 35))
4202 itrval = lowest_latency;
4203 } else if (bytes / packets > 2000) {
4204 itrval = bulk_latency;
4205 } else if (packets <= 2 && bytes < 512) {
4206 itrval = lowest_latency;
4209 case bulk_latency: /* 250 usec aka 4000 ints/s */
4210 if (bytes > 25000) {
4212 itrval = low_latency;
4213 } else if (bytes < 1500) {
4214 itrval = low_latency;
4219 /* clear work counters since we have the values we need */
4220 ring_container->total_bytes = 0;
4221 ring_container->total_packets = 0;
4223 /* write updated itr to ring container */
4224 ring_container->itr = itrval;
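/* Worked example (hypothetical traffic): starting in low_latency with
 * 40 packets / 30000 bytes since the last interrupt, bytes > 10000 and
 * bytes/packets == 750 <= 1200 with packets > 35, so itrval moves to
 * lowest_latency; the same 30000 bytes in just 8 packets (3750 B/pkt)
 * would instead drop to bulk_latency.
 */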
4227 static void igc_set_itr(struct igc_q_vector *q_vector)
4229 struct igc_adapter *adapter = q_vector->adapter;
4230 u32 new_itr = q_vector->itr_val;
4233 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4234 switch (adapter->link_speed) {
4238 new_itr = IGC_4K_ITR;
4244 igc_update_itr(q_vector, &q_vector->tx);
4245 igc_update_itr(q_vector, &q_vector->rx);
4247 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
4249 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4250 if (current_itr == lowest_latency &&
4251 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4252 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4253 current_itr = low_latency;
4255 switch (current_itr) {
4256 /* counts and packets in update_itr are dependent on these numbers */
4257 case lowest_latency:
4258 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
4261 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
4264 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
4271 if (new_itr != q_vector->itr_val) {
4272 /* this attempts to bias the interrupt rate towards Bulk
4273 * by adding intermediate steps when interrupt rate is
4274 * increasing
4275 */
4276 new_itr = new_itr > q_vector->itr_val ?
4277 max((new_itr * q_vector->itr_val) /
4278 (new_itr + (q_vector->itr_val >> 2)),
4279 new_itr) : new_itr;
4280 /* Don't write the value here; it resets the adapter's
4281 * internal timer, and causes us to delay far longer than
4282 * we should between interrupts. Instead, we write the ITR
4283 * value at the beginning of the next interrupt so the timing
4284 * ends up being correct.
4286 q_vector->itr_val = new_itr;
4287 q_vector->set_itr = 1;
4291 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
4293 int v_idx = adapter->num_q_vectors;
4295 if (adapter->msix_entries) {
4296 pci_disable_msix(adapter->pdev);
4297 kfree(adapter->msix_entries);
4298 adapter->msix_entries = NULL;
4299 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
4300 pci_disable_msi(adapter->pdev);
4304 igc_reset_q_vector(adapter, v_idx);
4308 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4309 * @adapter: Pointer to adapter structure
4310 * @msix: boolean value for MSI-X capability
4312 * Attempt to configure interrupts using the best available
4313 * capabilities of the hardware and kernel.
4315 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
4316 bool msix)
4323 adapter->flags |= IGC_FLAG_HAS_MSIX;
4325 /* Number of supported queues. */
4326 adapter->num_rx_queues = adapter->rss_queues;
4328 adapter->num_tx_queues = adapter->rss_queues;
4330 /* start with one vector for every Rx queue */
4331 numvecs = adapter->num_rx_queues;
4333 /* if Tx handler is separate add 1 for every Tx queue */
4334 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
4335 numvecs += adapter->num_tx_queues;
4337 /* store the number of vectors reserved for queues */
4338 adapter->num_q_vectors = numvecs;
4340 /* add 1 vector for link status interrupts */
4343 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
4346 if (!adapter->msix_entries)
4349 /* populate entry values */
4350 for (i = 0; i < numvecs; i++)
4351 adapter->msix_entries[i].entry = i;
4353 err = pci_enable_msix_range(adapter->pdev,
4354 adapter->msix_entries,
4355 numvecs,
4356 numvecs);
4357 if (err > 0)
4358 return;
4360 kfree(adapter->msix_entries);
4361 adapter->msix_entries = NULL;
4363 igc_reset_interrupt_capability(adapter);
4366 adapter->flags &= ~IGC_FLAG_HAS_MSIX;
4368 adapter->rss_queues = 1;
4369 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4370 adapter->num_rx_queues = 1;
4371 adapter->num_tx_queues = 1;
4372 adapter->num_q_vectors = 1;
4373 if (!pci_enable_msi(adapter->pdev))
4374 adapter->flags |= IGC_FLAG_HAS_MSI;
4378 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4379 * @q_vector: pointer to q_vector
4381 * Stores a new ITR value based strictly on packet size. This
4382 * algorithm is less sophisticated than that used in igc_update_itr,
4383 * due to the difficulty of synchronizing statistics across multiple
4384 * receive rings. The divisors and thresholds used by this function
4385 * were determined based on theoretical maximum wire speed and testing
4386 * data, in order to minimize response time while increasing bulk
4387 * throughput.
4388 * NOTE: This function is called only when operating in a multiqueue
4389 * receive environment.
4391 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
4393 struct igc_adapter *adapter = q_vector->adapter;
4394 int new_val = q_vector->itr_val;
4395 int avg_wire_size = 0;
4396 unsigned int packets;
4398 /* For non-gigabit speeds, just fix the interrupt rate at 4000
4399 * ints/sec - ITR timer value of 120 ticks.
4401 switch (adapter->link_speed) {
4404 new_val = IGC_4K_ITR;
4410 packets = q_vector->rx.total_packets;
4411 if (packets)
4412 avg_wire_size = q_vector->rx.total_bytes / packets;
4414 packets = q_vector->tx.total_packets;
4415 if (packets)
4416 avg_wire_size = max_t(u32, avg_wire_size,
4417 q_vector->tx.total_bytes / packets);
4419 /* if avg_wire_size isn't set no work was done */
4423 /* Add 24 bytes to size to account for CRC, preamble, and gap */
4424 avg_wire_size += 24;
4426 /* Don't starve jumbo frames */
4427 avg_wire_size = min(avg_wire_size, 3000);
4429 /* Give a little boost to mid-size frames */
4430 if (avg_wire_size > 300 && avg_wire_size < 1200)
4431 new_val = avg_wire_size / 3;
4433 new_val = avg_wire_size / 2;
4435 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4436 if (new_val < IGC_20K_ITR &&
4437 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4438 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4439 new_val = IGC_20K_ITR;
4442 if (new_val != q_vector->itr_val) {
4443 q_vector->itr_val = new_val;
4444 q_vector->set_itr = 1;
4447 q_vector->rx.total_bytes = 0;
4448 q_vector->rx.total_packets = 0;
4449 q_vector->tx.total_bytes = 0;
4450 q_vector->tx.total_packets = 0;
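/* Worked example (hypothetical traffic): a stream of 64-byte frames
 * gives avg_wire_size = 64 + 24 = 88, so new_val = 88 / 2 = 44 (a very
 * high interrupt rate); 900-byte frames give 924, inside the 300..1200
 * "boost" band, so new_val = 924 / 3 = 308. In conservative mode (itr
 * setting 3) any result below IGC_20K_ITR is raised to IGC_20K_ITR.
 */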
4453 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
4455 struct igc_adapter *adapter = q_vector->adapter;
4456 struct igc_hw *hw = &adapter->hw;
4458 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
4459 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
4460 if (adapter->num_q_vectors == 1)
4461 igc_set_itr(q_vector);
4463 igc_update_ring_itr(q_vector);
4466 if (!test_bit(__IGC_DOWN, &adapter->state)) {
4467 if (adapter->msix_entries)
4468 wr32(IGC_EIMS, q_vector->eims_value);
4470 igc_irq_enable(adapter);
4474 static void igc_add_ring(struct igc_ring *ring,
4475 struct igc_ring_container *head)
4482 * igc_cache_ring_register - Descriptor ring to register mapping
4483 * @adapter: board private structure to initialize
4485 * Once we know the feature-set enabled for the device, we'll cache
4486 * the register offset the descriptor ring is assigned to.
4488 static void igc_cache_ring_register(struct igc_adapter *adapter)
4492 switch (adapter->hw.mac.type) {
4495 for (; i < adapter->num_rx_queues; i++)
4496 adapter->rx_ring[i]->reg_idx = i;
4497 for (; j < adapter->num_tx_queues; j++)
4498 adapter->tx_ring[j]->reg_idx = j;
4504 * igc_poll - NAPI Rx polling callback
4505 * @napi: napi polling structure
4506 * @budget: count of how many packets we should handle
4508 static int igc_poll(struct napi_struct *napi, int budget)
4510 struct igc_q_vector *q_vector = container_of(napi,
4511 struct igc_q_vector,
4513 struct igc_ring *rx_ring = q_vector->rx.ring;
4514 bool clean_complete = true;
4517 if (q_vector->tx.ring)
4518 clean_complete = igc_clean_tx_irq(q_vector, budget);
4521 int cleaned = rx_ring->xsk_pool ?
4522 igc_clean_rx_irq_zc(q_vector, budget) :
4523 igc_clean_rx_irq(q_vector, budget);
4525 work_done += cleaned;
4526 if (cleaned >= budget)
4527 clean_complete = false;
4530 /* If all work is not completed, return budget and keep polling */
4531 if (!clean_complete)
4532 return budget;
4534 /* Exit the polling mode, but don't re-enable interrupts if stack might
4535 * poll us due to busy-polling
4537 if (likely(napi_complete_done(napi, work_done)))
4538 igc_ring_irq_enable(q_vector);
4540 return min(work_done, budget - 1);
4544 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4545 * @adapter: board private structure to initialize
4546 * @v_count: q_vectors allocated on adapter, used for ring interleaving
4547 * @v_idx: index of vector in adapter struct
4548 * @txr_count: total number of Tx rings to allocate
4549 * @txr_idx: index of first Tx ring to allocate
4550 * @rxr_count: total number of Rx rings to allocate
4551 * @rxr_idx: index of first Rx ring to allocate
4553 * We allocate one q_vector. If allocation fails we return -ENOMEM.
4555 static int igc_alloc_q_vector(struct igc_adapter *adapter,
4556 unsigned int v_count, unsigned int v_idx,
4557 unsigned int txr_count, unsigned int txr_idx,
4558 unsigned int rxr_count, unsigned int rxr_idx)
4560 struct igc_q_vector *q_vector;
4561 struct igc_ring *ring;
4564 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
4565 if (txr_count > 1 || rxr_count > 1)
4568 ring_count = txr_count + rxr_count;
4570 /* allocate q_vector and rings */
4571 q_vector = adapter->q_vector[v_idx];
4572 if (!q_vector)
4573 q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
4574 GFP_KERNEL);
4575 else
4576 memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
4580 /* initialize NAPI */
4581 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
4583 /* tie q_vector and adapter together */
4584 adapter->q_vector[v_idx] = q_vector;
4585 q_vector->adapter = adapter;
4587 /* initialize work limits */
4588 q_vector->tx.work_limit = adapter->tx_work_limit;
4590 /* initialize ITR configuration */
4591 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
4592 q_vector->itr_val = IGC_START_ITR;
4594 /* initialize pointer to rings */
4595 ring = q_vector->ring;
4597 /* initialize ITR */
4599 /* rx or rx/tx vector */
4600 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
4601 q_vector->itr_val = adapter->rx_itr_setting;
4603 /* tx only vector */
4604 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
4605 q_vector->itr_val = adapter->tx_itr_setting;
4609 /* assign generic ring traits */
4610 ring->dev = &adapter->pdev->dev;
4611 ring->netdev = adapter->netdev;
4613 /* configure backlink on ring */
4614 ring->q_vector = q_vector;
4616 /* update q_vector Tx values */
4617 igc_add_ring(ring, &q_vector->tx);
4619 /* apply Tx specific ring traits */
4620 ring->count = adapter->tx_ring_count;
4621 ring->queue_index = txr_idx;
4623 /* assign ring to adapter */
4624 adapter->tx_ring[txr_idx] = ring;
4626 /* push pointer to next ring */
4631 /* assign generic ring traits */
4632 ring->dev = &adapter->pdev->dev;
4633 ring->netdev = adapter->netdev;
4635 /* configure backlink on ring */
4636 ring->q_vector = q_vector;
4638 /* update q_vector Rx values */
4639 igc_add_ring(ring, &q_vector->rx);
4641 /* apply Rx specific ring traits */
4642 ring->count = adapter->rx_ring_count;
4643 ring->queue_index = rxr_idx;
4645 /* assign ring to adapter */
4646 adapter->rx_ring[rxr_idx] = ring;
4647 }
4649 return 0;
4650 }
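/* The q_vector and its rings share one allocation: struct igc_q_vector
 * ends in a flexible array of struct igc_ring, sized via struct_size()
 * above, so freeing the q_vector also frees the one Tx and/or one Rx
 * ring carved out of it.
 */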
4653 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4654 * @adapter: board private structure to initialize
4656 * We allocate one q_vector per queue interrupt. If allocation fails we
4657 * return -ENOMEM.
4658 */
4659 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
4660 {
4661 int rxr_remaining = adapter->num_rx_queues;
4662 int txr_remaining = adapter->num_tx_queues;
4663 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
4664 int q_vectors = adapter->num_q_vectors;
4665 int err;
4667 if (q_vectors >= (rxr_remaining + txr_remaining)) {
4668 for (; rxr_remaining; v_idx++) {
4669 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4670 0, 0, 1, rxr_idx);
4671 if (err)
4672 goto err_out;
4675 /* update counts and index */
4676 rxr_remaining--;
4677 rxr_idx++;
4678 }
4679 }
4681 for (; v_idx < q_vectors; v_idx++) {
4682 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
4683 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
4685 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4686 tqpv, txr_idx, rqpv, rxr_idx);
4691 /* update counts and index */
4692 rxr_remaining -= rqpv;
4693 txr_remaining -= tqpv;
4694 rxr_idx++;
4695 txr_idx++;
4696 }
4698 return 0;
4700 err_out:
4701 adapter->num_tx_queues = 0;
4702 adapter->num_rx_queues = 0;
4703 adapter->num_q_vectors = 0;
4705 while (v_idx--)
4706 igc_free_q_vector(adapter, v_idx);
4708 return -ENOMEM;
4709 }
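/* A worked example of the distribution above: with 4 Tx and 4 Rx
 * queues and 8 vectors, the first branch gives each Rx ring its own
 * vector and the second loop hands one Tx ring to each remaining
 * vector. With only 4 vectors the first branch is skipped and
 * DIV_ROUND_UP() pairs one Tx and one Rx ring per vector.
 */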
4712 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4713 * @adapter: Pointer to adapter structure
4714 * @msix: boolean for MSI-X capability
4716 * This function initializes the interrupts and allocates all of the queues.
4717 */
4718 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
4719 {
4720 struct net_device *dev = adapter->netdev;
4721 int err = 0;
4723 igc_set_interrupt_capability(adapter, msix);
4725 err = igc_alloc_q_vectors(adapter);
4726 if (err) {
4727 netdev_err(dev, "Unable to allocate memory for vectors\n");
4728 goto err_alloc_q_vectors;
4729 }
4731 igc_cache_ring_register(adapter);
4733 return 0;
4735 err_alloc_q_vectors:
4736 igc_reset_interrupt_capability(adapter);
4737 return err;
4738 }
4741 * igc_sw_init - Initialize general software structures (struct igc_adapter)
4742 * @adapter: board private structure to initialize
4744 * igc_sw_init initializes the Adapter private data structure.
4745 * Fields are initialized based on PCI device information and
4746 * OS network device settings (MTU size).
4747 */
4748 static int igc_sw_init(struct igc_adapter *adapter)
4749 {
4750 struct net_device *netdev = adapter->netdev;
4751 struct pci_dev *pdev = adapter->pdev;
4752 struct igc_hw *hw = &adapter->hw;
4754 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
4756 /* set default ring sizes */
4757 adapter->tx_ring_count = IGC_DEFAULT_TXD;
4758 adapter->rx_ring_count = IGC_DEFAULT_RXD;
4760 /* set default ITR values */
4761 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
4762 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
4764 /* set default work limits */
4765 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
4767 /* adjust max frame to be at least the size of a standard frame */
4768 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
4769 VLAN_HLEN;
4770 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4772 mutex_init(&adapter->nfc_rule_lock);
4773 INIT_LIST_HEAD(&adapter->nfc_rule_list);
4774 adapter->nfc_rule_count = 0;
4776 spin_lock_init(&adapter->stats64_lock);
4777 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
4778 adapter->flags |= IGC_FLAG_HAS_MSIX;
4780 igc_init_queue_configuration(adapter);
4782 /* This call may decrease the number of queues */
4783 if (igc_init_interrupt_scheme(adapter, true)) {
4784 netdev_err(netdev, "Unable to allocate memory for queues\n");
4785 return -ENOMEM;
4786 }
4788 /* Explicitly disable IRQ since the NIC can be in any state. */
4789 igc_irq_disable(adapter);
4791 set_bit(__IGC_DOWN, &adapter->state);
4793 return 0;
4794 }
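/* For the default MTU of 1500, the sizing above works out to
 * max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * VLAN_HLEN (4) = 1522 bytes, i.e. one VLAN-tagged standard frame.
 */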
4797 * igc_up - Open the interface and prepare it to handle traffic
4798 * @adapter: board private structure
4799 */
4800 void igc_up(struct igc_adapter *adapter)
4801 {
4802 struct igc_hw *hw = &adapter->hw;
4803 int i = 0;
4805 /* hardware has been reset, we need to reload some things */
4806 igc_configure(adapter);
4808 clear_bit(__IGC_DOWN, &adapter->state);
4810 for (i = 0; i < adapter->num_q_vectors; i++)
4811 napi_enable(&adapter->q_vector[i]->napi);
4813 if (adapter->msix_entries)
4814 igc_configure_msix(adapter);
4815 else
4816 igc_assign_vector(adapter->q_vector[0], 0);
4818 /* Clear any pending interrupts. */
4819 rd32(IGC_ICR);
4820 igc_irq_enable(adapter);
4822 netif_tx_start_all_queues(adapter->netdev);
4824 /* start the watchdog. */
4825 hw->mac.get_link_status = true;
4826 schedule_work(&adapter->watchdog_task);
4827 }
4830 * igc_update_stats - Update the board statistics counters
4831 * @adapter: board private structure
4832 */
4833 void igc_update_stats(struct igc_adapter *adapter)
4834 {
4835 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
4836 struct pci_dev *pdev = adapter->pdev;
4837 struct igc_hw *hw = &adapter->hw;
4838 u64 _bytes, _packets;
4839 u64 bytes, packets;
4840 unsigned int start;
4841 u32 mpc;
4842 int i;
4844 /* Prevent stats update while adapter is being reset, or if the pci
4845 * connection is down.
4846 */
4847 if (adapter->link_speed == 0)
4848 return;
4849 if (pci_channel_offline(pdev))
4850 return;
4852 packets = 0;
4853 bytes = 0;
4855 rcu_read_lock();
4856 for (i = 0; i < adapter->num_rx_queues; i++) {
4857 struct igc_ring *ring = adapter->rx_ring[i];
4858 u32 rqdpc = rd32(IGC_RQDPC(i));
4860 if (hw->mac.type >= igc_i225)
4861 wr32(IGC_RQDPC(i), 0);
4863 if (rqdpc) {
4864 ring->rx_stats.drops += rqdpc;
4865 net_stats->rx_fifo_errors += rqdpc;
4866 }
4868 do {
4869 start = u64_stats_fetch_begin(&ring->rx_syncp);
4870 _bytes = ring->rx_stats.bytes;
4871 _packets = ring->rx_stats.packets;
4872 } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
4873 bytes += _bytes;
4874 packets += _packets;
4875 }
4877 net_stats->rx_bytes = bytes;
4878 net_stats->rx_packets = packets;
4880 packets = 0;
4881 bytes = 0;
4882 for (i = 0; i < adapter->num_tx_queues; i++) {
4883 struct igc_ring *ring = adapter->tx_ring[i];
4885 do {
4886 start = u64_stats_fetch_begin(&ring->tx_syncp);
4887 _bytes = ring->tx_stats.bytes;
4888 _packets = ring->tx_stats.packets;
4889 } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
4890 bytes += _bytes;
4891 packets += _packets;
4892 }
4893 net_stats->tx_bytes = bytes;
4894 net_stats->tx_packets = packets;
4895 rcu_read_unlock();
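/* The do/while loops above use the u64_stats seqcount: on 32-bit
 * kernels a writer can tear a 64-bit counter update, so the reader
 * retries until u64_stats_fetch_retry() confirms a consistent
 * snapshot; on 64-bit kernels the loop collapses to a single pass.
 */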
4897 /* read stats registers */
4898 adapter->stats.crcerrs += rd32(IGC_CRCERRS);
4899 adapter->stats.gprc += rd32(IGC_GPRC);
4900 adapter->stats.gorc += rd32(IGC_GORCL);
4901 rd32(IGC_GORCH); /* clear GORCL */
4902 adapter->stats.bprc += rd32(IGC_BPRC);
4903 adapter->stats.mprc += rd32(IGC_MPRC);
4904 adapter->stats.roc += rd32(IGC_ROC);
4906 adapter->stats.prc64 += rd32(IGC_PRC64);
4907 adapter->stats.prc127 += rd32(IGC_PRC127);
4908 adapter->stats.prc255 += rd32(IGC_PRC255);
4909 adapter->stats.prc511 += rd32(IGC_PRC511);
4910 adapter->stats.prc1023 += rd32(IGC_PRC1023);
4911 adapter->stats.prc1522 += rd32(IGC_PRC1522);
4912 adapter->stats.tlpic += rd32(IGC_TLPIC);
4913 adapter->stats.rlpic += rd32(IGC_RLPIC);
4914 adapter->stats.hgptc += rd32(IGC_HGPTC);
4916 mpc = rd32(IGC_MPC);
4917 adapter->stats.mpc += mpc;
4918 net_stats->rx_fifo_errors += mpc;
4919 adapter->stats.scc += rd32(IGC_SCC);
4920 adapter->stats.ecol += rd32(IGC_ECOL);
4921 adapter->stats.mcc += rd32(IGC_MCC);
4922 adapter->stats.latecol += rd32(IGC_LATECOL);
4923 adapter->stats.dc += rd32(IGC_DC);
4924 adapter->stats.rlec += rd32(IGC_RLEC);
4925 adapter->stats.xonrxc += rd32(IGC_XONRXC);
4926 adapter->stats.xontxc += rd32(IGC_XONTXC);
4927 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
4928 adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
4929 adapter->stats.fcruc += rd32(IGC_FCRUC);
4930 adapter->stats.gptc += rd32(IGC_GPTC);
4931 adapter->stats.gotc += rd32(IGC_GOTCL);
4932 rd32(IGC_GOTCH); /* clear GOTCL */
4933 adapter->stats.rnbc += rd32(IGC_RNBC);
4934 adapter->stats.ruc += rd32(IGC_RUC);
4935 adapter->stats.rfc += rd32(IGC_RFC);
4936 adapter->stats.rjc += rd32(IGC_RJC);
4937 adapter->stats.tor += rd32(IGC_TORH);
4938 adapter->stats.tot += rd32(IGC_TOTH);
4939 adapter->stats.tpr += rd32(IGC_TPR);
4941 adapter->stats.ptc64 += rd32(IGC_PTC64);
4942 adapter->stats.ptc127 += rd32(IGC_PTC127);
4943 adapter->stats.ptc255 += rd32(IGC_PTC255);
4944 adapter->stats.ptc511 += rd32(IGC_PTC511);
4945 adapter->stats.ptc1023 += rd32(IGC_PTC1023);
4946 adapter->stats.ptc1522 += rd32(IGC_PTC1522);
4948 adapter->stats.mptc += rd32(IGC_MPTC);
4949 adapter->stats.bptc += rd32(IGC_BPTC);
4951 adapter->stats.tpt += rd32(IGC_TPT);
4952 adapter->stats.colc += rd32(IGC_COLC);
4953 adapter->stats.colc += rd32(IGC_RERC);
4955 adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
4957 adapter->stats.tsctc += rd32(IGC_TSCTC);
4959 adapter->stats.iac += rd32(IGC_IAC);
4961 /* Fill out the OS statistics structure */
4962 net_stats->multicast = adapter->stats.mprc;
4963 net_stats->collisions = adapter->stats.colc;
4967 /* RLEC on some newer hardware can be incorrect so build
4968 * our own version based on RUC and ROC
4969 */
4970 net_stats->rx_errors = adapter->stats.rxerrc +
4971 adapter->stats.crcerrs + adapter->stats.algnerrc +
4972 adapter->stats.ruc + adapter->stats.roc +
4973 adapter->stats.cexterr;
4974 net_stats->rx_length_errors = adapter->stats.ruc +
4975 adapter->stats.roc;
4976 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4977 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4978 net_stats->rx_missed_errors = adapter->stats.mpc;
4981 net_stats->tx_errors = adapter->stats.ecol +
4982 adapter->stats.latecol;
4983 net_stats->tx_aborted_errors = adapter->stats.ecol;
4984 net_stats->tx_window_errors = adapter->stats.latecol;
4985 net_stats->tx_carrier_errors = adapter->stats.tncrs;
4988 net_stats->tx_dropped = adapter->stats.txdrop;
4990 /* Management Stats */
4991 adapter->stats.mgptc += rd32(IGC_MGTPTC);
4992 adapter->stats.mgprc += rd32(IGC_MGTPRC);
4993 adapter->stats.mgpdc += rd32(IGC_MGTPDC);
4994 }
4997 * igc_down - Close the interface
4998 * @adapter: board private structure
4999 */
5000 void igc_down(struct igc_adapter *adapter)
5001 {
5002 struct net_device *netdev = adapter->netdev;
5003 struct igc_hw *hw = &adapter->hw;
5004 u32 tctl, rctl;
5005 int i = 0;
5007 set_bit(__IGC_DOWN, &adapter->state);
5009 igc_ptp_suspend(adapter);
5011 if (pci_device_is_present(adapter->pdev)) {
5012 /* disable receives in the hardware */
5013 rctl = rd32(IGC_RCTL);
5014 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
5015 /* flush and sleep below */
5016 }
5017 /* set trans_start so we don't get spurious watchdogs during reset */
5018 netif_trans_update(netdev);
5020 netif_carrier_off(netdev);
5021 netif_tx_stop_all_queues(netdev);
5023 if (pci_device_is_present(adapter->pdev)) {
5024 /* disable transmits in the hardware */
5025 tctl = rd32(IGC_TCTL);
5026 tctl &= ~IGC_TCTL_EN;
5027 wr32(IGC_TCTL, tctl);
5028 /* flush both disables and wait for them to finish */
5029 wrfl();
5030 usleep_range(10000, 20000);
5031 }
5032 igc_irq_disable(adapter);
5035 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5037 for (i = 0; i < adapter->num_q_vectors; i++) {
5038 if (adapter->q_vector[i]) {
5039 napi_synchronize(&adapter->q_vector[i]->napi);
5040 napi_disable(&adapter->q_vector[i]->napi);
5041 }
5042 }
5044 del_timer_sync(&adapter->watchdog_timer);
5045 del_timer_sync(&adapter->phy_info_timer);
5047 /* record the stats before reset*/
5048 spin_lock(&adapter->stats64_lock);
5049 igc_update_stats(adapter);
5050 spin_unlock(&adapter->stats64_lock);
5052 adapter->link_speed = 0;
5053 adapter->link_duplex = 0;
5055 if (!pci_channel_offline(adapter->pdev))
5056 igc_reset(adapter);
5058 /* clear VLAN promisc flag so VFTA will be updated if necessary */
5059 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
5061 igc_clean_all_tx_rings(adapter);
5062 igc_clean_all_rx_rings(adapter);
5063 }
5065 void igc_reinit_locked(struct igc_adapter *adapter)
5066 {
5067 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5068 usleep_range(1000, 2000);
5069 igc_down(adapter);
5070 igc_up(adapter);
5071 clear_bit(__IGC_RESETTING, &adapter->state);
5072 }
5074 static void igc_reset_task(struct work_struct *work)
5075 {
5076 struct igc_adapter *adapter;
5078 adapter = container_of(work, struct igc_adapter, reset_task);
5080 rtnl_lock();
5081 /* If we're already down or resetting, just bail */
5082 if (test_bit(__IGC_DOWN, &adapter->state) ||
5083 test_bit(__IGC_RESETTING, &adapter->state)) {
5084 rtnl_unlock();
5085 return;
5086 }
5088 igc_rings_dump(adapter);
5089 igc_regs_dump(adapter);
5090 netdev_err(adapter->netdev, "Reset adapter\n");
5091 igc_reinit_locked(adapter);
5092 rtnl_unlock();
5093 }
5096 * igc_change_mtu - Change the Maximum Transfer Unit
5097 * @netdev: network interface device structure
5098 * @new_mtu: new value for maximum frame size
5100 * Returns 0 on success, negative on failure
5101 */
5102 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
5103 {
5104 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5105 struct igc_adapter *adapter = netdev_priv(netdev);
5107 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
5108 netdev_dbg(netdev, "Jumbo frames not supported with XDP");
5112 /* adjust max frame to be at least the size of a standard frame */
5113 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
5114 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5116 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5117 usleep_range(1000, 2000);
5119 /* igc_down has a dependency on max_frame_size */
5120 adapter->max_frame_size = max_frame;
5122 if (netif_running(netdev))
5123 igc_down(adapter);
5125 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5126 netdev->mtu = new_mtu;
5128 if (netif_running(netdev))
5129 igc_up(adapter);
5130 else
5131 igc_reset(adapter);
5133 clear_bit(__IGC_RESETTING, &adapter->state);
5135 return 0;
5136 }
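/* Illustrative consequence of the XDP check above (interface name
 * assumed): `ip link set dev eth0 mtu 9000` fails with EINVAL while an
 * XDP program is attached, since XDP limits the MTU to ETH_DATA_LEN;
 * detach the program before enabling jumbo frames.
 */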
5139 * igc_tx_timeout - Respond to a Tx Hang
5140 * @netdev: network interface device structure
5141 * @txqueue: queue number that timed out
5142 */
5143 static void igc_tx_timeout(struct net_device *netdev,
5144 unsigned int __always_unused txqueue)
5145 {
5146 struct igc_adapter *adapter = netdev_priv(netdev);
5147 struct igc_hw *hw = &adapter->hw;
5149 /* Do the reset outside of interrupt context */
5150 adapter->tx_timeout_count++;
5151 schedule_work(&adapter->reset_task);
5152 wr32(IGC_EICS,
5153 (adapter->eims_enable_mask & ~adapter->eims_other));
5154 }
5157 * igc_get_stats64 - Get System Network Statistics
5158 * @netdev: network interface device structure
5159 * @stats: rtnl_link_stats64 pointer
5161 * Returns the address of the device statistics structure.
5162 * The statistics are updated here and also from the timer callback.
5163 */
5164 static void igc_get_stats64(struct net_device *netdev,
5165 struct rtnl_link_stats64 *stats)
5166 {
5167 struct igc_adapter *adapter = netdev_priv(netdev);
5169 spin_lock(&adapter->stats64_lock);
5170 if (!test_bit(__IGC_RESETTING, &adapter->state))
5171 igc_update_stats(adapter);
5172 memcpy(stats, &adapter->stats64, sizeof(*stats));
5173 spin_unlock(&adapter->stats64_lock);
5174 }
5176 static netdev_features_t igc_fix_features(struct net_device *netdev,
5177 netdev_features_t features)
5178 {
5179 /* Since there is no support for separate Rx/Tx vlan accel
5180 * enable/disable make sure Tx flag is always in same state as Rx.
5181 */
5182 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5183 features |= NETIF_F_HW_VLAN_CTAG_TX;
5184 else
5185 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
5187 return features;
5188 }
5190 static int igc_set_features(struct net_device *netdev,
5191 netdev_features_t features)
5192 {
5193 netdev_features_t changed = netdev->features ^ features;
5194 struct igc_adapter *adapter = netdev_priv(netdev);
5196 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
5197 igc_vlan_mode(netdev, features);
5199 /* Add VLAN support */
5200 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
5201 return 0;
5203 if (!(features & NETIF_F_NTUPLE))
5204 igc_flush_nfc_rules(adapter);
5206 netdev->features = features;
5208 if (netif_running(netdev))
5209 igc_reinit_locked(adapter);
5210 else
5211 igc_reset(adapter);
5213 return 1;
5214 }
5216 static netdev_features_t
5217 igc_features_check(struct sk_buff *skb, struct net_device *dev,
5218 netdev_features_t features)
5219 {
5220 unsigned int network_hdr_len, mac_hdr_len;
5222 /* Make certain the headers can be described by a context descriptor */
5223 mac_hdr_len = skb_network_header(skb) - skb->data;
5224 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
5225 return features & ~(NETIF_F_HW_CSUM |
5226 NETIF_F_SCTP_CRC |
5227 NETIF_F_HW_VLAN_CTAG_TX |
5228 NETIF_F_TSO |
5229 NETIF_F_TSO6);
5231 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
5232 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
5233 return features & ~(NETIF_F_HW_CSUM |
5234 NETIF_F_SCTP_CRC |
5235 NETIF_F_TSO |
5236 NETIF_F_TSO6);
5238 /* We can only support IPv4 TSO in tunnels if we can mangle the
5239 * inner IP ID field, so strip TSO if MANGLEID is not supported.
5241 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
5242 features &= ~NETIF_F_TSO;
5244 return features;
5245 }
5247 static void igc_tsync_interrupt(struct igc_adapter *adapter)
5248 {
5249 u32 ack, tsauxc, sec, nsec, tsicr;
5250 struct igc_hw *hw = &adapter->hw;
5251 struct ptp_clock_event event;
5252 struct timespec64 ts;
5254 tsicr = rd32(IGC_TSICR);
5256 ack = 0;
5257 if (tsicr & IGC_TSICR_SYS_WRAP) {
5258 event.type = PTP_CLOCK_PPS;
5259 if (adapter->ptp_caps.pps)
5260 ptp_clock_event(adapter->ptp_clock, &event);
5261 ack |= IGC_TSICR_SYS_WRAP;
5262 }
5264 if (tsicr & IGC_TSICR_TXTS) {
5265 /* retrieve hardware timestamp */
5266 igc_ptp_tx_tstamp_event(adapter);
5267 ack |= IGC_TSICR_TXTS;
5268 }
5270 if (tsicr & IGC_TSICR_TT0) {
5271 spin_lock(&adapter->tmreg_lock);
5272 ts = timespec64_add(adapter->perout[0].start,
5273 adapter->perout[0].period);
5274 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5275 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
5276 tsauxc = rd32(IGC_TSAUXC);
5277 tsauxc |= IGC_TSAUXC_EN_TT0;
5278 wr32(IGC_TSAUXC, tsauxc);
5279 adapter->perout[0].start = ts;
5280 spin_unlock(&adapter->tmreg_lock);
5281 ack |= IGC_TSICR_TT0;
5282 }
5284 if (tsicr & IGC_TSICR_TT1) {
5285 spin_lock(&adapter->tmreg_lock);
5286 ts = timespec64_add(adapter->perout[1].start,
5287 adapter->perout[1].period);
5288 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5289 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
5290 tsauxc = rd32(IGC_TSAUXC);
5291 tsauxc |= IGC_TSAUXC_EN_TT1;
5292 wr32(IGC_TSAUXC, tsauxc);
5293 adapter->perout[1].start = ts;
5294 spin_unlock(&adapter->tmreg_lock);
5295 ack |= IGC_TSICR_TT1;
5296 }
5298 if (tsicr & IGC_TSICR_AUTT0) {
5299 nsec = rd32(IGC_AUXSTMPL0);
5300 sec = rd32(IGC_AUXSTMPH0);
5301 event.type = PTP_CLOCK_EXTTS;
5302 event.index = 0;
5303 event.timestamp = sec * NSEC_PER_SEC + nsec;
5304 ptp_clock_event(adapter->ptp_clock, &event);
5305 ack |= IGC_TSICR_AUTT0;
5306 }
5308 if (tsicr & IGC_TSICR_AUTT1) {
5309 nsec = rd32(IGC_AUXSTMPL1);
5310 sec = rd32(IGC_AUXSTMPH1);
5311 event.type = PTP_CLOCK_EXTTS;
5312 event.index = 1;
5313 event.timestamp = sec * NSEC_PER_SEC + nsec;
5314 ptp_clock_event(adapter->ptp_clock, &event);
5315 ack |= IGC_TSICR_AUTT1;
5316 }
5318 /* acknowledge the interrupts */
5319 wr32(IGC_TSICR, ack);
5320 }
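/* The TT0/TT1 branches above keep the periodic outputs free-running:
 * each target-time interrupt computes the next edge by adding the
 * period to the previous start time, writes it to TRGTTIML/H and
 * re-enables the timer in TSAUXC before acknowledging the cause.
 */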
5323 * igc_msix_other - msix other interrupt handler
5324 * @irq: interrupt number
5325 * @data: pointer to a q_vector
5326 */
5327 static irqreturn_t igc_msix_other(int irq, void *data)
5328 {
5329 struct igc_adapter *adapter = data;
5330 struct igc_hw *hw = &adapter->hw;
5331 u32 icr = rd32(IGC_ICR);
5333 /* reading ICR causes bit 31 of EICR to be cleared */
5334 if (icr & IGC_ICR_DRSTA)
5335 schedule_work(&adapter->reset_task);
5337 if (icr & IGC_ICR_DOUTSYNC) {
5338 /* HW is reporting DMA is out of sync */
5339 adapter->stats.doosync++;
5340 }
5342 if (icr & IGC_ICR_LSC) {
5343 hw->mac.get_link_status = true;
5344 /* guard against interrupt when we're going down */
5345 if (!test_bit(__IGC_DOWN, &adapter->state))
5346 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5347 }
5349 if (icr & IGC_ICR_TS)
5350 igc_tsync_interrupt(adapter);
5352 wr32(IGC_EIMS, adapter->eims_other);
5354 return IRQ_HANDLED;
5355 }
5357 static void igc_write_itr(struct igc_q_vector *q_vector)
5358 {
5359 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
5361 if (!q_vector->set_itr)
5362 return;
5364 if (!itr_val)
5365 itr_val = IGC_ITR_VAL_MASK;
5367 itr_val |= IGC_EITR_CNT_IGNR;
5369 writel(itr_val, q_vector->itr_register);
5370 q_vector->set_itr = 0;
5371 }
5373 static irqreturn_t igc_msix_ring(int irq, void *data)
5374 {
5375 struct igc_q_vector *q_vector = data;
5377 /* Write the ITR value calculated from the previous interrupt. */
5378 igc_write_itr(q_vector);
5380 napi_schedule(&q_vector->napi);
5382 return IRQ_HANDLED;
5383 }
5386 * igc_request_msix - Initialize MSI-X interrupts
5387 * @adapter: Pointer to adapter structure
5389 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5390 * kernel.
5391 */
5392 static int igc_request_msix(struct igc_adapter *adapter)
5393 {
5394 unsigned int num_q_vectors = adapter->num_q_vectors;
5395 int i = 0, err = 0, vector = 0, free_vector = 0;
5396 struct net_device *netdev = adapter->netdev;
5398 err = request_irq(adapter->msix_entries[vector].vector,
5399 &igc_msix_other, 0, netdev->name, adapter);
5400 if (err)
5401 goto err_out;
5403 if (num_q_vectors > MAX_Q_VECTORS) {
5404 num_q_vectors = MAX_Q_VECTORS;
5405 dev_warn(&adapter->pdev->dev,
5406 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
5407 adapter->num_q_vectors, MAX_Q_VECTORS);
5408 }
5409 for (i = 0; i < num_q_vectors; i++) {
5410 struct igc_q_vector *q_vector = adapter->q_vector[i];
5412 vector++;
5414 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
5416 if (q_vector->rx.ring && q_vector->tx.ring)
5417 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
5418 q_vector->rx.ring->queue_index);
5419 else if (q_vector->tx.ring)
5420 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
5421 q_vector->tx.ring->queue_index);
5422 else if (q_vector->rx.ring)
5423 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
5424 q_vector->rx.ring->queue_index);
5425 else
5426 sprintf(q_vector->name, "%s-unused", netdev->name);
5428 err = request_irq(adapter->msix_entries[vector].vector,
5429 igc_msix_ring, 0, q_vector->name,
5430 q_vector);
5431 if (err)
5432 goto err_free;
5433 }
5435 igc_configure_msix(adapter);
5436 return 0;
5438 err_free:
5439 /* free already assigned IRQs */
5440 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
5442 vector--;
5443 for (i = 0; i < vector; i++) {
5444 free_irq(adapter->msix_entries[free_vector++].vector,
5445 adapter->q_vector[i]);
5446 }
5447 err_out:
5448 return err;
5449 }
5452 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5453 * @adapter: Pointer to adapter structure
5455 * This function resets the device so that it has 0 rx queues, tx queues, and
5456 * MSI-X interrupts allocated.
5457 */
5458 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
5459 {
5460 igc_free_q_vectors(adapter);
5461 igc_reset_interrupt_capability(adapter);
5462 }
5464 /* Need to wait a few seconds after link up to get diagnostic information from
5465 * the phy
5466 */
5467 static void igc_update_phy_info(struct timer_list *t)
5468 {
5469 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5471 igc_get_phy_info(&adapter->hw);
5472 }
5475 * igc_has_link - check shared code for link and determine up/down
5476 * @adapter: pointer to driver private info
5477 */
5478 bool igc_has_link(struct igc_adapter *adapter)
5479 {
5480 struct igc_hw *hw = &adapter->hw;
5481 bool link_active = false;
5483 /* get_link_status is set on LSC (link status) interrupt or
5484 * rx sequence error interrupt. get_link_status will stay
5485 * false until the igc_check_for_link establishes link
5486 * for copper adapters ONLY
5487 */
5488 if (!hw->mac.get_link_status)
5489 return true;
5490 hw->mac.ops.check_for_link(hw);
5491 link_active = !hw->mac.get_link_status;
5493 if (hw->mac.type == igc_i225) {
5494 if (!netif_carrier_ok(adapter->netdev)) {
5495 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5496 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
5497 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
5498 adapter->link_check_timeout = jiffies;
5499 }
5500 }
5502 return link_active;
5503 }
5506 * igc_watchdog - Timer Call-back
5507 * @t: timer for the watchdog
5508 */
5509 static void igc_watchdog(struct timer_list *t)
5510 {
5511 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5512 /* Do the rest outside of interrupt context */
5513 schedule_work(&adapter->watchdog_task);
5514 }
5516 static void igc_watchdog_task(struct work_struct *work)
5517 {
5518 struct igc_adapter *adapter = container_of(work,
5519 struct igc_adapter,
5520 watchdog_task);
5521 struct net_device *netdev = adapter->netdev;
5522 struct igc_hw *hw = &adapter->hw;
5523 struct igc_phy_info *phy = &hw->phy;
5524 u16 phy_data, retry_count = 20;
5525 u32 link;
5526 int i;
5528 link = igc_has_link(adapter);
5530 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
5531 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5532 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5533 else
5534 link = false;
5535 }
5537 if (link) {
5538 /* Cancel scheduled suspend requests. */
5539 pm_runtime_resume(netdev->dev.parent);
5541 if (!netif_carrier_ok(netdev)) {
5542 u32 ctrl;
5544 hw->mac.ops.get_speed_and_duplex(hw,
5545 &adapter->link_speed,
5546 &adapter->link_duplex);
5548 ctrl = rd32(IGC_CTRL);
5549 /* Link status message must follow this format */
5550 netdev_info(netdev,
5551 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5552 adapter->link_speed,
5553 adapter->link_duplex == FULL_DUPLEX ?
5554 "Full" : "Half",
5555 (ctrl & IGC_CTRL_TFCE) &&
5556 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
5557 (ctrl & IGC_CTRL_RFCE) ? "RX" :
5558 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
5560 /* disable EEE if enabled */
5561 if ((adapter->flags & IGC_FLAG_EEE) &&
5562 adapter->link_duplex == HALF_DUPLEX) {
5563 netdev_info(netdev,
5564 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
5565 adapter->hw.dev_spec._base.eee_enable = false;
5566 adapter->flags &= ~IGC_FLAG_EEE;
5567 }
5569 /* check if SmartSpeed worked */
5570 igc_check_downshift(hw);
5571 if (phy->speed_downgraded)
5572 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5574 /* adjust timeout factor according to speed/duplex */
5575 adapter->tx_timeout_factor = 1;
5576 switch (adapter->link_speed) {
5577 case SPEED_10:
5578 adapter->tx_timeout_factor = 14;
5579 break;
5580 case SPEED_100:
5581 case SPEED_1000:
5582 case SPEED_2500:
5583 adapter->tx_timeout_factor = 1;
5584 break;
5585 }
5587 /* Once the launch time has been set on the wire, there
5588 * is a delay before the link speed can be determined
5589 * based on link-up activity. Write into the register
5590 * as soon as we know the correct link speed.
5591 */
5592 igc_tsn_adjust_txtime_offset(adapter);
5594 if (adapter->link_speed != SPEED_1000)
5595 goto no_wait;
5597 /* wait for Remote receiver status OK */
5598 retry_read_status:
5599 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
5600 &phy_data)) {
5601 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5602 retry_count) {
5603 msleep(100);
5604 retry_count--;
5605 goto retry_read_status;
5606 } else if (!retry_count) {
5607 netdev_err(netdev, "exceed max 2 second\n");
5608 }
5609 } else {
5610 netdev_err(netdev, "read 1000Base-T Status Reg\n");
5611 }
5612 no_wait:
5613 netif_carrier_on(netdev);
5615 /* link state has changed, schedule phy info update */
5616 if (!test_bit(__IGC_DOWN, &adapter->state))
5617 mod_timer(&adapter->phy_info_timer,
5618 round_jiffies(jiffies + 2 * HZ));
5619 }
5620 } else {
5621 if (netif_carrier_ok(netdev)) {
5622 adapter->link_speed = 0;
5623 adapter->link_duplex = 0;
5625 /* Link status message must follow this format */
5626 netdev_info(netdev, "NIC Link is Down\n");
5627 netif_carrier_off(netdev);
5629 /* link state has changed, schedule phy info update */
5630 if (!test_bit(__IGC_DOWN, &adapter->state))
5631 mod_timer(&adapter->phy_info_timer,
5632 round_jiffies(jiffies + 2 * HZ));
5634 pm_schedule_suspend(netdev->dev.parent,
5635 MSEC_PER_SEC * 5);
5636 }
5637 }
5639 spin_lock(&adapter->stats64_lock);
5640 igc_update_stats(adapter);
5641 spin_unlock(&adapter->stats64_lock);
5643 for (i = 0; i < adapter->num_tx_queues; i++) {
5644 struct igc_ring *tx_ring = adapter->tx_ring[i];
5646 if (!netif_carrier_ok(netdev)) {
5647 /* We've lost link, so the controller stops DMA,
5648 * but we've got queued Tx work that's never going
5649 * to get done, so reset controller to flush Tx.
5650 * (Do the reset outside of interrupt context).
5651 */
5652 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5653 adapter->tx_timeout_count++;
5654 schedule_work(&adapter->reset_task);
5655 /* return immediately since reset is imminent */
5656 return;
5657 }
5658 }
5660 /* Force detection of hung controller every watchdog period */
5661 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5662 }
5664 /* Cause software interrupt to ensure Rx ring is cleaned */
5665 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5666 u32 eics = 0;
5668 for (i = 0; i < adapter->num_q_vectors; i++)
5669 eics |= adapter->q_vector[i]->eims_value;
5670 wr32(IGC_EICS, eics);
5671 } else {
5672 wr32(IGC_ICS, IGC_ICS_RXDMT0);
5673 }
5675 igc_ptp_tx_hang(adapter);
5677 /* Reset the timer */
5678 if (!test_bit(__IGC_DOWN, &adapter->state)) {
5679 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
5680 mod_timer(&adapter->watchdog_timer,
5681 round_jiffies(jiffies + HZ));
5682 else
5683 mod_timer(&adapter->watchdog_timer,
5684 round_jiffies(jiffies + 2 * HZ));
5685 }
5686 }
5689 * igc_intr_msi - Interrupt Handler
5690 * @irq: interrupt number
5691 * @data: pointer to a network interface device structure
5692 */
5693 static irqreturn_t igc_intr_msi(int irq, void *data)
5694 {
5695 struct igc_adapter *adapter = data;
5696 struct igc_q_vector *q_vector = adapter->q_vector[0];
5697 struct igc_hw *hw = &adapter->hw;
5698 /* read ICR disables interrupts using IAM */
5699 u32 icr = rd32(IGC_ICR);
5701 igc_write_itr(q_vector);
5703 if (icr & IGC_ICR_DRSTA)
5704 schedule_work(&adapter->reset_task);
5706 if (icr & IGC_ICR_DOUTSYNC) {
5707 /* HW is reporting DMA is out of sync */
5708 adapter->stats.doosync++;
5709 }
5711 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5712 hw->mac.get_link_status = true;
5713 if (!test_bit(__IGC_DOWN, &adapter->state))
5714 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5715 }
5717 if (icr & IGC_ICR_TS)
5718 igc_tsync_interrupt(adapter);
5720 napi_schedule(&q_vector->napi);
5722 return IRQ_HANDLED;
5723 }
5726 * igc_intr - Legacy Interrupt Handler
5727 * @irq: interrupt number
5728 * @data: pointer to a network interface device structure
5729 */
5730 static irqreturn_t igc_intr(int irq, void *data)
5731 {
5732 struct igc_adapter *adapter = data;
5733 struct igc_q_vector *q_vector = adapter->q_vector[0];
5734 struct igc_hw *hw = &adapter->hw;
5735 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5736 * need for the IMC write
5737 */
5738 u32 icr = rd32(IGC_ICR);
5740 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5741 * not set, then the adapter didn't send an interrupt
5743 if (!(icr & IGC_ICR_INT_ASSERTED))
5744 return IRQ_NONE;
5746 igc_write_itr(q_vector);
5748 if (icr & IGC_ICR_DRSTA)
5749 schedule_work(&adapter->reset_task);
5751 if (icr & IGC_ICR_DOUTSYNC) {
5752 /* HW is reporting DMA is out of sync */
5753 adapter->stats.doosync++;
5754 }
5756 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5757 hw->mac.get_link_status = true;
5758 /* guard against interrupt when we're going down */
5759 if (!test_bit(__IGC_DOWN, &adapter->state))
5760 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5761 }
5763 if (icr & IGC_ICR_TS)
5764 igc_tsync_interrupt(adapter);
5766 napi_schedule(&q_vector->napi);
5768 return IRQ_HANDLED;
5769 }
5771 static void igc_free_irq(struct igc_adapter *adapter)
5772 {
5773 if (adapter->msix_entries) {
5774 int vector = 0, i;
5776 free_irq(adapter->msix_entries[vector++].vector, adapter);
5778 for (i = 0; i < adapter->num_q_vectors; i++)
5779 free_irq(adapter->msix_entries[vector++].vector,
5780 adapter->q_vector[i]);
5781 } else {
5782 free_irq(adapter->pdev->irq, adapter);
5783 }
5784 }
5787 * igc_request_irq - initialize interrupts
5788 * @adapter: Pointer to adapter structure
5790 * Attempts to configure interrupts using the best available
5791 * capabilities of the hardware and kernel.
5792 */
5793 static int igc_request_irq(struct igc_adapter *adapter)
5794 {
5795 struct net_device *netdev = adapter->netdev;
5796 struct pci_dev *pdev = adapter->pdev;
5797 int err = 0;
5799 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5800 err = igc_request_msix(adapter);
5801 if (!err)
5802 goto request_done;
5803 /* fall back to MSI */
5804 igc_free_all_tx_resources(adapter);
5805 igc_free_all_rx_resources(adapter);
5807 igc_clear_interrupt_scheme(adapter);
5808 err = igc_init_interrupt_scheme(adapter, false);
5809 if (err)
5810 goto request_done;
5811 igc_setup_all_tx_resources(adapter);
5812 igc_setup_all_rx_resources(adapter);
5813 igc_configure(adapter);
5814 }
5816 igc_assign_vector(adapter->q_vector[0], 0);
5818 if (adapter->flags & IGC_FLAG_HAS_MSI) {
5819 err = request_irq(pdev->irq, &igc_intr_msi, 0,
5820 netdev->name, adapter);
5821 if (!err)
5822 goto request_done;
5824 /* fall back to legacy interrupts */
5825 igc_reset_interrupt_capability(adapter);
5826 adapter->flags &= ~IGC_FLAG_HAS_MSI;
5827 }
5829 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
5830 netdev->name, adapter);
5832 if (err)
5833 netdev_err(netdev, "Error %d getting interrupt\n", err);
5835 request_done:
5836 return err;
5837 }
5840 * __igc_open - Called when a network interface is made active
5841 * @netdev: network interface device structure
5842 * @resuming: boolean indicating if the device is resuming
5844 * Returns 0 on success, negative value on failure
5846 * The open entry point is called when a network interface is made
5847 * active by the system (IFF_UP). At this point all resources needed
5848 * for transmit and receive operations are allocated, the interrupt
5849 * handler is registered with the OS, the watchdog timer is started,
5850 * and the stack is notified that the interface is ready.
5851 */
5852 static int __igc_open(struct net_device *netdev, bool resuming)
5853 {
5854 struct igc_adapter *adapter = netdev_priv(netdev);
5855 struct pci_dev *pdev = adapter->pdev;
5856 struct igc_hw *hw = &adapter->hw;
5857 int err = 0;
5858 int i = 0;
5860 /* disallow open during test */
5862 if (test_bit(__IGC_TESTING, &adapter->state)) {
5863 WARN_ON(resuming);
5864 return -EBUSY;
5865 }
5867 if (!resuming)
5868 pm_runtime_get_sync(&pdev->dev);
5870 netif_carrier_off(netdev);
5872 /* allocate transmit descriptors */
5873 err = igc_setup_all_tx_resources(adapter);
5874 if (err)
5875 goto err_setup_tx;
5877 /* allocate receive descriptors */
5878 err = igc_setup_all_rx_resources(adapter);
5879 if (err)
5880 goto err_setup_rx;
5882 igc_power_up_link(adapter);
5884 igc_configure(adapter);
5886 err = igc_request_irq(adapter);
5887 if (err)
5888 goto err_req_irq;
5890 /* Notify the stack of the actual queue counts. */
5891 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
5892 if (err)
5893 goto err_set_queues;
5895 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
5896 if (err)
5897 goto err_set_queues;
5899 clear_bit(__IGC_DOWN, &adapter->state);
5901 for (i = 0; i < adapter->num_q_vectors; i++)
5902 napi_enable(&adapter->q_vector[i]->napi);
5904 /* Clear any pending interrupts. */
5905 rd32(IGC_ICR);
5906 igc_irq_enable(adapter);
5908 if (!resuming)
5909 pm_runtime_put(&pdev->dev);
5911 netif_tx_start_all_queues(netdev);
5913 /* start the watchdog. */
5914 hw->mac.get_link_status = true;
5915 schedule_work(&adapter->watchdog_task);
5917 return IGC_SUCCESS;
5919 err_req_irq:
5920 igc_free_irq(adapter);
5921 err_set_queues:
5922 igc_release_hw_control(adapter);
5923 igc_power_down_phy_copper_base(&adapter->hw);
5924 igc_free_all_rx_resources(adapter);
5925 err_setup_rx:
5926 igc_free_all_tx_resources(adapter);
5927 err_setup_tx:
5928 igc_reset(adapter);
5929 if (!resuming)
5930 pm_runtime_put(&pdev->dev);
5932 return err;
5933 }
5935 int igc_open(struct net_device *netdev)
5936 {
5937 return __igc_open(netdev, false);
5938 }
5941 * __igc_close - Disables a network interface
5942 * @netdev: network interface device structure
5943 * @suspending: boolean indicating the device is suspending
5945 * Returns 0, this is not allowed to fail
5947 * The close entry point is called when an interface is de-activated
5948 * by the OS. The hardware is still under the driver's control, but
5949 * needs to be disabled. A global MAC reset is issued to stop the
5950 * hardware, and all transmit and receive resources are freed.
5951 */
5952 static int __igc_close(struct net_device *netdev, bool suspending)
5953 {
5954 struct igc_adapter *adapter = netdev_priv(netdev);
5955 struct pci_dev *pdev = adapter->pdev;
5957 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
5959 if (suspending)
5960 pm_runtime_get_sync(&pdev->dev);
5962 igc_down(adapter);
5964 igc_release_hw_control(adapter);
5966 igc_free_irq(adapter);
5968 igc_free_all_tx_resources(adapter);
5969 igc_free_all_rx_resources(adapter);
5971 if (suspending)
5972 pm_runtime_put_sync(&pdev->dev);
5974 return 0;
5975 }
5977 int igc_close(struct net_device *netdev)
5978 {
5979 if (netif_device_present(netdev) || netdev->dismantle)
5980 return __igc_close(netdev, false);
5981 return 0;
5982 }
5985 * igc_ioctl - Access the hwtstamp interface
5986 * @netdev: network interface device structure
5987 * @ifr: interface request data
5988 * @cmd: ioctl command
5989 */
5990 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5991 {
5992 switch (cmd) {
5993 case SIOCGHWTSTAMP:
5994 return igc_ptp_get_ts_config(netdev, ifr);
5995 case SIOCSHWTSTAMP:
5996 return igc_ptp_set_ts_config(netdev, ifr);
5997 default:
5998 return -EOPNOTSUPP;
5999 }
6000 }
6002 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
6003 bool enable)
6004 {
6005 struct igc_ring *ring;
6007 if (queue < 0 || queue >= adapter->num_tx_queues)
6008 return -EINVAL;
6010 ring = adapter->tx_ring[queue];
6011 ring->launchtime_enable = enable;
6013 return 0;
6014 }
6016 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
6017 {
6018 struct timespec64 b;
6020 b = ktime_to_timespec64(base_time);
6022 return timespec64_compare(now, &b) > 0;
6023 }
6025 static bool validate_schedule(struct igc_adapter *adapter,
6026 const struct tc_taprio_qopt_offload *qopt)
6028 int queue_uses[IGC_MAX_TX_QUEUES] = { };
6029 struct igc_hw *hw = &adapter->hw;
6030 struct timespec64 now;
6031 size_t n;
6033 if (qopt->cycle_time_extension)
6034 return false;
6036 igc_ptp_read(adapter, &now);
6038 /* If we program the controller's BASET registers with a time
6039 * in the future, it will hold all the packets until that
6040 * time, causing a lot of TX Hangs, so to avoid that, we
6041 * reject schedules that would start in the future.
6042 * Note: Limitation above is no longer in i226.
6044 if (!is_base_time_past(qopt->base_time, &now) &&
6045 igc_is_device_id_i225(hw))
6046 return false;
6048 for (n = 0; n < qopt->num_entries; n++) {
6049 const struct tc_taprio_sched_entry *e, *prev;
6050 int i;
6052 prev = n ? &qopt->entries[n - 1] : NULL;
6053 e = &qopt->entries[n];
6055 /* i225 only supports "global" frame preemption
6056 * settings.
6057 */
6058 if (e->command != TC_TAPRIO_CMD_SET_GATES)
6059 return false;
6061 for (i = 0; i < adapter->num_tx_queues; i++)
6062 if (e->gate_mask & BIT(i)) {
6063 queue_uses[i]++;
6065 /* There are limitations: A single queue cannot
6066 * be opened and closed multiple times per cycle
6067 * unless the gate stays open. Check for it.
6068 */
6069 if (queue_uses[i] > 1 &&
6070 !(prev->gate_mask & BIT(i)))
6071 return false;
6072 }
6073 }
6075 return true;
6076 }
6078 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
6079 struct tc_etf_qopt_offload *qopt)
6080 {
6081 struct igc_hw *hw = &adapter->hw;
6082 int err;
6084 if (hw->mac.type != igc_i225)
6085 return -EOPNOTSUPP;
6087 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
6088 if (err)
6089 return err;
6091 return igc_tsn_offload_apply(adapter);
6092 }
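/* Illustrative user-space trigger (command not taken from this file):
 * launch time offload typically arrives here via the etf qdisc, e.g.
 *   tc qdisc replace dev eth0 parent 100:1 etf clockid CLOCK_TAI \
 *      delta 500000 offload
 * which the stack delivers as a TC_SETUP_QDISC_ETF request.
 */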
6094 static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
6095 {
6096 int i;
6098 adapter->base_time = 0;
6099 adapter->cycle_time = NSEC_PER_SEC;
6100 adapter->taprio_offload_enable = false;
6101 adapter->qbv_config_change_errors = 0;
6102 adapter->qbv_transition = false;
6103 adapter->qbv_count = 0;
6105 for (i = 0; i < adapter->num_tx_queues; i++) {
6106 struct igc_ring *ring = adapter->tx_ring[i];
6108 ring->start_time = 0;
6109 ring->end_time = NSEC_PER_SEC;
6110 ring->max_sdu = 0;
6111 ring->oper_gate_closed = false;
6112 ring->admin_gate_closed = false;
6113 }
6115 return 0;
6116 }
6118 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
6119 struct tc_taprio_qopt_offload *qopt)
6120 {
6121 bool queue_configured[IGC_MAX_TX_QUEUES] = { };
6122 struct igc_hw *hw = &adapter->hw;
6123 u32 start_time = 0, end_time = 0;
6124 struct timespec64 now;
6125 size_t n;
6126 int i;
6128 if (qopt->cmd == TAPRIO_CMD_DESTROY)
6129 return igc_tsn_clear_schedule(adapter);
6131 if (qopt->cmd != TAPRIO_CMD_REPLACE)
6132 return -EOPNOTSUPP;
6134 if (qopt->base_time < 0)
6135 return -ERANGE;
6137 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable)
6138 return -EALREADY;
6140 if (!validate_schedule(adapter, qopt))
6141 return -EINVAL;
6143 adapter->cycle_time = qopt->cycle_time;
6144 adapter->base_time = qopt->base_time;
6145 adapter->taprio_offload_enable = true;
6147 igc_ptp_read(adapter, &now);
6149 for (n = 0; n < qopt->num_entries; n++) {
6150 struct tc_taprio_sched_entry *e = &qopt->entries[n];
6152 end_time += e->interval;
6154 /* If any of the conditions below are true, we need to manually
6155 * control the end time of the cycle.
6156 * 1. Qbv users can specify a cycle time that is not equal
6157 * to the total GCL intervals. Hence, recalculation is
6158 * necessary here to exclude the time interval that
6159 * exceeds the cycle time.
6160 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
6161 * once the end of the list is reached, it will switch
6162 * to the END_OF_CYCLE state and leave the gates in the
6163 * same state until the next cycle is started.
6165 if (end_time > adapter->cycle_time ||
6166 n + 1 == qopt->num_entries)
6167 end_time = adapter->cycle_time;
6169 for (i = 0; i < adapter->num_tx_queues; i++) {
6170 struct igc_ring *ring = adapter->tx_ring[i];
6172 if (!(e->gate_mask & BIT(i)))
6173 continue;
6175 /* Check whether a queue stays open for more than one
6176 * entry. If so, keep the start and advance the end
6177 * time.
6178 */
6179 if (!queue_configured[i])
6180 ring->start_time = start_time;
6181 ring->end_time = end_time;
6183 if (ring->start_time >= adapter->cycle_time)
6184 queue_configured[i] = false;
6185 else
6186 queue_configured[i] = true;
6187 }
6189 start_time += e->interval;
6190 }
6192 /* Check whether a queue gets configured.
6193 * If not, set the start and end time to be end time.
6195 for (i = 0; i < adapter->num_tx_queues; i++) {
6196 struct igc_ring *ring = adapter->tx_ring[i];
6198 if (!is_base_time_past(qopt->base_time, &now)) {
6199 ring->admin_gate_closed = false;
6200 } else {
6201 ring->oper_gate_closed = false;
6202 ring->admin_gate_closed = false;
6203 }
6205 if (!queue_configured[i]) {
6206 if (!is_base_time_past(qopt->base_time, &now))
6207 ring->admin_gate_closed = true;
6208 else
6209 ring->oper_gate_closed = true;
6211 ring->start_time = end_time;
6212 ring->end_time = end_time;
6213 }
6214 }
6216 for (i = 0; i < adapter->num_tx_queues; i++) {
6217 struct igc_ring *ring = adapter->tx_ring[i];
6218 struct net_device *dev = adapter->netdev;
6220 if (qopt->max_sdu[i])
6221 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN;
6222 else
6223 ring->max_sdu = 0;
6224 }
6226 return 0;
6227 }
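/* Illustrative schedule (command not taken from this file): a taprio
 * offload exercising the mapping above could look like
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *      num_tc 4 map 3 2 1 0 3 3 3 3 3 3 3 3 3 3 3 3 \
 *      queues 1@0 1@1 1@2 1@3 base-time 1000000 \
 *      sched-entry S 01 300000 sched-entry S 0E 700000 flags 0x2
 * where each sched-entry gate mask selects Tx queues and the loop
 * above converts entries into per-ring start_time/end_time windows.
 */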
6229 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
6230 struct tc_taprio_qopt_offload *qopt)
6231 {
6232 struct igc_hw *hw = &adapter->hw;
6233 int err;
6235 if (hw->mac.type != igc_i225)
6236 return -EOPNOTSUPP;
6238 err = igc_save_qbv_schedule(adapter, qopt);
6239 if (err)
6240 return err;
6242 return igc_tsn_offload_apply(adapter);
6243 }
6245 static int igc_save_cbs_params(struct igc_adapter *adapter, int queue,
6246 bool enable, int idleslope, int sendslope,
6247 int hicredit, int locredit)
6248 {
6249 bool cbs_status[IGC_MAX_SR_QUEUES] = { false };
6250 struct net_device *netdev = adapter->netdev;
6251 struct igc_ring *ring;
6252 int i;
6254 /* i225 has two sets of credit-based shaper logic.
6255 * Supporting it only on the top two priority queues
6256 */
6257 if (queue < 0 || queue > 1)
6258 return -EINVAL;
6260 ring = adapter->tx_ring[queue];
6262 for (i = 0; i < IGC_MAX_SR_QUEUES; i++)
6263 if (adapter->tx_ring[i])
6264 cbs_status[i] = adapter->tx_ring[i]->cbs_enable;
6266 /* CBS should be enabled on the highest priority queue first in order
6267 * for the CBS algorithm to operate as intended.
6268 */
6269 if (enable) {
6270 if (queue == 1 && !cbs_status[0]) {
6271 netdev_err(netdev,
6272 "Enabling CBS on queue1 before queue0\n");
6273 return -EINVAL;
6274 }
6275 } else {
6276 if (queue == 0 && cbs_status[1]) {
6277 netdev_err(netdev,
6278 "Disabling CBS on queue0 before queue1\n");
6279 return -EINVAL;
6280 }
6281 }
6283 ring->cbs_enable = enable;
6284 ring->idleslope = idleslope;
6285 ring->sendslope = sendslope;
6286 ring->hicredit = hicredit;
6287 ring->locredit = locredit;
6289 return 0;
6290 }
6292 static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
6293 struct tc_cbs_qopt_offload *qopt)
6294 {
6295 struct igc_hw *hw = &adapter->hw;
6296 int err;
6298 if (hw->mac.type != igc_i225)
6299 return -EOPNOTSUPP;
6301 if (qopt->queue < 0 || qopt->queue > 1)
6302 return -EINVAL;
6304 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable,
6305 qopt->idleslope, qopt->sendslope,
6306 qopt->hicredit, qopt->locredit);
6307 if (err)
6308 return err;
6310 return igc_tsn_offload_apply(adapter);
6311 }
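/* Illustrative parameters (command not taken from this file): the CBS
 * values saved above usually come from the cbs qdisc, e.g.
 *   tc qdisc replace dev eth0 parent 100:1 cbs idleslope 20000 \
 *      sendslope -980000 hicredit 30 locredit -1470 offload 1
 * and, per igc_save_cbs_params(), queue 0 must be enabled first and
 * disabled last.
 */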
6313 static int igc_tc_query_caps(struct igc_adapter *adapter,
6314 struct tc_query_caps_base *base)
6315 {
6316 struct igc_hw *hw = &adapter->hw;
6318 switch (base->type) {
6319 case TC_SETUP_QDISC_TAPRIO: {
6320 struct tc_taprio_caps *caps = base->caps;
6322 caps->broken_mqprio = true;
6324 if (hw->mac.type == igc_i225) {
6325 caps->supports_queue_max_sdu = true;
6326 caps->gate_mask_per_txq = true;
6327 }
6329 return 0;
6330 }
6331 default:
6332 return -EOPNOTSUPP;
6333 }
6334 }
6336 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
6337 void *type_data)
6338 {
6339 struct igc_adapter *adapter = netdev_priv(dev);
6341 adapter->tc_setup_type = type;
6343 switch (type) {
6344 case TC_SETUP_QUERY_CAPS:
6345 return igc_tc_query_caps(adapter, type_data);
6346 case TC_SETUP_QDISC_TAPRIO:
6347 return igc_tsn_enable_qbv_scheduling(adapter, type_data);
6349 case TC_SETUP_QDISC_ETF:
6350 return igc_tsn_enable_launchtime(adapter, type_data);
6352 case TC_SETUP_QDISC_CBS:
6353 return igc_tsn_enable_cbs(adapter, type_data);
6355 default:
6356 return -EOPNOTSUPP;
6357 }
6358 }
6360 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6361 {
6362 struct igc_adapter *adapter = netdev_priv(dev);
6364 switch (bpf->command) {
6365 case XDP_SETUP_PROG:
6366 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
6367 case XDP_SETUP_XSK_POOL:
6368 return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
6369 bpf->xsk.queue_id);
6370 default:
6371 return -EOPNOTSUPP;
6372 }
6373 }
6375 static int igc_xdp_xmit(struct net_device *dev, int num_frames,
6376 struct xdp_frame **frames, u32 flags)
6377 {
6378 struct igc_adapter *adapter = netdev_priv(dev);
6379 int cpu = smp_processor_id();
6380 struct netdev_queue *nq;
6381 struct igc_ring *ring;
6382 int i, drops;
6384 if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
6385 return -ENETDOWN;
6387 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6388 return -EINVAL;
6390 ring = igc_xdp_get_tx_ring(adapter, cpu);
6391 nq = txring_txq(ring);
6393 __netif_tx_lock(nq, cpu);
6395 /* Avoid transmit queue timeout since we share it with the slow path */
6396 txq_trans_cond_update(nq);
6398 drops = 0;
6399 for (i = 0; i < num_frames; i++) {
6400 int err;
6401 struct xdp_frame *xdpf = frames[i];
6403 err = igc_xdp_init_tx_descriptor(ring, xdpf);
6404 if (err) {
6405 xdp_return_frame_rx_napi(xdpf);
6406 drops++;
6407 }
6408 }
6410 if (flags & XDP_XMIT_FLUSH)
6411 igc_flush_tx_descriptors(ring);
6413 __netif_tx_unlock(nq);
6415 return num_frames - drops;
6416 }
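/* Descriptors queued above become visible to hardware only when the
 * tail register is written; deferring that write until XDP_XMIT_FLUSH
 * lets the core batch several ndo_xdp_xmit() calls and pay the MMIO
 * cost once per batch.
 */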
6418 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
6419 struct igc_q_vector *q_vector)
6420 {
6421 struct igc_hw *hw = &adapter->hw;
6422 u32 eics = 0;
6424 eics |= q_vector->eims_value;
6425 wr32(IGC_EICS, eics);
6426 }
6428 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
6429 {
6430 struct igc_adapter *adapter = netdev_priv(dev);
6431 struct igc_q_vector *q_vector;
6432 struct igc_ring *ring;
6434 if (test_bit(__IGC_DOWN, &adapter->state))
6435 return -ENETDOWN;
6437 if (!igc_xdp_is_enabled(adapter))
6438 return -ENXIO;
6440 if (queue_id >= adapter->num_rx_queues)
6441 return -EINVAL;
6443 ring = adapter->rx_ring[queue_id];
6445 if (!ring->xsk_pool)
6446 return -ENXIO;
6448 q_vector = adapter->q_vector[queue_id];
6449 if (!napi_if_scheduled_mark_missed(&q_vector->napi))
6450 igc_trigger_rxtxq_interrupt(adapter, q_vector);
6452 return 0;
6453 }
6455 static const struct net_device_ops igc_netdev_ops = {
6456 .ndo_open = igc_open,
6457 .ndo_stop = igc_close,
6458 .ndo_start_xmit = igc_xmit_frame,
6459 .ndo_set_rx_mode = igc_set_rx_mode,
6460 .ndo_set_mac_address = igc_set_mac,
6461 .ndo_change_mtu = igc_change_mtu,
6462 .ndo_tx_timeout = igc_tx_timeout,
6463 .ndo_get_stats64 = igc_get_stats64,
6464 .ndo_fix_features = igc_fix_features,
6465 .ndo_set_features = igc_set_features,
6466 .ndo_features_check = igc_features_check,
6467 .ndo_eth_ioctl = igc_ioctl,
6468 .ndo_setup_tc = igc_setup_tc,
6469 .ndo_bpf = igc_bpf,
6470 .ndo_xdp_xmit = igc_xdp_xmit,
6471 .ndo_xsk_wakeup = igc_xsk_wakeup,
6472 };
6474 /* PCIe configuration access */
6475 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6476 {
6477 struct igc_adapter *adapter = hw->back;
6479 pci_read_config_word(adapter->pdev, reg, value);
6480 }
6482 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6483 {
6484 struct igc_adapter *adapter = hw->back;
6486 pci_write_config_word(adapter->pdev, reg, *value);
6487 }
6489 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6490 {
6491 struct igc_adapter *adapter = hw->back;
6493 if (!pci_is_pcie(adapter->pdev))
6494 return -IGC_ERR_CONFIG;
6496 pcie_capability_read_word(adapter->pdev, reg, value);
6498 return IGC_SUCCESS;
6499 }
6501 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6502 {
6503 struct igc_adapter *adapter = hw->back;
6505 if (!pci_is_pcie(adapter->pdev))
6506 return -IGC_ERR_CONFIG;
6508 pcie_capability_write_word(adapter->pdev, reg, *value);
6510 return IGC_SUCCESS;
6511 }
6513 u32 igc_rd32(struct igc_hw *hw, u32 reg)
6514 {
6515 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
6516 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
6517 u32 value = 0;
6519 if (IGC_REMOVED(hw_addr))
6520 return ~value;
6522 value = readl(&hw_addr[reg]);
6524 /* reads should not return all F's */
6525 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
6526 struct net_device *netdev = igc->netdev;
6528 hw->hw_addr = NULL;
6529 netif_device_detach(netdev);
6530 netdev_err(netdev, "PCIe link lost, device now detached\n");
6531 WARN(pci_device_is_present(igc->pdev),
6532 "igc: Failed to read reg 0x%x!\n", reg);
6533 }
6535 return value;
6536 }
6538 /* Mapping HW RSS Type to enum xdp_rss_hash_type */
6539 static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
6540 [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2,
6541 [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP,
6542 [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4,
6543 [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP,
6544 [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
6545 [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6,
6546 [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
6547 [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP,
6548 [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP,
6549 [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
6550 [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
6551 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
6552 [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions */
6553 [13] = XDP_RSS_TYPE_NONE,
6554 [14] = XDP_RSS_TYPE_NONE,
6555 [15] = XDP_RSS_TYPE_NONE,
6556 };
6558 static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
6559 enum xdp_rss_hash_type *rss_type)
6560 {
6561 const struct igc_xdp_buff *ctx = (void *)_ctx;
6563 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
6564 return -ENODATA;
6566 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
6567 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];
6569 return 0;
6570 }
6572 static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
6573 {
6574 const struct igc_xdp_buff *ctx = (void *)_ctx;
6576 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
6577 *timestamp = ctx->rx_ts;
6579 return 0;
6580 }
6582 return -ENODATA;
6583 }
6585 static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
6586 .xmo_rx_hash = igc_xdp_rx_hash,
6587 .xmo_rx_timestamp = igc_xdp_rx_timestamp,
6588 };
6590 static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
6591 {
6592 struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
6593 hrtimer);
6594 unsigned int i;
6596 adapter->qbv_transition = true;
6597 for (i = 0; i < adapter->num_tx_queues; i++) {
6598 struct igc_ring *tx_ring = adapter->tx_ring[i];
6600 if (tx_ring->admin_gate_closed) {
6601 tx_ring->admin_gate_closed = false;
6602 tx_ring->oper_gate_closed = true;
6603 } else {
6604 tx_ring->oper_gate_closed = false;
6605 }
6606 }
6607 adapter->qbv_transition = false;
6608 return HRTIMER_NORESTART;
6609 }
6612 * igc_probe - Device Initialization Routine
6613 * @pdev: PCI device information struct
6614 * @ent: entry in igc_pci_tbl
6616 * Returns 0 on success, negative on failure
6618 * igc_probe initializes an adapter identified by a pci_dev structure.
6619 * The OS initialization, configuring the adapter private structure,
6620 * and a hardware reset occur.
6621 */
6622 static int igc_probe(struct pci_dev *pdev,
6623 const struct pci_device_id *ent)
6624 {
6625 struct igc_adapter *adapter;
6626 struct net_device *netdev;
6627 struct igc_hw *hw;
6628 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
6629 int err;
6631 err = pci_enable_device_mem(pdev);
6632 if (err)
6633 return err;
6635 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6636 if (err) {
6637 dev_err(&pdev->dev,
6638 "No usable DMA configuration, aborting\n");
6639 goto err_dma;
6640 }
6642 err = pci_request_mem_regions(pdev, igc_driver_name);
6643 if (err)
6644 goto err_pci_reg;
6646 err = pci_enable_ptm(pdev, NULL);
6647 if (err < 0)
6648 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
6650 pci_set_master(pdev);
6652 err = -ENOMEM;
6653 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
6654 IGC_MAX_TX_QUEUES);
6656 if (!netdev)
6657 goto err_alloc_etherdev;
6659 SET_NETDEV_DEV(netdev, &pdev->dev);
6661 pci_set_drvdata(pdev, netdev);
6662 adapter = netdev_priv(netdev);
6663 adapter->netdev = netdev;
6664 adapter->pdev = pdev;
6665 hw = &adapter->hw;
6666 hw->back = adapter;
6667 adapter->port_num = hw->bus.func;
6668 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6670 err = pci_save_state(pdev);
6671 if (err)
6672 goto err_ioremap;
6674 err = -EIO;
6675 adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
6676 pci_resource_len(pdev, 0));
6677 if (!adapter->io_addr)
6678 goto err_ioremap;
6680 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
6681 hw->hw_addr = adapter->io_addr;
6683 netdev->netdev_ops = &igc_netdev_ops;
6684 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
6685 igc_ethtool_set_ops(netdev);
6686 netdev->watchdog_timeo = 5 * HZ;
6688 netdev->mem_start = pci_resource_start(pdev, 0);
6689 netdev->mem_end = pci_resource_end(pdev, 0);
6691 /* PCI config space info */
6692 hw->vendor_id = pdev->vendor;
6693 hw->device_id = pdev->device;
6694 hw->revision_id = pdev->revision;
6695 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6696 hw->subsystem_device_id = pdev->subsystem_device;
6698 /* Copy the default MAC and PHY function pointers */
6699 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6700 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6702 /* Initialize skew-specific constants */
6703 err = ei->get_invariants(hw);
6704 if (err)
6705 goto err_sw_init;
6707 /* Add supported features to the features list*/
6708 netdev->features |= NETIF_F_SG;
6709 netdev->features |= NETIF_F_TSO;
6710 netdev->features |= NETIF_F_TSO6;
6711 netdev->features |= NETIF_F_TSO_ECN;
6712 netdev->features |= NETIF_F_RXHASH;
6713 netdev->features |= NETIF_F_RXCSUM;
6714 netdev->features |= NETIF_F_HW_CSUM;
6715 netdev->features |= NETIF_F_SCTP_CRC;
6716 netdev->features |= NETIF_F_HW_TC;
6718 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
6719 NETIF_F_GSO_GRE_CSUM | \
6720 NETIF_F_GSO_IPXIP4 | \
6721 NETIF_F_GSO_IPXIP6 | \
6722 NETIF_F_GSO_UDP_TUNNEL | \
6723 NETIF_F_GSO_UDP_TUNNEL_CSUM)
6725 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
6726 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
6728 /* setup the private structure */
6729 err = igc_sw_init(adapter);
6730 if (err)
6731 goto err_sw_init;
6733 /* copy netdev features into list of user selectable features */
6734 netdev->hw_features |= NETIF_F_NTUPLE;
6735 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
6736 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
6737 netdev->hw_features |= netdev->features;
6739 netdev->features |= NETIF_F_HIGHDMA;
6741 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
6742 netdev->mpls_features |= NETIF_F_HW_CSUM;
6743 netdev->hw_enc_features |= netdev->vlan_features;
6745 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
6746 NETDEV_XDP_ACT_XSK_ZEROCOPY;
6748 /* MTU range: 68 - 9216 */
6749 netdev->min_mtu = ETH_MIN_MTU;
6750 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
6752 /* before reading the NVM, reset the controller to put the device in a
6753 * known good starting state
6754 */
6755 hw->mac.ops.reset_hw(hw);
6757 if (igc_get_flash_presence_i225(hw)) {
6758 if (hw->nvm.ops.validate(hw) < 0) {
6759 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6765 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
6766 /* copy the MAC address out of the NVM */
6767 if (hw->mac.ops.read_mac_addr(hw))
6768 dev_err(&pdev->dev, "NVM Read Error\n");
6771 eth_hw_addr_set(netdev, hw->mac.addr);
6773 if (!is_valid_ether_addr(netdev->dev_addr)) {
6774 dev_err(&pdev->dev, "Invalid MAC Address\n");
6779 /* configure RXPBSIZE and TXPBSIZE */
6780 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
6781 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
6783 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
6784 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
6786 INIT_WORK(&adapter->reset_task, igc_reset_task);
6787 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
6789 hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6790 adapter->hrtimer.function = &igc_qbv_scheduling_timer;
6792 /* Initialize link properties that are user-changeable */
6793 adapter->fc_autoneg = true;
6794 hw->mac.autoneg = true;
6795 hw->phy.autoneg_advertised = 0xaf;
6797 hw->fc.requested_mode = igc_fc_default;
6798 hw->fc.current_mode = igc_fc_default;
6800 /* By default, support wake on port A */
6801 adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
6803 /* initialize the wol settings based on the eeprom settings */
6804 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
6805 adapter->wol |= IGC_WUFC_MAG;
6807 device_set_wakeup_enable(&adapter->pdev->dev,
6808 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
6810 igc_ptp_init(adapter);
6812 igc_tsn_clear_schedule(adapter);
6814 /* reset the hardware with the new settings */
6815 igc_reset(adapter);
6817 /* let the f/w know that the h/w is now under the control of the
6818 * driver.
6819 */
6820 igc_get_hw_control(adapter);
6822 strncpy(netdev->name, "eth%d", IFNAMSIZ);
6823 err = register_netdev(netdev);
6824 if (err)
6825 goto err_register;
6827 /* carrier off reporting is important to ethtool even BEFORE open */
6828 netif_carrier_off(netdev);
6830 /* Check if Media Autosense is enabled */
6831 adapter->ei = *ei;
6833 /* print pcie link status and MAC address */
6834 pcie_print_link_status(pdev);
6835 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
6837 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
6838 /* Disable EEE for internal PHY devices */
6839 hw->dev_spec._base.eee_enable = false;
6840 adapter->flags &= ~IGC_FLAG_EEE;
6841 igc_set_eee_i225(hw, false, false, false);
6843 pm_runtime_put_noidle(&pdev->dev);
6845 return 0;
6847 err_register:
6848 igc_release_hw_control(adapter);
6849 err_eeprom:
6850 if (!igc_check_reset_block(hw))
6851 igc_reset_phy(hw);
6852 err_sw_init:
6853 igc_clear_interrupt_scheme(adapter);
6854 iounmap(adapter->io_addr);
6855 err_ioremap:
6856 free_netdev(netdev);
6857 err_alloc_etherdev:
6858 pci_release_mem_regions(pdev);
6859 err_pci_reg:
6860 err_dma:
6861 pci_disable_device(pdev);
6862 return err;
6863 }
6866 * igc_remove - Device Removal Routine
6867 * @pdev: PCI device information struct
6869 * igc_remove is called by the PCI subsystem to alert the driver
6870 * that it should release a PCI device. This could be caused by a
6871 * Hot-Plug event, or because the driver is going to be removed from
6872 * memory.
6873 */
6874 static void igc_remove(struct pci_dev *pdev)
6875 {
6876 struct net_device *netdev = pci_get_drvdata(pdev);
6877 struct igc_adapter *adapter = netdev_priv(netdev);
6879 pm_runtime_get_noresume(&pdev->dev);
6881 igc_flush_nfc_rules(adapter);
6883 igc_ptp_stop(adapter);
6885 pci_disable_ptm(pdev);
6886 pci_clear_master(pdev);
6888 set_bit(__IGC_DOWN, &adapter->state);
6890 del_timer_sync(&adapter->watchdog_timer);
6891 del_timer_sync(&adapter->phy_info_timer);
6893 cancel_work_sync(&adapter->reset_task);
6894 cancel_work_sync(&adapter->watchdog_task);
6895 hrtimer_cancel(&adapter->hrtimer);
6897 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6898 * would have already happened in close and is redundant.
6900 igc_release_hw_control(adapter);
6901 unregister_netdev(netdev);
6903 igc_clear_interrupt_scheme(adapter);
6904 pci_iounmap(pdev, adapter->io_addr);
6905 pci_release_mem_regions(pdev);
6907 free_netdev(netdev);
6909 pci_disable_device(pdev);
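/* __igc_shutdown - common suspend/shutdown path. Arms the wake-up filter
 * control (WUFC) when Wake-on-LAN is requested, powers the PHY up or down
 * accordingly, and reports via @enable_wake (when non-NULL) whether the
 * device should be left armed to wake the system.
 */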
static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igc_close(netdev, true);

	igc_ptp_suspend(adapter);

	igc_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_LU)
		wufc &= ~IGC_WUFC_LNKC;

	if (wufc) {
		igc_setup_rctl(adapter);
		igc_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IGC_WUFC_MC) {
			rctl = rd32(IGC_RCTL);
			rctl |= IGC_RCTL_MPE;
			wr32(IGC_RCTL, rctl);
		}

		ctrl = rd32(IGC_CTRL);
		ctrl |= IGC_CTRL_ADVD3WUC;
		wr32(IGC_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igc_disable_pcie_master(hw);

		wr32(IGC_WUC, IGC_WUC_PME_EN);
		wr32(IGC_WUFC, wufc);
	} else {
		wr32(IGC_WUC, 0);
		wr32(IGC_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igc_power_down_phy_copper_base(&adapter->hw);
	else
		igc_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
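/* igc_deliver_wake_packet - pass the packet that woke the system up the
 * stack. The controller latches the first IGC_WUPM_BYTES (128) bytes of
 * the wake packet in the WUPM registers; rebuild it into an skb and
 * inject it with netif_rx().
 */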
static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}
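/* igc_resume - bring the device back from a sleep state. Restores PCI
 * state, rebuilds the interrupt scheme, resets the hardware, delivers a
 * latched wake packet if one triggered the resume, and reopens the
 * interface if it was running.
 */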
static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);

	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}
static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}
static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
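/* igc_runtime_idle - called by the PM core to ask whether the device can
 * runtime-suspend. With no link there is nothing to do, so schedule a
 * suspend five seconds out; returning -EBUSY defers the immediate suspend
 * to that scheduled attempt.
 */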
static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}
#endif /* CONFIG_PM */
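/* igc_shutdown - PCI shutdown hook. On system power-off, leave the NIC
 * armed in D3hot when wake-up is configured so that, for example, a magic
 * packet can power the machine back up.
 */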
static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * igc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igc_resume routine.
 */
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
/**
 * igc_io_resume - called when traffic can start to flow again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igc_resume routine.
 */
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}
static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};
#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
#endif
static struct pci_driver igc_driver = {
	.name     = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe    = igc_probe,
	.remove   = igc_remove,
#ifdef CONFIG_PM
	.driver.pm = &igc_pm_ops,
#endif
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};
/**
 * igc_reinit_queues - reinitialize the interrupt scheme and queues
 * @adapter: pointer to adapter structure
 *
 * Returns 0 on success, negative errno on failure.
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);

	return err;
}
/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}
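/* igc_disable_rx_ring_hw - stop a Rx queue at the hardware level by
 * clearing its enable bit in RXDCTL and requesting a software flush of
 * pending descriptors.
 */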
static void igc_disable_rx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 rxdctl;

	rxdctl = rd32(IGC_RXDCTL(idx));
	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
	rxdctl |= IGC_RXDCTL_SWFLUSH;
	wr32(IGC_RXDCTL(idx), rxdctl);
}
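/* igc_disable_rx_ring - disable a Rx ring: quiesce it in hardware first,
 * then release the software-side buffers.
 */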
void igc_disable_rx_ring(struct igc_ring *ring)
{
	igc_disable_rx_ring_hw(ring);
	igc_clean_rx_ring(ring);
}
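/* igc_enable_rx_ring - reconfigure and refill a Rx ring. Rings bound to an
 * AF_XDP socket buffer pool are refilled from the zero-copy pool; all
 * others get regular kernel buffers.
 */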
void igc_enable_rx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_rx_ring(adapter, ring);

	if (ring->xsk_pool)
		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
	else
		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}
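/* igc_disable_tx_ring_hw - Tx-side counterpart of igc_disable_rx_ring_hw:
 * clear the queue-enable bit in TXDCTL and request a software flush.
 */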
static void igc_disable_tx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 txdctl;

	txdctl = rd32(IGC_TXDCTL(idx));
	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
	txdctl |= IGC_TXDCTL_SWFLUSH;
	wr32(IGC_TXDCTL(idx), txdctl);
}
void igc_disable_tx_ring(struct igc_ring *ring)
{
	igc_disable_tx_ring_hw(ring);
	igc_clean_tx_ring(ring);
}
void igc_enable_tx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_tx_ring(adapter, ring);
}
/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);

	ret = pci_register_driver(&igc_driver);

	return ret;
}

module_init(igc_init_module);
/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);