// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";
/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	return netdev_priv(pci_get_drvdata(pdev));
}
/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}
/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}
/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}
/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}
/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
	unsigned int wait, delay = 10;

	for (wait = 0; wait < msecs; wait += delay) {
		if (mutex_trylock(lock))
			return 0;

		msleep(delay);
	}

	return -1;
}
/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}
/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}
/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}
/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}
/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}
/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}
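
/* Note: @mask is indexed by traffic vector, not by MSI-X entry. MSI-X vector
 * 0 is reserved for the admin queue, so traffic vector i is controlled by
 * bit (i - 1) of @mask; iavf_irq_enable() below simply passes ~0 to hit all
 * of them at once.
 */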
/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}
/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}
/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}
/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}
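
/* The ITR writes in the two mapping helpers above shift the value right by
 * one because the hardware interval field counts in 2 usec units while the
 * driver tracks ITR in 1 usec units (per the datasheets this driver family
 * targets); current_itr is then synced to target_itr so the dynamic ITR
 * code starts from a known value.
 */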
/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
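
/* For illustration: with 8 active queues but only 4 traffic vectors, the
 * round-robin loop above produces
 *
 *	vector 0 <- rings 0 and 4
 *	vector 1 <- rings 1 and 5
 *	vector 2 <- rings 2 and 6
 *	vector 3 <- rings 3 and 7
 *
 * i.e. each vector services the rings whose index is congruent to its own
 * index modulo q_vectors.
 */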
/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}
/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}
/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_update_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}
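
/* The names built above are what show up in /proc/interrupts; e.g. for a
 * netdev called eth0, a combined Tx/Rx vector registers as
 * "iavf-eth0-TxRx-0", while dedicated vectors would appear as
 * "iavf-eth0-rx-0" / "iavf-eth0-tx-0".
 */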
/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}
/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}
/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}
/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}
/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu.  On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}
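
/* Resulting Rx buffer sizes, for reference:
 *   - legacy Rx, or PAGE_SIZE >= 8192:		2048 bytes
 *   - build_skb path, jumbo MTU on 4K pages:	3072 bytes
 *   - build_skb path, standard MTU (when 2K
 *     is big enough with padding):		1536 - NET_IP_ALIGN bytes
 */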
/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}
/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}
/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	u16 vid;

	/* re-add all VLAN filters */
	for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
		iavf_add_vlan(adapter, vid);
}
/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;

	if (iavf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;

	set_bit(vid, adapter->vsi.active_vlans);
	return 0;
}
/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	iavf_del_vlan(adapter, vid);
	clear_bit(vid, adapter->vsi.active_vlans);

	return 0;
}
/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}
/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->is_new_mac = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}
/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f)
		ether_addr_copy(hw->mac.addr, addr->sa_data);

	return (f == NULL) ? -ENOMEM : 0;
}
/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}
/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}
/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}
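
/* Like most ndo callbacks in this driver, iavf_set_rx_mode() only records
 * what needs to change (address lists, aq_required flag bits); the actual
 * virtchnl messages to the PF are sent later from the watchdog task via
 * iavf_process_aq_command().
 */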
/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}
/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}
/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}
/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_RUNNING);
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	struct iavf_fdir_fltr *fdir;
	struct iavf_mac_filter *f;
	struct iavf_adv_rss *rss;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	/* remove all advance RSS configuration */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
		rss->state = IAVF_ADV_RSS_DEL_REQUEST;
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __IAVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}
/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}
/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did.  Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}
/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);

	return err;
}
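
/* Example of the budget math above, with NONQ_VECS == 1 (the lone misc
 * vector): on a host with 16 queue pairs but a PF-imposed max_vectors of 5,
 * v_budget = min(16 + 1, 5) = 5, leaving one misc vector and four traffic
 * vectors. The queue count itself is not reduced here; extra queues simply
 * share vectors via iavf_map_rings_to_vectors().
 */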
/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}
/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}
/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}
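
/* Three RSS programming paths, chosen by capability:
 *   - RSS_PF:  the PF owns RSS; just set the SET_RSS_LUT/SET_RSS_KEY flags
 *              and let the watchdog send the virtchnl messages.
 *   - RSS_AQ:  program key/LUT through admin queue commands.
 *   - otherwise: write the VFQF_HKEY/VFQF_HLUT registers directly.
 */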
/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}
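
/* For example, with a 64-entry LUT and 4 active queues the table becomes
 * 0, 1, 2, 3, 0, 1, 2, 3, ... so hashed flows spread evenly across all
 * queues.
 */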
/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}
/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}
/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}
/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}
/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}
/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}
/**
 * iavf_process_aq_command - process aq_required flags
 * and sends aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
		iavf_add_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
		iavf_del_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
		iavf_add_adv_rss_cfg(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
		iavf_del_adv_rss_cfg(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
		iavf_request_stats(adapter);
		return 0;
	}

	return -EAGAIN;
}
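
/* Only one aq_required bit is serviced per call: each virtchnl operation
 * must see its completion arrive over the ARQ before another can be issued.
 * The watchdog keeps rescheduling (every 20 msecs while aq_required is
 * non-zero) until the whole backlog drains.
 */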
/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_STARTUP driver state.
 * When success the state is changed to __IAVF_INIT_VERSION_CHECK
 * when fails the state is changed to __IAVF_INIT_FAILED
 **/
static void iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	err = iavf_set_mac_type(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
		goto err;
	}

	err = iavf_check_reset_complete(hw);
	if (err) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 err);
		goto err;
	}
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	err = iavf_init_adminq(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
		goto err;
	}
	err = iavf_send_api_ver(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_INIT_VERSION_CHECK driver state.
 * When success the state is changed to __IAVF_INIT_GET_RESOURCES
 * when fails the state is changed to __IAVF_INIT_FAILED
 **/
static void iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		iavf_change_state(adapter, __IAVF_STARTUP);
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
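
/* Init state machine, driven from iavf_watchdog_task():
 *
 *   __IAVF_STARTUP            -> __IAVF_INIT_VERSION_CHECK
 *   __IAVF_INIT_VERSION_CHECK -> __IAVF_INIT_GET_RESOURCES
 *   __IAVF_INIT_GET_RESOURCES -> __IAVF_DOWN
 *
 * Any step that fails moves to __IAVF_INIT_FAILED, from which the watchdog
 * retries the last state until IAVF_AQ_MAX_ERR attempts are exhausted.
 */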
/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_INIT_GET_RESOURCES driver state and
 * finishes driver initialization procedure.
 * When success the state is changed to __IAVF_DOWN
 * when fails the state is changed to __IAVF_INIT_FAILED
 **/
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
	/* aq msg sent, awaiting reply */
	if (!adapter->vf_res) {
		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
					  GFP_KERNEL);
		if (!adapter->vf_res) {
			err = -ENOMEM;
			goto err;
		}
	}
	err = iavf_get_vf_config(adapter);
	if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
		err = iavf_send_vf_config_msg(adapter);
		goto err_alloc;
	} else if (err == IAVF_ERR_PARAM) {
		/* We only get ERR_PARAM if the device is in a very bad
		 * state or if we've been disabled for previous bad
		 * behavior. Either way, we're done now.
		 */
		iavf_shutdown_adminq(hw);
		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
		return;
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
		goto err_alloc;
	}

	err = iavf_process_config(adapter);
	if (err)
		goto err_alloc;
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
		VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* set the semaphore to prevent any callbacks after device registration
	 * up to time when state of driver will be set to __IAVF_DOWN
	 */
	rtnl_lock();
	if (!adapter->netdev_registered) {
		err = register_netdevice(netdev);
		if (err) {
			rtnl_unlock();
			goto err_register;
		}
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	iavf_change_state(adapter, __IAVF_DOWN);
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	rtnl_unlock();

	iavf_misc_irq_enable(adapter);
	wake_up(&adapter->down_waitqueue);

	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut) {
		err = -ENOMEM;
		goto err_mem;
	}
	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		iavf_init_rss(adapter);

	return;
err_mem:
	iavf_free_rss(adapter);
err_register:
	iavf_free_misc_irq(adapter);
err_sw_init:
	iavf_reset_interrupt_capability(adapter);
err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void iavf_watchdog_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    watchdog_task.work);
	struct iavf_hw *hw = &adapter->hw;
	u32 reg_val;

	if (!mutex_trylock(&adapter->crit_lock))
		goto restart_watchdog;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		iavf_change_state(adapter, __IAVF_COMM_FAILED);

	if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
	    adapter->state != __IAVF_RESETTING) {
		iavf_change_state(adapter, __IAVF_RESETTING);
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	}

	switch (adapter->state) {
	case __IAVF_STARTUP:
		iavf_startup(adapter);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(30));
		return;
	case __IAVF_INIT_VERSION_CHECK:
		iavf_init_version_check(adapter);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(30));
		return;
	case __IAVF_INIT_GET_RESOURCES:
		iavf_init_get_resources(adapter);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(1));
		return;
	case __IAVF_INIT_FAILED:
		if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
			dev_err(&adapter->pdev->dev,
				"Failed to communicate with PF; waiting before retry\n");
			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
			iavf_shutdown_adminq(hw);
			mutex_unlock(&adapter->crit_lock);
			queue_delayed_work(iavf_wq,
					   &adapter->watchdog_task, (5 * HZ));
			return;
		}
		/* Try again from failed step */
		iavf_change_state(adapter, adapter->last_state);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
		return;
	case __IAVF_COMM_FAILED:
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
		    reg_val == VIRTCHNL_VFR_COMPLETED) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev,
				"Hardware came out of reset. Attempting reinit.\n");
			/* When init task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			iavf_change_state(adapter, __IAVF_STARTUP);
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq,
				   &adapter->watchdog_task,
				   msecs_to_jiffies(10));
		return;
	case __IAVF_RESETTING:
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
		return;
	case __IAVF_DOWN:
	case __IAVF_DOWN_PENDING:
	case __IAVF_TESTING:
	case __IAVF_RUNNING:
		if (adapter->current_op) {
			if (!iavf_asq_done(hw)) {
				dev_dbg(&adapter->pdev->dev,
					"Admin queue timeout\n");
				iavf_send_api_ver(adapter);
			}
		} else {
			/* An error will be returned if no commands were
			 * processed; use this opportunity to update stats
			 */
			if (iavf_process_aq_command(adapter) &&
			    adapter->state == __IAVF_RUNNING)
				iavf_request_stats(adapter);
		}
		if (adapter->state == __IAVF_RUNNING)
			iavf_detect_recover_hung(&adapter->vsi);
		break;
	case __IAVF_REMOVE:
	default:
		mutex_unlock(&adapter->crit_lock);
		return;
	}

	/* check for hw reset */
	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
	if (!reg_val) {
		adapter->flags |= IAVF_FLAG_RESET_PENDING;
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		queue_work(iavf_wq, &adapter->reset_task);
		mutex_unlock(&adapter->crit_lock);
		queue_delayed_work(iavf_wq,
				   &adapter->watchdog_task, HZ * 2);
		return;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
	mutex_unlock(&adapter->crit_lock);
restart_watchdog:
	queue_work(iavf_wq, &adapter->adminq_task);
	if (adapter->aq_required)
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(20));
	else
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
}
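
/* Watchdog cadence, as scheduled above: tens of msecs between init steps,
 * 10 msecs while in __IAVF_COMM_FAILED, 20 msecs whenever aq_required work
 * is still pending, and HZ * 2 once the adapter is up and idle.
 */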
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *fv, *fvtmp;
	struct iavf_cloud_filter *cf, *cftmp;

	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __IAVF_RUNNING) {
		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
		iavf_irq_disable(adapter);
		iavf_free_traffic_irqs(adapter);
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);
	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
	iavf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	mutex_unlock(&adapter->crit_lock);
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	iavf_change_state(adapter, __IAVF_DOWN);
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
2172 * iavf_reset_task - Call-back task to handle hardware reset
2173 * @work: pointer to work_struct
2175 * During reset we need to shut down and reinitialize the admin queue
2176 * before we can use it to communicate with the PF again. We also clear
2177 * and reinit the rings because that context is lost as well.
2179 static void iavf_reset_task(struct work_struct *work)
2181 struct iavf_adapter *adapter = container_of(work,
2182 struct iavf_adapter,
2184 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2185 struct net_device *netdev = adapter->netdev;
2186 struct iavf_hw *hw = &adapter->hw;
2187 struct iavf_mac_filter *f, *ftmp;
2188 struct iavf_cloud_filter *cf;
2193 /* When the device is being removed it doesn't make sense to run the
2194 * reset task; just return in such a case.
2196 if (mutex_is_locked(&adapter->remove_lock))
2199 if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
2200 schedule_work(&adapter->reset_task);
2203 while (!mutex_trylock(&adapter->client_lock))
2204 usleep_range(500, 1000);
2205 if (CLIENT_ENABLED(adapter)) {
2206 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2207 IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2208 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2209 IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2210 cancel_delayed_work_sync(&adapter->client_task);
2211 iavf_notify_client_close(&adapter->vsi, true);
2213 iavf_misc_irq_disable(adapter);
2214 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2215 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2216 /* Restart the AQ here. If we have been reset but didn't
2217 * detect it, or if the PF had to reinit, our AQ will be hosed.
2219 iavf_shutdown_adminq(hw);
2220 iavf_init_adminq(hw);
2221 iavf_request_reset(adapter);
2223 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2225 /* poll until we see the reset actually happen */
2226 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2227 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2228 IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2231 usleep_range(5000, 10000);
2233 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2234 dev_info(&adapter->pdev->dev, "Never saw reset\n");
2235 goto continue_reset; /* act like the reset happened */
2238 /* wait until the reset is complete and the PF is responding to us */
2239 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2240 /* sleep first to make sure a minimum wait time is met */
2241 msleep(IAVF_RESET_WAIT_MS);
2243 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2244 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2245 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
2249 pci_set_master(adapter->pdev);
2250 pci_restore_msi_state(adapter->pdev);
2252 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
2253 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
2255 iavf_disable_vf(adapter);
2256 mutex_unlock(&adapter->client_lock);
2257 return; /* Do not attempt to reinit. It's dead, Jim. */
2261 /* We don't use netif_running() because it may be true prior to
2262 * ndo_open() returning, so we can't assume it means all our open
2263 * tasks have finished, since we're not holding the rtnl_lock here.
2265 running = ((adapter->state == __IAVF_RUNNING) ||
2266 (adapter->state == __IAVF_RESETTING));
2269 netdev->flags &= ~IFF_UP;
2270 netif_carrier_off(netdev);
2271 netif_tx_stop_all_queues(netdev);
2272 adapter->link_up = false;
2273 iavf_napi_disable_all(adapter);
2275 iavf_irq_disable(adapter);
2277 iavf_change_state(adapter, __IAVF_RESETTING);
2278 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2280 /* free the Tx/Rx rings and descriptors, might be better to just
2281 * re-use them sometime in the future
2283 iavf_free_all_rx_resources(adapter);
2284 iavf_free_all_tx_resources(adapter);
2286 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
2287 /* kill and reinit the admin queue */
2288 iavf_shutdown_adminq(hw);
2289 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2290 err = iavf_init_adminq(hw);
2292 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
2294 adapter->aq_required = 0;
2296 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2297 err = iavf_reinit_interrupt_scheme(adapter);
2302 if (RSS_AQ(adapter)) {
2303 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2305 err = iavf_init_rss(adapter);
2310 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
2311 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
2313 spin_lock_bh(&adapter->mac_vlan_list_lock);
2315 /* Delete filter for the current MAC address, it could have
2316 * been changed by the PF via administratively set MAC.
2317 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
2319 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2320 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
2325 /* re-add all MAC filters */
2326 list_for_each_entry(f, &adapter->mac_filter_list, list) {
2329 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2331 /* check if TCs are running and re-add all cloud filters */
2332 spin_lock_bh(&adapter->cloud_filter_list_lock);
2333 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
2335 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2339 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2341 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
2342 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2343 iavf_misc_irq_enable(adapter);
2345 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
2347 /* We were running when the reset started, so we need to restore some
2351 /* allocate transmit descriptors */
2352 err = iavf_setup_all_tx_resources(adapter);
2356 /* allocate receive descriptors */
2357 err = iavf_setup_all_rx_resources(adapter);
2361 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2362 err = iavf_request_traffic_irqs(adapter, netdev->name);
2366 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2369 iavf_configure(adapter);
2371 /* iavf_up_complete() will switch device back
2374 iavf_up_complete(adapter);
2375 netdev->flags |= IFF_UP;
2376 iavf_irq_enable(adapter, true);
2378 iavf_change_state(adapter, __IAVF_DOWN);
2379 wake_up(&adapter->down_waitqueue);
2381 mutex_unlock(&adapter->client_lock);
2382 mutex_unlock(&adapter->crit_lock);
2386 mutex_unlock(&adapter->client_lock);
2387 mutex_unlock(&adapter->crit_lock);
2389 iavf_change_state(adapter, __IAVF_RUNNING);
2390 netdev->flags |= IFF_UP;
2392 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
2397 * iavf_adminq_task - worker thread to clean the admin queue
2398 * @work: pointer to work_struct containing our data
2400 static void iavf_adminq_task(struct work_struct *work)
2402 struct iavf_adapter *adapter =
2403 container_of(work, struct iavf_adapter, adminq_task);
2404 struct iavf_hw *hw = &adapter->hw;
2405 struct iavf_arq_event_info event;
2406 enum virtchnl_ops v_op;
2407 enum iavf_status ret, v_ret;
2411 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2414 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
2415 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
2419 if (iavf_lock_timeout(&adapter->crit_lock, 200))
2422 ret = iavf_clean_arq_element(hw, &event, &pending);
2423 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
2424 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
2427 break; /* No event to process or error cleaning ARQ */
2429 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2432 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
2434 mutex_unlock(&adapter->crit_lock);
2436 if ((adapter->flags &
2437 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
2438 adapter->state == __IAVF_RESETTING)
2441 /* check for error indications */
2442 val = rd32(hw, hw->aq.arq.len);
2443 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
2446 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
2447 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
2448 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
2450 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
2451 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
2452 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
2454 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
2455 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
2456 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
2459 wr32(hw, hw->aq.arq.len, val);
2461 val = rd32(hw, hw->aq.asq.len);
2463 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
2464 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2465 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
2467 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
2468 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2469 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
2471 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
2472 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2473 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
2476 wr32(hw, hw->aq.asq.len, val);
2479 kfree(event.msg_buf);
2481 /* re-enable Admin queue interrupt cause */
2482 iavf_misc_irq_enable(adapter);
2486 * iavf_client_task - worker thread to perform client work
2487 * @work: pointer to work_struct containing our data
2489 * This task handles client interactions. Because client calls can be
2490 * reentrant, we can't handle them in the watchdog.
2492 static void iavf_client_task(struct work_struct *work)
2494 struct iavf_adapter *adapter =
2495 container_of(work, struct iavf_adapter, client_task.work);
2497 /* If we can't get the client bit, just give up. We'll be rescheduled
2501 if (!mutex_trylock(&adapter->client_lock))
2504 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2505 iavf_client_subtask(adapter);
2506 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2509 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2510 iavf_notify_client_l2_params(&adapter->vsi);
2511 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2514 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2515 iavf_notify_client_close(&adapter->vsi, false);
2516 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2519 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2520 iavf_notify_client_open(&adapter->vsi);
2521 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
2524 mutex_unlock(&adapter->client_lock);
2528 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
2529 * @adapter: board private structure
2531 * Free all transmit software resources
2533 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
2537 if (!adapter->tx_rings)
2540 for (i = 0; i < adapter->num_active_queues; i++)
2541 if (adapter->tx_rings[i].desc)
2542 iavf_free_tx_resources(&adapter->tx_rings[i]);
2546 * iavf_setup_all_tx_resources - allocate all queues Tx resources
2547 * @adapter: board private structure
2549 * If this function returns with an error, then it's possible one or
2550 * more of the rings is populated (while the rest are not). It is the
2551 * caller's duty to clean those orphaned rings.
2553 * Return 0 on success, negative on failure
2555 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
2559 for (i = 0; i < adapter->num_active_queues; i++) {
2560 adapter->tx_rings[i].count = adapter->tx_desc_count;
2561 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
2564 dev_err(&adapter->pdev->dev,
2565 "Allocation for Tx Queue %u failed\n", i);
2573 * iavf_setup_all_rx_resources - allocate all queues Rx resources
2574 * @adapter: board private structure
2576 * If this function returns with an error, then it's possible one or
2577 * more of the rings is populated (while the rest are not). It is the
2578 * caller's duty to clean those orphaned rings.
2580 * Return 0 on success, negative on failure
2582 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
2586 for (i = 0; i < adapter->num_active_queues; i++) {
2587 adapter->rx_rings[i].count = adapter->rx_desc_count;
2588 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
2591 dev_err(&adapter->pdev->dev,
2592 "Allocation for Rx Queue %u failed\n", i);
2599 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
2600 * @adapter: board private structure
2602 * Free all receive software resources
2604 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
2608 if (!adapter->rx_rings)
2611 for (i = 0; i < adapter->num_active_queues; i++)
2612 if (adapter->rx_rings[i].desc)
2613 iavf_free_rx_resources(&adapter->rx_rings[i]);
2617 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
2618 * @adapter: board private structure
2619 * @max_tx_rate: max Tx bw for a tc
2621 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
2624 int speed = 0, ret = 0;
2626 if (ADV_LINK_SUPPORT(adapter)) {
2627 if (adapter->link_speed_mbps < U32_MAX) {
2628 speed = adapter->link_speed_mbps;
2631 dev_err(&adapter->pdev->dev, "Unknown link speed\n");
2636 switch (adapter->link_speed) {
2637 case VIRTCHNL_LINK_SPEED_40GB:
2638 speed = SPEED_40000;
2640 case VIRTCHNL_LINK_SPEED_25GB:
2641 speed = SPEED_25000;
2643 case VIRTCHNL_LINK_SPEED_20GB:
2644 speed = SPEED_20000;
2646 case VIRTCHNL_LINK_SPEED_10GB:
2647 speed = SPEED_10000;
2649 case VIRTCHNL_LINK_SPEED_5GB:
2652 case VIRTCHNL_LINK_SPEED_2_5GB:
2655 case VIRTCHNL_LINK_SPEED_1GB:
2658 case VIRTCHNL_LINK_SPEED_100MB:
2666 if (max_tx_rate > speed) {
2667 dev_err(&adapter->pdev->dev,
2668 "Invalid tx rate specified\n");
2676 * iavf_validate_ch_config - validate queue mapping info
2677 * @adapter: board private structure
2678 * @mqprio_qopt: queue parameters
2680 * This function checks whether the queue channel config provided by
2681 * the user is valid. Returns 0 on a valid
2684 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
2685 struct tc_mqprio_qopt_offload *mqprio_qopt)
2687 u64 total_max_rate = 0;
2692 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
2693 mqprio_qopt->qopt.num_tc < 1)
2696 for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
2697 if (!mqprio_qopt->qopt.count[i] ||
2698 mqprio_qopt->qopt.offset[i] != num_qps)
2700 if (mqprio_qopt->min_rate[i]) {
2701 dev_err(&adapter->pdev->dev,
2702 "Invalid min tx rate (greater than 0) specified\n");
2705 /* convert to Mbps */
2706 tx_rate = div_u64(mqprio_qopt->max_rate[i],
2708 total_max_rate += tx_rate;
2709 num_qps += mqprio_qopt->qopt.count[i];
2711 if (num_qps > adapter->num_active_queues) {
2712 dev_err(&adapter->pdev->dev,
2713 "Cannot support requested number of queues\n");
2717 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
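/* Illustrative example (assumption, not from the original file): a
 * channel-mode mqprio request such as
 *
 *   tc qdisc add dev <vf> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 2@0 2@2 hw 1 mode channel \
 *      shaper bw_rlimit max_rate 100Mbit 200Mbit
 *
 * reaches this function with qopt.num_tc = 2, qopt.count[] = {2, 2} and
 * qopt.offset[] = {0, 2}. The loop above verifies that the count/offset
 * pairs tile the queues contiguously, converts each max_rate (a byte
 * rate as passed down by the stack) to Mbps, and sums the rates for the
 * bandwidth check.
 */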
2722 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
2723 * @adapter: board private structure
2725 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
2727 struct iavf_cloud_filter *cf, *cftmp;
2729 spin_lock_bh(&adapter->cloud_filter_list_lock);
2730 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2732 list_del(&cf->list);
2734 adapter->num_cloud_filters--;
2736 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2740 * __iavf_setup_tc - configure multiple traffic classes
2741 * @netdev: network interface device structure
2742 * @type_data: tc offload data
2744 * This function processes the config information provided by the
2745 * user to configure traffic classes/queue channels and packages the
2746 * information to request the PF to set up traffic classes.
2748 * Returns 0 on success.
2750 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
2752 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2753 struct iavf_adapter *adapter = netdev_priv(netdev);
2754 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2755 u8 num_tc = 0, total_qps = 0;
2756 int ret = 0, netdev_tc = 0;
2761 num_tc = mqprio_qopt->qopt.num_tc;
2762 mode = mqprio_qopt->mode;
2764 /* delete queue_channel */
2765 if (!mqprio_qopt->qopt.hw) {
2766 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
2767 /* reset the tc configuration */
2768 netdev_reset_tc(netdev);
2769 adapter->num_tc = 0;
2770 netif_tx_stop_all_queues(netdev);
2771 netif_tx_disable(netdev);
2772 iavf_del_all_cloud_filters(adapter);
2773 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
2780 /* add queue channel */
2781 if (mode == TC_MQPRIO_MODE_CHANNEL) {
2782 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
2783 dev_err(&adapter->pdev->dev, "ADq not supported\n");
2786 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
2787 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
2791 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
2794 /* Return if same TC config is requested */
2795 if (adapter->num_tc == num_tc)
2797 adapter->num_tc = num_tc;
2799 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2801 adapter->ch_config.ch_info[i].count =
2802 mqprio_qopt->qopt.count[i];
2803 adapter->ch_config.ch_info[i].offset =
2804 mqprio_qopt->qopt.offset[i];
2805 total_qps += mqprio_qopt->qopt.count[i];
2806 max_tx_rate = mqprio_qopt->max_rate[i];
2807 /* convert to Mbps */
2808 max_tx_rate = div_u64(max_tx_rate,
2810 adapter->ch_config.ch_info[i].max_tx_rate =
2813 adapter->ch_config.ch_info[i].count = 1;
2814 adapter->ch_config.ch_info[i].offset = 0;
2817 adapter->ch_config.total_qps = total_qps;
2818 netif_tx_stop_all_queues(netdev);
2819 netif_tx_disable(netdev);
2820 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
2821 netdev_reset_tc(netdev);
2822 /* Report the tc mapping up the stack */
2823 netdev_set_num_tc(adapter->netdev, num_tc);
2824 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2825 u16 qcount = mqprio_qopt->qopt.count[i];
2826 u16 qoffset = mqprio_qopt->qopt.offset[i];
2829 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
2838 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
2839 * @adapter: board private structure
2840 * @f: pointer to struct flow_cls_offload
2841 * @filter: pointer to cloud filter structure
2843 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
2844 struct flow_cls_offload *f,
2845 struct iavf_cloud_filter *filter)
2847 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2848 struct flow_dissector *dissector = rule->match.dissector;
2849 u16 n_proto_mask = 0;
2850 u16 n_proto_key = 0;
2855 struct virtchnl_filter *vf = &filter->f;
2857 if (dissector->used_keys &
2858 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2859 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2860 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2861 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2862 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2863 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2864 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2865 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2866 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
2867 dissector->used_keys);
2871 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2872 struct flow_match_enc_keyid match;
2874 flow_rule_match_enc_keyid(rule, &match);
2875 if (match.mask->keyid != 0)
2876 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
2879 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2880 struct flow_match_basic match;
2882 flow_rule_match_basic(rule, &match);
2883 n_proto_key = ntohs(match.key->n_proto);
2884 n_proto_mask = ntohs(match.mask->n_proto);
2886 if (n_proto_key == ETH_P_ALL) {
2890 n_proto = n_proto_key & n_proto_mask;
2891 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2893 if (n_proto == ETH_P_IPV6) {
2894 /* specify flow type as TCP IPv6 */
2895 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2898 if (match.key->ip_proto != IPPROTO_TCP) {
2899 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2904 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2905 struct flow_match_eth_addrs match;
2907 flow_rule_match_eth_addrs(rule, &match);
2909 /* use is_broadcast and is_zero to check for all 0xff or all 0 */
2910 if (!is_zero_ether_addr(match.mask->dst)) {
2911 if (is_broadcast_ether_addr(match.mask->dst)) {
2912 field_flags |= IAVF_CLOUD_FIELD_OMAC;
2914 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
2916 return IAVF_ERR_CONFIG;
2920 if (!is_zero_ether_addr(match.mask->src)) {
2921 if (is_broadcast_ether_addr(match.mask->src)) {
2922 field_flags |= IAVF_CLOUD_FIELD_IMAC;
2924 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
2926 return IAVF_ERR_CONFIG;
2930 if (!is_zero_ether_addr(match.key->dst))
2931 if (is_valid_ether_addr(match.key->dst) ||
2932 is_multicast_ether_addr(match.key->dst)) {
2933 /* set the mask if a valid dst_mac address */
2934 for (i = 0; i < ETH_ALEN; i++)
2935 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2936 ether_addr_copy(vf->data.tcp_spec.dst_mac,
2940 if (!is_zero_ether_addr(match.key->src))
2941 if (is_valid_ether_addr(match.key->src) ||
2942 is_multicast_ether_addr(match.key->src)) {
2943 /* set the mask if a valid src_mac address */
2944 for (i = 0; i < ETH_ALEN; i++)
2945 vf->mask.tcp_spec.src_mac[i] |= 0xff;
2946 ether_addr_copy(vf->data.tcp_spec.src_mac,
2951 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2952 struct flow_match_vlan match;
2954 flow_rule_match_vlan(rule, &match);
2955 if (match.mask->vlan_id) {
2956 if (match.mask->vlan_id == VLAN_VID_MASK) {
2957 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
2959 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
2960 match.mask->vlan_id);
2961 return IAVF_ERR_CONFIG;
2964 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
2965 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
2968 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2969 struct flow_match_control match;
2971 flow_rule_match_control(rule, &match);
2972 addr_type = match.key->addr_type;
2975 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2976 struct flow_match_ipv4_addrs match;
2978 flow_rule_match_ipv4_addrs(rule, &match);
2979 if (match.mask->dst) {
2980 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
2981 field_flags |= IAVF_CLOUD_FIELD_IIP;
2983 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2984 be32_to_cpu(match.mask->dst));
2985 return IAVF_ERR_CONFIG;
2989 if (match.mask->src) {
2990 if (match.mask->src == cpu_to_be32(0xffffffff)) {
2991 field_flags |= IAVF_CLOUD_FIELD_IIP;
2993 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2994 be32_to_cpu(match.mask->src));
2995 return IAVF_ERR_CONFIG;
2999 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3000 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3001 return IAVF_ERR_CONFIG;
3003 if (match.key->dst) {
3004 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3005 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3007 if (match.key->src) {
3008 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3009 vf->data.tcp_spec.src_ip[0] = match.key->src;
3013 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3014 struct flow_match_ipv6_addrs match;
3016 flow_rule_match_ipv6_addrs(rule, &match);
3018 /* validate mask, make sure it is not IPV6_ADDR_ANY */
3019 if (ipv6_addr_any(&match.mask->dst)) {
3020 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3022 return IAVF_ERR_CONFIG;
3025 /* src and dest IPv6 addresses should not be LOOPBACK
3026 * (0:0:0:0:0:0:0:1) which can be represented as ::1
3028 if (ipv6_addr_loopback(&match.key->dst) ||
3029 ipv6_addr_loopback(&match.key->src)) {
3030 dev_err(&adapter->pdev->dev,
3031 "ipv6 addr should not be loopback\n");
3032 return IAVF_ERR_CONFIG;
3034 if (!ipv6_addr_any(&match.mask->dst) ||
3035 !ipv6_addr_any(&match.mask->src))
3036 field_flags |= IAVF_CLOUD_FIELD_IIP;
3038 for (i = 0; i < 4; i++)
3039 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3040 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3041 sizeof(vf->data.tcp_spec.dst_ip));
3042 for (i = 0; i < 4; i++)
3043 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3044 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3045 sizeof(vf->data.tcp_spec.src_ip));
3047 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3048 struct flow_match_ports match;
3050 flow_rule_match_ports(rule, &match);
3051 if (match.mask->src) {
3052 if (match.mask->src == cpu_to_be16(0xffff)) {
3053 field_flags |= IAVF_CLOUD_FIELD_IIP;
3055 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3056 be16_to_cpu(match.mask->src));
3057 return IAVF_ERR_CONFIG;
3061 if (match.mask->dst) {
3062 if (match.mask->dst == cpu_to_be16(0xffff)) {
3063 field_flags |= IAVF_CLOUD_FIELD_IIP;
3065 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3066 be16_to_cpu(match.mask->dst));
3067 return IAVF_ERR_CONFIG;
3070 if (match.key->dst) {
3071 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3072 vf->data.tcp_spec.dst_port = match.key->dst;
3075 if (match.key->src) {
3076 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3077 vf->data.tcp_spec.src_port = match.key->src;
3080 vf->field_flags = field_flags;
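/* Illustrative example (assumption, not from the original file): a
 * flower rule such as
 *
 *   tc filter add dev <vf> parent ffff: protocol ip flower \
 *      ip_proto tcp dst_ip 192.168.1.10 dst_port 80 skip_sw hw_tc 1
 *
 * would be parsed above into a VIRTCHNL_TCP_V4_FLOW filter with
 * data.tcp_spec.dst_ip[0] and dst_port populated, all-ones masks for
 * those fields, and IAVF_CLOUD_FIELD_IIP in field_flags;
 * iavf_handle_tclass() below then marks it VIRTCHNL_ACTION_TC_REDIRECT
 * with action_meta = 1.
 */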
3086 * iavf_handle_tclass - Forward to a traffic class on the device
3087 * @adapter: board private structure
3088 * @tc: traffic class index on the device
3089 * @filter: pointer to cloud filter structure
3091 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3092 struct iavf_cloud_filter *filter)
3096 if (tc < adapter->num_tc) {
3097 if (!filter->f.data.tcp_spec.dst_port) {
3098 dev_err(&adapter->pdev->dev,
3099 "Specify destination port to redirect to traffic class other than TC0\n");
3103 /* redirect to a traffic class on the same device */
3104 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3105 filter->f.action_meta = tc;
3110 * iavf_configure_clsflower - Add tc flower filters
3111 * @adapter: board private structure
3112 * @cls_flower: Pointer to struct flow_cls_offload
3114 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3115 struct flow_cls_offload *cls_flower)
3117 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
3118 struct iavf_cloud_filter *filter = NULL;
3119 int err = -EINVAL, count = 50;
3122 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3126 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
3130 while (!mutex_trylock(&adapter->crit_lock)) {
3138 filter->cookie = cls_flower->cookie;
3140 /* set the mask to all zeroes to begin with */
3141 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
3142 /* start out with flow type and eth type IPv4 */
3143 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
3144 err = iavf_parse_cls_flower(adapter, cls_flower, filter);
3148 err = iavf_handle_tclass(adapter, tc, filter);
3152 /* add filter to the list */
3153 spin_lock_bh(&adapter->cloud_filter_list_lock);
3154 list_add_tail(&filter->list, &adapter->cloud_filter_list);
3155 adapter->num_cloud_filters++;
3157 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3158 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3163 mutex_unlock(&adapter->crit_lock);
3167 /* iavf_find_cf - Find the cloud filter in the list
3168 * @adapter: board private structure
3169 * @cookie: filter specific cookie
3171 * Returns ptr to the filter object or NULL. Must be called while holding the
3172 * cloud_filter_list_lock.
3174 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3175 unsigned long *cookie)
3177 struct iavf_cloud_filter *filter = NULL;
3182 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3183 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3190 * iavf_delete_clsflower - Remove tc flower filters
3191 * @adapter: board private structure
3192 * @cls_flower: Pointer to struct flow_cls_offload
3194 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
3195 struct flow_cls_offload *cls_flower)
3197 struct iavf_cloud_filter *filter = NULL;
3200 spin_lock_bh(&adapter->cloud_filter_list_lock);
3201 filter = iavf_find_cf(adapter, &cls_flower->cookie);
3204 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
3208 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3214 * iavf_setup_tc_cls_flower - flower classifier offloads
3215 * @adapter: board private structure
3216 * @cls_flower: pointer to flow_cls_offload struct with flow info
3218 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
3219 struct flow_cls_offload *cls_flower)
3221 switch (cls_flower->command) {
3222 case FLOW_CLS_REPLACE:
3223 return iavf_configure_clsflower(adapter, cls_flower);
3224 case FLOW_CLS_DESTROY:
3225 return iavf_delete_clsflower(adapter, cls_flower);
3226 case FLOW_CLS_STATS:
3234 * iavf_setup_tc_block_cb - block callback for tc
3235 * @type: type of offload
3236 * @type_data: offload data
3239 * This function is the block callback for traffic classes
3241 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3244 struct iavf_adapter *adapter = cb_priv;
3246 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
3250 case TC_SETUP_CLSFLOWER:
3251 return iavf_setup_tc_cls_flower(cb_priv, type_data);
3257 static LIST_HEAD(iavf_block_cb_list);
3260 * iavf_setup_tc - configure multiple traffic classes
3261 * @netdev: network interface device structure
3262 * @type: type of offload
3263 * @type_data: tc offload data
3265 * This function is the callback to ndo_setup_tc in the
3268 * Returns 0 on success
3270 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
3273 struct iavf_adapter *adapter = netdev_priv(netdev);
3276 case TC_SETUP_QDISC_MQPRIO:
3277 return __iavf_setup_tc(netdev, type_data);
3278 case TC_SETUP_BLOCK:
3279 return flow_block_cb_setup_simple(type_data,
3280 &iavf_block_cb_list,
3281 iavf_setup_tc_block_cb,
3282 adapter, adapter, true);
3289 * iavf_open - Called when a network interface is made active
3290 * @netdev: network interface device structure
3292 * Returns 0 on success, negative value on failure
3294 * The open entry point is called when a network interface is made
3295 * active by the system (IFF_UP). At this point all resources needed
3296 * for transmit and receive operations are allocated, the interrupt
3297 * handler is registered with the OS, the watchdog is started,
3298 * and the stack is notified that the interface is ready.
3300 static int iavf_open(struct net_device *netdev)
3302 struct iavf_adapter *adapter = netdev_priv(netdev);
3305 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
3306 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
3310 while (!mutex_trylock(&adapter->crit_lock))
3311 usleep_range(500, 1000);
3313 if (adapter->state != __IAVF_DOWN) {
3318 if (adapter->state == __IAVF_RUNNING &&
3319 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
3320 dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
3325 /* allocate transmit descriptors */
3326 err = iavf_setup_all_tx_resources(adapter);
3330 /* allocate receive descriptors */
3331 err = iavf_setup_all_rx_resources(adapter);
3335 /* clear any pending interrupts, may auto mask */
3336 err = iavf_request_traffic_irqs(adapter, netdev->name);
3340 spin_lock_bh(&adapter->mac_vlan_list_lock);
3342 iavf_add_filter(adapter, adapter->hw.mac.addr);
3344 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3346 /* Restore VLAN filters that were removed with IFF_DOWN */
3347 iavf_restore_filters(adapter);
3349 iavf_configure(adapter);
3351 iavf_up_complete(adapter);
3353 iavf_irq_enable(adapter, true);
3355 mutex_unlock(&adapter->crit_lock);
3361 iavf_free_traffic_irqs(adapter);
3363 iavf_free_all_rx_resources(adapter);
3365 iavf_free_all_tx_resources(adapter);
3367 mutex_unlock(&adapter->crit_lock);
3373 * iavf_close - Disables a network interface
3374 * @netdev: network interface device structure
3376 * Returns 0; this is not allowed to fail
3378 * The close entry point is called when an interface is de-activated
3379 * by the OS. The hardware is still under the driver's control, but
3380 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
3381 * are freed, along with all transmit and receive resources.
3383 static int iavf_close(struct net_device *netdev)
3385 struct iavf_adapter *adapter = netdev_priv(netdev);
3388 if (adapter->state <= __IAVF_DOWN_PENDING)
3391 while (!mutex_trylock(&adapter->crit_lock))
3392 usleep_range(500, 1000);
3394 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3395 if (CLIENT_ENABLED(adapter))
3396 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3399 iavf_change_state(adapter, __IAVF_DOWN_PENDING);
3400 iavf_free_traffic_irqs(adapter);
3402 mutex_unlock(&adapter->crit_lock);
3404 /* We explicitly don't free resources here because the hardware is
3405 * still active and can DMA into memory. Resources are cleared in
3406 * iavf_virtchnl_completion() after we get confirmation from the PF
3407 * driver that the rings have been stopped.
3409 * Also, we wait for state to transition to __IAVF_DOWN before
3410 * returning. State change occurs in iavf_virtchnl_completion() after
3411 * VF resources are released (which occurs after PF driver processes and
3412 * responds to admin queue commands).
3415 status = wait_event_timeout(adapter->down_waitqueue,
3416 adapter->state == __IAVF_DOWN,
3417 msecs_to_jiffies(500));
3419 netdev_warn(netdev, "Device resources not yet released\n");
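/* Illustrative note (assumption, not text from the original file): the
 * sleeper above pairs with wake_up(&adapter->down_waitqueue) calls made
 * once the state machine reaches __IAVF_DOWN, e.g. at the end of a
 * failed reset in iavf_disable_vf() and in the virtchnl completion path
 * after the PF confirms the queues have been stopped.
 */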
3424 * iavf_change_mtu - Change the Maximum Transfer Unit
3425 * @netdev: network interface device structure
3426 * @new_mtu: new value for maximum frame size
3428 * Returns 0 on success, negative on failure
3430 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
3432 struct iavf_adapter *adapter = netdev_priv(netdev);
3434 netdev->mtu = new_mtu;
3435 if (CLIENT_ENABLED(adapter)) {
3436 iavf_notify_client_l2_params(&adapter->vsi);
3437 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3439 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3440 queue_work(iavf_wq, &adapter->reset_task);
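/* Illustrative example (assumption, not from the original file):
 * "ip link set dev <vf> mtu 9000" lands here via ndo_change_mtu.
 * Because the PF owns the queue configuration, the new MTU only takes
 * effect after the full reset scheduled above renegotiates resources
 * and reconfigures the rings.
 */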
3446 * iavf_set_features - set the netdev feature flags
3447 * @netdev: ptr to the netdev being adjusted
3448 * @features: the feature set that the stack is suggesting
3449 * Note: expects to be called while under rtnl_lock()
3451 static int iavf_set_features(struct net_device *netdev,
3452 netdev_features_t features)
3454 struct iavf_adapter *adapter = netdev_priv(netdev);
3456 /* Don't allow enabling VLAN features when adapter is not capable
3457 * of VLAN offload/filtering
3459 if (!VLAN_ALLOWED(adapter)) {
3460 netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
3461 NETIF_F_HW_VLAN_CTAG_TX |
3462 NETIF_F_HW_VLAN_CTAG_FILTER);
3463 if (features & (NETIF_F_HW_VLAN_CTAG_RX |
3464 NETIF_F_HW_VLAN_CTAG_TX |
3465 NETIF_F_HW_VLAN_CTAG_FILTER))
3467 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3468 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3469 adapter->aq_required |=
3470 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3472 adapter->aq_required |=
3473 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
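/* Illustrative example (assumption, not from the original file):
 * toggling receive VLAN stripping with "ethtool -K <vf> rxvlan off"
 * flips NETIF_F_HW_VLAN_CTAG_RX and lands here, queuing
 * IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING so the watchdog sends the
 * corresponding virtchnl request to the PF on its next pass.
 */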
3480 * iavf_features_check - Validate encapsulated packet conforms to limits
3482 * @dev: This physical port's netdev
3483 * @features: Offload features that the stack believes apply
3485 static netdev_features_t iavf_features_check(struct sk_buff *skb,
3486 struct net_device *dev,
3487 netdev_features_t features)
3491 /* No point in doing any of this if neither checksum nor GSO are
3492 * being requested for this frame. We can rule out both by just
3493 * checking for CHECKSUM_PARTIAL
3495 if (skb->ip_summed != CHECKSUM_PARTIAL)
3498 /* We cannot support GSO if the MSS is going to be less than
3499 * 64 bytes. If it is, then we need to drop support for GSO.
3501 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3502 features &= ~NETIF_F_GSO_MASK;
3504 /* MACLEN can support at most 63 words */
3505 len = skb_network_header(skb) - skb->data;
3506 if (len & ~(63 * 2))
3509 /* IPLEN and EIPLEN can support at most 127 dwords */
3510 len = skb_transport_header(skb) - skb_network_header(skb);
3511 if (len & ~(127 * 4))
3514 if (skb->encapsulation) {
3515 /* L4TUNLEN can support 127 words */
3516 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3517 if (len & ~(127 * 2))
3520 /* IPLEN can support at most 127 dwords */
3521 len = skb_inner_transport_header(skb) -
3522 skb_inner_network_header(skb);
3523 if (len & ~(127 * 4))
3527 /* No need to validate L4LEN as TCP is the only protocol with a
3528 * flexible value and we support all possible values supported
3529 * by TCP, which is at most 15 dwords
3534 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
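/* Editor's sketch (an assumption, not part of the original driver): the
 * header-length checks above all rely on the same trick. When the limit
 * is (2^k - 1) units and the unit size is a power of two, the valid
 * lengths are exactly those with no bits set outside limit * unit, so a
 * single mask test rejects both oversized and misaligned headers:
 */
static inline bool iavf_hdr_len_ok(unsigned int len, unsigned int limit,
				   unsigned int unit)
{
	/* e.g. a 14-byte L2 header: 14 & ~(63 * 2) == 0, accepted;
	 * a 130-byte one: 130 & ~(63 * 2) != 0, rejected; an odd length
	 * trips the low bit of the complement and is rejected as well.
	 */
	return !(len & ~(limit * unit));
}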
3538 * iavf_fix_features - fix up the netdev feature bits
3539 * @netdev: our net device
3540 * @features: desired feature bits
3542 * Returns fixed-up features bits
3544 static netdev_features_t iavf_fix_features(struct net_device *netdev,
3545 netdev_features_t features)
3547 struct iavf_adapter *adapter = netdev_priv(netdev);
3549 if (adapter->vf_res &&
3550 !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3551 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3552 NETIF_F_HW_VLAN_CTAG_RX |
3553 NETIF_F_HW_VLAN_CTAG_FILTER);
3558 static const struct net_device_ops iavf_netdev_ops = {
3559 .ndo_open = iavf_open,
3560 .ndo_stop = iavf_close,
3561 .ndo_start_xmit = iavf_xmit_frame,
3562 .ndo_set_rx_mode = iavf_set_rx_mode,
3563 .ndo_validate_addr = eth_validate_addr,
3564 .ndo_set_mac_address = iavf_set_mac,
3565 .ndo_change_mtu = iavf_change_mtu,
3566 .ndo_tx_timeout = iavf_tx_timeout,
3567 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
3568 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
3569 .ndo_features_check = iavf_features_check,
3570 .ndo_fix_features = iavf_fix_features,
3571 .ndo_set_features = iavf_set_features,
3572 .ndo_setup_tc = iavf_setup_tc,
3576 * iavf_check_reset_complete - check that VF reset is complete
3577 * @hw: pointer to hw struct
3579 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
3581 static int iavf_check_reset_complete(struct iavf_hw *hw)
3586 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3587 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
3588 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3589 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3590 (rstat == VIRTCHNL_VFR_COMPLETED))
3592 usleep_range(10, 20);
3598 * iavf_process_config - Process the config information we got from the PF
3599 * @adapter: board private structure
3601 * Verify that we have a valid config struct, and set up our netdev features
3602 * and our VSI struct.
3604 int iavf_process_config(struct iavf_adapter *adapter)
3606 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3607 int i, num_req_queues = adapter->num_req_queues;
3608 struct net_device *netdev = adapter->netdev;
3609 struct iavf_vsi *vsi = &adapter->vsi;
3610 netdev_features_t hw_enc_features;
3611 netdev_features_t hw_features;
3613 /* got VF config message back from PF, now we can parse it */
3614 for (i = 0; i < vfres->num_vsis; i++) {
3615 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
3616 adapter->vsi_res = &vfres->vsi_res[i];
3618 if (!adapter->vsi_res) {
3619 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3623 if (num_req_queues &&
3624 num_req_queues > adapter->vsi_res->num_queue_pairs) {
3625 /* Problem. The PF gave us fewer queues than what we had
3626 * negotiated in our request. Need a reset to see if we can
3627 * get back to a working state.
3629 dev_err(&adapter->pdev->dev,
3630 "Requested %d queues, but PF only gave us %d.\n",
3632 adapter->vsi_res->num_queue_pairs);
3633 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
3634 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
3635 iavf_schedule_reset(adapter);
3638 adapter->num_req_queues = 0;
3640 hw_enc_features = NETIF_F_SG |
3644 NETIF_F_SOFT_FEATURES |
3653 /* advertise to stack only if offloads for encapsulated packets are
3656 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
3657 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
3659 NETIF_F_GSO_GRE_CSUM |
3660 NETIF_F_GSO_IPXIP4 |
3661 NETIF_F_GSO_IPXIP6 |
3662 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3663 NETIF_F_GSO_PARTIAL |
3666 if (!(vfres->vf_cap_flags &
3667 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
3668 netdev->gso_partial_features |=
3669 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3671 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3672 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3673 netdev->hw_enc_features |= hw_enc_features;
3675 /* record features VLANs can make use of */
3676 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
3678 /* Write features and hw_features separately to avoid polluting
3679 * with, or dropping, features that are set when we registered.
3681 hw_features = hw_enc_features;
3683 /* Enable VLAN features if supported */
3684 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3685 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3686 NETIF_F_HW_VLAN_CTAG_RX);
3687 /* Enable cloud filter if ADQ is supported */
3688 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3689 hw_features |= NETIF_F_HW_TC;
3690 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
3691 hw_features |= NETIF_F_GSO_UDP_L4;
3693 netdev->hw_features |= hw_features;
3695 netdev->features |= hw_features;
3697 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3698 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3700 netdev->priv_flags |= IFF_UNICAST_FLT;
3702 /* Do not turn on offloads when they are requested to be turned off.
3703 * TSO needs minimum 576 bytes to work correctly.
3705 if (netdev->wanted_features) {
3706 if (!(netdev->wanted_features & NETIF_F_TSO) ||
3708 netdev->features &= ~NETIF_F_TSO;
3709 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3711 netdev->features &= ~NETIF_F_TSO6;
3712 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3713 netdev->features &= ~NETIF_F_TSO_ECN;
3714 if (!(netdev->wanted_features & NETIF_F_GRO))
3715 netdev->features &= ~NETIF_F_GRO;
3716 if (!(netdev->wanted_features & NETIF_F_GSO))
3717 netdev->features &= ~NETIF_F_GSO;
3720 adapter->vsi.id = adapter->vsi_res->vsi_id;
3722 adapter->vsi.back = adapter;
3723 adapter->vsi.base_vector = 1;
3724 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
3725 vsi->netdev = adapter->netdev;
3726 vsi->qs_handle = adapter->vsi_res->qset_handle;
3727 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
3728 adapter->rss_key_size = vfres->rss_key_size;
3729 adapter->rss_lut_size = vfres->rss_lut_size;
3731 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
3732 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
3739 * iavf_shutdown - Shutdown the device in preparation for a reboot
3740 * @pdev: pci device structure
3742 static void iavf_shutdown(struct pci_dev *pdev)
3744 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
3745 struct net_device *netdev = adapter->netdev;
3747 netif_device_detach(netdev);
3749 if (netif_running(netdev))
3752 if (iavf_lock_timeout(&adapter->crit_lock, 5000))
3753 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
3754 /* Prevent the watchdog from running. */
3755 iavf_change_state(adapter, __IAVF_REMOVE);
3756 adapter->aq_required = 0;
3757 mutex_unlock(&adapter->crit_lock);
3760 pci_save_state(pdev);
3763 pci_disable_device(pdev);
3767 * iavf_probe - Device Initialization Routine
3768 * @pdev: PCI device information struct
3769 * @ent: entry in iavf_pci_tbl
3771 * Returns 0 on success, negative on failure
3773 * iavf_probe initializes an adapter identified by a pci_dev structure.
3774 * The OS initialization, configuring of the adapter private structure,
3775 * and a hardware reset occur.
3777 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3779 struct net_device *netdev;
3780 struct iavf_adapter *adapter = NULL;
3781 struct iavf_hw *hw = NULL;
3784 err = pci_enable_device(pdev);
3788 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3790 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3793 "DMA configuration failed: 0x%x\n", err);
3798 err = pci_request_regions(pdev, iavf_driver_name);
3801 "pci_request_regions failed 0x%x\n", err);
3805 pci_enable_pcie_error_reporting(pdev);
3807 pci_set_master(pdev);
3809 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
3810 IAVF_MAX_REQ_QUEUES);
3813 goto err_alloc_etherdev;
3816 SET_NETDEV_DEV(netdev, &pdev->dev);
3818 pci_set_drvdata(pdev, netdev);
3819 adapter = netdev_priv(netdev);
3821 adapter->netdev = netdev;
3822 adapter->pdev = pdev;
3827 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3828 iavf_change_state(adapter, __IAVF_STARTUP);
3830 /* Call save state here because it relies on the adapter struct. */
3831 pci_save_state(pdev);
3833 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3834 pci_resource_len(pdev, 0));
3839 hw->vendor_id = pdev->vendor;
3840 hw->device_id = pdev->device;
3841 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3842 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3843 hw->subsystem_device_id = pdev->subsystem_device;
3844 hw->bus.device = PCI_SLOT(pdev->devfn);
3845 hw->bus.func = PCI_FUNC(pdev->devfn);
3846 hw->bus.bus_id = pdev->bus->number;
3848 /* set up the locks for the AQ, do this only once in probe
3849 * and destroy them only once in remove
3851 mutex_init(&adapter->crit_lock);
3852 mutex_init(&adapter->client_lock);
3853 mutex_init(&adapter->remove_lock);
3854 mutex_init(&hw->aq.asq_mutex);
3855 mutex_init(&hw->aq.arq_mutex);
3857 spin_lock_init(&adapter->mac_vlan_list_lock);
3858 spin_lock_init(&adapter->cloud_filter_list_lock);
3859 spin_lock_init(&adapter->fdir_fltr_lock);
3860 spin_lock_init(&adapter->adv_rss_lock);
3862 INIT_LIST_HEAD(&adapter->mac_filter_list);
3863 INIT_LIST_HEAD(&adapter->vlan_filter_list);
3864 INIT_LIST_HEAD(&adapter->cloud_filter_list);
3865 INIT_LIST_HEAD(&adapter->fdir_list_head);
3866 INIT_LIST_HEAD(&adapter->adv_rss_list_head);
3868 INIT_WORK(&adapter->reset_task, iavf_reset_task);
3869 INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
3870 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
3871 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
3872 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
3873 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
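/* Illustrative note (assumption, not text from the original file): the
 * initial watchdog delay above staggers VF bring-up by PCI function
 * number: devfn & 0x07 yields 0-7, so sibling VFs start their init
 * state machines 0-35 ms apart instead of hitting the PF all at once.
 */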
3875 /* Set up the wait queue for indicating transition to down status */
3876 init_waitqueue_head(&adapter->down_waitqueue);
3881 free_netdev(netdev);
3883 pci_disable_pcie_error_reporting(pdev);
3884 pci_release_regions(pdev);
3887 pci_disable_device(pdev);
3892 * iavf_suspend - Power management suspend routine
3893 * @dev_d: device info pointer
3895 * Called when the system (VM) is entering sleep/suspend.
3897 static int __maybe_unused iavf_suspend(struct device *dev_d)
3899 struct net_device *netdev = dev_get_drvdata(dev_d);
3900 struct iavf_adapter *adapter = netdev_priv(netdev);
3902 netif_device_detach(netdev);
3904 while (!mutex_trylock(&adapter->crit_lock))
3905 usleep_range(500, 1000);
3907 if (netif_running(netdev)) {
3912 iavf_free_misc_irq(adapter);
3913 iavf_reset_interrupt_capability(adapter);
3915 mutex_unlock(&adapter->crit_lock);
3921 * iavf_resume - Power management resume routine
3922 * @dev_d: device info pointer
3924 * Called when the system (VM) is resumed from sleep/suspend.
3926 static int __maybe_unused iavf_resume(struct device *dev_d)
3928 struct pci_dev *pdev = to_pci_dev(dev_d);
3929 struct iavf_adapter *adapter;
3932 adapter = iavf_pdev_to_adapter(pdev);
3934 pci_set_master(pdev);
3937 err = iavf_set_interrupt_capability(adapter);
3940 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3943 err = iavf_request_misc_irq(adapter);
3946 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3950 queue_work(iavf_wq, &adapter->reset_task);
3952 netif_device_attach(adapter->netdev);
3958 * iavf_remove - Device Removal Routine
3959 * @pdev: PCI device information struct
3961 * iavf_remove is called by the PCI subsystem to alert the driver
3962 * that it should release a PCI device. This could be caused by a
3963 * Hot-Plug event, or because the driver is going to be removed from
3966 static void iavf_remove(struct pci_dev *pdev)
3968 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
3969 enum iavf_state_t prev_state = adapter->last_state;
3970 struct net_device *netdev = adapter->netdev;
3971 struct iavf_fdir_fltr *fdir, *fdirtmp;
3972 struct iavf_vlan_filter *vlf, *vlftmp;
3973 struct iavf_adv_rss *rss, *rsstmp;
3974 struct iavf_mac_filter *f, *ftmp;
3975 struct iavf_cloud_filter *cf, *cftmp;
3976 struct iavf_hw *hw = &adapter->hw;
3978 /* Indicate we are in remove so the reset task does not run */
3979 mutex_lock(&adapter->remove_lock);
3980 cancel_work_sync(&adapter->reset_task);
3981 cancel_delayed_work_sync(&adapter->watchdog_task);
3982 cancel_delayed_work_sync(&adapter->client_task);
3983 if (adapter->netdev_registered) {
3984 unregister_netdev(netdev);
3985 adapter->netdev_registered = false;
3987 if (CLIENT_ALLOWED(adapter)) {
3988 err = iavf_lan_del_device(adapter);
3990 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3994 iavf_request_reset(adapter);
3996 /* If the FW isn't responding, kick it once, but only once. */
3997 if (!iavf_asq_done(hw)) {
3998 iavf_request_reset(adapter);
4001 if (iavf_lock_timeout(&adapter->crit_lock, 5000))
4002 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
4004 /* Shut down all the garbage mashers on the detention level */
4005 iavf_change_state(adapter, __IAVF_REMOVE);
4006 adapter->aq_required = 0;
4007 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
4009 iavf_free_all_tx_resources(adapter);
4010 iavf_free_all_rx_resources(adapter);
4011 iavf_misc_irq_disable(adapter);
4012 iavf_free_misc_irq(adapter);
4014 /* In case we enter iavf_remove from erroneous state, free traffic irqs
4015 * here so as to not cause a kernel crash when calling
4016 * iavf_reset_interrupt_capability.
4018 if ((adapter->last_state == __IAVF_RESETTING &&
4019 prev_state != __IAVF_DOWN) ||
4020 (adapter->last_state == __IAVF_RUNNING &&
4021 !(netdev->flags & IFF_UP)))
4022 iavf_free_traffic_irqs(adapter);
4024 iavf_reset_interrupt_capability(adapter);
4025 iavf_free_q_vectors(adapter);
4027 cancel_delayed_work_sync(&adapter->watchdog_task);
4029 cancel_work_sync(&adapter->adminq_task);
4031 iavf_free_rss(adapter);
4033 if (hw->aq.asq.count)
4034 iavf_shutdown_adminq(hw);
4036 /* destroy the locks only once, here */
4037 mutex_destroy(&hw->aq.arq_mutex);
4038 mutex_destroy(&hw->aq.asq_mutex);
4039 mutex_destroy(&adapter->client_lock);
4040 mutex_unlock(&adapter->crit_lock);
4041 mutex_destroy(&adapter->crit_lock);
4042 mutex_unlock(&adapter->remove_lock);
4043 mutex_destroy(&adapter->remove_lock);
4045 iounmap(hw->hw_addr);
4046 pci_release_regions(pdev);
4047 iavf_free_queues(adapter);
4048 kfree(adapter->vf_res);
4049 spin_lock_bh(&adapter->mac_vlan_list_lock);
4050 /* If we got removed before an up/down sequence, we've got a filter
4051 * hanging out there that we need to get rid of.
4053 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
4057 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
4059 list_del(&vlf->list);
4063 spin_unlock_bh(&adapter->mac_vlan_list_lock);
4065 spin_lock_bh(&adapter->cloud_filter_list_lock);
4066 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
4067 list_del(&cf->list);
4070 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4072 spin_lock_bh(&adapter->fdir_fltr_lock);
4073 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
4074 list_del(&fdir->list);
4077 spin_unlock_bh(&adapter->fdir_fltr_lock);
4079 spin_lock_bh(&adapter->adv_rss_lock);
4080 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
4082 list_del(&rss->list);
4085 spin_unlock_bh(&adapter->adv_rss_lock);
4087 free_netdev(netdev);
4089 pci_disable_pcie_error_reporting(pdev);
4091 pci_disable_device(pdev);
4094 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
4096 static struct pci_driver iavf_driver = {
4097 .name = iavf_driver_name,
4098 .id_table = iavf_pci_tbl,
4099 .probe = iavf_probe,
4100 .remove = iavf_remove,
4101 .driver.pm = &iavf_pm_ops,
4102 .shutdown = iavf_shutdown,
4106 * iavf_init_module - Driver Registration Routine
4108 * iavf_init_module is the first routine called when the driver is
4109 * loaded. All it does is register with the PCI subsystem.
4111 static int __init iavf_init_module(void)
4115 pr_info("iavf: %s\n", iavf_driver_string);
4117 pr_info("%s\n", iavf_copyright);
4119 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
4122 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
4125 ret = pci_register_driver(&iavf_driver);
4129 module_init(iavf_init_module);
4132 * iavf_exit_module - Driver Exit Cleanup Routine
4134 * iavf_exit_module is called just before the driver is removed
4137 static void __exit iavf_exit_module(void)
4139 pci_unregister_driver(&iavf_driver);
4140 destroy_workqueue(iavf_wq);
4143 module_exit(iavf_exit_module);