// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock_drv.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *i40e_wq;

static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
				  struct net_device *netdev, int delta)
{
	struct netdev_hw_addr *ha;

	if (!f || !netdev)
		return;

	netdev_for_each_mc_addr(ha, netdev) {
		if (ether_addr_equal(ha->addr, f->macaddr)) {
			ha->refcount += delta;
			if (ha->refcount <= 0)
				ha->refcount = 1;
			break;
		}
	}
}

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
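
/* Example with illustrative values: a request of size = 1000 and
 * alignment = 4096 is rounded up by ALIGN() so that mem->size = 4096,
 * and dma_alloc_coherent() hands back a buffer aligned at least to the
 * smallest page order covering that size, which satisfies the request.
 */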

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* Allocate last queue in the pile for FDIR VSI queue
	 * so it doesn't fragment the qp_pile
	 */
	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
			dev_err(&pf->pdev->dev,
				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
				pile->num_entries - 1);
			return -ENOMEM;
		}
		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
		return pile->num_entries - 1;
	}

	i = 0;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
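
/* Example with illustrative values: given a 64-entry qp_pile in which
 * entries 0-15 are already valid, i40e_get_lump(pf, pile, 4, id) skips
 * the used block, marks entries 16-19 with (id | I40E_PILE_VALID_BIT)
 * and returns 16; a later i40e_put_lump(pile, 16, id) clears those four
 * entries again and returns 4.
 */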

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	u16 i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the VSI with the given id
 * @pf: the PF structure to search
 * @id: id of the VSI we are searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	      test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the
 * full reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	/* re-read under the seqcount until no writer raced with us */
	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes   += bytes;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Populates the supplied @stats structure. Per-ring counters are read
 * directly; the remaining statistics are updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start   = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes   = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_compute_pci_to_hw_id - compute index from PCI function.
 * @vsi: ptr to the VSI to read from.
 * @hw: ptr to the hardware info.
 **/
static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
{
	int pf_count = i40e_get_pf_count(hw);

	if (vsi->type == I40E_VSI_SRIOV)
		return (hw->port * BIT(7)) / pf_count + vsi->vf_id;

	return hw->port + BIT(7);
}
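
/* Example with illustrative values: on a device with 4 PFs, a VF with
 * vf_id 3 behind port 1 maps to (1 * 128) / 4 + 3 = 35, while the PF
 * port itself maps past the VF range to 1 + 128 = 129.
 */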

/**
 * i40e_stat_update64 - read and update a 64 bit stat from the chip.
 * @hw: ptr to the hardware info.
 * @hireg: the high 32 bit reg to read.
 * @loreg: the low 32 bit reg to read.
 * @offset_loaded: has the initial offset been loaded yet.
 * @offset: ptr to current offset value.
 * @stat: ptr to the stat.
 *
 * Since the device stats are not reset at PFReset, they will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/
static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	new_data = rd64(hw, loreg);

	if (!offset_loaded || new_data < *offset)
		*offset = new_data;
	*stat = new_data - *offset;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
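
/* Example of the 48-bit roll-over handling above, with illustrative
 * values: if *offset was captured as 0xFFFFFFFFFF00 and the counter has
 * since wrapped to 0x100, new_data < *offset, so the stat is computed as
 * (0x100 + BIT_ULL(48)) - 0xFFFFFFFFFF00 = 0x200 after masking to 48 bits.
 */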

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}

/**
 * i40e_stats_update_rx_discards - update rx_discards.
 * @vsi: ptr to the VSI to be updated.
 * @hw: ptr to the hardware info.
 * @stat_idx: VSI's stat_counter_idx.
 * @offset_loaded: whether the initial offset has been loaded yet.
 * @stat_offset: ptr to stat_offset to store first read of specific register.
 * @stat: ptr to VSI's stat to be updated.
 **/
static void
i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
			      int stat_idx, bool offset_loaded,
			      struct i40e_eth_stats *stat_offset,
			      struct i40e_eth_stats *stat)
{
	u64 rx_rdpc, rx_rxerr;

	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
			   &stat_offset->rx_discards, &rx_rdpc);
	i40e_stat_update64(hw,
			   I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
			   I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
			   offset_loaded, &stat_offset->rx_discards_other,
			   &rx_rxerr);

	stat->rx_discards = rx_rdpc + rx_rxerr;
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);

	i40e_stats_update_rx_discards(vsi, hw, stat_idx,
				      vsi->stat_offsets_loaded, oes, es);

	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u64 tx_restart, tx_busy;
	struct i40e_ring *p;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_stopped = 0;
	rx_page = 0;
	rx_buf = 0;
	rx_reuse = 0;
	rx_alloc = 0;
	rx_waive = 0;
	rx_busy = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_stopped += p->tx_stats.tx_stopped;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
		rx_reuse += p->rx_stats.page_reuse_count;
		rx_alloc += p->rx_stats.page_alloc_count;
		rx_waive += p->rx_stats.page_waive_count;
		rx_busy += p->rx_stats.page_busy_count;

		if (i40e_enabled_xdp_vsi(vsi)) {
			/* locate XDP ring */
			p = READ_ONCE(vsi->xdp_rings[q]);
			if (!p)
				continue;

			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				packets = p->stats.packets;
				bytes = p->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
			tx_b += bytes;
			tx_p += packets;
			tx_restart += p->tx_stats.restart_queue;
			tx_busy += p->tx_stats.tx_busy;
			tx_linearize += p->tx_stats.tx_linearize;
			tx_force_wb += p->tx_stats.tx_force_wb;
		}
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_stopped = tx_stopped;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;
	vsi->rx_page_reuse = rx_reuse;
	vsi->rx_page_alloc = rx_alloc;
	vsi->rx_page_waive = rx_waive;
	vsi->rx_page_busy = rx_busy;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_count_filters - counts VSI MAC filters
 * @vsi: the VSI to be searched
 *
 * Returns count of MAC filters
 **/
int i40e_count_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;
	int cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		++cnt;

	return cnt;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic: when we have an
 * active VLAN we will receive only untagged traffic and traffic matching
 * active VLANs; if we have no active VLANs then we will operate in non-VLAN
 * mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
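
/* Example of the correction above, with illustrative values: when VLAN 10
 * is the first VLAN added, an existing MAC filter with VLAN=-1
 * (I40E_VLAN_ANY) is moved to the delete list and re-added as VLAN=0, so
 * it then matches only untagged frames; when the last VLAN is removed,
 * the reverse swap (VLAN=0 back to VLAN=-1) restores matching of all
 * traffic, tagged or untagged.
 */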

/**
 * i40e_get_vf_new_vlan - Get new vlan id on a vf
 * @vsi: the vsi to configure
 * @new_mac: new mac filter to be added
 * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
 * @vlan_filters: the number of active VLAN filters
 * @trusted: flag if the VF is trusted
 *
 * Get new VLAN id based on current VLAN filters, trust, PVID
 * and vf-vlan-prune-disable flag.
 *
 * Returns the value of the new vlan filter or
 * the old value if no new filter is needed.
 **/
static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
				struct i40e_new_mac_filter *new_mac,
				struct i40e_mac_filter *f,
				int vlan_filters,
				bool trusted)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_pf *pf = vsi->back;
	bool is_any;

	if (new_mac)
		f = new_mac->f;

	if (pvid && f->vlan != pvid)
		return pvid;

	is_any = (trusted ||
		  !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));

	if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
	    (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
	    (is_any && !vlan_filters && f->vlan == 0)) {
		if (is_any)
			return I40E_VLAN_ANY;
		else
			return 0;
	}

	return f->vlan;
}

/**
 * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
 * @vsi: the vsi to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 * @trusted: flag if the VF is trusted
 *
 * Correct VF VLAN filters based on current VLAN filters, trust, PVID
 * and vf-vlan-prune-disable flag.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
					    struct hlist_head *tmp_add_list,
					    struct hlist_head *tmp_del_list,
					    int vlan_filters,
					    bool trusted)
{
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new_mac;
	struct hlist_node *h;
	int bkt, new_vlan;

	hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
		new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
							vlan_filters, trusted);
	}

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
						trusted);
		if (new_vlan != f->vlan) {
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;
			/* Create a temporary i40e_new_mac_filter */
			new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
			if (!new_mac)
				return -ENOMEM;
			new_mac->f = add_head;
			new_mac->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new_mac->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;
	return 0;
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place.
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	eth_hw_addr_set(netdev, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}

/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 * @enabled_tc: bitmap of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
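
	/* Example with illustrative values: for num_qps = 6, ilog2(6) = 2
	 * and 6 is not a power of 2, so pow becomes 3 and TC0 is described
	 * to the hardware as spanning 2^3 = 8 queue slots.
	 */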
1944 /* Setup queue offset/count for all TCs for given VSI */
1945 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1946 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1947 /* See if the given TC is enabled for the given VSI */
1948 if (vsi->tc_config.enabled_tc & BIT(i)) {
1949 offset = vsi->mqprio_qopt.qopt.offset[i];
1950 qcount = vsi->mqprio_qopt.qopt.count[i];
1951 if (qcount > max_qcount)
1952 max_qcount = qcount;
1953 vsi->tc_config.tc_info[i].qoffset = offset;
1954 vsi->tc_config.tc_info[i].qcount = qcount;
1955 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1957 /* TC is not enabled so set the offset to
1958 * default queue and allocate one queue
1961 vsi->tc_config.tc_info[i].qoffset = 0;
1962 vsi->tc_config.tc_info[i].qcount = 1;
1963 vsi->tc_config.tc_info[i].netdev_tc = 0;
1967 /* Set actual Tx/Rx queue pairs */
1968 vsi->num_queue_pairs = offset + qcount;
1970 /* Setup queue TC[0].qmap for given VSI context */
1971 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1972 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1973 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1974 ctxt->info.valid_sections |= cpu_to_le16(sections);
1976 /* Reconfigure RSS for main VSI with max queue count */
1977 vsi->rss_size = max_qcount;
1978 ret = i40e_vsi_config_rss(vsi);
1980 dev_info(&vsi->back->pdev->dev,
1981 "Failed to reconfig rss for num_queues (%u)\n",
1985 vsi->reconfig_rss = true;
1986 dev_dbg(&vsi->back->pdev->dev,
1987 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1989 /* Find queue count available for channel VSIs and starting offset
1992 override_q = vsi->mqprio_qopt.qopt.count[0];
1993 if (override_q && override_q < vsi->num_queue_pairs) {
1994 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1995 vsi->next_base_queue = override_q;
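/* Example of the carve-out above (hypothetical numbers): with
 * mqprio count[0] = 4 on a VSI using 16 queue pairs, TC0 keeps
 * queues 0-3 and cnt_q_avail = 12 queues starting at
 * next_base_queue = 4 are left for channel VSIs.
 */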
2001 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
2002 * @vsi: the VSI being setup
2003 * @ctxt: VSI context structure
2004 * @enabled_tc: Enabled TCs bitmap
2005 * @is_add: True if called before Add VSI
2007 * Setup VSI queue mapping for enabled traffic classes.
2009 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
2010 struct i40e_vsi_context *ctxt,
2014 struct i40e_pf *pf = vsi->back;
2024 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2026 /* zero out queue mapping, it will get updated at the end of the function */
2027 memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
2029 if (vsi->type == I40E_VSI_MAIN) {
2030 /* This code helps add more queues to the VSI if we have
2031 * more cores than RSS can support; the higher cores will
2032 * be served by ATR or other filters. Furthermore, a
2033 * non-zero req_queue_pairs says that the user requested a new
2034 * queue count via ethtool's set_channels, so use this
2035 * value for queue distribution across traffic classes
2037 if (vsi->req_queue_pairs > 0)
2038 vsi->num_queue_pairs = vsi->req_queue_pairs;
2039 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2040 vsi->num_queue_pairs = pf->num_lan_msix;
2043 /* Number of queues per enabled TC */
2044 if (vsi->type == I40E_VSI_MAIN ||
2045 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
2046 num_tc_qps = vsi->num_queue_pairs;
2048 num_tc_qps = vsi->alloc_queue_pairs;
2050 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2051 /* Find numtc from enabled TC bitmap */
2052 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2053 if (enabled_tc & BIT(i)) /* TC is enabled */
2057 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
2060 num_tc_qps = num_tc_qps / numtc;
2061 num_tc_qps = min_t(int, num_tc_qps,
2062 i40e_pf_get_max_q_per_tc(pf));
2065 vsi->tc_config.numtc = numtc;
2066 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
2068 /* Do not allow using more TC queue pairs than MSI-X vectors exist */
2069 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2070 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
2072 /* Setup queue offset/count for all TCs for given VSI */
2073 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2074 /* See if the given TC is enabled for the given VSI */
2075 if (vsi->tc_config.enabled_tc & BIT(i)) {
2079 switch (vsi->type) {
2081 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
2082 I40E_FLAG_FD_ATR_ENABLED)) ||
2083 vsi->tc_config.enabled_tc != 1) {
2084 qcount = min_t(int, pf->alloc_rss_size,
2090 case I40E_VSI_SRIOV:
2091 case I40E_VSI_VMDQ2:
2093 qcount = num_tc_qps;
2097 vsi->tc_config.tc_info[i].qoffset = offset;
2098 vsi->tc_config.tc_info[i].qcount = qcount;
2100 /* find the next higher power-of-2 of num queue pairs */
2103 while (num_qps && (BIT_ULL(pow) < qcount)) {
2108 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
2110 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2111 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
2115 /* TC is not enabled so set the offset to
2116 * default queue and allocate one queue
2119 vsi->tc_config.tc_info[i].qoffset = 0;
2120 vsi->tc_config.tc_info[i].qcount = 1;
2121 vsi->tc_config.tc_info[i].netdev_tc = 0;
2125 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
2127 /* Do not change previously set num_queue_pairs for PFs and VFs */
2128 if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
2129 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
2130 (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
2131 vsi->num_queue_pairs = offset;
2133 /* Scheduler section valid can only be set for ADD VSI */
2135 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
2137 ctxt->info.up_enable_bits = enabled_tc;
2139 if (vsi->type == I40E_VSI_SRIOV) {
2140 ctxt->info.mapping_flags |=
2141 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2142 for (i = 0; i < vsi->num_queue_pairs; i++)
2143 ctxt->info.queue_mapping[i] =
2144 cpu_to_le16(vsi->base_queue + i);
2146 ctxt->info.mapping_flags |=
2147 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2148 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
2150 ctxt->info.valid_sections |= cpu_to_le16(sections);
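/* Illustrative qmap encoding (assuming the field layout implied by the
 * SHIFT macros above): a TC at queue offset 8 with 4 queues (pow = 2)
 * would be encoded as
 *
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * note that the NUMBER field carries log2 of the queue count, not the
 * count itself.
 */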
2154 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
2155 * @netdev: the netdevice
2156 * @addr: address to add
2158 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
2159 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
2161 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
2163 struct i40e_netdev_priv *np = netdev_priv(netdev);
2164 struct i40e_vsi *vsi = np->vsi;
2166 if (i40e_add_mac_filter(vsi, addr))
2173 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
2174 * @netdev: the netdevice
2175 * @addr: address to remove
2177 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
2178 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
2180 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
2182 struct i40e_netdev_priv *np = netdev_priv(netdev);
2183 struct i40e_vsi *vsi = np->vsi;
2185 /* Under some circumstances, we might receive a request to delete
2186 * our own device address from our uc list. Because we store the
2187 * device address in the VSI's MAC/VLAN filter list, we need to ignore
2188 * such requests and not delete our device address from this list.
2190 if (ether_addr_equal(addr, netdev->dev_addr))
2193 i40e_del_mac_filter(vsi, addr);
2199 * i40e_set_rx_mode - NDO callback to set the netdev filters
2200 * @netdev: network interface device structure
2202 static void i40e_set_rx_mode(struct net_device *netdev)
2204 struct i40e_netdev_priv *np = netdev_priv(netdev);
2205 struct i40e_vsi *vsi = np->vsi;
2207 spin_lock_bh(&vsi->mac_filter_hash_lock);
2209 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2210 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2212 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2214 /* check for other flag changes */
2215 if (vsi->current_netdev_flags != vsi->netdev->flags) {
2216 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2217 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
2222 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2223 * @vsi: Pointer to VSI struct
2224 * @from: Pointer to list which contains MAC filter entries - changes to
2225 * those entries need to be undone.
2227 * MAC filter entries from this list were slated for deletion.
2229 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2230 struct hlist_head *from)
2232 struct i40e_mac_filter *f;
2233 struct hlist_node *h;
2235 hlist_for_each_entry_safe(f, h, from, hlist) {
2236 u64 key = i40e_addr_to_hkey(f->macaddr);
2238 /* Move the element back into the MAC filter list */
2239 hlist_del(&f->hlist);
2240 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2245 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2246 * @vsi: Pointer to vsi struct
2247 * @from: Pointer to list which contains MAC filter entries - changes to
2248 * those entries need to be undone.
2250 * MAC filter entries from this list were slated for addition.
2252 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2253 struct hlist_head *from)
2255 struct i40e_new_mac_filter *new;
2256 struct hlist_node *h;
2258 hlist_for_each_entry_safe(new, h, from, hlist) {
2259 /* We can simply free the wrapper structure */
2260 hlist_del(&new->hlist);
2261 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2267 * i40e_next_filter - Get the next non-broadcast filter from a list
2268 * @next: pointer to filter in list
2270 * Returns the next non-broadcast filter in the list. Required so that we
2271 * ignore broadcast filters within the list, since these are not handled via
2272 * the normal firmware update path.
2275 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2277 hlist_for_each_entry_continue(next, hlist) {
2278 if (!is_broadcast_ether_addr(next->f->macaddr))
2286 * i40e_update_filter_state - Update filter state based on return data
2288 * @count: Number of filters added
2289 * @add_list: return data from fw
2290 * @add_head: pointer to first filter in current batch
2292 * MAC filter entries from the list were slated to be added to the device.
2293 * Returns the number of successful filters. Note that 0 does NOT mean success!
2296 i40e_update_filter_state(int count,
2297 struct i40e_aqc_add_macvlan_element_data *add_list,
2298 struct i40e_new_mac_filter *add_head)
2303 for (i = 0; i < count; i++) {
2304 /* Always check status of each filter. We don't need to check
2305 * the firmware return status because we pre-set the filter
2306 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2307 * request to the adminq. Thus, if it no longer matches then
2308 * we know the filter is active.
2310 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2311 add_head->state = I40E_FILTER_FAILED;
2313 add_head->state = I40E_FILTER_ACTIVE;
2317 add_head = i40e_next_filter(add_head);
2326 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2327 * @vsi: ptr to the VSI
2328 * @vsi_name: name to display in messages
2329 * @list: the list of filters to send to firmware
2330 * @num_del: the number of filters to delete
2331 * @retval: Set to -EIO on failure to delete
2333 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2334 * *retval instead of a return value so that success does not force *retval to
2335 * be set to 0. This ensures that a sequence of calls to this function
2336 * preserves the previous value of *retval on successful delete.
2339 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2340 struct i40e_aqc_remove_macvlan_element_data *list,
2341 int num_del, int *retval)
2343 struct i40e_hw *hw = &vsi->back->hw;
2344 enum i40e_admin_queue_err aq_status;
2347 aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
2350 /* Explicitly ignore and do not report when firmware returns ENOENT */
2351 if (aq_ret && aq_status != I40E_AQ_RC_ENOENT) {
2353 dev_info(&vsi->back->pdev->dev,
2354 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2355 vsi_name, i40e_stat_str(hw, aq_ret),
2356 i40e_aq_str(hw, aq_status));
2361 * i40e_aqc_add_filters - Request firmware to add a set of filters
2362 * @vsi: ptr to the VSI
2363 * @vsi_name: name to display in messages
2364 * @list: the list of filters to send to firmware
2365 * @add_head: Position in the add hlist
2366 * @num_add: the number of filters to add
2368 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2369 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2370 * space for more filters.
2373 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2374 struct i40e_aqc_add_macvlan_element_data *list,
2375 struct i40e_new_mac_filter *add_head,
2378 struct i40e_hw *hw = &vsi->back->hw;
2379 enum i40e_admin_queue_err aq_status;
2382 i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
2383 fcnt = i40e_update_filter_state(num_add, list, add_head);
2385 if (fcnt != num_add) {
2386 if (vsi->type == I40E_VSI_MAIN) {
2387 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2388 dev_warn(&vsi->back->pdev->dev,
2389 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2390 i40e_aq_str(hw, aq_status), vsi_name);
2391 } else if (vsi->type == I40E_VSI_SRIOV ||
2392 vsi->type == I40E_VSI_VMDQ1 ||
2393 vsi->type == I40E_VSI_VMDQ2) {
2394 dev_warn(&vsi->back->pdev->dev,
2395 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2396 i40e_aq_str(hw, aq_status), vsi_name,
2399 dev_warn(&vsi->back->pdev->dev,
2400 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2401 i40e_aq_str(hw, aq_status), vsi_name,
2408 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2409 * @vsi: pointer to the VSI
2410 * @vsi_name: the VSI name
2411 * @f: filter data
2413 * This function sets or clears the promiscuous broadcast flags for VLAN
2414 * filters in order to properly receive broadcast frames. Assumes that only
2415 * broadcast filters are passed.
2417 * Returns status indicating success or failure.
2420 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2421 struct i40e_mac_filter *f)
2423 bool enable = f->state == I40E_FILTER_NEW;
2424 struct i40e_hw *hw = &vsi->back->hw;
2427 if (f->vlan == I40E_VLAN_ANY) {
2428 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2433 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2441 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2442 dev_warn(&vsi->back->pdev->dev,
2443 "Error %s, forcing overflow promiscuous on %s\n",
2444 i40e_aq_str(hw, hw->aq.asq_last_status),
2452 * i40e_set_promiscuous - set promiscuous mode
2453 * @pf: board private structure
2454 * @promisc: promisc on or off
2456 * There are different ways of setting promiscuous mode on a PF depending on
2457 * what state/environment we're in. This identifies and sets it appropriately.
2458 * Returns 0 on success.
2460 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2462 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2463 struct i40e_hw *hw = &pf->hw;
2466 if (vsi->type == I40E_VSI_MAIN &&
2467 pf->lan_veb != I40E_NO_VEB &&
2468 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2469 /* set defport ON for the Main VSI instead of true promisc;
2470 * this way we will get all unicast/multicast and VLAN
2471 * promisc behavior but will not get VF or VMDq traffic
2472 * replicated on the Main VSI.
2475 aq_ret = i40e_aq_set_default_vsi(hw,
2479 aq_ret = i40e_aq_clear_default_vsi(hw,
2483 dev_info(&pf->pdev->dev,
2484 "Set default VSI failed, err %s, aq_err %s\n",
2485 i40e_stat_str(hw, aq_ret),
2486 i40e_aq_str(hw, hw->aq.asq_last_status));
2489 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2495 dev_info(&pf->pdev->dev,
2496 "set unicast promisc failed, err %s, aq_err %s\n",
2497 i40e_stat_str(hw, aq_ret),
2498 i40e_aq_str(hw, hw->aq.asq_last_status));
2500 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2505 dev_info(&pf->pdev->dev,
2506 "set multicast promisc failed, err %s, aq_err %s\n",
2507 i40e_stat_str(hw, aq_ret),
2508 i40e_aq_str(hw, hw->aq.asq_last_status));
2513 pf->cur_promisc = promisc;
2519 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2520 * @vsi: ptr to the VSI
2522 * Push any outstanding VSI filter changes through the AdminQ.
2524 * Returns 0 or error value
2526 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2528 struct hlist_head tmp_add_list, tmp_del_list;
2529 struct i40e_mac_filter *f;
2530 struct i40e_new_mac_filter *new, *add_head = NULL;
2531 struct i40e_hw *hw = &vsi->back->hw;
2532 bool old_overflow, new_overflow;
2533 unsigned int failed_filters = 0;
2534 unsigned int vlan_filters = 0;
2535 char vsi_name[16] = "PF";
2536 int filter_list_len = 0;
2537 i40e_status aq_ret = 0;
2538 u32 changed_flags = 0;
2539 struct hlist_node *h;
2548 /* typed pointers for the AQ add/remove filter arrays, allocated later */
2549 struct i40e_aqc_add_macvlan_element_data *add_list;
2550 struct i40e_aqc_remove_macvlan_element_data *del_list;
2552 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2553 usleep_range(1000, 2000);
2556 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2559 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2560 vsi->current_netdev_flags = vsi->netdev->flags;
2563 INIT_HLIST_HEAD(&tmp_add_list);
2564 INIT_HLIST_HEAD(&tmp_del_list);
2566 if (vsi->type == I40E_VSI_SRIOV)
2567 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2568 else if (vsi->type != I40E_VSI_MAIN)
2569 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2571 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2572 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2574 spin_lock_bh(&vsi->mac_filter_hash_lock);
2575 /* Create a list of filters to delete. */
2576 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2577 if (f->state == I40E_FILTER_REMOVE) {
2578 /* Move the element into temporary del_list */
2579 hash_del(&f->hlist);
2580 hlist_add_head(&f->hlist, &tmp_del_list);
2582 /* Avoid counting removed filters */
2585 if (f->state == I40E_FILTER_NEW) {
2586 /* Create a temporary i40e_new_mac_filter */
2587 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2589 goto err_no_memory_locked;
2591 /* Store pointer to the real filter */
2593 new->state = f->state;
2595 /* Add it to the hash list */
2596 hlist_add_head(&new->hlist, &tmp_add_list);
2599 /* Count the number of active (current and new) VLAN
2600 * filters we have now. Does not count filters which
2601 * are marked for deletion.
2607 if (vsi->type != I40E_VSI_SRIOV)
2608 retval = i40e_correct_mac_vlan_filters
2609 (vsi, &tmp_add_list, &tmp_del_list,
2612 retval = i40e_correct_vf_mac_vlan_filters
2613 (vsi, &tmp_add_list, &tmp_del_list,
2614 vlan_filters, pf->vf[vsi->vf_id].trusted);
2616 hlist_for_each_entry(new, &tmp_add_list, hlist)
2617 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2620 goto err_no_memory_locked;
2622 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2625 /* Now process 'del_list' outside the lock */
2626 if (!hlist_empty(&tmp_del_list)) {
2627 filter_list_len = hw->aq.asq_buf_size /
2628 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2629 list_size = filter_list_len *
2630 sizeof(struct i40e_aqc_remove_macvlan_element_data);
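/* Example sizing (assuming the usual 4096-byte AdminQ buffer): each
 * remove element is 16 bytes, so filter_list_len = 4096 / 16 = 256
 * filters can be deleted per AdminQ call before the buffer is flushed
 * and reused.
 */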
2631 del_list = kzalloc(list_size, GFP_ATOMIC);
2635 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2638 /* handle broadcast filters by updating the broadcast
2639 * promiscuous flag and releasing the filter from the list.
2641 if (is_broadcast_ether_addr(f->macaddr)) {
2642 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2644 hlist_del(&f->hlist);
2649 /* add to delete list */
2650 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2651 if (f->vlan == I40E_VLAN_ANY) {
2652 del_list[num_del].vlan_tag = 0;
2653 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2655 del_list[num_del].vlan_tag =
2656 cpu_to_le16((u16)(f->vlan));
2659 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2660 del_list[num_del].flags = cmd_flags;
2663 /* flush a full buffer */
2664 if (num_del == filter_list_len) {
2665 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2667 memset(del_list, 0, list_size);
2670 /* Release memory for MAC filter entries which were
2671 * synced up with HW.
2673 hlist_del(&f->hlist);
2678 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2686 if (!hlist_empty(&tmp_add_list)) {
2687 /* Do all the adds now. */
2688 filter_list_len = hw->aq.asq_buf_size /
2689 sizeof(struct i40e_aqc_add_macvlan_element_data);
2690 list_size = filter_list_len *
2691 sizeof(struct i40e_aqc_add_macvlan_element_data);
2692 add_list = kzalloc(list_size, GFP_ATOMIC);
2697 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2698 /* handle broadcast filters by updating the broadcast
2699 * promiscuous flag instead of adding a MAC filter.
2701 if (is_broadcast_ether_addr(new->f->macaddr)) {
2702 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2704 new->state = I40E_FILTER_FAILED;
2706 new->state = I40E_FILTER_ACTIVE;
2710 /* add to add array */
2714 ether_addr_copy(add_list[num_add].mac_addr,
2716 if (new->f->vlan == I40E_VLAN_ANY) {
2717 add_list[num_add].vlan_tag = 0;
2718 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2720 add_list[num_add].vlan_tag =
2721 cpu_to_le16((u16)(new->f->vlan));
2723 add_list[num_add].queue_number = 0;
2724 /* set invalid match method for later detection */
2725 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2726 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2727 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2730 /* flush a full buffer */
2731 if (num_add == filter_list_len) {
2732 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2734 memset(add_list, 0, list_size);
2739 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2742 /* Now move all of the filters from the temp add list back to
2743 * the VSI's list.
2745 spin_lock_bh(&vsi->mac_filter_hash_lock);
2746 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2747 /* Only update the state if we're still NEW */
2748 if (new->f->state == I40E_FILTER_NEW)
2749 new->f->state = new->state;
2750 hlist_del(&new->hlist);
2751 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2754 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2759 /* Determine the number of active and failed filters. */
2760 spin_lock_bh(&vsi->mac_filter_hash_lock);
2761 vsi->active_filters = 0;
2762 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2763 if (f->state == I40E_FILTER_ACTIVE)
2764 vsi->active_filters++;
2765 else if (f->state == I40E_FILTER_FAILED)
2768 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2770 /* Check if we are able to exit overflow promiscuous mode. We can
2771 * safely exit if we didn't just enter, we no longer have any failed
2772 * filters, and we have reduced filters below the threshold value.
2774 if (old_overflow && !failed_filters &&
2775 vsi->active_filters < vsi->promisc_threshold) {
2776 dev_info(&pf->pdev->dev,
2777 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2779 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2780 vsi->promisc_threshold = 0;
2783 /* if the VF is not trusted, do not do promisc */
2784 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2785 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2789 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2791 /* If we are entering overflow promiscuous, we need to calculate a new
2792 * threshold for when we are safe to exit
2794 if (!old_overflow && new_overflow)
2795 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
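/* Example: entering overflow promiscuous with 100 active filters sets
 * the exit threshold to (100 * 3) / 4 = 75; promiscuous mode is left
 * only once no filters have failed and fewer than 75 remain active.
 */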
2797 /* check for changes in promiscuous modes */
2798 if (changed_flags & IFF_ALLMULTI) {
2799 bool cur_multipromisc;
2801 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2802 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2807 retval = i40e_aq_rc_to_posix(aq_ret,
2808 hw->aq.asq_last_status);
2809 dev_info(&pf->pdev->dev,
2810 "set multi promisc failed on %s, err %s aq_err %s\n",
2812 i40e_stat_str(hw, aq_ret),
2813 i40e_aq_str(hw, hw->aq.asq_last_status));
2815 dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2816 cur_multipromisc ? "entering" : "leaving");
2820 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2823 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2825 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2827 retval = i40e_aq_rc_to_posix(aq_ret,
2828 hw->aq.asq_last_status);
2829 dev_info(&pf->pdev->dev,
2830 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2831 cur_promisc ? "on" : "off",
2833 i40e_stat_str(hw, aq_ret),
2834 i40e_aq_str(hw, hw->aq.asq_last_status));
2838 /* if something went wrong then set the changed flag so we try again */
2840 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2842 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2846 /* Restore elements on the temporary add and delete lists */
2847 spin_lock_bh(&vsi->mac_filter_hash_lock);
2848 err_no_memory_locked:
2849 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2850 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2851 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2853 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2854 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2859 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2860 * @pf: board private structure
2862 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2868 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2870 if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2871 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2875 for (v = 0; v < pf->num_alloc_vsi; v++) {
2877 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2878 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2879 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2882 /* come back and try again later */
2883 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2892 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2895 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2897 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2898 return I40E_RXBUFFER_2048;
2900 return I40E_RXBUFFER_3072;
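/* Worked example: with build_skb (no legacy-rx) and 4K pages the XDP
 * buffer is 3072 bytes, so the largest MTU accepted by the check in
 * i40e_change_mtu() below is 3072 - ETH_HLEN - ETH_FCS_LEN - VLAN_HLEN
 * = 3072 - 14 - 4 - 4 = 3050 bytes.
 */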
2904 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
2905 * @netdev: network interface device structure
2906 * @new_mtu: new value for maximum frame size
2908 * Returns 0 on success, negative on failure
2910 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2912 struct i40e_netdev_priv *np = netdev_priv(netdev);
2913 struct i40e_vsi *vsi = np->vsi;
2914 struct i40e_pf *pf = vsi->back;
2916 if (i40e_enabled_xdp_vsi(vsi)) {
2917 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2919 if (frame_size > i40e_max_xdp_frame_size(vsi))
2923 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2924 netdev->mtu, new_mtu);
2925 netdev->mtu = new_mtu;
2926 if (netif_running(netdev))
2927 i40e_vsi_reinit_locked(vsi);
2928 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2929 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2934 * i40e_ioctl - Access the hwtstamp interface
2935 * @netdev: network interface device structure
2936 * @ifr: interface request data
2937 * @cmd: ioctl command
2939 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2941 struct i40e_netdev_priv *np = netdev_priv(netdev);
2942 struct i40e_pf *pf = np->vsi->back;
2946 return i40e_ptp_get_ts_config(pf, ifr);
2948 return i40e_ptp_set_ts_config(pf, ifr);
2955 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2956 * @vsi: the vsi being adjusted
2958 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2960 struct i40e_vsi_context ctxt;
2963 /* Don't modify stripping options if a port VLAN is active */
2967 if ((vsi->info.valid_sections &
2968 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2969 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2970 return; /* already enabled */
2972 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2973 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2974 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2976 ctxt.seid = vsi->seid;
2977 ctxt.info = vsi->info;
2978 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2980 dev_info(&vsi->back->pdev->dev,
2981 "update vlan stripping failed, err %s aq_err %s\n",
2982 i40e_stat_str(&vsi->back->hw, ret),
2983 i40e_aq_str(&vsi->back->hw,
2984 vsi->back->hw.aq.asq_last_status));
2989 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2990 * @vsi: the vsi being adjusted
2992 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2994 struct i40e_vsi_context ctxt;
2997 /* Don't modify stripping options if a port VLAN is active */
3001 if ((vsi->info.valid_sections &
3002 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
3003 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3004 I40E_AQ_VSI_PVLAN_EMOD_MASK))
3005 return; /* already disabled */
3007 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3008 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3009 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3011 ctxt.seid = vsi->seid;
3012 ctxt.info = vsi->info;
3013 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3015 dev_info(&vsi->back->pdev->dev,
3016 "update vlan stripping failed, err %s aq_err %s\n",
3017 i40e_stat_str(&vsi->back->hw, ret),
3018 i40e_aq_str(&vsi->back->hw,
3019 vsi->back->hw.aq.asq_last_status));
3024 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
3025 * @vsi: the vsi being configured
3026 * @vid: vlan id to be added (0 = untagged only, -1 = any)
3028 * This is a helper function for adding a new MAC/VLAN filter with the
3029 * specified VLAN for each existing MAC address already in the hash table.
3030 * This function does *not* perform any accounting to update filters based on
3031 * VLAN mode.
3033 * NOTE: this function expects to be called while under the
3034 * mac_filter_hash_lock
3036 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3038 struct i40e_mac_filter *f, *add_f;
3039 struct hlist_node *h;
3042 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3043 /* If we're asked to add a filter that has been marked for
3044 * removal, it is safe to simply restore it to active state.
3045 * __i40e_del_filter will have simply deleted any filters which
3046 * were previously marked NEW or FAILED, so if it is currently
3047 * marked REMOVE it must have previously been ACTIVE. Since we
3048 * haven't yet run the sync filters task, just restore this
3049 * filter to the ACTIVE state so that the sync task leaves it
3052 if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
3053 f->state = I40E_FILTER_ACTIVE;
3055 } else if (f->state == I40E_FILTER_REMOVE) {
3058 add_f = i40e_add_filter(vsi, f->macaddr, vid);
3060 dev_info(&vsi->back->pdev->dev,
3061 "Could not add vlan filter %d for %pM\n",
3071 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
3072 * @vsi: the VSI being configured
3073 * @vid: VLAN id to be added
3075 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
3082 /* The network stack will attempt to add VID=0, with the intention to
3083 * receive priority tagged packets with a VLAN of 0. Our HW receives
3084 * these packets by default when configured to receive untagged
3085 * packets, so we don't need to add a filter for this case.
3086 * Additionally, HW interprets adding a VID=0 filter as meaning to
3087 * receive *only* tagged traffic and stops receiving untagged traffic.
3088 * Thus, we do not want to actually add a filter for VID=0
3093 /* Locked once because all functions invoked below iterate the list */
3094 spin_lock_bh(&vsi->mac_filter_hash_lock);
3095 err = i40e_add_vlan_all_mac(vsi, vid);
3096 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3100 /* schedule our worker thread which will take care of
3101 * applying the new filter changes
3103 i40e_service_event_schedule(vsi->back);
3108 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
3109 * @vsi: the vsi being configured
3110 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
3112 * This function should be used to remove all VLAN filters which match the
3113 * given VID. It does not schedule the service event and does not take the
3114 * mac_filter_hash_lock so it may be combined with other operations under
3115 * a single invocation of the mac_filter_hash_lock.
3117 * NOTE: this function expects to be called while under the
3118 * mac_filter_hash_lock
3120 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3122 struct i40e_mac_filter *f;
3123 struct hlist_node *h;
3126 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3128 __i40e_del_filter(vsi, f);
3133 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
3134 * @vsi: the VSI being configured
3135 * @vid: VLAN id to be removed
3137 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
3139 if (!vid || vsi->info.pvid)
3142 spin_lock_bh(&vsi->mac_filter_hash_lock);
3143 i40e_rm_vlan_all_mac(vsi, vid);
3144 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3146 /* schedule our worker thread which will take care of
3147 * applying the new filter changes
3149 i40e_service_event_schedule(vsi->back);
3153 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
3154 * @netdev: network interface to be adjusted
3155 * @proto: unused protocol value
3156 * @vid: vlan id to be added
3158 * net_device_ops implementation for adding vlan ids
3160 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
3161 __always_unused __be16 proto, u16 vid)
3163 struct i40e_netdev_priv *np = netdev_priv(netdev);
3164 struct i40e_vsi *vsi = np->vsi;
3167 if (vid >= VLAN_N_VID)
3170 ret = i40e_vsi_add_vlan(vsi, vid);
3172 set_bit(vid, vsi->active_vlans);
3178 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
3179 * @netdev: network interface to be adjusted
3180 * @proto: unused protocol value
3181 * @vid: vlan id to be added
3183 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
3184 __always_unused __be16 proto, u16 vid)
3186 struct i40e_netdev_priv *np = netdev_priv(netdev);
3187 struct i40e_vsi *vsi = np->vsi;
3189 if (vid >= VLAN_N_VID)
3191 set_bit(vid, vsi->active_vlans);
3195 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
3196 * @netdev: network interface to be adjusted
3197 * @proto: unused protocol value
3198 * @vid: vlan id to be removed
3200 * net_device_ops implementation for removing vlan ids
3202 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3203 __always_unused __be16 proto, u16 vid)
3205 struct i40e_netdev_priv *np = netdev_priv(netdev);
3206 struct i40e_vsi *vsi = np->vsi;
3208 /* return code is ignored as there is nothing a user
3209 * can do about failure to remove and a log message was
3210 * already printed by the called function
3212 i40e_vsi_kill_vlan(vsi, vid);
3214 clear_bit(vid, vsi->active_vlans);
3220 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3221 * @vsi: the vsi being brought back up
3223 static void i40e_restore_vlan(struct i40e_vsi *vsi)
3230 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3231 i40e_vlan_stripping_enable(vsi);
3233 i40e_vlan_stripping_disable(vsi);
3235 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3236 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
3241 * i40e_vsi_add_pvid - Add pvid for the VSI
3242 * @vsi: the vsi being adjusted
3243 * @vid: the vlan id to set as a PVID
3245 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3247 struct i40e_vsi_context ctxt;
3250 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3251 vsi->info.pvid = cpu_to_le16(vid);
3252 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
3253 I40E_AQ_VSI_PVLAN_INSERT_PVID |
3254 I40E_AQ_VSI_PVLAN_EMOD_STR;
3256 ctxt.seid = vsi->seid;
3257 ctxt.info = vsi->info;
3258 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3260 dev_info(&vsi->back->pdev->dev,
3261 "add pvid failed, err %s aq_err %s\n",
3262 i40e_stat_str(&vsi->back->hw, ret),
3263 i40e_aq_str(&vsi->back->hw,
3264 vsi->back->hw.aq.asq_last_status));
3272 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3273 * @vsi: the vsi being adjusted
3275 * Just use the vlan_rx_register() service to put it back to normal
3277 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3281 i40e_vlan_stripping_disable(vsi);
3285 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3286 * @vsi: ptr to the VSI
3288 * If this function returns with an error, then it's possible one or
3289 * more of the rings is populated (while the rest are not). It is the
3290 * caller's duty to clean those orphaned rings.
3292 * Return 0 on success, negative on failure
3294 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3298 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3299 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3301 if (!i40e_enabled_xdp_vsi(vsi))
3304 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3305 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3311 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3312 * @vsi: ptr to the VSI
3314 * Free VSI's transmit software resources
3316 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3320 if (vsi->tx_rings) {
3321 for (i = 0; i < vsi->num_queue_pairs; i++)
3322 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3323 i40e_free_tx_resources(vsi->tx_rings[i]);
3326 if (vsi->xdp_rings) {
3327 for (i = 0; i < vsi->num_queue_pairs; i++)
3328 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3329 i40e_free_tx_resources(vsi->xdp_rings[i]);
3334 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3335 * @vsi: ptr to the VSI
3337 * If this function returns with an error, then it's possible one or
3338 * more of the rings is populated (while the rest are not). It is the
3339 * caller's duty to clean those orphaned rings.
3341 * Return 0 on success, negative on failure
3343 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3347 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3348 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3353 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3354 * @vsi: ptr to the VSI
3356 * Free all receive software resources
3358 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3365 for (i = 0; i < vsi->num_queue_pairs; i++)
3366 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3367 i40e_free_rx_resources(vsi->rx_rings[i]);
3371 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3372 * @ring: The Tx ring to configure
3374 * This enables/disables XPS for a given Tx descriptor ring
3375 * based on the TCs enabled for the VSI that ring belongs to.
3377 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3381 if (!ring->q_vector || !ring->netdev || ring->ch)
3384 /* We only initialize XPS once, so as not to overwrite user settings */
3385 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3388 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3389 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3394 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC are enabled
3395 * @ring: The Tx or Rx ring
3397 * Returns the AF_XDP buffer pool or NULL.
3399 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3401 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3402 int qid = ring->queue_index;
3404 if (ring_is_xdp(ring))
3405 qid -= ring->vsi->alloc_queue_pairs;
3407 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3410 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
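/* Example of the qid fixup above (hypothetical numbers): on a VSI with
 * alloc_queue_pairs = 8, the XDP Tx ring with queue_index 10 maps to
 * qid = 10 - 8 = 2, and thus shares the AF_XDP pool bound to queue
 * pair 2 of the netdev.
 */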
3414 * i40e_configure_tx_ring - Configure a transmit ring context and related settings
3415 * @ring: The Tx ring to configure
3417 * Configure the Tx descriptor ring in the HMC context.
3419 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3421 struct i40e_vsi *vsi = ring->vsi;
3422 u16 pf_q = vsi->base_queue + ring->queue_index;
3423 struct i40e_hw *hw = &vsi->back->hw;
3424 struct i40e_hmc_obj_txq tx_ctx;
3425 i40e_status err = 0;
3428 if (ring_is_xdp(ring))
3429 ring->xsk_pool = i40e_xsk_pool(ring);
3431 /* some ATR related tx ring init */
3432 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3433 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3434 ring->atr_count = 0;
3436 ring->atr_sample_rate = 0;
3440 i40e_config_xps_tx_ring(ring);
3442 /* clear the context structure first */
3443 memset(&tx_ctx, 0, sizeof(tx_ctx));
3445 tx_ctx.new_context = 1;
3446 tx_ctx.base = (ring->dma / 128);
3447 tx_ctx.qlen = ring->count;
3448 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3449 I40E_FLAG_FD_ATR_ENABLED));
3450 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3451 /* FDIR VSI tx ring can still use RS bit and writebacks */
3452 if (vsi->type != I40E_VSI_FDIR)
3453 tx_ctx.head_wb_ena = 1;
3454 tx_ctx.head_wb_addr = ring->dma +
3455 (ring->count * sizeof(struct i40e_tx_desc));
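/* Units sketch (per the HMC queue context layout): tx_ctx.base is in
 * 128-byte increments, so a ring at DMA address 0x2000 is programmed
 * as base = 0x2000 / 128 = 64, and the head writeback area sits right
 * after the last of the ring->count descriptors.
 */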
3457 /* As part of VSI creation/update, FW allocates certain
3458 * Tx arbitration queue sets for each TC enabled for
3459 * the VSI. The FW returns the handles to these queue
3460 * sets as part of the response buffer to Add VSI,
3461 * Update VSI, etc. AQ commands. It is expected that
3462 * these queue set handles be associated with the Tx
3463 * queues by the driver as part of the TX queue context
3464 * initialization. This has to be done regardless of
3465 * DCB as by default everything is mapped to TC0.
3470 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3473 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3475 tx_ctx.rdylist_act = 0;
3477 /* clear the context in the HMC */
3478 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3480 dev_info(&vsi->back->pdev->dev,
3481 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3482 ring->queue_index, pf_q, err);
3486 /* set the context in the HMC */
3487 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3489 dev_info(&vsi->back->pdev->dev,
3490 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3491 ring->queue_index, pf_q, err);
3495 /* Now associate this queue with this PCI function */
3497 if (ring->ch->type == I40E_VSI_VMDQ2)
3498 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3502 qtx_ctl |= (ring->ch->vsi_number <<
3503 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3504 I40E_QTX_CTL_VFVM_INDX_MASK;
3506 if (vsi->type == I40E_VSI_VMDQ2) {
3507 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3508 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3509 I40E_QTX_CTL_VFVM_INDX_MASK;
3511 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3515 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3516 I40E_QTX_CTL_PF_INDX_MASK);
3517 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3520 /* cache the tail offset for easier writes later */
3521 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3527 * i40e_rx_offset - Return expected offset into page to access data
3528 * @rx_ring: Ring we are requesting offset of
3530 * Returns the offset value for ring into the data buffer.
3532 static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
3534 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3538 * i40e_configure_rx_ring - Configure a receive ring context
3539 * @ring: The Rx ring to configure
3541 * Configure the Rx descriptor ring in the HMC context.
3543 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3545 struct i40e_vsi *vsi = ring->vsi;
3546 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3547 u16 pf_q = vsi->base_queue + ring->queue_index;
3548 struct i40e_hw *hw = &vsi->back->hw;
3549 struct i40e_hmc_obj_rxq rx_ctx;
3550 i40e_status err = 0;
3554 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3556 /* clear the context structure first */
3557 memset(&rx_ctx, 0, sizeof(rx_ctx));
3559 if (ring->vsi->type == I40E_VSI_MAIN)
3560 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3563 ring->xsk_pool = i40e_xsk_pool(ring);
3564 if (ring->xsk_pool) {
3565 ret = i40e_alloc_rx_bi_zc(ring);
3569 xsk_pool_get_rx_frame_size(ring->xsk_pool);
3570 /* For AF_XDP ZC, we disallow packets from spanning
3571 * multiple buffers, thus letting us skip that
3572 * handling in the fast-path.
3575 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3576 MEM_TYPE_XSK_BUFF_POOL,
3580 dev_info(&vsi->back->pdev->dev,
3581 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3585 ret = i40e_alloc_rx_bi(ring);
3588 ring->rx_buf_len = vsi->rx_buf_len;
3589 if (ring->vsi->type == I40E_VSI_MAIN) {
3590 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3591 MEM_TYPE_PAGE_SHARED,
3598 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3599 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
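/* Example (assuming the 128-byte DBUFF granularity implied by the
 * SHIFT macro): rx_buf_len = 2048 gives
 * dbuff = DIV_ROUND_UP(2048, 128) = 16, i.e. buffer sizes are
 * programmed in 128-byte units.
 */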
3601 rx_ctx.base = (ring->dma / 128);
3602 rx_ctx.qlen = ring->count;
3604 /* use 16 byte descriptors */
3607 /* descriptor type is always zero
3610 rx_ctx.hsplit_0 = 0;
3612 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3613 if (hw->revision_id == 0)
3614 rx_ctx.lrxqthresh = 0;
3616 rx_ctx.lrxqthresh = 1;
3617 rx_ctx.crcstrip = 1;
3619 /* this controls whether VLAN is stripped from inner headers */
3621 /* set the prefena field to 1 because the manual says to */
3624 /* clear the context in the HMC */
3625 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3627 dev_info(&vsi->back->pdev->dev,
3628 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3629 ring->queue_index, pf_q, err);
3633 /* set the context in the HMC */
3634 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3636 dev_info(&vsi->back->pdev->dev,
3637 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3638 ring->queue_index, pf_q, err);
3642 /* configure Rx buffer alignment */
3643 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3644 clear_ring_build_skb_enabled(ring);
3646 set_ring_build_skb_enabled(ring);
3648 ring->rx_offset = i40e_rx_offset(ring);
3650 /* cache tail for quicker writes, and clear the reg before use */
3651 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3652 writel(0, ring->tail);
3654 if (ring->xsk_pool) {
3655 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3656 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3658 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3661 /* Log this in case the user has forgotten to give the kernel
3662 * any buffers, even later in the application.
3664 dev_info(&vsi->back->pdev->dev,
3665 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3666 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3667 ring->queue_index, pf_q);
3674 * i40e_vsi_configure_tx - Configure the VSI for Tx
3675 * @vsi: VSI structure describing this set of rings and resources
3677 * Configure the Tx VSI for operation.
3679 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3684 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3685 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3687 if (err || !i40e_enabled_xdp_vsi(vsi))
3690 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3691 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3697 * i40e_vsi_configure_rx - Configure the VSI for Rx
3698 * @vsi: the VSI being configured
3700 * Configure the Rx VSI for operation.
3702 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3707 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3708 vsi->max_frame = I40E_MAX_RXBUFFER;
3709 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3710 #if (PAGE_SIZE < 8192)
3711 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3712 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3713 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3714 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3717 vsi->max_frame = I40E_MAX_RXBUFFER;
3718 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3722 /* set up individual rings */
3723 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3724 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
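/* Buffer sizing example (4K pages, padding supported): a netdev at the
 * default 1500-byte MTU takes the middle branch above and uses
 * 1536-byte buffers (minus NET_IP_ALIGN), while larger MTUs fall
 * through to 3072-byte buffers with max_frame capped at
 * I40E_MAX_RXBUFFER.
 */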
3730 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3731 * @vsi: ptr to the VSI
3733 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3735 struct i40e_ring *tx_ring, *rx_ring;
3736 u16 qoffset, qcount;
3739 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3740 /* Reset the TC information */
3741 for (i = 0; i < vsi->num_queue_pairs; i++) {
3742 rx_ring = vsi->rx_rings[i];
3743 tx_ring = vsi->tx_rings[i];
3744 rx_ring->dcb_tc = 0;
3745 tx_ring->dcb_tc = 0;
3750 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3751 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3754 qoffset = vsi->tc_config.tc_info[n].qoffset;
3755 qcount = vsi->tc_config.tc_info[n].qcount;
3756 for (i = qoffset; i < (qoffset + qcount); i++) {
3757 rx_ring = vsi->rx_rings[i];
3758 tx_ring = vsi->tx_rings[i];
3759 rx_ring->dcb_tc = n;
3760 tx_ring->dcb_tc = n;
3766 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3767 * @vsi: ptr to the VSI
3769 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3772 i40e_set_rx_mode(vsi->netdev);
3776 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3777 * @pf: Pointer to the targeted PF
3779 * Set all flow director counters to 0.
3781 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3783 pf->fd_tcp4_filter_cnt = 0;
3784 pf->fd_udp4_filter_cnt = 0;
3785 pf->fd_sctp4_filter_cnt = 0;
3786 pf->fd_ip4_filter_cnt = 0;
3787 pf->fd_tcp6_filter_cnt = 0;
3788 pf->fd_udp6_filter_cnt = 0;
3789 pf->fd_sctp6_filter_cnt = 0;
3790 pf->fd_ip6_filter_cnt = 0;
3794 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3795 * @vsi: Pointer to the targeted VSI
3797 * This function replays onto the hw the hlist in which all the sideband
3798 * Flow Director filters were saved.
3800 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3802 struct i40e_fdir_filter *filter;
3803 struct i40e_pf *pf = vsi->back;
3804 struct hlist_node *node;
3806 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3809 /* Reset FDir counters as we're replaying all existing filters */
3810 i40e_reset_fdir_filter_cnt(pf);
3812 hlist_for_each_entry_safe(filter, node,
3813 &pf->fdir_filter_list, fdir_node) {
3814 i40e_add_del_fdir(vsi, filter, true);
3819 * i40e_vsi_configure - Set up the VSI for action
3820 * @vsi: the VSI being configured
3822 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3826 i40e_set_vsi_rx_mode(vsi);
3827 i40e_restore_vlan(vsi);
3828 i40e_vsi_config_dcb_rings(vsi);
3829 err = i40e_vsi_configure_tx(vsi);
3831 err = i40e_vsi_configure_rx(vsi);
3837 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3838 * @vsi: the VSI being configured
3840 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3842 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3843 struct i40e_pf *pf = vsi->back;
3844 struct i40e_hw *hw = &pf->hw;
3849 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3850 * and PFINT_LNKLSTn registers, e.g.:
3851 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3853 qp = vsi->base_queue;
3854 vector = vsi->base_vector;
3855 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3856 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3858 q_vector->rx.next_update = jiffies + 1;
3859 q_vector->rx.target_itr =
3860 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3861 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3862 q_vector->rx.target_itr >> 1);
3863 q_vector->rx.current_itr = q_vector->rx.target_itr;
3865 q_vector->tx.next_update = jiffies + 1;
3866 q_vector->tx.target_itr =
3867 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3868 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3869 q_vector->tx.target_itr >> 1);
3870 q_vector->tx.current_itr = q_vector->tx.target_itr;
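/* The PFINT_ITRN registers have 2-usec granularity, which is why the
 * target ITR values (tracked in 1-usec units) are shifted right by one
 * before being written above; e.g. a 50 usec ITR is programmed as 25.
 */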
3872 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3873 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3875 /* Linked list for the queue pairs assigned to this vector */
3876 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3877 for (q = 0; q < q_vector->num_ringpairs; q++) {
3878 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3881 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3882 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3883 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3884 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3885 (I40E_QUEUE_TYPE_TX <<
3886 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3888 wr32(hw, I40E_QINT_RQCTL(qp), val);
3891 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3892 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3893 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3894 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3895 (I40E_QUEUE_TYPE_TX <<
3896 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3898 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3901 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3902 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3903 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3904 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3905 (I40E_QUEUE_TYPE_RX <<
3906 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3908 /* Terminate the linked list */
3909 if (q == (q_vector->num_ringpairs - 1))
3910 val |= (I40E_QUEUE_END_OF_LIST <<
3911 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3913 wr32(hw, I40E_QINT_TQCTL(qp), val);
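/* Resulting chain for one vector with two queue pairs and no XDP:
 *
 *	PFINT_LNKLSTN -> rx0 -> tx0 -> rx1 -> tx1 -> END_OF_LIST
 *
 * each RQCTL points at its Tx queue and each TQCTL points at the next
 * Rx queue until the list is terminated.
 */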
3922 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3923 * @pf: pointer to private device data structure
3925 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3927 struct i40e_hw *hw = &pf->hw;
3930 /* clear things first */
3931 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3932 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3934 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3935 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3936 I40E_PFINT_ICR0_ENA_GRST_MASK |
3937 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3938 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3939 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3940 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3941 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3943 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3944 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3946 if (pf->flags & I40E_FLAG_PTP)
3947 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3949 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3951 /* SW_ITR_IDX = 0, but don't change INTENA */
3952 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3953 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3955 /* OTHER_ITR_IDX = 0 */
3956 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3960 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3961 * @vsi: the VSI being configured
3963 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3965 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3966 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3967 struct i40e_pf *pf = vsi->back;
3968 struct i40e_hw *hw = &pf->hw;
3971 /* set the ITR configuration */
3972 q_vector->rx.next_update = jiffies + 1;
3973 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3974 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3975 q_vector->rx.current_itr = q_vector->rx.target_itr;
3976 q_vector->tx.next_update = jiffies + 1;
3977 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3978 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3979 q_vector->tx.current_itr = q_vector->tx.target_itr;
3981 i40e_enable_misc_int_causes(pf);
3983 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3984 wr32(hw, I40E_PFINT_LNKLST0, 0);
3986 /* Associate the queue pair to the vector and enable the queue int */
3987 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3988 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3989 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3990 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3992 wr32(hw, I40E_QINT_RQCTL(0), val);
3994 if (i40e_enabled_xdp_vsi(vsi)) {
3995 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3996 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3998 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
4000 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
4003 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4004 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
4005 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
4007 wr32(hw, I40E_QINT_TQCTL(0), val);
4012 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
4013 * @pf: board private structure
4015 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
4017 struct i40e_hw *hw = &pf->hw;
4019 wr32(hw, I40E_PFINT_DYN_CTL0,
4020 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4025 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
4026 * @pf: board private structure
4028 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
4030 struct i40e_hw *hw = &pf->hw;
4033 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4034 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4035 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4037 wr32(hw, I40E_PFINT_DYN_CTL0, val);
4042 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
4043 * @irq: interrupt number
4044 * @data: pointer to a q_vector
4046 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
4048 struct i40e_q_vector *q_vector = data;
4050 if (!q_vector->tx.ring && !q_vector->rx.ring)
4053 napi_schedule_irqoff(&q_vector->napi);
4059 * i40e_irq_affinity_notify - Callback for affinity changes
4060 * @notify: context as to what irq was changed
4061 * @mask: the new affinity mask
4063 * This is a callback function used by the irq_set_affinity_notifier function
4064 * so that we may register to receive changes to the irq affinity masks.
4066 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
4067 const cpumask_t *mask)
4069 struct i40e_q_vector *q_vector =
4070 container_of(notify, struct i40e_q_vector, affinity_notify);
4072 cpumask_copy(&q_vector->affinity_mask, mask);
4076 * i40e_irq_affinity_release - Callback for affinity notifier release
4077 * @ref: internal core kernel usage
4079 * This is a callback function used by the irq_set_affinity_notifier function
4080 * to inform the current notification subscriber that they will no longer
4081 * receive notifications.
4083 static void i40e_irq_affinity_release(struct kref *ref) {}
4086 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
4087 * @vsi: the VSI being configured
4088 * @basename: name for the vector
4090 * Allocates MSI-X vectors and requests interrupts from the kernel.
4092 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
4093 {
4094 int q_vectors = vsi->num_q_vectors;
4095 struct i40e_pf *pf = vsi->back;
4096 int base = vsi->base_vector;
4097 int rx_int_idx = 0;
4098 int tx_int_idx = 0;
4099 int vector, err;
4100 int irq_num;
4101 int cpu;
4103 for (vector = 0; vector < q_vectors; vector++) {
4104 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
4106 irq_num = pf->msix_entries[base + vector].vector;
4108 if (q_vector->tx.ring && q_vector->rx.ring) {
4109 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4110 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
4112 } else if (q_vector->rx.ring) {
4113 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4114 "%s-%s-%d", basename, "rx", rx_int_idx++);
4115 } else if (q_vector->tx.ring) {
4116 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4117 "%s-%s-%d", basename, "tx", tx_int_idx++);
4118 } else {
4119 /* skip this unused q_vector */
4120 continue;
4121 }
4122 err = request_irq(irq_num,
4123 vsi->irq_handler,
4124 0,
4125 q_vector->name,
4126 q_vector);
4127 if (err) {
4128 dev_info(&pf->pdev->dev,
4129 "MSIX request_irq failed, error: %d\n", err);
4130 goto free_queue_irqs;
4131 }
4133 /* register for affinity change notifications */
4134 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
4135 q_vector->affinity_notify.release = i40e_irq_affinity_release;
4136 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
4137 /* Spread affinity hints out across online CPUs.
4139 * get_cpu_mask returns a static constant mask with
4140 * a permanent lifetime so it's ok to pass to
4141 * irq_update_affinity_hint without making a copy.
4142 */
4143 cpu = cpumask_local_spread(q_vector->v_idx, -1);
4144 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
4145 }
4147 vsi->irqs_ready = true;
4153 irq_num = pf->msix_entries[base + vector].vector;
4154 irq_set_affinity_notifier(irq_num, NULL);
4155 irq_update_affinity_hint(irq_num, NULL);
4156 free_irq(irq_num, &vsi->q_vectors[vector]);
4157 }
4158 return err;
4159 }
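/* [Editor's note -- illustrative sketch, not part of the driver source.]
 * With a hypothetical netdev named "eth0", the naming scheme above would
 * yield per-vector entries such as these in /proc/interrupts:
 *
 *	eth0-TxRx-0
 *	eth0-TxRx-1
 *	eth0-rx-0	(only when a vector carries Rx rings alone)
 *	eth0-tx-0	(only when a vector carries Tx rings alone)
 */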
4162 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
4163 * @vsi: the VSI being un-configured
4165 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
4166 {
4167 struct i40e_pf *pf = vsi->back;
4168 struct i40e_hw *hw = &pf->hw;
4169 int base = vsi->base_vector;
4170 int i;
4172 /* disable interrupt causation from each queue */
4173 for (i = 0; i < vsi->num_queue_pairs; i++) {
4174 u32 val;
4176 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
4177 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
4178 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
4180 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
4181 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
4182 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
4184 if (!i40e_enabled_xdp_vsi(vsi))
4185 continue;
4186 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
4187 }
4189 /* disable each interrupt */
4190 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4191 for (i = vsi->base_vector;
4192 i < (vsi->num_q_vectors + vsi->base_vector); i++)
4193 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
4195 i40e_flush(hw);
4196 for (i = 0; i < vsi->num_q_vectors; i++)
4197 synchronize_irq(pf->msix_entries[i + base].vector);
4198 } else {
4199 /* Legacy and MSI mode - this stops all interrupt handling */
4200 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
4201 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
4202 i40e_flush(hw);
4203 synchronize_irq(pf->pdev->irq);
4204 }
4205 }
4208 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4209 * @vsi: the VSI being configured
4211 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
4212 {
4213 struct i40e_pf *pf = vsi->back;
4214 int i;
4216 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4217 for (i = 0; i < vsi->num_q_vectors; i++)
4218 i40e_irq_dynamic_enable(vsi, i);
4219 } else {
4220 i40e_irq_dynamic_enable_icr0(pf);
4221 }
4223 i40e_flush(&pf->hw);
4224 return 0;
4225 }
4228 * i40e_free_misc_vector - Free the vector that handles non-queue events
4229 * @pf: board private structure
4231 static void i40e_free_misc_vector(struct i40e_pf *pf)
4232 {
4233 /* Disable ICR 0 */
4234 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
4235 i40e_flush(&pf->hw);
4237 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4238 free_irq(pf->msix_entries[0].vector, pf);
4239 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
4240 }
4241 }
4244 * i40e_intr - MSI/Legacy and non-queue interrupt handler
4245 * @irq: interrupt number
4246 * @data: pointer to a q_vector
4248 * This is the handler used for all MSI/Legacy interrupts, and deals
4249 * with both queue and non-queue interrupts. This is also used in
4250 * MSIX mode to handle the non-queue interrupts.
4252 static irqreturn_t i40e_intr(int irq, void *data)
4253 {
4254 struct i40e_pf *pf = (struct i40e_pf *)data;
4255 struct i40e_hw *hw = &pf->hw;
4256 irqreturn_t ret = IRQ_NONE;
4257 u32 icr0, icr0_remaining;
4258 u32 val, ena_mask;
4260 icr0 = rd32(hw, I40E_PFINT_ICR0);
4261 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
4263 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
4264 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4265 goto enable_intr;
4267 /* if interrupt but no bits showing, must be SWINT */
4268 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4269 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4272 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
4273 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4274 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4275 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4276 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4277 }
4279 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
4280 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4281 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4282 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4284 /* We do not have a way to disarm Queue causes while leaving
4285 * interrupt enabled for all other causes, ideally
4286 * interrupt should be disabled while we are in NAPI but
4287 * this is not a performance path and napi_schedule()
4288 * can deal with rescheduling.
4289 */
4290 if (!test_bit(__I40E_DOWN, pf->state))
4291 napi_schedule_irqoff(&q_vector->napi);
4292 }
4294 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4295 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4296 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4297 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4298 }
4300 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4301 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4302 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4303 }
4305 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4306 /* disable any further VFLR event notifications */
4307 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4308 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4310 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4311 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4312 } else {
4313 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4314 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4315 }
4316 }
4318 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4319 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4320 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4321 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4322 val = rd32(hw, I40E_GLGEN_RSTAT);
4323 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4324 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4325 if (val == I40E_RESET_CORER) {
4326 pf->corer_count++;
4327 } else if (val == I40E_RESET_GLOBR) {
4328 pf->globr_count++;
4329 } else if (val == I40E_RESET_EMPR) {
4330 pf->empr_count++;
4331 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4332 }
4333 }
4335 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4336 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4337 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4338 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4339 rd32(hw, I40E_PFHMC_ERRORINFO),
4340 rd32(hw, I40E_PFHMC_ERRORDATA));
4341 }
4343 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4344 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4346 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
4347 schedule_work(&pf->ptp_extts0_work);
4349 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
4350 i40e_ptp_tx_hwtstamp(pf);
4352 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4355 /* If a critical error is pending we have no choice but to reset the
4356 * device.
4357 * Report and mask out any remaining unexpected interrupts.
4358 */
4359 icr0_remaining = icr0 & ena_mask;
4360 if (icr0_remaining) {
4361 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4362 icr0_remaining);
4363 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4364 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4365 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4366 dev_info(&pf->pdev->dev, "device will be reset\n");
4367 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4368 i40e_service_event_schedule(pf);
4369 }
4370 ena_mask &= ~icr0_remaining;
4375 /* re-enable interrupt causes */
4376 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4377 if (!test_bit(__I40E_DOWN, pf->state) ||
4378 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4379 i40e_service_event_schedule(pf);
4380 i40e_irq_dynamic_enable_icr0(pf);
4381 }
4383 return ret;
4384 }
4387 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4388 * @tx_ring: tx ring to clean
4389 * @budget: how many cleans we're allowed
4391 * Returns true if there's any budget left (e.g. the clean is finished)
4393 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4394 {
4395 struct i40e_vsi *vsi = tx_ring->vsi;
4396 u16 i = tx_ring->next_to_clean;
4397 struct i40e_tx_buffer *tx_buf;
4398 struct i40e_tx_desc *tx_desc;
4400 tx_buf = &tx_ring->tx_bi[i];
4401 tx_desc = I40E_TX_DESC(tx_ring, i);
4402 i -= tx_ring->count;
4405 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4407 /* if next_to_watch is not set then there is no work pending */
4408 if (!eop_desc)
4409 break;
4411 /* prevent any other reads prior to eop_desc */
4412 smp_rmb();
4414 /* if the descriptor isn't done, no work yet to do */
4415 if (!(eop_desc->cmd_type_offset_bsz &
4416 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4417 break;
4419 /* clear next_to_watch to prevent false hangs */
4420 tx_buf->next_to_watch = NULL;
4422 tx_desc->buffer_addr = 0;
4423 tx_desc->cmd_type_offset_bsz = 0;
4424 /* move past filter desc */
4425 tx_buf++;
4426 tx_desc++;
4427 i++;
4428 if (unlikely(!i)) {
4429 i -= tx_ring->count;
4430 tx_buf = tx_ring->tx_bi;
4431 tx_desc = I40E_TX_DESC(tx_ring, 0);
4432 }
4433 /* unmap skb header data */
4434 dma_unmap_single(tx_ring->dev,
4435 dma_unmap_addr(tx_buf, dma),
4436 dma_unmap_len(tx_buf, len),
4437 DMA_TO_DEVICE);
4438 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4439 kfree(tx_buf->raw_buf);
4441 tx_buf->raw_buf = NULL;
4442 tx_buf->tx_flags = 0;
4443 tx_buf->next_to_watch = NULL;
4444 dma_unmap_len_set(tx_buf, len, 0);
4445 tx_desc->buffer_addr = 0;
4446 tx_desc->cmd_type_offset_bsz = 0;
4448 /* move us past the eop_desc for start of next FD desc */
4449 tx_buf++;
4450 tx_desc++;
4451 i++;
4452 if (unlikely(!i)) {
4453 i -= tx_ring->count;
4454 tx_buf = tx_ring->tx_bi;
4455 tx_desc = I40E_TX_DESC(tx_ring, 0);
4456 }
4458 /* update budget accounting */
4459 budget--;
4460 } while (likely(budget));
4462 i += tx_ring->count;
4463 tx_ring->next_to_clean = i;
4465 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4466 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4472 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4473 * @irq: interrupt number
4474 * @data: pointer to a q_vector
4476 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4477 {
4478 struct i40e_q_vector *q_vector = data;
4479 struct i40e_vsi *vsi;
4481 if (!q_vector->tx.ring)
4484 vsi = q_vector->tx.ring->vsi;
4485 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4487 return IRQ_HANDLED;
4488 }
4491 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4492 * @vsi: the VSI being configured
4493 * @v_idx: vector index
4494 * @qp_idx: queue pair index
4496 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4497 {
4498 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4499 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4500 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4502 tx_ring->q_vector = q_vector;
4503 tx_ring->next = q_vector->tx.ring;
4504 q_vector->tx.ring = tx_ring;
4505 q_vector->tx.count++;
4507 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4508 if (i40e_enabled_xdp_vsi(vsi)) {
4509 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4511 xdp_ring->q_vector = q_vector;
4512 xdp_ring->next = q_vector->tx.ring;
4513 q_vector->tx.ring = xdp_ring;
4514 q_vector->tx.count++;
4517 rx_ring->q_vector = q_vector;
4518 rx_ring->next = q_vector->rx.ring;
4519 q_vector->rx.ring = rx_ring;
4520 q_vector->rx.count++;
4524 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4525 * @vsi: the VSI being configured
4527 * This function maps descriptor rings to the queue-specific vectors
4528 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4529 * one vector per queue pair, but on a constrained vector budget, we
4530 * group the queue pairs as "efficiently" as possible.
4532 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4533 {
4534 int qp_remaining = vsi->num_queue_pairs;
4535 int q_vectors = vsi->num_q_vectors;
4536 int num_ringpairs;
4537 int v_start = 0;
4538 int qp_idx = 0;
4540 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4541 * group them so there are multiple queues per vector.
4542 * It is also important to go through all the vectors available to be
4543 * sure that if we don't use all the vectors, that the remaining vectors
4544 * are cleared. This is especially important when decreasing the
4545 * number of queues in use.
4546 */
4547 for (; v_start < q_vectors; v_start++) {
4548 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4550 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4552 q_vector->num_ringpairs = num_ringpairs;
4553 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4555 q_vector->rx.count = 0;
4556 q_vector->tx.count = 0;
4557 q_vector->rx.ring = NULL;
4558 q_vector->tx.ring = NULL;
4560 while (num_ringpairs--) {
4561 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4562 qp_idx++;
4563 qp_remaining--;
4564 }
4565 }
4566 }
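/* [Editor's note -- illustrative sketch, not part of the driver source.]
 * The DIV_ROUND_UP() above spreads leftover queue pairs across the
 * remaining vectors. For a hypothetical VSI with 10 queue pairs and
 * 4 vectors, the loop assigns:
 *
 *	v_start 0: DIV_ROUND_UP(10, 4) = 3 ringpairs (7 remain)
 *	v_start 1: DIV_ROUND_UP(7, 3)  = 3 ringpairs (4 remain)
 *	v_start 2: DIV_ROUND_UP(4, 2)  = 2 ringpairs (2 remain)
 *	v_start 3: DIV_ROUND_UP(2, 1)  = 2 ringpairs (0 remain)
 */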
4569 * i40e_vsi_request_irq - Request IRQ from the OS
4570 * @vsi: the VSI being configured
4571 * @basename: name for the vector
4573 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4574 {
4575 struct i40e_pf *pf = vsi->back;
4576 int err;
4578 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4579 err = i40e_vsi_request_irq_msix(vsi, basename);
4580 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4581 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4584 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4588 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4590 return err;
4591 }
4593 #ifdef CONFIG_NET_POLL_CONTROLLER
4595 * i40e_netpoll - A Polling 'interrupt' handler
4596 * @netdev: network interface device structure
4598 * This is used by netconsole to send skbs without having to re-enable
4599 * interrupts. It's not called while the normal interrupt routine is executing.
4601 static void i40e_netpoll(struct net_device *netdev)
4602 {
4603 struct i40e_netdev_priv *np = netdev_priv(netdev);
4604 struct i40e_vsi *vsi = np->vsi;
4605 struct i40e_pf *pf = vsi->back;
4608 /* if interface is down do nothing */
4609 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4610 return;
4612 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4613 for (i = 0; i < vsi->num_q_vectors; i++)
4614 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4615 } else {
4616 i40e_intr(pf->pdev->irq, netdev);
4621 #define I40E_QTX_ENA_WAIT_COUNT 50
4624 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4625 * @pf: the PF being configured
4626 * @pf_q: the PF queue
4627 * @enable: enable or disable state of the queue
4629 * This routine will wait for the given Tx queue of the PF to reach the
4630 * enabled or disabled state.
4631 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4632 * multiple retries; else will return 0 in case of success.
4634 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4635 {
4636 int i;
4637 u32 tx_reg;
4639 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4640 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4641 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4642 break;
4644 usleep_range(10, 20);
4645 }
4646 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4653 * i40e_control_tx_q - Start or stop a particular Tx queue
4654 * @pf: the PF structure
4655 * @pf_q: the PF queue to configure
4656 * @enable: start or stop the queue
4658 * This function enables or disables a single queue. Note that any delay
4659 * required after the operation is expected to be handled by the caller of
4660 * this function.
4662 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4663 {
4664 struct i40e_hw *hw = &pf->hw;
4665 u32 tx_reg;
4666 int i;
4668 /* warn the TX unit of coming changes */
4669 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4670 if (!enable)
4671 usleep_range(10, 20);
4673 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4674 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4675 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4676 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4678 usleep_range(1000, 2000);
4679 }
4681 /* Skip if the queue is already in the requested state */
4682 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4683 return;
4685 /* turn on/off the queue */
4686 if (enable) {
4687 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4688 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4689 } else {
4690 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4693 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4694 }
4697 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4698 * @seid: VSI SEID
4699 * @pf: the PF structure
4700 * @pf_q: the PF queue to configure
4701 * @is_xdp: true if the queue is used for XDP
4702 * @enable: start or stop the queue
4704 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4705 bool is_xdp, bool enable)
4706 {
4707 int ret;
4709 i40e_control_tx_q(pf, pf_q, enable);
4711 /* wait for the change to finish */
4712 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4713 if (ret) {
4714 dev_info(&pf->pdev->dev,
4715 "VSI seid %d %sTx ring %d %sable timeout\n",
4716 seid, (is_xdp ? "XDP " : ""), pf_q,
4717 (enable ? "en" : "dis"));
4718 }
4720 return ret;
4721 }
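/* [Editor's note -- illustrative sketch, not part of the driver source.]
 * A caller enabling a queue pair and its XDP sibling follows the same
 * pattern as i40e_vsi_enable_tx() below; the values here are
 * hypothetical:
 */
#if 0
	ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
				     false /* is_xdp */, true /* enable */);
	if (!ret && i40e_enabled_xdp_vsi(vsi))
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q + vsi->alloc_queue_pairs,
					     true /* is_xdp */, true);
#endif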
4724 * i40e_vsi_enable_tx - Start a VSI's rings
4725 * @vsi: the VSI being configured
4727 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4728 {
4729 struct i40e_pf *pf = vsi->back;
4730 int i, pf_q, ret = 0;
4732 pf_q = vsi->base_queue;
4733 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4734 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4735 pf_q,
4736 false /*is xdp*/, true);
4737 if (ret)
4738 break;
4740 if (!i40e_enabled_xdp_vsi(vsi))
4741 continue;
4743 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4744 pf_q + vsi->alloc_queue_pairs,
4745 true /*is xdp*/, true);
4746 if (ret)
4747 break;
4748 }
4749 return ret;
4750 }
4753 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4754 * @pf: the PF being configured
4755 * @pf_q: the PF queue
4756 * @enable: enable or disable state of the queue
4758 * This routine will wait for the given Rx queue of the PF to reach the
4759 * enabled or disabled state.
4760 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4761 * multiple retries; else will return 0 in case of success.
4763 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4764 {
4765 int i;
4766 u32 rx_reg;
4768 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4769 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4770 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4771 break;
4773 usleep_range(10, 20);
4774 }
4775 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4782 * i40e_control_rx_q - Start or stop a particular Rx queue
4783 * @pf: the PF structure
4784 * @pf_q: the PF queue to configure
4785 * @enable: start or stop the queue
4787 * This function enables or disables a single queue. Note that
4788 * any delay required after the operation is expected to be
4789 * handled by the caller of this function.
4791 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4792 {
4793 struct i40e_hw *hw = &pf->hw;
4794 u32 rx_reg;
4795 int i;
4797 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4798 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4799 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4800 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4802 usleep_range(1000, 2000);
4803 }
4805 /* Skip if the queue is already in the requested state */
4806 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4807 return;
4809 /* turn on/off the queue */
4810 if (enable)
4811 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4812 else
4813 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4815 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4816 }
4819 * i40e_control_wait_rx_q
4820 * @pf: the PF structure
4821 * @pf_q: queue being configured
4822 * @enable: start or stop the rings
4824 * This function enables or disables a single queue along with waiting
4825 * for the change to finish. The caller of this function should handle
4826 * the delays needed in the case of disabling queues.
4828 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4829 {
4830 int ret = 0;
4832 i40e_control_rx_q(pf, pf_q, enable);
4834 /* wait for the change to finish */
4835 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4836 if (ret)
4837 return ret;
4839 return ret;
4840 }
4843 * i40e_vsi_enable_rx - Start a VSI's rings
4844 * @vsi: the VSI being configured
4846 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4847 {
4848 struct i40e_pf *pf = vsi->back;
4849 int i, pf_q, ret = 0;
4851 pf_q = vsi->base_queue;
4852 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4853 ret = i40e_control_wait_rx_q(pf, pf_q, true);
4854 if (ret) {
4855 dev_info(&pf->pdev->dev,
4856 "VSI seid %d Rx ring %d enable timeout\n",
4866 * i40e_vsi_start_rings - Start a VSI's rings
4867 * @vsi: the VSI being configured
4869 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4870 {
4871 int ret = 0;
4873 /* do rx first for enable and last for disable */
4874 ret = i40e_vsi_enable_rx(vsi);
4875 if (ret)
4876 return ret;
4877 ret = i40e_vsi_enable_tx(vsi);
4879 return ret;
4880 }
4882 #define I40E_DISABLE_TX_GAP_MSEC 50
4885 * i40e_vsi_stop_rings - Stop a VSI's rings
4886 * @vsi: the VSI being configured
4888 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4889 {
4890 struct i40e_pf *pf = vsi->back;
4891 int pf_q, err, q_end;
4893 /* When port TX is suspended, don't wait */
4894 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4895 return i40e_vsi_stop_rings_no_wait(vsi);
4897 q_end = vsi->base_queue + vsi->num_queue_pairs;
4898 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4899 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4901 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4902 err = i40e_control_wait_rx_q(pf, pf_q, false);
4903 if (err)
4904 dev_info(&pf->pdev->dev,
4905 "VSI seid %d Rx ring %d disable timeout\n",
4909 msleep(I40E_DISABLE_TX_GAP_MSEC);
4910 pf_q = vsi->base_queue;
4911 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4912 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4914 i40e_vsi_wait_queues_disabled(vsi);
4915 }
4918 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4919 * @vsi: the VSI being shutdown
4921 * This function stops all the rings for a VSI but does not delay to verify
4922 * that rings have been disabled. It is expected that the caller is shutting
4923 * down multiple VSIs at once and will delay together for all the VSIs after
4924 * initiating the shutdown. This is particularly useful for shutting down lots
4925 * of VFs together. Otherwise, a large delay can be incurred while configuring
4926 * each VSI in serial.
4928 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4929 {
4930 struct i40e_pf *pf = vsi->back;
4931 int i, pf_q;
4933 pf_q = vsi->base_queue;
4934 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4935 i40e_control_tx_q(pf, pf_q, false);
4936 i40e_control_rx_q(pf, pf_q, false);
4937 }
4938 }
4941 * i40e_vsi_free_irq - Free the irq association with the OS
4942 * @vsi: the VSI being configured
4944 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4945 {
4946 struct i40e_pf *pf = vsi->back;
4947 struct i40e_hw *hw = &pf->hw;
4948 int base = vsi->base_vector;
4949 u32 val, qp;
4950 int i;
4952 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4953 if (!vsi->q_vectors)
4956 if (!vsi->irqs_ready)
4959 vsi->irqs_ready = false;
4960 for (i = 0; i < vsi->num_q_vectors; i++) {
4965 irq_num = pf->msix_entries[vector].vector;
4967 /* free only the irqs that were actually requested */
4968 if (!vsi->q_vectors[i] ||
4969 !vsi->q_vectors[i]->num_ringpairs)
4972 /* clear the affinity notifier in the IRQ descriptor */
4973 irq_set_affinity_notifier(irq_num, NULL);
4974 /* remove our suggested affinity mask for this IRQ */
4975 irq_update_affinity_hint(irq_num, NULL);
4976 free_irq(irq_num, vsi->q_vectors[i]);
4978 /* Tear down the interrupt queue link list
4980 * We know that they come in pairs and always
4981 * the Rx first, then the Tx. To clear the
4982 * link list, stick the EOL value into the
4983 * next_q field of the registers.
4984 */
4985 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4986 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4987 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4988 val |= I40E_QUEUE_END_OF_LIST
4989 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4990 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4992 while (qp != I40E_QUEUE_END_OF_LIST) {
4993 u32 next;
4995 val = rd32(hw, I40E_QINT_RQCTL(qp));
4997 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4998 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4999 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5000 I40E_QINT_RQCTL_INTEVENT_MASK);
5002 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5003 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5005 wr32(hw, I40E_QINT_RQCTL(qp), val);
5007 val = rd32(hw, I40E_QINT_TQCTL(qp));
5009 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
5010 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
5012 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5013 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5014 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5015 I40E_QINT_TQCTL_INTEVENT_MASK);
5017 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5018 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5020 wr32(hw, I40E_QINT_TQCTL(qp), val);
5021 qp = next;
5022 }
5023 }
5024 } else {
5025 free_irq(pf->pdev->irq, pf);
5027 val = rd32(hw, I40E_PFINT_LNKLST0);
5028 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
5029 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
5030 val |= I40E_QUEUE_END_OF_LIST
5031 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5032 wr32(hw, I40E_PFINT_LNKLST0, val);
5034 val = rd32(hw, I40E_QINT_RQCTL(qp));
5035 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5036 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5037 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5038 I40E_QINT_RQCTL_INTEVENT_MASK);
5040 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5041 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5043 wr32(hw, I40E_QINT_RQCTL(qp), val);
5045 val = rd32(hw, I40E_QINT_TQCTL(qp));
5047 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5048 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5049 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5050 I40E_QINT_TQCTL_INTEVENT_MASK);
5052 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5053 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5055 wr32(hw, I40E_QINT_TQCTL(qp), val);
5056 }
5057 }
5060 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
5061 * @vsi: the VSI being configured
5062 * @v_idx: Index of vector to be freed
5064 * This function frees the memory allocated to the q_vector. In addition if
5065 * NAPI is enabled it will delete any references to the NAPI struct prior
5066 * to freeing the q_vector.
5068 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
5069 {
5070 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
5071 struct i40e_ring *ring;
5073 if (!q_vector)
5074 return;
5076 /* disassociate q_vector from rings */
5077 i40e_for_each_ring(ring, q_vector->tx)
5078 ring->q_vector = NULL;
5080 i40e_for_each_ring(ring, q_vector->rx)
5081 ring->q_vector = NULL;
5083 /* only VSI w/ an associated netdev is set up w/ NAPI */
5084 if (vsi->netdev)
5085 netif_napi_del(&q_vector->napi);
5087 vsi->q_vectors[v_idx] = NULL;
5089 kfree_rcu(q_vector, rcu);
5090 }
5093 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
5094 * @vsi: the VSI being un-configured
5096 * This frees the memory allocated to the q_vectors and
5097 * deletes references to the NAPI struct.
5099 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
5100 {
5101 int v_idx;
5103 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
5104 i40e_free_q_vector(vsi, v_idx);
5105 }
5108 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
5109 * @pf: board private structure
5111 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
5112 {
5113 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
5114 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5115 pci_disable_msix(pf->pdev);
5116 kfree(pf->msix_entries);
5117 pf->msix_entries = NULL;
5118 kfree(pf->irq_pile);
5119 pf->irq_pile = NULL;
5120 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
5121 pci_disable_msi(pf->pdev);
5123 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
5127 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
5128 * @pf: board private structure
5130 * We go through and clear interrupt specific resources and reset the structure
5131 * to pre-load conditions
5133 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
5134 {
5135 int i;
5137 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
5138 i40e_free_misc_vector(pf);
5140 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
5141 I40E_IWARP_IRQ_PILE_ID);
5143 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
5144 for (i = 0; i < pf->num_alloc_vsi; i++)
5146 i40e_vsi_free_q_vectors(pf->vsi[i]);
5147 i40e_reset_interrupt_capability(pf);
5148 }
5151 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5152 * @vsi: the VSI being configured
5154 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
5155 {
5156 int q_idx;
5158 if (!vsi->netdev)
5159 return;
5161 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5162 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5164 if (q_vector->rx.ring || q_vector->tx.ring)
5165 napi_enable(&q_vector->napi);
5170 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5171 * @vsi: the VSI being configured
5173 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
5174 {
5175 int q_idx;
5177 if (!vsi->netdev)
5178 return;
5180 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5181 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5183 if (q_vector->rx.ring || q_vector->tx.ring)
5184 napi_disable(&q_vector->napi);
5189 * i40e_vsi_close - Shut down a VSI
5190 * @vsi: the vsi to be quelled
5192 static void i40e_vsi_close(struct i40e_vsi *vsi)
5193 {
5194 struct i40e_pf *pf = vsi->back;
5195 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
5196 i40e_down(vsi);
5197 i40e_vsi_free_irq(vsi);
5198 i40e_vsi_free_tx_resources(vsi);
5199 i40e_vsi_free_rx_resources(vsi);
5200 vsi->current_netdev_flags = 0;
5201 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
5202 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5203 set_bit(__I40E_CLIENT_RESET, pf->state);
5204 }
5207 * i40e_quiesce_vsi - Pause a given VSI
5208 * @vsi: the VSI being paused
5210 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
5211 {
5212 if (test_bit(__I40E_VSI_DOWN, vsi->state))
5213 return;
5215 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
5216 if (vsi->netdev && netif_running(vsi->netdev))
5217 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5219 i40e_vsi_close(vsi);
5220 }
5223 * i40e_unquiesce_vsi - Resume a given VSI
5224 * @vsi: the VSI being resumed
5226 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
5227 {
5228 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
5229 return;
5231 if (vsi->netdev && netif_running(vsi->netdev))
5232 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5234 i40e_vsi_open(vsi); /* this clears the DOWN bit */
5235 }
5238 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5239 * @pf: the PF
5241 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
5242 {
5243 int v;
5245 for (v = 0; v < pf->num_alloc_vsi; v++) {
5246 if (pf->vsi[v])
5247 i40e_quiesce_vsi(pf->vsi[v]);
5252 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5253 * @pf: the PF
5255 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
5256 {
5257 int v;
5259 for (v = 0; v < pf->num_alloc_vsi; v++) {
5260 if (pf->vsi[v])
5261 i40e_unquiesce_vsi(pf->vsi[v]);
5266 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5267 * @vsi: the VSI being configured
5269 * Wait until all queues on a given VSI have been disabled.
5271 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
5272 {
5273 struct i40e_pf *pf = vsi->back;
5274 int i, pf_q, ret;
5276 pf_q = vsi->base_queue;
5277 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5278 /* Check and wait for the Tx queue */
5279 ret = i40e_pf_txq_wait(pf, pf_q, false);
5280 if (ret) {
5281 dev_info(&pf->pdev->dev,
5282 "VSI seid %d Tx ring %d disable timeout\n",
5287 if (!i40e_enabled_xdp_vsi(vsi))
5288 goto wait_rx;
5290 /* Check and wait for the XDP Tx queue */
5291 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5294 dev_info(&pf->pdev->dev,
5295 "VSI seid %d XDP Tx ring %d disable timeout\n",
5300 /* Check and wait for the Rx queue */
5301 ret = i40e_pf_rxq_wait(pf, pf_q, false);
5302 if (ret) {
5303 dev_info(&pf->pdev->dev,
5304 "VSI seid %d Rx ring %d disable timeout\n",
5313 #ifdef CONFIG_I40E_DCB
5315 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5316 * @pf: the PF
5318 * This function waits for the queues to be in disabled state for all the
5319 * VSIs that are managed by this PF.
5321 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5322 {
5323 int v, ret = 0;
5325 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5326 if (pf->vsi[v]) {
5327 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5328 if (ret)
5329 break;
5330 }
5331 }
5333 return ret;
5334 }
5336 #endif
5339 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5340 * @pf: pointer to PF
5342 * Get TC map for ISCSI PF type that will include iSCSI TC
5343 * and LAN TC.
5345 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5346 {
5347 struct i40e_dcb_app_priority_table app;
5348 struct i40e_hw *hw = &pf->hw;
5349 u8 enabled_tc = 1; /* TC0 is always enabled */
5350 u8 tc, i;
5351 /* Get the iSCSI APP TLV */
5352 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5354 for (i = 0; i < dcbcfg->numapps; i++) {
5355 app = dcbcfg->app[i];
5356 if (app.selector == I40E_APP_SEL_TCPIP &&
5357 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5358 tc = dcbcfg->etscfg.prioritytable[app.priority];
5359 enabled_tc |= BIT(tc);
5360 break;
5361 }
5362 }
5364 return enabled_tc;
5365 }
5368 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5369 * @dcbcfg: the corresponding DCBx configuration structure
5371 * Return the number of TCs from given DCBx configuration
5373 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5374 {
5375 int i, tc_unused = 0;
5376 u8 num_tc = 0;
5377 u8 ret = 0;
5379 /* Scan the ETS Config Priority Table to find
5380 * traffic class enabled for a given priority
5381 * and create a bitmask of enabled TCs
5382 */
5383 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5384 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5386 /* Now scan the bitmask to check for
5387 * contiguous TCs starting with TC0
5388 */
5389 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5390 if (num_tc & BIT(i)) {
5394 pr_err("Non-contiguous TC - Disabling DCB\n");
5395 return 1;
5396 }
5397 } else {
5398 tc_unused = 1;
5399 }
5400 }
5402 /* There is always at least TC0 */
5403 if (!ret)
5404 ret = 1;
5406 return ret;
5407 }
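/* [Editor's note -- illustrative sketch, not part of the driver source.]
 * Worked example for the scan above: a hypothetical ETS priority table
 * of {0, 1, 0, 0, 0, 0, 0, 1} sets num_tc to BIT(0) | BIT(1) = 0x3,
 * which the contiguity scan then counts as 2 traffic classes. A table
 * mapping some priority to TC3 without TC2 ever appearing would trip
 * the "Non-contiguous TC" error path instead.
 */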
5410 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5411 * @dcbcfg: the corresponding DCBx configuration structure
5413 * Query the current DCB configuration and return the number of
5414 * traffic classes enabled from the given DCBX config
5416 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5417 {
5418 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5419 u8 enabled_tc = 1;
5420 u8 i;
5422 for (i = 0; i < num_tc; i++)
5423 enabled_tc |= BIT(i);
5425 return enabled_tc;
5426 }
5429 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5430 * @pf: PF being queried
5432 * Query the current MQPRIO configuration and return the number of
5433 * traffic classes enabled.
5435 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5436 {
5437 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5438 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5439 u8 enabled_tc = 1, i;
5441 for (i = 1; i < num_tc; i++)
5442 enabled_tc |= BIT(i);
5443 return enabled_tc;
5444 }
5447 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5448 * @pf: PF being queried
5450 * Return number of traffic classes enabled for the given PF
5452 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5453 {
5454 struct i40e_hw *hw = &pf->hw;
5455 u8 i, enabled_tc = 1;
5456 u8 num_tc = 0;
5457 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5459 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5460 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5462 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5463 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5466 /* SFP mode will be enabled for all TCs on port */
5467 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5468 return i40e_dcb_get_num_tc(dcbcfg);
5470 /* MFP mode return count of enabled TCs for this PF */
5471 if (pf->hw.func_caps.iscsi)
5472 enabled_tc = i40e_get_iscsi_tc_map(pf);
5473 else
5474 return 1; /* Only TC0 */
5476 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5477 if (enabled_tc & BIT(i))
5484 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5485 * @pf: PF being queried
5487 * Return a bitmap for enabled traffic classes for this PF.
5489 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5490 {
5491 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5492 return i40e_mqprio_get_enabled_tc(pf);
5494 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5495 * default TC
5496 */
5497 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5498 return I40E_DEFAULT_TRAFFIC_CLASS;
5500 /* SFP mode we want PF to be enabled for all TCs */
5501 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5502 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5504 /* MFP enabled and iSCSI PF type */
5505 if (pf->hw.func_caps.iscsi)
5506 return i40e_get_iscsi_tc_map(pf);
5508 return I40E_DEFAULT_TRAFFIC_CLASS;
5509 }
5512 * i40e_vsi_get_bw_info - Query VSI BW Information
5513 * @vsi: the VSI being queried
5515 * Returns 0 on success, negative value on failure
5517 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5518 {
5519 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5520 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5521 struct i40e_pf *pf = vsi->back;
5522 struct i40e_hw *hw = &pf->hw;
5523 i40e_status ret;
5524 u32 tc_bw_max;
5525 int i;
5527 /* Get the VSI level BW configuration */
5528 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5529 if (ret) {
5530 dev_info(&pf->pdev->dev,
5531 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5532 i40e_stat_str(&pf->hw, ret),
5533 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5534 return -EINVAL;
5535 }
5537 /* Get the VSI level BW configuration per TC */
5538 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5539 NULL);
5540 if (ret) {
5541 dev_info(&pf->pdev->dev,
5542 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5543 i40e_stat_str(&pf->hw, ret),
5544 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5545 return -EINVAL;
5546 }
5548 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5549 dev_info(&pf->pdev->dev,
5550 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5551 bw_config.tc_valid_bits,
5552 bw_ets_config.tc_valid_bits);
5553 /* Still continuing */
5554 }
5556 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5557 vsi->bw_max_quanta = bw_config.max_bw;
5558 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5559 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5560 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5561 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5562 vsi->bw_ets_limit_credits[i] =
5563 le16_to_cpu(bw_ets_config.credits[i]);
5564 /* 3 bits out of 4 for each TC */
5565 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5572 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5573 * @vsi: the VSI being configured
5574 * @enabled_tc: TC bitmap
5575 * @bw_share: BW shared credits per TC
5577 * Returns 0 on success, negative value on failure
5579 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5580 u8 *bw_share)
5581 {
5582 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5583 struct i40e_pf *pf = vsi->back;
5584 i40e_status ret;
5585 int i;
5587 /* There is no need to reset BW when mqprio mode is on. */
5588 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5590 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5591 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5593 dev_info(&pf->pdev->dev,
5594 "Failed to reset tx rate for vsi->seid %u\n",
5598 memset(&bw_data, 0, sizeof(bw_data));
5599 bw_data.tc_valid_bits = enabled_tc;
5600 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5601 bw_data.tc_bw_credits[i] = bw_share[i];
5603 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5604 if (ret) {
5605 dev_info(&pf->pdev->dev,
5606 "AQ command Config VSI BW allocation per TC failed = %d\n",
5607 pf->hw.aq.asq_last_status);
5611 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5612 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5618 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5619 * @vsi: the VSI being configured
5620 * @enabled_tc: TC map to be enabled
5623 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5624 {
5625 struct net_device *netdev = vsi->netdev;
5626 struct i40e_pf *pf = vsi->back;
5627 struct i40e_hw *hw = &pf->hw;
5628 u8 netdev_tc = 0;
5629 int i;
5630 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5632 if (!netdev)
5633 return;
5635 if (!enabled_tc) {
5636 netdev_reset_tc(netdev);
5637 return;
5638 }
5640 /* Set up actual enabled TCs on the VSI */
5641 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5642 return;
5644 /* set per TC queues for the VSI */
5645 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5646 /* Only set TC queues for enabled tcs
5648 * e.g. For a VSI that has TC0 and TC3 enabled the
5649 * enabled_tc bitmap would be 0x00001001; the driver
5650 * will set the numtc for netdev as 2 that will be
5651 * referenced by the netdev layer as TC 0 and 1.
5652 */
5653 if (vsi->tc_config.enabled_tc & BIT(i))
5654 netdev_set_tc_queue(netdev,
5655 vsi->tc_config.tc_info[i].netdev_tc,
5656 vsi->tc_config.tc_info[i].qcount,
5657 vsi->tc_config.tc_info[i].qoffset);
5660 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5663 /* Assign UP2TC map for the VSI */
5664 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5665 /* Get the actual TC# for the UP */
5666 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5667 /* Get the mapped netdev TC# for the UP */
5668 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5669 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5670 }
5671 }
5674 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5675 * @vsi: the VSI being configured
5676 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5678 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5679 struct i40e_vsi_context *ctxt)
5680 {
5681 /* copy just the sections touched not the entire info
5682 * since not all sections are valid as returned by
5683 * update vsi params
5684 */
5685 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5686 memcpy(&vsi->info.queue_mapping,
5687 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5688 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5689 sizeof(vsi->info.tc_mapping));
5693 * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5694 * @vsi: the VSI being reconfigured
5695 * @vsi_offset: offset from main VF VSI
5697 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
5698 {
5699 struct i40e_vsi_context ctxt = {};
5700 struct i40e_pf *pf;
5701 struct i40e_hw *hw;
5702 int ret;
5704 if (!vsi)
5705 return I40E_ERR_PARAM;
5706 pf = vsi->back;
5707 hw = &pf->hw;
5709 ctxt.seid = vsi->seid;
5710 ctxt.pf_num = hw->pf_id;
5711 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
5712 ctxt.uplink_seid = vsi->uplink_seid;
5713 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5714 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5715 ctxt.info = vsi->info;
5717 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
5718 false);
5719 if (vsi->reconfig_rss) {
5720 vsi->rss_size = min_t(int, pf->alloc_rss_size,
5721 vsi->num_queue_pairs);
5722 ret = i40e_vsi_config_rss(vsi);
5723 if (ret) {
5724 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
5725 return ret;
5726 }
5727 vsi->reconfig_rss = false;
5730 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5731 if (ret) {
5732 dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
5733 i40e_stat_str(hw, ret),
5734 i40e_aq_str(hw, hw->aq.asq_last_status));
5735 return ret;
5736 }
5737 /* update the local VSI info with updated queue map */
5738 i40e_vsi_update_queue_map(vsi, &ctxt);
5739 vsi->info.valid_sections = 0;
5745 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5746 * @vsi: VSI to be configured
5747 * @enabled_tc: TC bitmap
5749 * This configures a particular VSI for TCs that are mapped to the
5750 * given TC bitmap. It uses default bandwidth share for TCs across
5751 * VSIs to configure TC for a particular VSI.
5754 * It is expected that the VSI queues have been quiesced before calling
5755 * this function.
5757 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5758 {
5759 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5760 struct i40e_pf *pf = vsi->back;
5761 struct i40e_hw *hw = &pf->hw;
5762 struct i40e_vsi_context ctxt;
5763 int ret = 0;
5764 int i;
5766 /* Check if enabled_tc is same as existing or new TCs */
5767 if (vsi->tc_config.enabled_tc == enabled_tc &&
5768 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5771 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5772 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5773 if (enabled_tc & BIT(i))
5777 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5778 if (ret) {
5779 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5781 dev_info(&pf->pdev->dev,
5782 "Failed configuring TC map %d for VSI %d\n",
5783 enabled_tc, vsi->seid);
5784 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5785 &bw_config, NULL);
5786 if (ret) {
5787 dev_info(&pf->pdev->dev,
5788 "Failed querying vsi bw info, err %s aq_err %s\n",
5789 i40e_stat_str(hw, ret),
5790 i40e_aq_str(hw, hw->aq.asq_last_status));
5791 goto out;
5792 }
5793 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5794 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5796 if (!valid_tc)
5797 valid_tc = bw_config.tc_valid_bits;
5798 /* Always enable TC0, no matter what */
5799 valid_tc |= 1;
5800 dev_info(&pf->pdev->dev,
5801 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5802 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5803 enabled_tc = valid_tc;
5804 }
5806 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5807 if (ret) {
5808 dev_err(&pf->pdev->dev,
5809 "Unable to configure TC map %d for VSI %d\n",
5810 enabled_tc, vsi->seid);
5815 /* Update Queue Pairs Mapping for currently enabled UPs */
5816 ctxt.seid = vsi->seid;
5817 ctxt.pf_num = vsi->back->hw.pf_id;
5818 ctxt.vf_num = 0;
5819 ctxt.uplink_seid = vsi->uplink_seid;
5820 ctxt.info = vsi->info;
5821 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5822 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5823 if (ret)
5824 goto out;
5825 } else {
5826 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5827 }
5829 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5830 * queues changed.
5831 */
5832 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5833 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5834 vsi->num_queue_pairs);
5835 ret = i40e_vsi_config_rss(vsi);
5836 if (ret) {
5837 dev_info(&vsi->back->pdev->dev,
5838 "Failed to reconfig rss for num_queues\n");
5841 vsi->reconfig_rss = false;
5843 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5844 ctxt.info.valid_sections |=
5845 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5846 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5847 }
5849 /* Update the VSI after updating the VSI queue-mapping
5850 * information
5851 */
5852 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5853 if (ret) {
5854 dev_info(&pf->pdev->dev,
5855 "Update vsi tc config failed, err %s aq_err %s\n",
5856 i40e_stat_str(hw, ret),
5857 i40e_aq_str(hw, hw->aq.asq_last_status));
5858 goto out;
5859 }
5860 /* update the local VSI info with updated queue map */
5861 i40e_vsi_update_queue_map(vsi, &ctxt);
5862 vsi->info.valid_sections = 0;
5864 /* Update current VSI BW information */
5865 ret = i40e_vsi_get_bw_info(vsi);
5866 if (ret) {
5867 dev_info(&pf->pdev->dev,
5868 "Failed updating vsi bw info, err %s aq_err %s\n",
5869 i40e_stat_str(hw, ret),
5870 i40e_aq_str(hw, hw->aq.asq_last_status));
5871 goto out;
5872 }
5874 /* Update the netdev TC setup */
5875 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5876 out:
5877 return ret;
5878 }
5881 * i40e_get_link_speed - Returns link speed for the interface
5882 * @vsi: VSI to be configured
5885 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5886 {
5887 struct i40e_pf *pf = vsi->back;
5889 switch (pf->hw.phy.link_info.link_speed) {
5890 case I40E_LINK_SPEED_40GB:
5891 return 40000;
5892 case I40E_LINK_SPEED_25GB:
5893 return 25000;
5894 case I40E_LINK_SPEED_20GB:
5895 return 20000;
5896 case I40E_LINK_SPEED_10GB:
5897 return 10000;
5898 case I40E_LINK_SPEED_1GB:
5899 return 1000;
5900 default:
5901 return -EINVAL;
5902 }
5903 }
5906 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5907 * @vsi: VSI to be configured
5908 * @seid: seid of the channel/VSI
5909 * @max_tx_rate: max TX rate to be configured as BW limit
5911 * Helper function to set BW limit for a given VSI
5913 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5914 {
5915 struct i40e_pf *pf = vsi->back;
5916 u64 credits = 0;
5917 int speed = 0;
5918 int ret = 0;
5920 speed = i40e_get_link_speed(vsi);
5921 if (max_tx_rate > speed) {
5922 dev_err(&pf->pdev->dev,
5923 "Invalid max tx rate %llu specified for VSI seid %d.",
5927 if (max_tx_rate && max_tx_rate < 50) {
5928 dev_warn(&pf->pdev->dev,
5929 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5933 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5934 credits = max_tx_rate;
5935 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5936 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5937 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5938 if (ret)
5939 dev_err(&pf->pdev->dev,
5940 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5941 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5942 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5943 return ret;
5944 }
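/* [Editor's note -- illustrative sketch, not part of the driver source.]
 * Since credits are 50 Mbps units (I40E_BW_CREDIT_DIVISOR), a
 * hypothetical max_tx_rate of 300 Mbps is programmed as:
 *
 *	credits = 300;
 *	do_div(credits, 50);	(credits == 6)
 *
 * Rates below 50 Mbps are bumped to 50 by the check above, so a
 * non-zero request never rounds down to 0 (which means "disabled").
 */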
5947 * i40e_remove_queue_channels - Remove queue channels for the TCs
5948 * @vsi: VSI to be configured
5950 * Remove queue channels for the TCs
5952 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5953 {
5954 enum i40e_admin_queue_err last_aq_status;
5955 struct i40e_cloud_filter *cfilter;
5956 struct i40e_channel *ch, *ch_tmp;
5957 struct i40e_pf *pf = vsi->back;
5958 struct hlist_node *node;
5959 int ret, i;
5961 /* Reset rss size that was stored when reconfiguring rss for
5962 * channel VSIs with non-power-of-2 queue count.
5963 */
5964 vsi->current_rss_size = 0;
5966 /* perform cleanup for channels if they exist */
5967 if (list_empty(&vsi->ch_list))
5970 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5971 struct i40e_vsi *p_vsi;
5973 list_del(&ch->list);
5974 p_vsi = ch->parent_vsi;
5975 if (!p_vsi || !ch->initialized) {
5979 /* Reset queue contexts */
5980 for (i = 0; i < ch->num_queue_pairs; i++) {
5981 struct i40e_ring *tx_ring, *rx_ring;
5982 u16 pf_q;
5984 pf_q = ch->base_queue + i;
5985 tx_ring = vsi->tx_rings[pf_q];
5988 rx_ring = vsi->rx_rings[pf_q];
5992 /* Reset BW configured for this VSI via mqprio */
5993 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5995 dev_info(&vsi->back->pdev->dev,
5996 "Failed to reset tx rate for ch->seid %u\n",
5999 /* delete cloud filters associated with this channel */
6000 hlist_for_each_entry_safe(cfilter, node,
6001 &pf->cloud_filter_list, cloud_node) {
6002 if (cfilter->seid != ch->seid)
6005 hash_del(&cfilter->cloud_node);
6006 if (cfilter->dst_port)
6007 ret = i40e_add_del_cloud_filter_big_buf(vsi,
6008 cfilter,
6009 false);
6010 else
6011 ret = i40e_add_del_cloud_filter(vsi, cfilter,
6012 false);
6013 last_aq_status = pf->hw.aq.asq_last_status;
6014 if (ret)
6015 dev_info(&pf->pdev->dev,
6016 "Failed to delete cloud filter, err %s aq_err %s\n",
6017 i40e_stat_str(&pf->hw, ret),
6018 i40e_aq_str(&pf->hw, last_aq_status));
6022 /* delete VSI from FW */
6023 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
6024 NULL);
6025 if (ret)
6026 dev_err(&vsi->back->pdev->dev,
6027 "unable to remove channel (%d) for parent VSI(%d)\n",
6028 ch->seid, p_vsi->seid);
6031 INIT_LIST_HEAD(&vsi->ch_list);
6035 * i40e_get_max_queues_for_channel
6036 * @vsi: ptr to VSI to which channels are associated with
6038 * Helper function which returns max value among the queue counts set on the
6039 * channels/TCs created.
6041 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
6042 {
6043 struct i40e_channel *ch, *ch_tmp;
6044 int max = 0;
6046 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
6047 if (!ch->initialized)
6049 if (ch->num_queue_pairs > max)
6050 max = ch->num_queue_pairs;
6057 * i40e_validate_num_queues - validate num_queues w.r.t channel
6058 * @pf: ptr to PF device
6059 * @num_queues: number of queues
6060 * @vsi: the parent VSI
6061 * @reconfig_rss: indicates should the RSS be reconfigured or not
6063 * This function validates number of queues in the context of new channel
6064 * which is being established and determines if RSS should be reconfigured
6065 * or not for parent VSI.
6067 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
6068 struct i40e_vsi *vsi, bool *reconfig_rss)
6069 {
6070 int max_ch_queues;
6072 if (!reconfig_rss)
6073 return -EINVAL;
6075 *reconfig_rss = false;
6076 if (vsi->current_rss_size) {
6077 if (num_queues > vsi->current_rss_size) {
6078 dev_dbg(&pf->pdev->dev,
6079 "Error: num_queues (%d) > vsi's current_size(%d)\n",
6080 num_queues, vsi->current_rss_size);
6082 } else if ((num_queues < vsi->current_rss_size) &&
6083 (!is_power_of_2(num_queues))) {
6084 dev_dbg(&pf->pdev->dev,
6085 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
6086 num_queues, vsi->current_rss_size);
6091 if (!is_power_of_2(num_queues)) {
6092 /* Find the max num_queues configured for channel if channel
6093 * exist.
6094 * if channel exist, then enforce 'num_queues' to be more than
6095 * max ever queues configured for channel.
6096 */
6097 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
6098 if (num_queues < max_ch_queues) {
6099 dev_dbg(&pf->pdev->dev,
6100 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
6101 num_queues, max_ch_queues);
6102 return -EINVAL;
6103 }
6104 *reconfig_rss = true;
6105 }
6107 return 0;
6108 }
6111 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
6112 * @vsi: the VSI being setup
6113 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
6115 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
6117 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
6118 {
6119 struct i40e_pf *pf = vsi->back;
6120 u8 seed[I40E_HKEY_ARRAY_SIZE];
6121 struct i40e_hw *hw = &pf->hw;
6122 int local_rss_size;
6123 u8 *lut;
6124 int ret;
6126 if (!vsi->rss_size)
6127 return -EINVAL;
6129 if (rss_size > vsi->rss_size)
6132 local_rss_size = min_t(int, vsi->rss_size, rss_size);
6133 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
6134 if (!lut)
6135 return -ENOMEM;
6137 /* Ignoring user configured lut if there is one */
6138 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
6140 /* Use user configured hash key if there is one, otherwise
6141 * use default.
6142 */
6143 if (vsi->rss_hkey_user)
6144 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
6146 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
6148 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
6149 if (ret) {
6150 dev_info(&pf->pdev->dev,
6151 "Cannot set RSS lut, err %s aq_err %s\n",
6152 i40e_stat_str(hw, ret),
6153 i40e_aq_str(hw, hw->aq.asq_last_status));
6154 kfree(lut);
6155 return ret;
6156 }
6157 kfree(lut);
6159 /* Do the update w.r.t. storing rss_size */
6160 if (!vsi->orig_rss_size)
6161 vsi->orig_rss_size = vsi->rss_size;
6162 vsi->current_rss_size = local_rss_size;
6168 * i40e_channel_setup_queue_map - Setup a channel queue map
6169 * @pf: ptr to PF device
6170 * @ctxt: VSI context structure
6171 * @ch: ptr to channel structure
6173 * Setup queue map for a specific channel
6175 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
6176 struct i40e_vsi_context *ctxt,
6177 struct i40e_channel *ch)
6178 {
6179 u16 qcount, qmap, sections = 0;
6180 u8 offset = 0;
6181 int pow;
6183 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
6184 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
6186 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
6187 ch->num_queue_pairs = qcount;
6189 /* find the next higher power-of-2 of num queue pairs */
6190 pow = ilog2(qcount);
6191 if (!is_power_of_2(qcount))
6192 pow++;
6194 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
6195 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
6197 /* Setup queue TC[0].qmap for given VSI context */
6198 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
6200 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
6201 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
6202 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
6203 ctxt->info.valid_sections |= cpu_to_le16(sections);
6207 * i40e_add_channel - add a channel by adding VSI
6208 * @pf: ptr to PF device
6209 * @uplink_seid: underlying HW switching element (VEB) ID
6210 * @ch: ptr to channel structure
6212 * Add a channel (VSI) using add_vsi and queue_map
6214 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
6215 struct i40e_channel *ch)
6216 {
6217 struct i40e_hw *hw = &pf->hw;
6218 struct i40e_vsi_context ctxt;
6219 u8 enabled_tc = 0x1; /* TC0 enabled */
6220 int ret;
6222 if (ch->type != I40E_VSI_VMDQ2) {
6223 dev_info(&pf->pdev->dev,
6224 "add new vsi failed, ch->type %d\n", ch->type);
6228 memset(&ctxt, 0, sizeof(ctxt));
6229 ctxt.pf_num = hw->pf_id;
6231 ctxt.uplink_seid = uplink_seid;
6232 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
6233 if (ch->type == I40E_VSI_VMDQ2)
6234 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6236 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
6237 ctxt.info.valid_sections |=
6238 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6239 ctxt.info.switch_id =
6240 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6241 }
6243 /* Set queue map for a given VSI context */
6244 i40e_channel_setup_queue_map(pf, &ctxt, ch);
6246 /* Now time to create VSI */
6247 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6249 dev_info(&pf->pdev->dev,
6250 "add new vsi failed, err %s aq_err %s\n",
6251 i40e_stat_str(&pf->hw, ret),
6252 i40e_aq_str(&pf->hw,
6253 pf->hw.aq.asq_last_status));
6257 /* Success, update channel, set enabled_tc only if the channel
6258 * is not a macvlan
6259 */
6260 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
6261 ch->seid = ctxt.seid;
6262 ch->vsi_number = ctxt.vsi_number;
6263 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
6265 /* copy just the sections touched not the entire info
6266 * since not all sections are valid as returned by
6267 * update vsi params
6268 */
6269 ch->info.mapping_flags = ctxt.info.mapping_flags;
6270 memcpy(&ch->info.queue_mapping,
6271 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
6272 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
6273 sizeof(ctxt.info.tc_mapping));
6275 return 0;
6276 }
6278 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
6279 u8 *bw_share)
6280 {
6281 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
6282 i40e_status ret;
6283 int i;
6285 memset(&bw_data, 0, sizeof(bw_data));
6286 bw_data.tc_valid_bits = ch->enabled_tc;
6287 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6288 bw_data.tc_bw_credits[i] = bw_share[i];
6290 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
6293 dev_info(&vsi->back->pdev->dev,
6294 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
6295 vsi->back->hw.aq.asq_last_status, ch->seid);
6299 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6300 ch->info.qs_handle[i] = bw_data.qs_handles[i];
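/* Note: the caller fills bw_share[] with one credit per enabled TC, so
 * the scheduler splits bandwidth equally between TCs; these are
 * relative shares, not absolute rates.
 */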
6306 * i40e_channel_config_tx_ring - config TX ring associated with new channel
6307 * @pf: ptr to PF device
6308 * @vsi: the VSI being setup
6309 * @ch: ptr to channel structure
6311 * Configure TX rings associated with the channel (VSI), since its queues are taken from the main VSI
6314 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6315 struct i40e_vsi *vsi,
6316 struct i40e_channel *ch)
6320 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6322 /* Enable ETS TCs with equal BW Share for now across all VSIs */
6323 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6324 if (ch->enabled_tc & BIT(i))
6328 /* configure BW for new VSI */
6329 ret = i40e_channel_config_bw(vsi, ch, bw_share);
6331 dev_info(&vsi->back->pdev->dev,
6332 "Failed configuring TC map %d for channel (seid %u)\n",
6333 ch->enabled_tc, ch->seid);
6337 for (i = 0; i < ch->num_queue_pairs; i++) {
6338 struct i40e_ring *tx_ring, *rx_ring;
6341 pf_q = ch->base_queue + i;
6343 /* Get the main VSI's TX ring ptr, to re-program the TX queue context */
6346 tx_ring = vsi->tx_rings[pf_q];
6349 /* Get the RX ring ptr */
6350 rx_ring = vsi->rx_rings[pf_q];
6358 * i40e_setup_hw_channel - setup new channel
6359 * @pf: ptr to PF device
6360 * @vsi: the VSI being setup
6361 * @ch: ptr to channel structure
6362 * @uplink_seid: underlying HW switching element (VEB) ID
6363 * @type: type of channel to be created (VMDq2/VF)
6365 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6366 * and configures TX rings accordingly
6368 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6369 struct i40e_vsi *vsi,
6370 struct i40e_channel *ch,
6371 u16 uplink_seid, u8 type)
6375 ch->initialized = false;
6376 ch->base_queue = vsi->next_base_queue;
6379 /* Proceed with creation of channel (VMDq2) VSI */
6380 ret = i40e_add_channel(pf, uplink_seid, ch);
6382 dev_info(&pf->pdev->dev,
6383 "failed to add_channel using uplink_seid %u\n",
6388 /* Mark the successful creation of channel */
6389 ch->initialized = true;
6391 /* Reconfigure TX queues using QTX_CTL register */
6392 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6394 dev_info(&pf->pdev->dev,
6395 "failed to configure TX rings for channel %u\n",
6400 /* update 'next_base_queue' */
6401 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6402 dev_dbg(&pf->pdev->dev,
6403 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6404 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6405 ch->num_queue_pairs,
6406 vsi->next_base_queue);
6411 * i40e_setup_channel - setup new channel using uplink element
6412 * @pf: ptr to PF device
6413 * @vsi: pointer to the VSI to set up the channel within
6414 * @ch: ptr to channel structure
6416 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6417 * and uplink switching element (uplink_seid)
6419 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6420 struct i40e_channel *ch)
6426 if (vsi->type == I40E_VSI_MAIN) {
6427 vsi_type = I40E_VSI_VMDQ2;
6429 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6434 /* underlying switching element */
6435 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6437 /* create channel (VSI), configure TX rings */
6438 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6440 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6444 return ch->initialized;
6448 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6449 * @vsi: ptr to VSI which has PF backing
6451 * Sets up the switch mode correctly if it needs to be changed,
6452 * restricting it to the modes that are currently allowed.
6454 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6457 struct i40e_pf *pf = vsi->back;
6458 struct i40e_hw *hw = &pf->hw;
6461 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6465 if (hw->dev_caps.switch_mode) {
6466 /* if switch mode is set, support mode2 (non-tunneled for
6467 * cloud filter) for now
6469 u32 switch_mode = hw->dev_caps.switch_mode &
6470 I40E_SWITCH_MODE_MASK;
6471 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6472 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6474 dev_err(&pf->pdev->dev,
6475 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6476 hw->dev_caps.switch_mode);
6481 /* Set Bit 7 to be valid */
6482 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6484 /* Set L4type for TCP support */
6485 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6487 /* Set cloud filter mode */
6488 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6490 /* Prep mode field for set_switch_config */
6491 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6492 pf->last_sw_conf_valid_flags,
6494 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6495 dev_err(&pf->pdev->dev,
6496 "couldn't set switch config bits, err %s aq_err %s\n",
6497 i40e_stat_str(hw, ret),
6499 hw->aq.asq_last_status));
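/* For reference, the mode word sent above is the OR of
 * I40E_AQ_SET_SWITCH_BIT7_VALID, I40E_AQ_SET_SWITCH_L4_TYPE_TCP and
 * I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL, i.e. non-tunneled (mode2) cloud
 * filtering keyed on TCP L4 ports.
 */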
6505 * i40e_create_queue_channel - function to create channel
6506 * @vsi: VSI to be configured
6507 * @ch: ptr to channel (it contains channel specific params)
6509 * This function creates a channel (VSI) using the num_queues specified
6510 * by the user and reconfigures RSS if needed.
6512 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6513 struct i40e_channel *ch)
6515 struct i40e_pf *pf = vsi->back;
6522 if (!ch->num_queue_pairs) {
6523 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6524 ch->num_queue_pairs);
6528 /* validate user requested num_queues for channel */
6529 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6532 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6533 ch->num_queue_pairs);
6537 /* By default we are in VEPA mode; if this is the first VF/VMDq
6538 * VSI to be added, switch to VEB mode.
6541 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6542 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6544 if (vsi->type == I40E_VSI_MAIN) {
6545 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6546 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
6548 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
6550 /* From now on, for the main VSI the number of queues is the
6551 * value of TC0's queue count
6555 /* By this time, vsi->cnt_q_avail should be non-zero and
6556 * at least num_queues
6558 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6559 dev_dbg(&pf->pdev->dev,
6560 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6561 vsi->cnt_q_avail, ch->num_queue_pairs);
6565 /* reconfig_rss only if vsi type is MAIN_VSI */
6566 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6567 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6569 dev_info(&pf->pdev->dev,
6570 "Error: unable to reconfig rss for num_queues (%u)\n",
6571 ch->num_queue_pairs);
6576 if (!i40e_setup_channel(pf, vsi, ch)) {
6577 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6581 dev_info(&pf->pdev->dev,
6582 "Setup channel (id:%u) utilizing num_queues %d\n",
6583 ch->seid, ch->num_queue_pairs);
6585 /* configure VSI for BW limit */
6586 if (ch->max_tx_rate) {
6587 u64 credits = ch->max_tx_rate;
6589 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6592 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6593 dev_dbg(&pf->pdev->dev,
6594 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6600 /* in case of VF, this will be main SRIOV VSI */
6601 ch->parent_vsi = vsi;
6603 /* and update main_vsi's count for queue_available to use */
6604 vsi->cnt_q_avail -= ch->num_queue_pairs;
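/* This path is typically reached from user space through tc's mqprio
 * "channel" mode; an illustrative invocation (device name is just an
 * example):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 \
 *      queues 2@0 2@2 hw 1 mode channel
 *
 * leaving TC0 on the main VSI and creating one channel VSI for TC1.
 */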
6610 * i40e_configure_queue_channels - Add queue channel for the given TCs
6611 * @vsi: VSI to be configured
6613 * Configures queue channel mapping to the given TCs
6615 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6617 struct i40e_channel *ch;
6621 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6622 vsi->tc_seid_map[0] = vsi->seid;
6623 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6624 if (vsi->tc_config.enabled_tc & BIT(i)) {
6625 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6631 INIT_LIST_HEAD(&ch->list);
6632 ch->num_queue_pairs =
6633 vsi->tc_config.tc_info[i].qcount;
6635 vsi->tc_config.tc_info[i].qoffset;
6637 /* Bandwidth limit through the tc interface is in bytes/s; convert to Mbps */
6640 max_rate = vsi->mqprio_qopt.max_rate[i];
6641 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6642 ch->max_tx_rate = max_rate;
6644 list_add_tail(&ch->list, &vsi->ch_list);
6646 ret = i40e_create_queue_channel(vsi, ch);
6648 dev_err(&vsi->back->pdev->dev,
6649 "Failed creating queue channel with TC%d: queues %d\n",
6650 i, ch->num_queue_pairs);
6653 vsi->tc_seid_map[i] = ch->seid;
6659 i40e_remove_queue_channels(vsi);
6664 * i40e_veb_config_tc - Configure TCs for given VEB
6666 * @enabled_tc: TC bitmap
6668 * Configures given TC bitmap for VEB (switching) element
6670 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6672 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6673 struct i40e_pf *pf = veb->pf;
6677 /* No TCs or already enabled TCs just return */
6678 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6681 bw_data.tc_valid_bits = enabled_tc;
6682 /* bw_data.absolute_credits is not set (relative) */
6684 /* Enable ETS TCs with equal BW Share for now */
6685 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6686 if (enabled_tc & BIT(i))
6687 bw_data.tc_bw_share_credits[i] = 1;
6690 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6693 dev_info(&pf->pdev->dev,
6694 "VEB bw config failed, err %s aq_err %s\n",
6695 i40e_stat_str(&pf->hw, ret),
6696 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6700 /* Update the BW information */
6701 ret = i40e_veb_get_bw_info(veb);
6703 dev_info(&pf->pdev->dev,
6704 "Failed getting veb bw config, err %s aq_err %s\n",
6705 i40e_stat_str(&pf->hw, ret),
6706 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6713 #ifdef CONFIG_I40E_DCB
6715 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6718 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6719 * the caller has quiesced all the VSIs before calling
6722 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6728 /* Enable the TCs available on PF to all VEBs */
6729 tc_map = i40e_pf_get_tc_map(pf);
6730 if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6733 for (v = 0; v < I40E_MAX_VEB; v++) {
6736 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6738 dev_info(&pf->pdev->dev,
6739 "Failed configuring TC for VEB seid=%d\n",
6741 /* Will try to configure as many components as possible */
6745 /* Update each VSI */
6746 for (v = 0; v < pf->num_alloc_vsi; v++) {
6750 /* - Enable all TCs for the LAN VSI
6751 * - For all others keep them at TC0 for now
6753 if (v == pf->lan_vsi)
6754 tc_map = i40e_pf_get_tc_map(pf);
6756 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6758 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6760 dev_info(&pf->pdev->dev,
6761 "Failed configuring TC for VSI seid=%d\n",
6763 /* Will try to configure as many components as possible */
6765 /* Re-configure VSI vectors based on updated TC map */
6766 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6767 if (pf->vsi[v]->netdev)
6768 i40e_dcbnl_set_all(pf->vsi[v]);
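/* Note: ring-to-vector mapping is refreshed after every TC change
 * because the per-VSI queue layout may have changed;
 * i40e_dcbnl_set_all() then publishes the new configuration through
 * dcbnl for netdev-backed VSIs.
 */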
6774 * i40e_resume_port_tx - Resume port Tx
6777 * Resume a port's Tx and issue a PF reset in case of failure to resume
6780 static int i40e_resume_port_tx(struct i40e_pf *pf)
6782 struct i40e_hw *hw = &pf->hw;
6785 ret = i40e_aq_resume_port_tx(hw, NULL);
6787 dev_info(&pf->pdev->dev,
6788 "Resume Port Tx failed, err %s aq_err %s\n",
6789 i40e_stat_str(&pf->hw, ret),
6790 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6791 /* Schedule PF reset to recover */
6792 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6793 i40e_service_event_schedule(pf);
6800 * i40e_suspend_port_tx - Suspend port Tx
6803 * Suspend a port's Tx and issue a PF reset in case of failure.
6805 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6807 struct i40e_hw *hw = &pf->hw;
6810 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6812 dev_info(&pf->pdev->dev,
6813 "Suspend Port Tx failed, err %s aq_err %s\n",
6814 i40e_stat_str(&pf->hw, ret),
6815 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6816 /* Schedule PF reset to recover */
6817 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6818 i40e_service_event_schedule(pf);
6825 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6826 * @pf: PF being configured
6827 * @new_cfg: New DCBX configuration
6829 * Program DCB settings into HW and reconfigure VEB/VSIs on
6830 * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6832 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6833 struct i40e_dcbx_config *new_cfg)
6835 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6838 /* Check if reconfiguration is needed; compare the structs, not the local pointers */
6839 if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6840 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6844 /* Config changed; disable all VSIs */
6845 i40e_pf_quiesce_all_vsi(pf);
6847 /* Copy the new config to the current config */
6848 *old_cfg = *new_cfg;
6849 old_cfg->etsrec = old_cfg->etscfg;
6850 ret = i40e_set_dcb_config(&pf->hw);
6852 dev_info(&pf->pdev->dev,
6853 "Set DCB Config failed, err %s aq_err %s\n",
6854 i40e_stat_str(&pf->hw, ret),
6855 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6859 /* Changes in configuration update VEB/VSI */
6860 i40e_dcb_reconfigure(pf);
6862 /* In case of reset do not try to resume anything */
6863 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6864 /* Re-start the VSIs if disabled */
6865 ret = i40e_resume_port_tx(pf);
6866 /* In case of error no point in resuming VSIs */
6869 i40e_pf_unquiesce_all_vsi(pf);
6876 * i40e_hw_dcb_config - Program new DCBX settings into HW
6877 * @pf: PF being configured
6878 * @new_cfg: New DCBX configuration
6880 * Program DCB settings into HW and reconfigure VEB/VSIs on the given PF
6883 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6885 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6886 u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6887 u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6888 struct i40e_dcbx_config *old_cfg;
6889 u8 mode[I40E_MAX_TRAFFIC_CLASS];
6890 struct i40e_rx_pb_config pb_cfg;
6891 struct i40e_hw *hw = &pf->hw;
6892 u8 num_ports = hw->num_ports;
6900 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
6901 /* Unpack the information needed to program the ETS HW via the shared API:
6904 * ETS/NON-ETS arbiter mode
6905 * max exponent (credit refills)
6906 * Total number of ports
6907 * PFC priority bit-map
6910 * Arbiter mode between UPs sharing same TC
6911 * TSA table (ETS or non-ETS)
6912 * EEE enabled or not
6916 new_numtc = i40e_dcb_get_num_tc(new_cfg);
6918 memset(&ets_data, 0, sizeof(ets_data));
6919 for (i = 0; i < new_numtc; i++) {
6921 switch (new_cfg->etscfg.tsatable[i]) {
6922 case I40E_IEEE_TSA_ETS:
6923 prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6924 ets_data.tc_bw_share_credits[i] =
6925 new_cfg->etscfg.tcbwtable[i];
6927 case I40E_IEEE_TSA_STRICT:
6928 prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6930 ets_data.tc_bw_share_credits[i] =
6931 I40E_DCB_STRICT_PRIO_CREDITS;
6934 /* Invalid TSA type */
6935 need_reconfig = false;
6940 old_cfg = &hw->local_dcbx_config;
6941 /* Check if reconfiguration is needed */
6942 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6944 /* If needed, enable/disable frame tagging, disable all VSIs
6945 * and suspend port tx
6947 if (need_reconfig) {
6948 /* Enable DCB tagging only when more than one TC */
6950 pf->flags |= I40E_FLAG_DCB_ENABLED;
6952 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6954 set_bit(__I40E_PORT_SUSPENDED, pf->state);
6955 /* Reconfiguration needed; quiesce all VSIs */
6956 i40e_pf_quiesce_all_vsi(pf);
6957 ret = i40e_suspend_port_tx(pf);
6962 /* Configure Port ETS Tx Scheduler */
6963 ets_data.tc_valid_bits = tc_map;
6964 ets_data.tc_strict_priority_flags = lltc_map;
6965 ret = i40e_aq_config_switch_comp_ets
6966 (hw, pf->mac_seid, &ets_data,
6967 i40e_aqc_opc_modify_switching_comp_ets, NULL);
6969 dev_info(&pf->pdev->dev,
6970 "Modify Port ETS failed, err %s aq_err %s\n",
6971 i40e_stat_str(&pf->hw, ret),
6972 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6976 /* Configure Rx ETS HW */
6977 memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
6978 i40e_dcb_hw_set_num_tc(hw, new_numtc);
6979 i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
6980 I40E_DCB_ARB_MODE_STRICT_PRIORITY,
6981 I40E_DCB_DEFAULT_MAX_EXPONENT,
6983 i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
6984 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
6986 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
6987 new_cfg->etscfg.prioritytable);
6988 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
6990 /* Configure Rx Packet Buffers in HW */
6991 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6992 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
6993 mfs_tc[i] += I40E_PACKET_HDR_PAD;
6996 i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
6997 false, new_cfg->pfc.pfcenable,
6999 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
7001 /* Update the local Rx Packet buffer config */
7002 pf->pb_cfg = pb_cfg;
7004 /* Inform the FW about changes to DCB configuration */
7005 ret = i40e_aq_dcb_updated(&pf->hw, NULL);
7007 dev_info(&pf->pdev->dev,
7008 "DCB Updated failed, err %s aq_err %s\n",
7009 i40e_stat_str(&pf->hw, ret),
7010 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7014 /* Update the port DCBx configuration */
7015 *old_cfg = *new_cfg;
7017 /* Changes in configuration update VEB/VSI */
7018 i40e_dcb_reconfigure(pf);
7020 /* Re-start the VSIs if disabled */
7021 if (need_reconfig) {
7022 ret = i40e_resume_port_tx(pf);
7024 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
7025 /* In case of error no point in resuming VSIs */
7029 /* Wait for the PF's queues to be disabled */
7030 ret = i40e_pf_wait_queues_disabled(pf);
7032 /* Schedule PF reset to recover */
7033 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
7034 i40e_service_event_schedule(pf);
7037 i40e_pf_unquiesce_all_vsi(pf);
7038 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7039 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
7041 /* registers are set, let's apply */
7042 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
7043 ret = i40e_hw_set_dcb_config(pf, new_cfg);
7051 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
7052 * @pf: PF being queried
7054 * Set default DCB configuration in case DCB is to be done in SW.
7056 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
7058 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
7059 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
7060 struct i40e_hw *hw = &pf->hw;
7063 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
7064 /* Update the local cached instance with TC0 ETS */
7065 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
7066 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7067 pf->tmp_cfg.etscfg.maxtcs = 0;
7068 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7069 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
7070 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
7071 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
7072 /* FW needs one App to configure HW */
7073 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
7074 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
7075 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
7076 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
7078 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
7081 memset(&ets_data, 0, sizeof(ets_data));
7082 ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
7083 ets_data.tc_strict_priority_flags = 0; /* ETS */
7084 ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
7086 /* Enable ETS on the Physical port */
7087 err = i40e_aq_config_switch_comp_ets
7088 (hw, pf->mac_seid, &ets_data,
7089 i40e_aqc_opc_enable_switching_comp_ets, NULL);
7091 dev_info(&pf->pdev->dev,
7092 "Enable Port ETS failed, err %s aq_err %s\n",
7093 i40e_stat_str(&pf->hw, err),
7094 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7099 /* Update the local cached instance with TC0 ETS */
7100 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7101 dcb_cfg->etscfg.cbs = 0;
7102 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
7103 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7110 * i40e_init_pf_dcb - Initialize DCB configuration
7111 * @pf: PF being configured
7113 * Query the current DCB configuration and cache it
7114 * in the hardware structure
7116 static int i40e_init_pf_dcb(struct i40e_pf *pf)
7118 struct i40e_hw *hw = &pf->hw;
7121 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
7122 * Also do not enable DCBx if the FW LLDP agent is disabled
7124 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
7125 dev_info(&pf->pdev->dev, "DCB is not supported.\n");
7126 err = I40E_NOT_SUPPORTED;
7129 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
7130 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
7131 err = i40e_dcb_sw_default_config(pf);
7133 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
7136 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
7137 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
7138 DCB_CAP_DCBX_VER_IEEE;
7139 /* at init capable but disabled */
7140 pf->flags |= I40E_FLAG_DCB_CAPABLE;
7141 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7144 err = i40e_init_dcb(hw, true);
7146 /* Device/Function is not DCBX capable */
7147 if ((!hw->func_caps.dcb) ||
7148 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
7149 dev_info(&pf->pdev->dev,
7150 "DCBX offload is not supported or is disabled for this PF.\n");
7152 /* When status is not DISABLED, DCBX is managed by the FW */
7153 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
7154 DCB_CAP_DCBX_VER_IEEE;
7156 pf->flags |= I40E_FLAG_DCB_CAPABLE;
7157 /* Enable DCB tagging only when more than one TC
7158 * or explicitly disable if only one TC
7160 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
7161 pf->flags |= I40E_FLAG_DCB_ENABLED;
7163 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7164 dev_dbg(&pf->pdev->dev,
7165 "DCBX offload is supported for this PF.\n");
7167 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
7168 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
7169 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
7171 dev_info(&pf->pdev->dev,
7172 "Query for DCB configuration failed, err %s aq_err %s\n",
7173 i40e_stat_str(&pf->hw, err),
7174 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7180 #endif /* CONFIG_I40E_DCB */
7183 * i40e_print_link_message - print link up or down
7184 * @vsi: the VSI for which link needs a message
7185 * @isup: true if link is up, false otherwise
7187 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
7189 enum i40e_aq_link_speed new_speed;
7190 struct i40e_pf *pf = vsi->back;
7191 char *speed = "Unknown";
7192 char *fc = "Unknown";
7198 new_speed = pf->hw.phy.link_info.link_speed;
7200 new_speed = I40E_LINK_SPEED_UNKNOWN;
7202 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
7204 vsi->current_isup = isup;
7205 vsi->current_speed = new_speed;
7207 netdev_info(vsi->netdev, "NIC Link is Down\n");
7211 /* Warn the user if link speed on an NPAR enabled partition is not at least 10Gbps */
7214 if (pf->hw.func_caps.npar_enable &&
7215 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
7216 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
7217 netdev_warn(vsi->netdev,
7218 "The partition detected link speed that is less than 10Gbps\n");
7220 switch (pf->hw.phy.link_info.link_speed) {
7221 case I40E_LINK_SPEED_40GB:
7224 case I40E_LINK_SPEED_20GB:
7227 case I40E_LINK_SPEED_25GB:
7230 case I40E_LINK_SPEED_10GB:
7233 case I40E_LINK_SPEED_5GB:
7236 case I40E_LINK_SPEED_2_5GB:
7239 case I40E_LINK_SPEED_1GB:
7242 case I40E_LINK_SPEED_100MB:
7249 switch (pf->hw.fc.current_mode) {
7253 case I40E_FC_TX_PAUSE:
7256 case I40E_FC_RX_PAUSE:
7264 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
7269 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7272 if (pf->hw.phy.link_info.fec_info &
7273 I40E_AQ_CONFIG_FEC_KR_ENA)
7274 fec = "CL74 FC-FEC/BASE-R";
7275 else if (pf->hw.phy.link_info.fec_info &
7276 I40E_AQ_CONFIG_FEC_RS_ENA)
7277 fec = "CL108 RS-FEC";
7279 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
7280 * both RS and FC are requested
7282 if (vsi->back->hw.phy.link_info.req_fec_info &
7283 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7284 if (vsi->back->hw.phy.link_info.req_fec_info &
7285 I40E_AQ_REQUEST_FEC_RS)
7286 req_fec = "CL108 RS-FEC";
7288 req_fec = "CL74 FC-FEC/BASE-R";
7290 netdev_info(vsi->netdev,
7291 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7292 speed, req_fec, fec, an, fc);
7293 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7298 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7301 if (pf->hw.phy.link_info.fec_info &
7302 I40E_AQ_CONFIG_FEC_KR_ENA)
7303 fec = "CL74 FC-FEC/BASE-R";
7305 if (pf->hw.phy.link_info.req_fec_info &
7306 I40E_AQ_REQUEST_FEC_KR)
7307 req_fec = "CL74 FC-FEC/BASE-R";
7309 netdev_info(vsi->netdev,
7310 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7311 speed, req_fec, fec, an, fc);
7313 netdev_info(vsi->netdev,
7314 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7321 * i40e_up_complete - Finish the last steps of bringing up a connection
7322 * @vsi: the VSI being configured
7324 static int i40e_up_complete(struct i40e_vsi *vsi)
7326 struct i40e_pf *pf = vsi->back;
7329 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7330 i40e_vsi_configure_msix(vsi);
7332 i40e_configure_msi_and_legacy(vsi);
7335 err = i40e_vsi_start_rings(vsi);
7339 clear_bit(__I40E_VSI_DOWN, vsi->state);
7340 i40e_napi_enable_all(vsi);
7341 i40e_vsi_enable_irq(vsi);
7343 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7345 i40e_print_link_message(vsi, true);
7346 netif_tx_start_all_queues(vsi->netdev);
7347 netif_carrier_on(vsi->netdev);
7350 /* replay FDIR SB filters */
7351 if (vsi->type == I40E_VSI_FDIR) {
7352 /* reset fd counters */
7355 i40e_fdir_filter_restore(vsi);
7358 /* On the next run of the service_task, notify any clients of the new
7361 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7362 i40e_service_event_schedule(pf);
7368 * i40e_vsi_reinit_locked - Reset the VSI
7369 * @vsi: the VSI being configured
7371 * Rebuild the ring structs after some configuration
7372 * has changed, e.g. MTU size.
7374 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7376 struct i40e_pf *pf = vsi->back;
7378 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7379 usleep_range(1000, 2000);
7383 clear_bit(__I40E_CONFIG_BUSY, pf->state);
7387 * i40e_force_link_state - Force the link status
7388 * @pf: board private structure
7389 * @is_up: whether the link state should be forced up or down
7391 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7393 struct i40e_aq_get_phy_abilities_resp abilities;
7394 struct i40e_aq_set_phy_config config = {0};
7395 bool non_zero_phy_type = is_up;
7396 struct i40e_hw *hw = &pf->hw;
7401 /* Card might've been put in an unstable state by other drivers
7402 * and applications, which causes incorrect speed values being
7403 * set on startup. In order to clear speed registers, we call
7404 * get_phy_capabilities twice, once to get initial state of
7405 * available speeds, and once to get current PHY config.
7407 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7410 dev_err(&pf->pdev->dev,
7411 "failed to get phy cap., ret = %s last_status = %s\n",
7412 i40e_stat_str(hw, err),
7413 i40e_aq_str(hw, hw->aq.asq_last_status));
7416 speed = abilities.link_speed;
7418 /* Get the current phy config */
7419 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7422 dev_err(&pf->pdev->dev,
7423 "failed to get phy cap., ret = %s last_status = %s\n",
7424 i40e_stat_str(hw, err),
7425 i40e_aq_str(hw, hw->aq.asq_last_status));
7429 /* If the link needs to go up, but was not forced to go down,
7430 * and its speed values are OK, there is no need for a flap;
7431 * if non_zero_phy_type was set, we still need to force the link up
7433 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
7434 non_zero_phy_type = true;
7435 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7436 return I40E_SUCCESS;
7438 /* To force link we need to set bits for all supported PHY types,
7439 * but there are now more than 32, so we need to split the bitmap
7440 * across two fields.
7442 mask = I40E_PHY_TYPES_BITMASK;
7444 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7445 config.phy_type_ext =
7446 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
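/* Illustrative split (editor's example): a PHY type at bit 40 of the
 * 64-bit mask contributes nothing to the 32-bit config.phy_type field
 * and sets bit (40 - 32) = 8 of config.phy_type_ext.
 */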
7447 /* Copy the old settings, except phy_type */
7448 config.abilities = abilities.abilities;
7449 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
7451 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7453 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7455 if (abilities.link_speed != 0)
7456 config.link_speed = abilities.link_speed;
7458 config.link_speed = speed;
7459 config.eee_capability = abilities.eee_capability;
7460 config.eeer = abilities.eeer_val;
7461 config.low_power_ctrl = abilities.d3_lpan;
7462 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7463 I40E_AQ_PHY_FEC_CONFIG_MASK;
7464 err = i40e_aq_set_phy_config(hw, &config, NULL);
7467 dev_err(&pf->pdev->dev,
7468 "set phy config ret = %s last_status = %s\n",
7469 i40e_stat_str(&pf->hw, err),
7470 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7474 /* Update the link info */
7475 err = i40e_update_link_info(hw);
7477 /* Wait a little bit (on 40G cards it sometimes takes a really
7478 * long time for link to come back from the atomic reset)
7482 i40e_update_link_info(hw);
7485 i40e_aq_set_link_restart_an(hw, is_up, NULL);
7487 return I40E_SUCCESS;
7491 * i40e_up - Bring the connection back up after being down
7492 * @vsi: the VSI being configured
7494 int i40e_up(struct i40e_vsi *vsi)
7498 if (vsi->type == I40E_VSI_MAIN &&
7499 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7500 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7501 i40e_force_link_state(vsi->back, true);
7503 err = i40e_vsi_configure(vsi);
7505 err = i40e_up_complete(vsi);
7511 * i40e_down - Shutdown the connection processing
7512 * @vsi: the VSI being stopped
7514 void i40e_down(struct i40e_vsi *vsi)
7518 /* It is assumed that the caller of this function
7519 * sets the vsi->state __I40E_VSI_DOWN bit.
7522 netif_carrier_off(vsi->netdev);
7523 netif_tx_disable(vsi->netdev);
7525 i40e_vsi_disable_irq(vsi);
7526 i40e_vsi_stop_rings(vsi);
7527 if (vsi->type == I40E_VSI_MAIN &&
7528 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7529 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7530 i40e_force_link_state(vsi->back, false);
7531 i40e_napi_disable_all(vsi);
7533 for (i = 0; i < vsi->num_queue_pairs; i++) {
7534 i40e_clean_tx_ring(vsi->tx_rings[i]);
7535 if (i40e_enabled_xdp_vsi(vsi)) {
7536 /* Make sure that in-progress ndo_xdp_xmit and
7537 * ndo_xsk_wakeup calls are completed.
7540 i40e_clean_tx_ring(vsi->xdp_rings[i]);
7542 i40e_clean_rx_ring(vsi->rx_rings[i]);
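/* The teardown order above is deliberate: carrier and TX queues are
 * stopped first, then IRQs and rings, then NAPI, and ring memory is
 * cleaned only once nothing can still be processing descriptors.
 */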
7548 * i40e_validate_mqprio_qopt - validate queue mapping info
7549 * @vsi: the VSI being configured
7550 * @mqprio_qopt: queue parameters
7552 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7553 struct tc_mqprio_qopt_offload *mqprio_qopt)
7555 u64 sum_max_rate = 0;
7559 if (mqprio_qopt->qopt.offset[0] != 0 ||
7560 mqprio_qopt->qopt.num_tc < 1 ||
7561 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7563 for (i = 0; ; i++) {
7564 if (!mqprio_qopt->qopt.count[i])
7566 if (mqprio_qopt->min_rate[i]) {
7567 dev_err(&vsi->back->pdev->dev,
7568 "Invalid min tx rate (greater than 0) specified\n");
7571 max_rate = mqprio_qopt->max_rate[i];
7572 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7573 sum_max_rate += max_rate;
7575 if (i >= mqprio_qopt->qopt.num_tc - 1)
7577 if (mqprio_qopt->qopt.offset[i + 1] !=
7578 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7581 if (vsi->num_queue_pairs <
7582 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7583 dev_err(&vsi->back->pdev->dev,
7584 "Failed to create traffic channel, insufficient number of queues.\n");
7587 if (sum_max_rate > i40e_get_link_speed(vsi)) {
7588 dev_err(&vsi->back->pdev->dev,
7589 "Invalid max tx rate specified\n");
7596 * i40e_vsi_set_default_tc_config - set default values for tc configuration
7597 * @vsi: the VSI being configured
7599 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7604 /* Only TC0 is enabled */
7605 vsi->tc_config.numtc = 1;
7606 vsi->tc_config.enabled_tc = 1;
7607 qcount = min_t(int, vsi->alloc_queue_pairs,
7608 i40e_pf_get_max_q_per_tc(vsi->back));
7609 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7610 /* For each TC that is not enabled, keep the offset at the default
7611 * queue and allocate a single queue for it.
7613 vsi->tc_config.tc_info[i].qoffset = 0;
7615 vsi->tc_config.tc_info[i].qcount = qcount;
7617 vsi->tc_config.tc_info[i].qcount = 1;
7618 vsi->tc_config.tc_info[i].netdev_tc = 0;
7623 * i40e_del_macvlan_filter
7624 * @hw: pointer to the HW structure
7625 * @seid: seid of the channel VSI
7626 * @macaddr: the mac address to apply as a filter
7627 * @aq_err: store the admin Q error
7629 * This function deletes a mac filter on the channel VSI which serves as the
7630 * macvlan. Returns 0 on success.
7632 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7633 const u8 *macaddr, int *aq_err)
7635 struct i40e_aqc_remove_macvlan_element_data element;
7638 memset(&element, 0, sizeof(element));
7639 ether_addr_copy(element.mac_addr, macaddr);
7640 element.vlan_tag = 0;
7641 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7642 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7643 *aq_err = hw->aq.asq_last_status;
7649 * i40e_add_macvlan_filter
7650 * @hw: pointer to the HW structure
7651 * @seid: seid of the channel VSI
7652 * @macaddr: the mac address to apply as a filter
7653 * @aq_err: store the admin Q error
7655 * This function adds a mac filter on the channel VSI which serves as the
7656 * macvlan. Returns 0 on success.
7658 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7659 const u8 *macaddr, int *aq_err)
7661 struct i40e_aqc_add_macvlan_element_data element;
7665 ether_addr_copy(element.mac_addr, macaddr);
7666 element.vlan_tag = 0;
7667 element.queue_number = 0;
7668 element.match_method = I40E_AQC_MM_ERR_NO_RES;
7669 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7670 element.flags = cpu_to_le16(cmd_flags);
7671 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7672 *aq_err = hw->aq.asq_last_status;
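/* Both helpers above program perfect-match filters with vlan_tag == 0,
 * so the channel VSI receives frames for exactly the macvlan's MAC
 * address; *aq_err hands the last admin queue status back to the
 * caller for error reporting.
 */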
7678 * i40e_reset_ch_rings - Reset the queue contexts in a channel
7679 * @vsi: the VSI we want to access
7680 * @ch: the channel we want to access
7682 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7684 struct i40e_ring *tx_ring, *rx_ring;
7688 for (i = 0; i < ch->num_queue_pairs; i++) {
7689 pf_q = ch->base_queue + i;
7690 tx_ring = vsi->tx_rings[pf_q];
7692 rx_ring = vsi->rx_rings[pf_q];
7698 * i40e_free_macvlan_channels
7699 * @vsi: the VSI we want to access
7701 * This function frees the Qs of the channel VSI from
7702 * the stack and also deletes the channel VSIs which
7703 * serve as macvlans.
7705 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7707 struct i40e_channel *ch, *ch_tmp;
7710 if (list_empty(&vsi->macvlan_list))
7713 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7714 struct i40e_vsi *parent_vsi;
7716 if (i40e_is_channel_macvlan(ch)) {
7717 i40e_reset_ch_rings(vsi, ch);
7718 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7719 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7720 netdev_set_sb_channel(ch->fwd->netdev, 0);
7725 list_del(&ch->list);
7726 parent_vsi = ch->parent_vsi;
7727 if (!parent_vsi || !ch->initialized) {
7732 /* remove the VSI */
7733 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7736 dev_err(&vsi->back->pdev->dev,
7737 "unable to remove channel (%d) for parent VSI(%d)\n",
7738 ch->seid, parent_vsi->seid);
7741 vsi->macvlan_cnt = 0;
7745 * i40e_fwd_ring_up - bring the macvlan device up
7746 * @vsi: the VSI we want to access
7747 * @vdev: macvlan netdevice
7748 * @fwd: the private fwd structure
7750 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7751 struct i40e_fwd_adapter *fwd)
7753 struct i40e_channel *ch = NULL, *ch_tmp, *iter;
7754 int ret = 0, num_tc = 1, i, aq_err;
7755 struct i40e_pf *pf = vsi->back;
7756 struct i40e_hw *hw = &pf->hw;
7758 /* Go through the list and find an available channel */
7759 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
7760 if (!i40e_is_channel_macvlan(iter)) {
7762 /* record configuration for macvlan interface in vdev */
7763 for (i = 0; i < num_tc; i++)
7764 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7766 iter->num_queue_pairs,
7768 for (i = 0; i < iter->num_queue_pairs; i++) {
7769 struct i40e_ring *tx_ring, *rx_ring;
7772 pf_q = iter->base_queue + i;
7774 /* Get to TX ring ptr */
7775 tx_ring = vsi->tx_rings[pf_q];
7778 /* Get the RX ring ptr */
7779 rx_ring = vsi->rx_rings[pf_q];
7790 /* Guarantee all rings are updated before we update the
7791 * MAC address filter.
7795 /* Add a mac filter */
7796 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7798 /* if we cannot add the MAC rule then disable the offload */
7799 macvlan_release_l2fw_offload(vdev);
7800 for (i = 0; i < ch->num_queue_pairs; i++) {
7801 struct i40e_ring *rx_ring;
7804 pf_q = ch->base_queue + i;
7805 rx_ring = vsi->rx_rings[pf_q];
7806 rx_ring->netdev = NULL;
7808 dev_info(&pf->pdev->dev,
7809 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7810 i40e_stat_str(hw, ret),
7811 i40e_aq_str(hw, aq_err));
7812 netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7819 * i40e_setup_macvlans - create the channels which will be macvlans
7820 * @vsi: the VSI we want to access
7821 * @macvlan_cnt: no. of macvlans to be setup
7822 * @qcnt: no. of Qs per macvlan
7823 * @vdev: macvlan netdevice
7825 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7826 struct net_device *vdev)
7828 struct i40e_pf *pf = vsi->back;
7829 struct i40e_hw *hw = &pf->hw;
7830 struct i40e_vsi_context ctxt;
7831 u16 sections, qmap, num_qps;
7832 struct i40e_channel *ch;
7833 int i, pow, ret = 0;
7836 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7839 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7841 /* find the next higher power-of-2 of num queue pairs */
7842 pow = fls(roundup_pow_of_two(num_qps) - 1);
7844 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7845 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
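/* Illustrative arithmetic (editor's example): with num_qps = 6,
 * roundup_pow_of_two(6) = 8 and fls(8 - 1) = 3, so the main VSI's TC0
 * advertises 2^3 = 8 queue slots; fls(roundup_pow_of_two(n) - 1) is
 * just ceil(log2(n)).
 */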
7847 /* Setup context bits for the main VSI */
7848 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7849 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7850 memset(&ctxt, 0, sizeof(ctxt));
7851 ctxt.seid = vsi->seid;
7852 ctxt.pf_num = vsi->back->hw.pf_id;
7854 ctxt.uplink_seid = vsi->uplink_seid;
7855 ctxt.info = vsi->info;
7856 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7857 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7858 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7859 ctxt.info.valid_sections |= cpu_to_le16(sections);
7861 /* Reconfigure RSS for main VSI with new max queue count */
7862 vsi->rss_size = max_t(u16, num_qps, qcnt);
7863 ret = i40e_vsi_config_rss(vsi);
7865 dev_info(&pf->pdev->dev,
7866 "Failed to reconfig RSS for num_queues (%u)\n",
7870 vsi->reconfig_rss = true;
7871 dev_dbg(&vsi->back->pdev->dev,
7872 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7873 vsi->next_base_queue = num_qps;
7874 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7876 /* Update the VSI after updating the VSI queue-mapping information */
7879 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7881 dev_info(&pf->pdev->dev,
7882 "Update vsi tc config failed, err %s aq_err %s\n",
7883 i40e_stat_str(hw, ret),
7884 i40e_aq_str(hw, hw->aq.asq_last_status));
7887 /* update the local VSI info with updated queue map */
7888 i40e_vsi_update_queue_map(vsi, &ctxt);
7889 vsi->info.valid_sections = 0;
7891 /* Create channels for macvlans */
7892 INIT_LIST_HEAD(&vsi->macvlan_list);
7893 for (i = 0; i < macvlan_cnt; i++) {
7894 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7899 INIT_LIST_HEAD(&ch->list);
7900 ch->num_queue_pairs = qcnt;
7901 if (!i40e_setup_channel(pf, vsi, ch)) {
7906 ch->parent_vsi = vsi;
7907 vsi->cnt_q_avail -= ch->num_queue_pairs;
7909 list_add_tail(&ch->list, &vsi->macvlan_list);
7915 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7916 i40e_free_macvlan_channels(vsi);
7922 * i40e_fwd_add - configure macvlans
7923 * @netdev: net device to configure
7924 * @vdev: macvlan netdevice
7926 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7928 struct i40e_netdev_priv *np = netdev_priv(netdev);
7929 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7930 struct i40e_vsi *vsi = np->vsi;
7931 struct i40e_pf *pf = vsi->back;
7932 struct i40e_fwd_adapter *fwd;
7933 int avail_macvlan, ret;
7935 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7936 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7937 return ERR_PTR(-EINVAL);
7939 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7940 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7941 return ERR_PTR(-EINVAL);
7943 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7944 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7945 return ERR_PTR(-EINVAL);
7948 /* The macvlan device has to be a single Q device so that the
7949 * tc_to_txq field can be reused to pick the tx queue.
7951 if (netif_is_multiqueue(vdev))
7952 return ERR_PTR(-ERANGE);
7954 if (!vsi->macvlan_cnt) {
7955 /* reserve bit 0 for the pf device */
7956 set_bit(0, vsi->fwd_bitmask);
7958 /* Try to reserve as many queues as possible for macvlans. First
7959 * reserve 3/4th of max vectors, then half, then quarter and
7960 * calculate Qs per macvlan as you go
7962 vectors = pf->num_lan_msix;
7963 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7964 /* allocate 4 Qs per macvlan and 32 Qs to the PF */
7966 macvlan_cnt = (vectors - 32) / 4;
7967 } else if (vectors <= 64 && vectors > 32) {
7968 /* allocate 2 Qs per macvlan and 16 Qs to the PF */
7970 macvlan_cnt = (vectors - 16) / 2;
7971 } else if (vectors <= 32 && vectors > 16) {
7972 /* allocate 1 Q per macvlan and 16 Qs to the PF */
7974 macvlan_cnt = vectors - 16;
7975 } else if (vectors <= 16 && vectors > 8) {
7976 /* allocate 1 Q per macvlan and 8 Qs to the PF */
7978 macvlan_cnt = vectors - 8;
7980 /* allocate 1 Q per macvlan and 1 Q to the PF */
7982 macvlan_cnt = vectors - 1;
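/* Example (editor-supplied): with pf->num_lan_msix = 48 vectors the
 * "<= 64 && > 32" branch applies, so each macvlan gets 2 queues and
 * macvlan_cnt = (48 - 16) / 2 = 16 offloadable macvlans.
 */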
7985 if (macvlan_cnt == 0)
7986 return ERR_PTR(-EBUSY);
7988 /* Quiesce VSI queues */
7989 i40e_quiesce_vsi(vsi);
7991 /* sets up the macvlans but does not "enable" them */
7992 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7995 return ERR_PTR(ret);
7998 i40e_unquiesce_vsi(vsi);
8000 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
8002 if (avail_macvlan >= I40E_MAX_MACVLANS)
8003 return ERR_PTR(-EBUSY);
8005 /* create the fwd struct */
8006 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
8008 return ERR_PTR(-ENOMEM);
8010 set_bit(avail_macvlan, vsi->fwd_bitmask);
8011 fwd->bit_no = avail_macvlan;
8012 netdev_set_sb_channel(vdev, avail_macvlan);
8015 if (!netif_running(netdev))
8018 /* Set fwd ring up */
8019 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
8021 /* unbind the queues and drop the subordinate channel config */
8022 netdev_unbind_sb_channel(netdev, vdev);
8023 netdev_set_sb_channel(vdev, 0);
8026 return ERR_PTR(-EINVAL);
8033 * i40e_del_all_macvlans - Delete all the mac filters on the channels
8034 * @vsi: the VSI we want to access
8036 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
8038 struct i40e_channel *ch, *ch_tmp;
8039 struct i40e_pf *pf = vsi->back;
8040 struct i40e_hw *hw = &pf->hw;
8041 int aq_err, ret = 0;
8043 if (list_empty(&vsi->macvlan_list))
8046 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8047 if (i40e_is_channel_macvlan(ch)) {
8048 ret = i40e_del_macvlan_filter(hw, ch->seid,
8049 i40e_channel_mac(ch),
8052 /* Reset queue contexts */
8053 i40e_reset_ch_rings(vsi, ch);
8054 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8055 netdev_unbind_sb_channel(vsi->netdev,
8057 netdev_set_sb_channel(ch->fwd->netdev, 0);
8066 * i40e_fwd_del - delete macvlan interfaces
8067 * @netdev: net device to configure
8068 * @vdev: macvlan netdevice
8070 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
8072 struct i40e_netdev_priv *np = netdev_priv(netdev);
8073 struct i40e_fwd_adapter *fwd = vdev;
8074 struct i40e_channel *ch, *ch_tmp;
8075 struct i40e_vsi *vsi = np->vsi;
8076 struct i40e_pf *pf = vsi->back;
8077 struct i40e_hw *hw = &pf->hw;
8078 int aq_err, ret = 0;
8080 /* Find the channel associated with the macvlan and del mac filter */
8081 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8082 if (i40e_is_channel_macvlan(ch) &&
8083 ether_addr_equal(i40e_channel_mac(ch),
8084 fwd->netdev->dev_addr)) {
8085 ret = i40e_del_macvlan_filter(hw, ch->seid,
8086 i40e_channel_mac(ch),
8089 /* Reset queue contexts */
8090 i40e_reset_ch_rings(vsi, ch);
8091 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8092 netdev_unbind_sb_channel(netdev, fwd->netdev);
8093 netdev_set_sb_channel(fwd->netdev, 0);
8097 dev_info(&pf->pdev->dev,
8098 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
8099 i40e_stat_str(hw, ret),
8100 i40e_aq_str(hw, aq_err));
8108 * i40e_setup_tc - configure multiple traffic classes
8109 * @netdev: net device to configure
8110 * @type_data: tc offload data
8112 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
8114 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8115 struct i40e_netdev_priv *np = netdev_priv(netdev);
8116 struct i40e_vsi *vsi = np->vsi;
8117 struct i40e_pf *pf = vsi->back;
8118 u8 enabled_tc = 0, num_tc, hw;
8119 bool need_reset = false;
8120 int old_queue_pairs;
8125 old_queue_pairs = vsi->num_queue_pairs;
8126 num_tc = mqprio_qopt->qopt.num_tc;
8127 hw = mqprio_qopt->qopt.hw;
8128 mode = mqprio_qopt->mode;
8130 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
8131 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8135 /* Check if MFP enabled */
8136 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
8138 "Configuring TC not supported in MFP mode\n");
8142 case TC_MQPRIO_MODE_DCB:
8143 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
8145 /* Check if DCB enabled to continue */
8146 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
8148 "DCB is not enabled for adapter\n");
8152 /* Check whether tc count is within enabled limit */
8153 if (num_tc > i40e_pf_get_num_tc(pf)) {
8155 "TC count greater than enabled on link for adapter\n");
8159 case TC_MQPRIO_MODE_CHANNEL:
8160 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
8162 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
8165 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8167 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
8170 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
8171 sizeof(*mqprio_qopt));
8172 pf->flags |= I40E_FLAG_TC_MQPRIO;
8173 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8180 /* Generate TC map for number of tc requested */
8181 for (i = 0; i < num_tc; i++)
8182 enabled_tc |= BIT(i);
8184 /* Requesting same TC configuration as already enabled */
8185 if (enabled_tc == vsi->tc_config.enabled_tc &&
8186 mode != TC_MQPRIO_MODE_CHANNEL)
8189 /* Quiesce VSI queues */
8190 i40e_quiesce_vsi(vsi);
8192 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
8193 i40e_remove_queue_channels(vsi);
8195 /* Configure VSI for enabled TCs */
8196 ret = i40e_vsi_config_tc(vsi, enabled_tc);
8198 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
8202 } else if (enabled_tc &&
8203 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
8205 "Failed to create channel. Override queues (%u) not power of 2\n",
8206 vsi->tc_config.tc_info[0].qcount);
8212 dev_info(&vsi->back->pdev->dev,
8213 "Setup channel (id:%u) utilizing num_queues %d\n",
8214 vsi->seid, vsi->tc_config.tc_info[0].qcount);
8216 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
8217 if (vsi->mqprio_qopt.max_rate[0]) {
8218 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8220 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
8221 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
8223 u64 credits = max_tx_rate;
8225 do_div(credits, I40E_BW_CREDIT_DIVISOR);
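/* Illustrative conversion (editor's example, assuming
 * I40E_BW_MBPS_DIVISOR converts bytes/s to Mbps): a requested
 * max_rate[0] of 125000000 bytes/s becomes 1000 Mbps above, i.e.
 * credits = 1000 / 50 = 20 of the scheduler's 50 Mbps credits.
 */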
8226 dev_dbg(&vsi->back->pdev->dev,
8227 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
8236 ret = i40e_configure_queue_channels(vsi);
8238 vsi->num_queue_pairs = old_queue_pairs;
8240 "Failed configuring queue channels\n");
8247 /* Reset the configuration data to defaults, only TC0 is enabled */
8249 i40e_vsi_set_default_tc_config(vsi);
8254 i40e_unquiesce_vsi(vsi);
8259 * i40e_set_cld_element - sets cloud filter element data
8260 * @filter: cloud filter rule
8261 * @cld: ptr to cloud filter element data
8263 * This is a helper function to copy data into the cloud filter element
8266 i40e_set_cld_element(struct i40e_cloud_filter *filter,
8267 struct i40e_aqc_cloud_filters_element_data *cld)
8272 memset(cld, 0, sizeof(*cld));
8273 ether_addr_copy(cld->outer_mac, filter->dst_mac);
8274 ether_addr_copy(cld->inner_mac, filter->src_mac);
8276 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
8279 if (filter->n_proto == ETH_P_IPV6) {
8280 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
8281 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
8282 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
8284 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
8287 ipa = be32_to_cpu(filter->dst_ipv4);
8289 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8292 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
8294 /* tenant_id is not supported by FW now; once the support is enabled,
8295 * fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
8297 if (filter->tenant_id)
8302 * i40e_add_del_cloud_filter - Add/del cloud filter
8303 * @vsi: pointer to VSI
8304 * @filter: cloud filter rule
8305 * @add: if true, add, if false, delete
8307 * Add or delete a cloud filter for a specific flow spec.
8308 * Returns 0 if the filter was successfully added.
8310 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8311 struct i40e_cloud_filter *filter, bool add)
8313 struct i40e_aqc_cloud_filters_element_data cld_filter;
8314 struct i40e_pf *pf = vsi->back;
8316 static const u16 flag_table[128] = {
8317 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
8318 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8319 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
8320 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8321 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
8322 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8323 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8324 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8325 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8326 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8327 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8328 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8329 [I40E_CLOUD_FILTER_FLAGS_IIP] =
8330 I40E_AQC_ADD_CLOUD_FILTER_IIP,
8333 if (filter->flags >= ARRAY_SIZE(flag_table))
8334 return I40E_ERR_CONFIG;
8336 memset(&cld_filter, 0, sizeof(cld_filter));
8338 /* copy element needed to add cloud filter from filter */
8339 i40e_set_cld_element(filter, &cld_filter);
8341 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8342 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8343 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8345 if (filter->n_proto == ETH_P_IPV6)
8346 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8347 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8349 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8350 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8353 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8356 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8359 dev_dbg(&pf->pdev->dev,
8360 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8361 add ? "add" : "delete", filter->dst_port, ret,
8362 pf->hw.aq.asq_last_status);
8364 dev_info(&pf->pdev->dev,
8365 "%s cloud filter for VSI: %d\n",
8366 add ? "Added" : "Deleted", filter->seid);
8371 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8372 * @vsi: pointer to VSI
8373 * @filter: cloud filter rule
8374 * @add: if true, add, if false, delete
8376 * Add or delete a cloud filter for a specific flow spec using big buffer.
8377 * Returns 0 if the filter was successfully added.
8379 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8380 struct i40e_cloud_filter *filter,
8383 struct i40e_aqc_cloud_filters_element_bb cld_filter;
8384 struct i40e_pf *pf = vsi->back;
8387 /* Filters with both a valid src and a valid dst mac_addr are not supported */
8388 if ((is_valid_ether_addr(filter->dst_mac) &&
8389 is_valid_ether_addr(filter->src_mac)) ||
8390 (is_multicast_ether_addr(filter->dst_mac) &&
8391 is_multicast_ether_addr(filter->src_mac)))
8394 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
8395 * ports are not supported via big buffer now.
8397 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8400 /* adding filter using src_port/src_ip is not supported at this stage */
8401 if (filter->src_port ||
8402 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8403 !ipv6_addr_any(&filter->ip.v6.src_ip6))
8406 memset(&cld_filter, 0, sizeof(cld_filter));
8408 /* copy element needed to add cloud filter from filter */
8409 i40e_set_cld_element(filter, &cld_filter.element);
8411 if (is_valid_ether_addr(filter->dst_mac) ||
8412 is_valid_ether_addr(filter->src_mac) ||
8413 is_multicast_ether_addr(filter->dst_mac) ||
8414 is_multicast_ether_addr(filter->src_mac)) {
8415 /* MAC + IP : unsupported mode */
8416 if (filter->dst_ipv4)
8419 /* since we validated above that the L4 port must be valid before
8420 * we get here, start with the corresponding "flags" value
8421 * and update it depending on whether a vlan is present
8423 cld_filter.element.flags =
8424 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8426 if (filter->vlan_id) {
8427 cld_filter.element.flags =
8428 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8431 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8432 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8433 cld_filter.element.flags =
8434 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8435 if (filter->n_proto == ETH_P_IPV6)
8436 cld_filter.element.flags |=
8437 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8439 cld_filter.element.flags |=
8440 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8442 dev_err(&pf->pdev->dev,
8443 "either mac or ip has to be valid for cloud filter\n");
8447 /* Now copy L4 port in Byte 6..7 in general fields */
8448 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8449 be16_to_cpu(filter->dst_port);
8452 /* Validate current device switch mode, change if necessary */
8453 ret = i40e_validate_and_set_switch_mode(vsi);
8455 dev_err(&pf->pdev->dev,
8456 "failed to set switch mode, ret %d\n",
8461 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8464 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8469 dev_dbg(&pf->pdev->dev,
8470 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8471 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8473 dev_info(&pf->pdev->dev,
8474 "%s cloud filter for VSI: %d, L4 port: %d\n",
8475 add ? "add" : "delete", filter->seid,
8476 ntohs(filter->dst_port));
8481 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8482 * @vsi: Pointer to VSI
8483 * @f: Pointer to struct flow_cls_offload
8484 * @filter: Pointer to cloud filter structure
8487 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8488 struct flow_cls_offload *f,
8489 struct i40e_cloud_filter *filter)
8491 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8492 struct flow_dissector *dissector = rule->match.dissector;
8493 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8494 struct i40e_pf *pf = vsi->back;
8497 if (dissector->used_keys &
8498 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
8499 BIT(FLOW_DISSECTOR_KEY_BASIC) |
8500 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8501 BIT(FLOW_DISSECTOR_KEY_VLAN) |
8502 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8503 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8504 BIT(FLOW_DISSECTOR_KEY_PORTS) |
8505 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8506 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
8507 dissector->used_keys);
8511 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8512 struct flow_match_enc_keyid match;
8514 flow_rule_match_enc_keyid(rule, &match);
8515 if (match.mask->keyid != 0)
8516 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
		filter->tenant_id = be32_to_cpu(match.key->keyid);
	}
8521 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8522 struct flow_match_basic match;
8524 flow_rule_match_basic(rule, &match);
8525 n_proto_key = ntohs(match.key->n_proto);
8526 n_proto_mask = ntohs(match.mask->n_proto);
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		filter->n_proto = n_proto_key & n_proto_mask;
		filter->ip_proto = match.key->ip_proto;
	}
8536 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8537 struct flow_match_eth_addrs match;
8539 flow_rule_match_eth_addrs(rule, &match);
		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(match.mask->dst)) {
			if (is_broadcast_ether_addr(match.mask->dst)) {
				field_flags |= I40E_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
					match.mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(match.mask->src)) {
			if (is_broadcast_ether_addr(match.mask->src)) {
				field_flags |= I40E_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
					match.mask->src);
				return I40E_ERR_CONFIG;
			}
		}
		ether_addr_copy(filter->dst_mac, match.key->dst);
		ether_addr_copy(filter->src_mac, match.key->src);
	}
8565 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8566 struct flow_match_vlan match;
8568 flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= I40E_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
					match.mask->vlan_id);
				return I40E_ERR_CONFIG;
			}
		}

		filter->vlan_id = cpu_to_be16(match.key->vlan_id);
	}
8583 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8584 struct flow_match_control match;
8586 flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}
8590 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8591 struct flow_match_ipv4_addrs match;
8593 flow_rule_match_ipv4_addrs(rule, &match);
		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
					&match.mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (match.mask->src) {
			if (match.mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
					&match.mask->src);
				return I40E_ERR_CONFIG;
			}
		}

		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
			return I40E_ERR_CONFIG;
		}
		filter->dst_ipv4 = match.key->dst;
		filter->src_ipv4 = match.key->src;
	}
8622 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8623 struct flow_match_ipv6_addrs match;
8625 flow_rule_match_ipv6_addrs(rule, &match);
		/* src and dest IPV6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			dev_err(&pf->pdev->dev,
				"Bad ipv6, addr is LOOPBACK\n");
			return I40E_ERR_CONFIG;
		}
8636 if (!ipv6_addr_any(&match.mask->dst) ||
8637 !ipv6_addr_any(&match.mask->src))
8638 field_flags |= I40E_CLOUD_FIELD_IIP;
8640 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8641 sizeof(filter->src_ipv6));
		memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
		       sizeof(filter->dst_ipv6));
	}
8646 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8647 struct flow_match_ports match;
8649 flow_rule_match_ports(rule, &match);
		if (match.mask->src) {
			if (match.mask->src == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
					be16_to_cpu(match.mask->src));
				return I40E_ERR_CONFIG;
			}
		}

		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
					be16_to_cpu(match.mask->dst));
				return I40E_ERR_CONFIG;
			}
		}

		filter->dst_port = match.key->dst;
		filter->src_port = match.key->src;

		switch (filter->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			dev_err(&pf->pdev->dev,
				"Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	filter->flags = field_flags;
	return 0;
}
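/* Summary of what the parser above accepts: dst/src MAC and the VLAN
 * id only with an all-ones mask, IPv4 addresses and L4 ports only
 * fully masked, IPv6 addresses with any non-zero mask (loopback
 * rejected), an optional tunnel key id, and TCP or UDP transport.
 * Everything else is rejected before a filter is built.
 */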
8688 * i40e_handle_tclass: Forward to a traffic class on the device
8689 * @vsi: Pointer to VSI
8690 * @tc: traffic class index on the device
8691 * @filter: Pointer to cloud filter structure
static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
			      struct i40e_cloud_filter *filter)
{
	struct i40e_channel *ch, *ch_tmp;
	/* direct to a traffic class on the same device */
	if (tc == 0) {
		filter->seid = vsi->seid;
		return 0;
	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
		if (!filter->dst_port) {
			dev_err(&vsi->back->pdev->dev,
				"Specify destination port to direct to traffic class that is not default\n");
			return -EINVAL;
		}
		if (list_empty(&vsi->ch_list))
			return -EINVAL;
		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
					 list) {
			if (ch->seid == vsi->tc_seid_map[tc])
				filter->seid = ch->seid;
		}
		return 0;
	}
	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
	return -EINVAL;
}
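/* Example resolution (values hypothetical): with ADQ channels set up,
 * "hw_tc 2" arrives here as tc == 2 and the loop above copies the seid
 * of the channel VSI recorded in vsi->tc_seid_map[2]; tc == 0 keeps
 * the main VSI seid, and a TC that was never enabled fails -EINVAL.
 */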
8723 * i40e_configure_clsflower - Configure tc flower filters
8724 * @vsi: Pointer to VSI
8725 * @cls_flower: Pointer to struct flow_cls_offload
static int i40e_configure_clsflower(struct i40e_vsi *vsi,
				    struct flow_cls_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	if (tc < 0) {
		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
		return -EOPNOTSUPP;
	}

	if (!tc) {
		dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
		return -EBUSY;

	if (pf->fdir_pf_active_filters ||
	    (!hlist_empty(&pf->fdir_filter_list))) {
		dev_err(&vsi->back->pdev->dev,
			"Flow Director Sideband filters exist, turn ntuple off to configure cloud filters\n");
		return -EINVAL;
	}
8757 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8758 dev_err(&vsi->back->pdev->dev,
8759 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8760 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8761 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->cookie = cls_flower->cookie;

	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
	if (err < 0)
		goto err;

	err = i40e_handle_tclass(vsi, tc, filter);
	if (err < 0)
		goto err;

	/* Add cloud filter */
	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, true);

	if (err) {
		dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
			err);
		goto err;
	}

	/* add filter to the ordered list */
	INIT_HLIST_NODE(&filter->cloud_node);

	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);

	pf->num_cloud_filters++;

	return err;
err:
	kfree(filter);
	return err;
}
 * i40e_find_cloud_filter - Find the cloud filter in the list
8805 * @vsi: Pointer to VSI
8806 * @cookie: filter specific cookie
static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
							unsigned long *cookie)
{
	struct i40e_cloud_filter *filter = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &vsi->back->cloud_filter_list, cloud_node)
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;

	return NULL;
}
8823 * i40e_delete_clsflower - Remove tc flower filters
8824 * @vsi: Pointer to VSI
8825 * @cls_flower: Pointer to struct flow_cls_offload
static int i40e_delete_clsflower(struct i40e_vsi *vsi,
				 struct flow_cls_offload *cls_flower)
{
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);

	if (!filter)
		return -EINVAL;

	hash_del(&filter->cloud_node);

	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, false);

	kfree(filter);
	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to delete cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
	}
8855 pf->num_cloud_filters--;
	if (!pf->num_cloud_filters)
		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	return 0;
}
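/* Note on the flag dance above: adding the first cloud filter traded
 * I40E_FLAG_FD_SB_ENABLED for I40E_FLAG_FD_SB_TO_CLOUD_FILTER (see
 * i40e_configure_clsflower()); deleting the last cloud filter hands
 * Flow Director Sideband back to ethtool ntuple rules.
 */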
8867 * i40e_setup_tc_cls_flower - flower classifier offloads
8868 * @np: net device to configure
8869 * @cls_flower: offload data
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
				    struct flow_cls_offload *cls_flower)
{
	struct i40e_vsi *vsi = np->vsi;

8876 switch (cls_flower->command) {
8877 case FLOW_CLS_REPLACE:
8878 return i40e_configure_clsflower(vsi, cls_flower);
8879 case FLOW_CLS_DESTROY:
8880 return i40e_delete_clsflower(vsi, cls_flower);
	case FLOW_CLS_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}
static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct i40e_netdev_priv *np = cb_priv;

	if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return i40e_setup_tc_cls_flower(np, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
8905 static LIST_HEAD(i40e_block_cb_list);
static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			   void *type_data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return i40e_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &i40e_block_cb_list,
						  i40e_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}
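/* __i40e_setup_tc is the driver's .ndo_setup_tc entry point. An mqprio
 * channel-mode qdisc such as (illustrative, names are placeholders):
 *   tc qdisc add dev <ethX> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 2@0 2@2 hw 1 mode channel
 * arrives here as TC_SETUP_QDISC_MQPRIO, while flower filters arrive
 * through the TC_SETUP_BLOCK callback list registered above.
 */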
8926 * i40e_open - Called when a network interface is made active
8927 * @netdev: network interface device structure
8929 * The open entry point is called when a network interface is made
8930 * active by the system (IFF_UP). At this point all resources needed
8931 * for transmit and receive operations are allocated, the interrupt
8932 * handler is registered with the OS, the netdev watchdog subtask is
8933 * enabled, and the stack is notified that the interface is ready.
8935 * Returns 0 on success, negative value on failure
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;
8944 /* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	if (i40e_force_link_state(pf, true))
		return -EAGAIN;

	err = i40e_vsi_open(vsi);
	if (err)
		return err;
8958 /* configure global TSO hardware offload settings */
8959 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8960 TCP_FLAG_FIN) >> 16);
8961 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8963 TCP_FLAG_CWR) >> 16);
8964 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
	udp_tunnel_get_rx_info(netdev);

	return 0;
}
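/* udp_tunnel_get_rx_info() asks the core UDP tunnel infrastructure to
 * replay its port table (VXLAN/GENEVE) so the freshly opened interface
 * re-learns the offloaded tunnel ports it lost while down.
 */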
8971 * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
8972 * @vsi: vsi structure
8974 * This updates netdev's number of tx/rx queues
8976 * Returns status of setting tx/rx queues
static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
{
	int ret;

	ret = netif_set_real_num_rx_queues(vsi->netdev,
					   vsi->num_queue_pairs);
	if (ret)
		return ret;

	return netif_set_real_num_tx_queues(vsi->netdev,
					    vsi->num_queue_pairs);
}
/**
 * i40e_vsi_open - finish initialization and bring the VSI up
 * @vsi: the VSI to open
8995 * Finish initialization of the VSI.
8997 * Returns 0 on success, negative value on failure
8999 * Note: expects to be called while under rtnl_lock()
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = i40e_netif_set_realnum_tx_rx_queues(vsi);
		if (err)
			goto err_set_queues;
	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;
	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);

	return err;
}
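/* The unwind labels above mirror the setup order: the IRQ is released
 * before the rings it services are freed, and a failed open of the
 * main LAN VSI additionally schedules a PF reset rather than leaving
 * the port half-initialized.
 */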
9065 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
9066 * @pf: Pointer to PF
9068 * This function destroys the hlist where all the Flow Director
9069 * filters were saved.
9071 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
9073 struct i40e_fdir_filter *filter;
9074 struct i40e_flex_pit *pit_entry, *tmp;
9075 struct hlist_node *node2;
	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}

	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l3_flex_pit_list);

	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
9095 pf->fdir_pf_active_filters = 0;
9096 i40e_reset_fdir_filter_cnt(pf);
9098 /* Reprogram the default input set for TCP/IPv4 */
9099 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9100 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9101 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9103 /* Reprogram the default input set for TCP/IPv6 */
9104 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
9105 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9106 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9108 /* Reprogram the default input set for UDP/IPv4 */
9109 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
9110 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9111 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9113 /* Reprogram the default input set for UDP/IPv6 */
9114 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
9115 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9116 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9118 /* Reprogram the default input set for SCTP/IPv4 */
9119 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
9120 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9121 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9123 /* Reprogram the default input set for SCTP/IPv6 */
9124 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
9125 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9126 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9128 /* Reprogram the default input set for Other/IPv4 */
9129 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
9130 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9132 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
9133 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9135 /* Reprogram the default input set for Other/IPv6 */
9136 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
9137 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
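/* Each i40e_write_fd_input_set() call above returns the PCTYPE to the
 * driver-default input set (L3 src/dst, plus L4 src/dst ports for
 * TCP/UDP/SCTP), so the next ethtool ntuple rule starts from a known
 * mask state instead of whatever the deleted filters had programmed.
 */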
9144 * i40e_cloud_filter_exit - Cleans up the cloud filters
9145 * @pf: Pointer to PF
 * This function destroys the hlist where all the cloud filters
 * were saved.
 */
9150 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
9152 struct i40e_cloud_filter *cfilter;
9153 struct hlist_node *node;
	hlist_for_each_entry_safe(cfilter, node,
				  &pf->cloud_filter_list, cloud_node) {
		hlist_del(&cfilter->cloud_node);
		kfree(cfilter);
	}
	pf->num_cloud_filters = 0;
9162 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
9163 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
9164 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
9165 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
9166 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
9171 * i40e_close - Disables a network interface
9172 * @netdev: network interface device structure
9174 * The close entry point is called when an interface is de-activated
9175 * by the OS. The hardware is still under the driver's control, but
9176 * this netdev interface is disabled.
9178 * Returns 0, this is not allowed to fail
int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}
9191 * i40e_do_reset - Start a PF or Core Reset sequence
9192 * @pf: board private structure
9193 * @reset_flags: which reset is requested
9194 * @lock_acquired: indicates whether or not the lock has been acquired
9195 * before this function was called.
9197 * The essential difference in resets is that the PF Reset
9198 * doesn't clear the packet buffers, doesn't reset the PE
9199 * firmware, and doesn't bother the other PFs on the chip.
9201 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
9205 /* do the biggest reset indicated */
9206 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9208 /* Request a Global Reset
9210 * This will start the chip's countdown to the actual full
9211 * chip reset event, and a warning interrupt to be sent
9212 * to all PFs, including the requestor. Our handler
9213 * for the warning interrupt will deal with the shutdown
9214 * and recovery of the switch setup.
9216 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9217 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9218 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9219 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9221 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9223 /* Request a Core Reset
9225 * Same as Global Reset, except does *not* include the MAC/PHY
9227 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9228 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9229 val |= I40E_GLGEN_RTRIG_CORER_MASK;
9230 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9231 i40e_flush(&pf->hw);
9233 } else if (reset_flags & I40E_PF_RESET_FLAG) {
9235 /* Request a PF Reset
9237 * Resets only the PF-specific registers
9239 * This goes directly to the tear-down and rebuild of
9240 * the switch, since we need to do all the recovery as
9241 * for the Core Reset.
9243 dev_dbg(&pf->pdev->dev, "PFR requested\n");
9244 i40e_handle_reset_warning(pf, lock_acquired);
9246 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9247 /* Request a PF Reset
9249 * Resets PF and reinitializes PFs VSI.
9251 i40e_prep_for_reset(pf);
9252 i40e_reset_and_rebuild(pf, true, lock_acquired);
9253 dev_info(&pf->pdev->dev,
9254 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
9255 "FW LLDP is disabled\n" :
9256 "FW LLDP is enabled\n");
9258 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9261 /* Find the VSI(s) that requested a re-init */
9262 dev_info(&pf->pdev->dev,
9263 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
					       vsi->state))
				i40e_vsi_reinit_locked(pf->vsi[v]);
		}
9272 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9275 /* Find the VSI(s) that needs to be brought down */
9276 dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
					       vsi->state)) {
				set_bit(__I40E_VSI_DOWN, vsi->state);
				i40e_down(vsi);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
9293 #ifdef CONFIG_I40E_DCB
9295 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9296 * @pf: board private structure
9297 * @old_cfg: current DCB config
9298 * @new_cfg: new DCB config
9300 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9301 struct i40e_dcbx_config *old_cfg,
9302 struct i40e_dcbx_config *new_cfg)
9304 bool need_reconfig = false;
9306 /* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
9310 /* If Priority Table has changed reconfig is needed */
9311 if (memcmp(&new_cfg->etscfg.prioritytable,
9312 &old_cfg->etscfg.prioritytable,
9313 sizeof(new_cfg->etscfg.prioritytable))) {
9314 need_reconfig = true;
9315 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9318 if (memcmp(&new_cfg->etscfg.tcbwtable,
9319 &old_cfg->etscfg.tcbwtable,
9320 sizeof(new_cfg->etscfg.tcbwtable)))
9321 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9323 if (memcmp(&new_cfg->etscfg.tsatable,
9324 &old_cfg->etscfg.tsatable,
9325 sizeof(new_cfg->etscfg.tsatable)))
9326 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9329 /* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
9333 need_reconfig = true;
9334 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9337 /* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
9341 need_reconfig = true;
9342 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9345 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
	return need_reconfig;
}
9350 * i40e_handle_lldp_event - Handle LLDP Change MIB event
9351 * @pf: board private structure
9352 * @e: event info posted on ARQ
9354 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9355 struct i40e_arq_event_info *e)
9357 struct i40e_aqc_lldp_get_mib *mib =
9358 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9359 struct i40e_hw *hw = &pf->hw;
9360 struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

9365 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9366 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9367 (hw->phy.link_info.link_speed &
9368 ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9369 !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9370 /* let firmware decide if the DCB should be disabled */
9371 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9373 /* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;
9377 /* Ignore if event is not for Nearest Bridge */
9378 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9379 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9380 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;
9384 /* Check MIB Type and return if event for Remote MIB update */
9385 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9386 dev_dbg(&pf->pdev->dev,
9387 "LLDP event mib type %s\n", type ? "remote" : "local");
9388 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9389 /* Update the remote cached instance and return */
9390 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9391 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
					      &hw->remote_dcbx_config);
		goto exit;
	}
9396 /* Store the old configuration */
9397 tmp_dcbx_cfg = hw->local_dcbx_config;
9399 /* Reset the old DCBx configuration data */
9400 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9401 /* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		/* X710-T*L 2.5G and 5G speeds don't support DCB */
		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
		    (hw->phy.link_info.link_speed &
		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
			dev_warn(&pf->pdev->dev,
				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		} else {
			dev_info(&pf->pdev->dev,
				 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
		}
		goto exit;
	}
9421 /* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}
9428 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9429 &hw->local_dcbx_config);
	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;
9436 /* Enable DCB tagging only when more than one TC */
9437 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9442 set_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* Reconfiguration needed; quiesce all VSIs */
9444 i40e_pf_quiesce_all_vsi(pf);
9446 /* Changes in configuration update VEB/VSI */
9447 i40e_dcb_reconfigure(pf);
	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
		set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
		set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
	}

exit:
	return ret;
}
9471 #endif /* CONFIG_I40E_DCB */
9474 * i40e_do_reset_safe - Protected reset path for userland calls.
9475 * @pf: board private structure
9476 * @reset_flags: which reset is requested
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
}
9487 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9488 * @pf: board private structure
9489 * @e: event info posted on ARQ
9491 * Handler for LAN Queue Overflow Event generated by the firmware for PF
9494 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9495 struct i40e_arq_event_info *e)
9497 struct i40e_aqc_lan_overflow *data =
9498 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9499 u32 queue = le32_to_cpu(data->prtdcb_rupto);
9500 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9501 struct i40e_hw *hw = &pf->hw;
9505 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9508 /* Queue belongs to VF, find the VF and issue VF reset */
9509 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9510 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
9511 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9512 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9513 vf_id -= hw->func_caps.vf_base_id;
9514 vf = &pf->vf[vf_id];
9515 i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
9523 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9524 * @pf: board private structure
9526 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}
9536 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9537 * @pf: board private structure
9539 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		     I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}
9551 * i40e_get_global_fd_count - Get total FD filters programmed on device
9552 * @pf: board private structure
9554 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}
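/* The two PFQF_FDSTAT-based helpers report this PF's guaranteed and
 * best-effort filter usage, while I40E_GLQF_FDCNT_0 above is the
 * device-wide total across all PFs; the re-enable heuristics below
 * compare the global count against this PF's quota.
 */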
9566 * i40e_reenable_fdir_sb - Restore FDir SB capability
9567 * @pf: board private structure
9569 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9571 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9572 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9573 (I40E_DEBUG_FD & pf->hw.debug_mask))
9574 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9578 * i40e_reenable_fdir_atr - Restore FDir ATR capability
9579 * @pf: board private structure
9581 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9583 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9584 /* ATR uses the same filtering logic as SB rules. It only
9585 * functions properly if the input set mask is at the default
9586 * settings. It is safe to restore the default input set
9587 * because there are no active TCPv4 filter rules.
9589 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9590 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9591 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9593 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9594 (I40E_DEBUG_FD & pf->hw.debug_mask))
9595 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9600 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9601 * @pf: board private structure
9602 * @filter: FDir filter to remove
9604 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9605 struct i40e_fdir_filter *filter)
9607 /* Update counters */
9608 pf->fdir_pf_active_filters--;
	switch (filter->flow_type) {
	case TCP_V4_FLOW:
		pf->fd_tcp4_filter_cnt--;
		break;
	case UDP_V4_FLOW:
		pf->fd_udp4_filter_cnt--;
		break;
	case SCTP_V4_FLOW:
		pf->fd_sctp4_filter_cnt--;
		break;
	case TCP_V6_FLOW:
		pf->fd_tcp6_filter_cnt--;
		break;
	case UDP_V6_FLOW:
		pf->fd_udp6_filter_cnt--;
		break;
	case SCTP_V6_FLOW:
		pf->fd_sctp6_filter_cnt--;
		break;
	case IP_USER_FLOW:
		switch (filter->ipl4_proto) {
		case IPPROTO_TCP:
			pf->fd_tcp4_filter_cnt--;
			break;
		case IPPROTO_UDP:
			pf->fd_udp4_filter_cnt--;
			break;
		case IPPROTO_SCTP:
			pf->fd_sctp4_filter_cnt--;
			break;
		case IPPROTO_IP:
			pf->fd_ip4_filter_cnt--;
			break;
		}
		break;
	case IPV6_USER_FLOW:
		switch (filter->ipl4_proto) {
		case IPPROTO_TCP:
			pf->fd_tcp6_filter_cnt--;
			break;
		case IPPROTO_UDP:
			pf->fd_udp6_filter_cnt--;
			break;
		case IPPROTO_SCTP:
			pf->fd_sctp6_filter_cnt--;
			break;
		case IPPROTO_IP:
			pf->fd_ip6_filter_cnt--;
			break;
		}
		break;
	}

	/* Remove the filter from the list and free memory */
	hlist_del(&filter->fdir_node);
	kfree(filter);
}
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9671 * @pf: board private structure
9673 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9675 struct i40e_fdir_filter *filter;
9676 u32 fcnt_prog, fcnt_avail;
9677 struct hlist_node *node;
	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;
9682 /* Check if we have enough room to re-enable FDir SB capability. */
9683 fcnt_prog = i40e_get_global_fd_count(pf);
9684 fcnt_avail = pf->fdir_pf_filter_count;
9685 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9686 (pf->fd_add_err == 0) ||
9687 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9688 i40e_reenable_fdir_sb(pf);
	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules.
	 */
9694 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9695 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9696 i40e_reenable_fdir_atr(pf);
9698 /* if hw had a problem adding a filter, delete it */
9699 if (pf->fd_inv > 0) {
9700 hlist_for_each_entry_safe(filter, node,
9701 &pf->fdir_filter_list, fdir_node)
9702 if (filter->fd_id == pf->fd_inv)
9703 i40e_delete_invalid_filter(pf, filter);
9707 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9708 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9710 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9711 * @pf: board private structure
9713 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9715 unsigned long min_flush_time;
9716 int flush_wait_retry = 50;
9717 bool disable_atr = false;
9721 if (!time_after(jiffies, pf->fd_flush_timestamp +
			(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;
	/* If the flush is happening too quickly and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
9728 min_flush_time = pf->fd_flush_timestamp +
9729 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9730 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9732 if (!(time_after(jiffies, min_flush_time)) &&
9733 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}
9739 pf->fd_flush_timestamp = jiffies;
9740 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9741 /* flush all filters */
9742 wr32(&pf->hw, I40E_PFQF_CTL_1,
9743 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9744 i40e_flush(&pf->hw);
9748 /* Check FD flush status every 5-6msec */
9749 usleep_range(5000, 6000);
9750 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9751 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9753 } while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
			clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}
9768 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9769 * @pf: board private structure
9771 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
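/* Worked example (numbers hypothetical): with 512 filters reported by
 * i40e_get_current_fd_count() and 200 sideband rules active on this
 * PF, the ATR estimate above is 512 - 200 = 312.
 */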
9777 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9778 * @pf: board private structure
9780 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9783 /* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state))
		return;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}
9795 * i40e_vsi_link_event - notify VSI of a link event
9796 * @vsi: vsi to be notified
9797 * @link_up: link up or down
9799 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}
9830 * i40e_veb_link_event - notify elements on the veb of a link event
9831 * @veb: veb to be notified
9832 * @link_up: link up or down
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;
9843 /* depth first... */
9844 for (i = 0; i < I40E_MAX_VEB; i++)
9845 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9846 i40e_veb_link_event(pf->veb[i], link_up);
9848 /* ... now the local VSIs */
9849 for (i = 0; i < pf->num_alloc_vsi; i++)
9850 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9851 i40e_vsi_link_event(pf->vsi[i], link_up);
9855 * i40e_link_event - Update netif_carrier status
9856 * @pf: board private structure
9858 static void i40e_link_event(struct i40e_pf *pf)
9860 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;
#ifdef CONFIG_I40E_DCB
	int err;
#endif /* CONFIG_I40E_DCB */
9868 /* set this to force the get_link_status call to refresh state */
9869 pf->hw.phy.get_link_info = true;
9870 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9871 status = i40e_get_link_status(&pf->hw, &new_link);
	/* On success, disable temp link polling */
	if (status == I40E_SUCCESS) {
		clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
	} else {
		/* Enable link polling temporarily until i40e_get_link_status
		 * returns I40E_SUCCESS
		 */
		set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
	}
9886 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9887 new_link_speed = pf->hw.phy.link_info.link_speed;
	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;
9895 i40e_print_link_message(vsi, new_link);
9897 /* Notify the base of the switch tree connected to
9898 * the link. Floating VEBs are not notified.
9900 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9901 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9903 i40e_vsi_link_event(vsi, new_link);
9906 i40e_vc_notify_link_state(pf);
9908 if (pf->flags & I40E_FLAG_PTP)
9909 i40e_ptp_set_increment(pf);
9910 #ifdef CONFIG_I40E_DCB
	if (new_link == old_link)
		return;
	/* Not SW DCB so firmware will take care of default settings */
	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
		return;

	/* We cover here only link down, as after link up in case of SW DCB
	 * SW LLDP agent will take care of setting it up
	 */
	if (!new_link) {
		dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
		memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
		err = i40e_dcb_sw_default_config(pf);
		if (err) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
		} else {
			pf->dcbx_cap = DCB_CAP_DCBX_HOST |
				       DCB_CAP_DCBX_VER_IEEE;
			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
		}
	}
9934 #endif /* CONFIG_I40E_DCB */
9938 * i40e_watchdog_subtask - periodic checks not using event driven response
9939 * @pf: board private structure
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

9945 /* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;
9950 /* make sure we don't do these things too often */
9951 if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;
9956 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9957 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9958 i40e_link_event(pf);
	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
9964 if (pf->vsi[i] && pf->vsi[i]->netdev)
9965 i40e_update_stats(pf->vsi[i]);
9967 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9968 /* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}
	i40e_ptp_rx_hang(pf);
	i40e_ptp_tx_hang(pf);
}
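/* Cadence note: the service task re-arms on pf->service_timer_period
 * (typically about one second in this driver), and the time_before()
 * guard above keeps these checks from running more often than that
 * even if the task is scheduled back-to-back.
 */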
9979 * i40e_reset_subtask - Set up for resetting the device and driver
9980 * @pf: board private structure
9982 static void i40e_reset_subtask(struct i40e_pf *pf)
9984 u32 reset_flags = 0;
9986 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9987 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9988 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9990 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9991 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9992 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9994 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9995 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9996 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9998 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9999 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
10000 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
10002 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
10003 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
10004 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
		i40e_prep_for_reset(pf);
		i40e_reset(pf);
		i40e_rebuild(pf, false, false);
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
}
10025 * i40e_handle_link_event - Handle link event
10026 * @pf: board private structure
10027 * @e: event info posted on ARQ
10029 static void i40e_handle_link_event(struct i40e_pf *pf,
10030 struct i40e_arq_event_info *e)
10032 struct i40e_aqc_get_link_status *status =
10033 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
10035 /* Do a new status request to re-enable LSE reporting
10036 * and load new status information into the hw struct
10037 * This completely ignores any state information
10038 * in the ARQ event info, instead choosing to always
10039 * issue the AQ update link status command.
10041 i40e_link_event(pf);
10043 /* Check if module meets thermal requirements */
10044 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
10045 dev_err(&pf->pdev->dev,
10046 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
10047 dev_err(&pf->pdev->dev,
10048 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10050 /* check for unqualified module, if link is down, suppress
10051 * the message if link was forced to be down.
10053 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
10054 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
10055 (!(status->link_info & I40E_AQ_LINK_UP)) &&
10056 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
10057 dev_err(&pf->pdev->dev,
10058 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
10059 dev_err(&pf->pdev->dev,
10060 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10066 * i40e_clean_adminq_subtask - Clean the AdminQ rings
10067 * @pf: board private structure
10069 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
10071 struct i40e_arq_event_info event;
10072 struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;
10079 /* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;
10083 /* check for error indications */
10084 val = rd32(&pf->hw, pf->hw.aq.arq.len);
10086 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
10087 if (hw->debug_mask & I40E_DEBUG_AQ)
10088 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
10089 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
10091 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
10092 if (hw->debug_mask & I40E_DEBUG_AQ)
10093 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
10094 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
10095 pf->arq_overflows++;
10097 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
10098 if (hw->debug_mask & I40E_DEBUG_AQ)
10099 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
10100 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
10103 wr32(&pf->hw, pf->hw.aq.arq.len, val);
10105 val = rd32(&pf->hw, pf->hw.aq.asq.len);
10107 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
10108 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10109 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
10110 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
10112 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
10113 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10114 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
10115 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
10117 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
10118 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10119 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
10120 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
10123 wr32(&pf->hw, pf->hw.aq.asq.len, val);
10125 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
10126 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}
10139 opcode = le16_to_cpu(event.desc.opcode);
10142 case i40e_aqc_opc_get_link_status:
10144 i40e_handle_link_event(pf, &event);
10147 case i40e_aqc_opc_send_msg_to_pf:
10148 ret = i40e_vc_process_vf_msg(pf,
10149 le16_to_cpu(event.desc.retval),
10150 le32_to_cpu(event.desc.cookie_high),
10151 le32_to_cpu(event.desc.cookie_low),
10155 case i40e_aqc_opc_lldp_update_mib:
10156 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
10157 #ifdef CONFIG_I40E_DCB
10159 i40e_handle_lldp_event(pf, &event);
10161 #endif /* CONFIG_I40E_DCB */
10163 case i40e_aqc_opc_event_lan_overflow:
10164 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
10165 i40e_handle_lan_overflow_event(pf, &event);
10167 case i40e_aqc_opc_send_msg_to_peer:
10168 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
10170 case i40e_aqc_opc_nvm_erase:
10171 case i40e_aqc_opc_nvm_update:
10172 case i40e_aqc_opc_oem_post_update:
10173 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
10174 "ARQ NVM operation 0x%04x completed\n",
10178 dev_info(&pf->pdev->dev,
10179 "ARQ: Unknown event 0x%04x ignored\n",
10183 } while (i++ < pf->adminq_work_limit);
10185 if (i < pf->adminq_work_limit)
10186 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
10188 /* re-enable Admin queue interrupt cause */
10189 val = rd32(hw, I40E_PFINT_ICR0_ENA);
10190 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
10191 wr32(hw, I40E_PFINT_ICR0_ENA, val);
	kfree(event.msg_buf);
}
10198 * i40e_verify_eeprom - make sure eeprom is good to use
10199 * @pf: board private structure
10201 static void i40e_verify_eeprom(struct i40e_pf *pf)
	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
}
10223 * i40e_enable_pf_switch_lb
10224 * @pf: pointer to the PF structure
10226 * enable switch loop back or die - no point in a return value
10228 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10230 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10231 struct i40e_vsi_context ctxt;
10234 ctxt.seid = pf->main_vsi_seid;
10235 ctxt.pf_num = pf->hw.pf_id;
10237 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10239 dev_info(&pf->pdev->dev,
10240 "couldn't get PF vsi config, err %s aq_err %s\n",
10241 i40e_stat_str(&pf->hw, ret),
10242 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10245 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10246 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10247 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10249 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10251 dev_info(&pf->pdev->dev,
10252 "update vsi switch failed, err %s aq_err %s\n",
10253 i40e_stat_str(&pf->hw, ret),
10254 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10259 * i40e_disable_pf_switch_lb
10260 * @pf: pointer to the PF structure
10262 * disable switch loop back or die - no point in a return value
10264 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10266 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10267 struct i40e_vsi_context ctxt;
10270 ctxt.seid = pf->main_vsi_seid;
10271 ctxt.pf_num = pf->hw.pf_id;
10273 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10275 dev_info(&pf->pdev->dev,
10276 "couldn't get PF vsi config, err %s aq_err %s\n",
10277 i40e_stat_str(&pf->hw, ret),
10278 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10281 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10282 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10283 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10285 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10287 dev_info(&pf->pdev->dev,
10288 "update vsi switch failed, err %s aq_err %s\n",
10289 i40e_stat_str(&pf->hw, ret),
10290 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10295 * i40e_config_bridge_mode - Configure the HW bridge mode
10296 * @veb: pointer to the bridge instance
10298 * Configure the loop back mode for the LAN VSI that is downlink to the
10299 * specified HW bridge instance. It is expected this function is called
10300 * when a new HW bridge is instantiated.
10302 static void i40e_config_bridge_mode(struct i40e_veb *veb)
10304 struct i40e_pf *pf = veb->pf;
10306 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10307 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10308 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}
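/* Rule of thumb behind the choice above: VEB mode switches VF-to-VF
 * traffic inside the internal switch, so PF switch loopback is
 * enabled; VEPA mode hairpins that traffic through the adjacent
 * external bridge, so loopback is disabled.
 */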
10316 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10317 * @veb: pointer to the VEB instance
10319 * This is a recursive function that first builds the attached VSIs then
10320 * recurses in to build the next layer of VEB. We track the connections
10321 * through our own index numbers because the seid's from the HW could
10322 * change across the reset.
10324 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10326 struct i40e_vsi *ctl_vsi = NULL;
10327 struct i40e_pf *pf = veb->pf;
	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
10346 if (ctl_vsi != pf->vsi[pf->lan_vsi])
10347 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10348 ret = i40e_add_vsi(ctl_vsi);
10350 dev_info(&pf->pdev->dev,
10351 "rebuild of veb_idx %d owner VSI failed: %d\n",
10353 goto end_reconstitute;
10355 i40e_vsi_reset_stats(ctl_vsi);
10357 /* create the VEB in the switch and move the VSI onto the VEB */
10358 ret = i40e_add_veb(veb, ctl_vsi);
10360 goto end_reconstitute;
10362 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10363 veb->bridge_mode = BRIDGE_MODE_VEB;
10365 veb->bridge_mode = BRIDGE_MODE_VEPA;
10366 i40e_config_bridge_mode(veb);
10368 /* create the remaining VSIs attached to this VEB */
10369 for (v = 0; v < pf->num_alloc_vsi; v++) {
10370 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10373 if (pf->vsi[v]->veb_idx == veb->idx) {
10374 struct i40e_vsi *vsi = pf->vsi[v];
10376 vsi->uplink_seid = veb->seid;
10377 ret = i40e_add_vsi(vsi);
10379 dev_info(&pf->pdev->dev,
10380 "rebuild of vsi_idx %d failed: %d\n",
10382 goto end_reconstitute;
10384 i40e_vsi_reset_stats(vsi);
10388 /* create any VEBs attached to this VEB - RECURSION */
10389 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10390 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10391 pf->veb[veb_idx]->uplink_seid = veb->seid;
10392 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10403 * i40e_get_capabilities - get info about the HW
10404 * @pf: the PF struct
10405 * @list_type: AQ capability to be queried
10407 static int i40e_get_capabilities(struct i40e_pf *pf,
10408 enum i40e_admin_queue_opc list_type)
10410 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10415 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
10417 cap_buf = kzalloc(buf_len, GFP_KERNEL);
10421 /* this loads the data into the hw struct for us */
10422 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10423 &data_size, list_type,
10425 /* data loaded, buffer no longer needed */
10428 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10429 /* retry with a larger buffer */
10430 buf_len = data_size;
10431 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10432 dev_info(&pf->pdev->dev,
10433 "capability discovery failed, err %s aq_err %s\n",
10434 i40e_stat_str(&pf->hw, err),
10435 i40e_aq_str(&pf->hw,
10436 pf->hw.aq.asq_last_status));
10441 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10442 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10443 dev_info(&pf->pdev->dev,
10444 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10445 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10446 pf->hw.func_caps.num_msix_vectors,
10447 pf->hw.func_caps.num_msix_vectors_vf,
10448 pf->hw.func_caps.fd_filters_guaranteed,
10449 pf->hw.func_caps.fd_filters_best_effort,
10450 pf->hw.func_caps.num_tx_qp,
10451 pf->hw.func_caps.num_vsis);
10452 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10453 dev_info(&pf->pdev->dev,
10454 "switch_mode=0x%04x, function_valid=0x%08x\n",
10455 pf->hw.dev_caps.switch_mode,
10456 pf->hw.dev_caps.valid_functions);
10457 dev_info(&pf->pdev->dev,
10458 "SR-IOV=%d, num_vfs for all function=%u\n",
10459 pf->hw.dev_caps.sr_iov_1_1,
10460 pf->hw.dev_caps.num_vfs);
10461 dev_info(&pf->pdev->dev,
10462 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10463 pf->hw.dev_caps.num_vsis,
10464 pf->hw.dev_caps.num_rx_qp,
10465 pf->hw.dev_caps.num_tx_qp);
10468 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10469 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10470 + pf->hw.func_caps.num_vfs)
10471 if (pf->hw.revision_id == 0 &&
10472 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10473 dev_info(&pf->pdev->dev,
10474 "got num_vsis %d, setting num_vsis to %d\n",
10475 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10476 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10482 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10485 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10486 * @pf: board private structure
10488 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10490 struct i40e_vsi *vsi;
10492 /* quick workaround for an NVM issue that leaves a critical register
10495 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10496 static const u32 hkey[] = {
10497 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10498 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10499 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10503 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10504 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;
10510 /* find existing VSI and see if it needs configuring */
10511 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
10529 * i40e_fdir_teardown - release the Flow Director resources
10530 * @pf: board private structure
10532 static void i40e_fdir_teardown(struct i40e_pf *pf)
10534 struct i40e_vsi *vsi;
10536 i40e_fdir_filter_exit(pf);
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (vsi)
		i40e_vsi_release(vsi);
}
10543 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10544 * @vsi: PF main vsi
10545 * @seid: seid of main or channel VSIs
10547 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10548 * existed before reset
10550 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10552 struct i40e_cloud_filter *cfilter;
10553 struct i40e_pf *pf = vsi->back;
10554 struct hlist_node *node;
10557 /* Add cloud filters back if they exist */
10558 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
		if (cfilter->seid != seid)
			continue;

10563 if (cfilter->dst_port)
10564 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);

		if (ret) {
			dev_dbg(&pf->pdev->dev,
				"Failed to rebuild cloud filter, err %s aq_err %s\n",
				i40e_stat_str(&pf->hw, ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));
			return ret;
		}
	}
	return 0;
}
10582 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10583 * @vsi: PF main vsi
10585 * Rebuilds channel VSIs if they existed before reset
10587 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10589 struct i40e_channel *ch, *ch_tmp;
	if (list_empty(&vsi->ch_list))
		return 0;
10595 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			break;
10598 /* Proceed with creation of channel (VMDq2) VSI */
10599 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10601 dev_info(&vsi->back->pdev->dev,
10602 "failed to rebuild channels using uplink_seid %u\n",
10606 /* Reconfigure TX queues using QTX_CTL register */
10607 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10609 dev_info(&vsi->back->pdev->dev,
10610 "failed to configure TX rings for channel %u\n",
10614 /* update 'next_base_queue' */
10615 vsi->next_base_queue = vsi->next_base_queue +
10616 ch->num_queue_pairs;
10617 if (ch->max_tx_rate) {
10618 u64 credits = ch->max_tx_rate;
			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

10624 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10625 dev_dbg(&vsi->back->pdev->dev,
10626 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10631 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
		if (ret) {
			dev_dbg(&vsi->back->pdev->dev,
				"Failed to rebuild cloud filters for channel VSI %u\n",
				ch->seid);
			return ret;
		}
	}
	return 0;
}
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}

	/* Save the current PTP time so that we can restore the time after the
	 * reset completes.
	 */
	i40e_ptp_save_hw_time(pf);
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = 0xff;
	dv.minor_version = 0xff;
	dv.build_version = 0xff;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}

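/* For reference, the OEM version block parsed above is a short run of NVM
 * words at the offset found through I40E_SR_NVM_OEM_VERSION_PTR: word 0
 * holds the block length, word 1 the capabilities (whose low nibble must be
 * zero), word 2 the gen/snap value and word 3 the release. The result packs
 * gen/snap above I40E_OEM_SNAP_SHIFT and release in the low bits.
 */
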
/**
 * i40e_reset - wait for core reset to finish, reset PF if core reset not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}

/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
	    i40e_check_recovery_mode(pf)) {
		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
	}

	if (test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !old_recovery_mode_bit)
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
		/* The following delay is necessary for firmware update. */
		mdelay(1000);
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	/* if we are going out of or into recovery mode we have to act
	 * accordingly with regard to resources initialization
	 * and deinitialization
	 */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
	    old_recovery_mode_bit) {
		if (i40e_get_capabilities(pf,
					  i40e_aqc_opc_list_func_capabilities))
			goto end_unlock;

		if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
			/* we're staying in recovery mode so we'll reinitialize
			 * misc vector here
			 */
			if (i40e_setup_misc_vector_for_recovery_mode(pf))
				goto end_unlock;
		} else {
			if (!lock_acquired)
				rtnl_lock();
			/* we're going out of recovery mode so we'll free
			 * the IRQ allocated specifically for recovery mode
			 * and restore the interrupt scheme
			 */
			free_irq(pf->pdev->irq, pf);
			i40e_clear_interrupt_scheme(pf);
			if (i40e_restore_interrupt_scheme(pf))
				goto end_unlock;
		}

		/* tell the firmware that we're starting */
		i40e_send_version(pf);

		/* bail out in case recovery mode was detected, as there is
		 * no need for further configuration.
		 */
		goto end_unlock;
	}

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	/* Enable FW to write a default DCB config on link-up
	 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
	 * is not supported with new link speed
	 */
	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
		i40e_aq_set_dcb_parameters(hw, false, NULL);
	} else {
		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
		    (hw->phy.link_info.link_speed &
		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
			i40e_aq_set_dcb_parameters(hw, false, NULL);
			dev_warn(&pf->pdev->dev,
				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		} else {
			i40e_aq_set_dcb_parameters(hw, true, NULL);
			ret = i40e_init_pf_dcb(pf);
			if (ret) {
				dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
					 ret);
				pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
				/* Continue without DCB enabled */
			}
		}
	}

#endif /* CONFIG_I40E_DCB */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit, true);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware. Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 credits = 0;

		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate,
			credits,
			vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	/* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}

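/* The ordering in i40e_rebuild() is deliberate: the AdminQ comes back first
 * so the driver can talk to firmware at all, then capabilities and the HMC
 * so queues have backing context, then the PF switch, VEBs/VSIs, channels
 * and cloud filters; only after all of that are the VSIs unquiesced and the
 * VFs reset.
 */
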
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;

	if (test_bit(__I40E_IN_REMOVE, pf->state))
		return;
	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &pf->vf[i];
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

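/* Each I40E_GL_MDET_TX/RX register packs several fields into one 32-bit
 * word, decoded above with the usual mask-then-shift idiom, e.g.:
 *
 *	event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
 *			I40E_GL_MDET_TX_EVENT_SHIFT;
 *
 * Writing all ones back to the register clears it and re-arms detection
 * for the next event.
 */
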
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_SUSPENDED, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
		i40e_sync_filters_subtask(pf);
		i40e_reset_subtask(pf);
		i40e_handle_mdd_event(pf);
		i40e_vc_process_vflr_event(pf);
		i40e_watchdog_subtask(pf);
		i40e_fdir_reinit_subtask(pf);
		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
			/* Client subtask will reopen next time through. */
			i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
							   true);
		} else {
			i40e_client_subtask(pf);
			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
					       pf->state))
				i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
		}
		i40e_sync_filters_subtask(pf);
	} else {
		i40e_reset_subtask(pf);
	}

	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @t: timer list pointer
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	if (is_kdump_kernel()) {
		vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
		vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

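/* i40e_vsi_alloc_arrays() makes a single allocation and carves it up, so
 * the ring pointer arrays end up contiguous in memory:
 *
 *	tx_rings[0..n-1] | xdp_rings[0..n-1] (XDP only) | rx_rings[0..n-1]
 *
 * which is why the error path above only needs to free vsi->tx_rings.
 */
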
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	if (type == I40E_VSI_MAIN) {
		vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
		if (!vsi->af_xdp_zc_qps)
			goto err_rings;
	}

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	bitmap_free(vsi->af_xdp_zc_qps);
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

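/* The slot search in i40e_vsi_mem_alloc() is a two-pass circular scan: it
 * starts at pf->next_vsi, runs to the end of pf->vsi[], and only then wraps
 * to index 0 and scans up to where it started. Advancing pf->next_vsi past
 * each successful allocation keeps slot reuse roughly round-robin even when
 * the array is fragmented by VSI creation and destruction.
 */
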
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
	vsi->xdp_rings = NULL;
}

/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 **/
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
			vsi->idx, vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	bitmap_free(vsi->af_xdp_zc_qps);
	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			WRITE_ONCE(vsi->tx_rings[i], NULL);
			WRITE_ONCE(vsi->rx_rings[i], NULL);
			if (vsi->xdp_rings)
				WRITE_ONCE(vsi->xdp_rings[i], NULL);
		}
	}
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		ring->itr_setting = pf->tx_itr_default;
		WRITE_ONCE(vsi->tx_rings[i], ring++);

		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;

		ring->queue_index = vsi->alloc_queue_pairs + i;
		ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = NULL;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		set_ring_xdp(ring);
		ring->itr_setting = pf->tx_itr_default;
		WRITE_ONCE(vsi->xdp_rings[i], ring++);

setup_rx:
		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_rx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		ring->itr_setting = pf->rx_itr_default;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}

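/* Note the single kcalloc() per queue pair above: ring++ steps through one
 * block holding two (or, with XDP enabled, three) struct i40e_ring entries
 * in Tx / XDP-Tx / Rx order, matching the qpv multiplier, so one kfree_rcu()
 * of the Tx ring in i40e_vsi_clear_rings() releases the whole block.
 */
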
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}

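/* pci_enable_msix_range() either reserves some vector count within
 * [I40E_MIN_MSIX, vectors] and returns that count, or fails outright with a
 * negative errno; it never succeeds with fewer than the minimum. Callers can
 * therefore treat any positive return here as a usable vector budget.
 */
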
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *	- Queues being used for RSS.
	 *		We don't need as many as max_rss_size vectors.
	 *		use rss_size instead in the calculation since that
	 *		is governed by number of cpus in the system.
	 *	- assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 *   - The CPU count within the NUMA node if iWARP is enabled
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve some vectors for the main PF traffic queues. Initially we
	 * only reserve at most 50% of the available vectors, in the case that
	 * the number of online CPUs is large. This ensures that we can enable
	 * extra features as well. Once we've enabled the other features, we
	 * will use any remaining vectors to reach as close as we can to the
	 * number of online CPUs.
	 */
	cpus = num_online_cpus();
	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
	vectors_left -= pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			int vmdq_vecs_wanted =
				pf->num_vmdq_vsis * pf->num_vmdq_qps;
			int vmdq_vecs =
				min_t(int, vectors_left, vmdq_vecs_wanted);

			/* if we're short on vectors for what's desired, we limit
			 * the queues per vmdq.  If this is still more than are
			 * available, the user will need to change the number of
			 * queues/vectors used by the PF later with the ethtool
			 * channels command
			 */
			if (vectors_left < vmdq_vecs_wanted) {
				pf->num_vmdq_qps = 1;
				vmdq_vecs_wanted = pf->num_vmdq_vsis;
				vmdq_vecs = min_t(int,
						  vectors_left,
						  vmdq_vecs_wanted);
			}
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	/* On systems with a large number of SMP cores, we previously limited
	 * the number of vectors for num_lan_msix to be at most 50% of the
	 * available vectors, to allow for other features. Now, we add back
	 * the remaining vectors. However, we ensure that the total
	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
	 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
	pf->num_lan_msix += extra_vectors;
	vectors_left -= extra_vectors;

	WARN(vectors_left < 0,
	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");

	v_budget += pf->num_lan_msix;
	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
			 v_actual, v_budget);
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
							   iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}

/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi,
			       i40e_napi_poll, NAPI_POLL_WEIGHT);

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err, v_idx, num_q_vectors;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}

/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_IWARP_ENABLED	|
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_DCB_ENABLED	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile)
		return -ENOMEM;

	pf->irq_pile->num_entries = vectors;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}

/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when we suspended the
 * device. This should be called during resume to re-allocate the q_vectors
 * and reacquire IRQs.
 **/
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;

	/* Now that we've re-acquired IRQs, we need to remap the vectors and
	 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;
			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}

	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		i40e_client_update_msix_info(pf);

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}

/**
 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
 * non queue events in recovery mode
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
 * This is handled differently from the normal (non-recovery) case since no
 * Tx/Rx resources are being allocated in recovery mode.
 **/
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
{
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI-X misc vector request failed, error %d\n",
				 err);
			return err;
		}
	} else {
		u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;

		err = request_irq(pf->pdev->irq, i40e_intr, flags,
				  pf->int_name, pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI/legacy misc vector request failed, error %d\n",
				 err);
			return err;
		}
		i40e_enable_misc_int_causes(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return 0;
}

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once, the first time through. */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}

/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}

/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}

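/* The seed and LUT buffers above are consumed as 32-bit words: the PF hash
 * key spans I40E_PFQF_HKEY_MAX_INDEX + 1 registers, and callers such as
 * i40e_pf_config_rss() size their buffers as I40E_HKEY_ARRAY_SIZE and
 * I40E_HLUT_ARRAY_SIZE bytes to match the register file exactly.
 */
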
/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}

/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 **/
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

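/* For example, with rss_table_size = 512 and rss_size = 4 the default LUT
 * becomes 0, 1, 2, 3, 0, 1, 2, 3, ... so hashed flows are spread
 * round-robin across the four enabled queues.
 */
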
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size) {
		u16 qcount;
		/* If the firmware does something weird during VSI init, we
		 * could end up with zero TCs. Check for that to avoid
		 * divide-by-zero. It probably won't pass traffic, but it also
		 * won't panic.
		 */
		qcount = vsi->num_queue_pairs /
			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
 * count, which may differ from the requested queue count.
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	queue_count = min_t(int, queue_count, num_online_cpus());
	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf);
		if (test_bit(__I40E_IN_REMOVE, pf->state))
			return pf->alloc_rss_size;

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}

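/* This is the backend for runtime queue-count changes; the ethtool
 * set-channels path is the usual caller, which is also why the function
 * documents that it expects to run under rtnl_lock().
 */
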
/**
 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
	i40e_status status;
	bool min_valid, max_valid;
	u32 max_bw, min_bw;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->min_bw = min_bw;
		if (max_valid)
			pf->max_bw = max_bw;
	}

	return status;
}

/**
 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
	struct i40e_aqc_configure_partition_bw_data bw_data;
	i40e_status status;

	memset(&bw_data, 0, sizeof(bw_data));

	/* Set the valid bit for this PF */
	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;

	/* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}

/**
 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, 0, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}

/**
 * i40e_is_total_port_shutdown_enabled - read NVM and return value
 * if total port shutdown feature is enabled for this PF
 * @pf: board private structure
 **/
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
{
#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(4)
#define I40E_FEATURES_ENABLE_PTR		0x2A
#define I40E_CURRENT_SETTING_PTR		0x2B
#define I40E_LINK_BEHAVIOR_WORD_OFFSET		0x2D
#define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4
	i40e_status read_status = I40E_SUCCESS;
	u16 sr_emp_sr_settings_ptr = 0;
	u16 features_enable = 0;
	u16 link_behavior = 0;
	bool ret = false;

	read_status = i40e_read_nvm_word(&pf->hw,
					 I40E_SR_EMP_SR_SETTINGS_PTR,
					 &sr_emp_sr_settings_ptr);
	if (read_status)
		goto err_nvm;
	read_status = i40e_read_nvm_word(&pf->hw,
					 sr_emp_sr_settings_ptr +
					 I40E_FEATURES_ENABLE_PTR,
					 &features_enable);
	if (read_status)
		goto err_nvm;
	if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
		read_status = i40e_read_nvm_module_data(&pf->hw,
							I40E_SR_EMP_SR_SETTINGS_PTR,
							I40E_CURRENT_SETTING_PTR,
							I40E_LINK_BEHAVIOR_WORD_OFFSET,
							I40E_LINK_BEHAVIOR_WORD_LENGTH,
							&link_behavior);
		if (read_status)
			goto err_nvm;
		link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
		ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
	}
	return ret;

err_nvm:
	dev_warn(&pf->pdev->dev,
		 "total-port-shutdown feature is off due to read nvm error: %s\n",
		 i40e_stat_str(&pf->hw, read_status));
	return ret;
}

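/* The link-behavior word carries one 4-bit field per port: for port 2 the
 * shift above is 2 * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH = 8, and bit 0 of
 * the resulting nibble (I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED) selects the
 * total-port-shutdown behavior.
 */
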
12647 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12648 * @pf: board private structure to initialize
12650 * i40e_sw_init initializes the Adapter private data structure.
12651 * Fields are initialized based on PCI device information and
12652 * OS network device settings (MTU size).
12654 static int i40e_sw_init(struct i40e_pf *pf)
12660 /* Set default capability flags */
12661 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
12662 I40E_FLAG_MSI_ENABLED |
12663 I40E_FLAG_MSIX_ENABLED;
12665 /* Set default ITR */
12666 pf->rx_itr_default = I40E_ITR_RX_DEF;
12667 pf->tx_itr_default = I40E_ITR_TX_DEF;
12669 /* Depending on PF configurations, it is possible that the RSS
12670 * maximum might end up larger than the available queues
12672 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12673 pf->alloc_rss_size = 1;
12674 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12675 pf->rss_size_max = min_t(int, pf->rss_size_max,
12676 pf->hw.func_caps.num_tx_qp);
12678 /* find the next higher power-of-2 of num cpus */
12679 pow = roundup_pow_of_two(num_online_cpus());
12680 pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
12682 if (pf->hw.func_caps.rss) {
12683 pf->flags |= I40E_FLAG_RSS_ENABLED;
12684 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12685 num_online_cpus());
12688 /* MFP mode enabled */
12689 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12690 pf->flags |= I40E_FLAG_MFP_ENABLED;
12691 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12692 if (i40e_get_partition_bw_setting(pf)) {
12693 dev_warn(&pf->pdev->dev,
12694 "Could not get partition bw settings\n");
12696 dev_info(&pf->pdev->dev,
12697 "Partition BW Min = %8.8x, Max = %8.8x\n",
12698 pf->min_bw, pf->max_bw);
12700 /* nudge the Tx scheduler */
12701 i40e_set_partition_bw_setting(pf);
12705 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12706 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12707 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
12708 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
12709 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
12710 pf->hw.num_partitions > 1)
12711 dev_info(&pf->pdev->dev,
12712 "Flow Director Sideband mode Disabled in MFP mode\n");
12714 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12715 pf->fdir_pf_filter_count =
12716 pf->hw.func_caps.fd_filters_guaranteed;
12717 pf->hw.fdir_shared_filter_count =
12718 pf->hw.func_caps.fd_filters_best_effort;
12721 if (pf->hw.mac.type == I40E_MAC_X722) {
12722 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12723 I40E_HW_128_QP_RSS_CAPABLE |
12724 I40E_HW_ATR_EVICT_CAPABLE |
12725 I40E_HW_WB_ON_ITR_CAPABLE |
12726 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12727 I40E_HW_NO_PCI_LINK_CHECK |
12728 I40E_HW_USE_SET_LLDP_MIB |
12729 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12730 I40E_HW_PTP_L4_CAPABLE |
12731 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12732 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12734 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12735 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12736 I40E_FDEVICT_PCTYPE_DEFAULT) {
12737 dev_warn(&pf->pdev->dev,
12738 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12739 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12740 }
12741 } else if ((pf->hw.aq.api_maj_ver > 1) ||
12742 ((pf->hw.aq.api_maj_ver == 1) &&
12743 (pf->hw.aq.api_min_ver > 4))) {
12744 /* Supported in FW API version higher than 1.4 */
12745 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12746 }
12748 /* Enable HW ATR eviction if possible */
12749 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12750 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12752 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12753 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12754 (pf->hw.aq.fw_maj_ver < 4))) {
12755 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12756 /* No DCB support for FW < v4.33 */
12757 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12758 }
12760 /* Disable FW LLDP if FW < v4.3 */
12761 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12762 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12763 (pf->hw.aq.fw_maj_ver < 4)))
12764 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12766 /* Use the FW Set LLDP MIB API if FW > v4.40 */
12767 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12768 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12769 (pf->hw.aq.fw_maj_ver >= 5)))
12770 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12772 /* Enable PTP L4 if FW > v6.0 */
12773 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12774 pf->hw.aq.fw_maj_ver >= 6)
12775 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12777 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12778 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12779 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12780 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12781 }
12783 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12784 pf->flags |= I40E_FLAG_IWARP_ENABLED;
12785 /* IWARP needs one extra vector for CQP just like MISC. */
12786 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12787 }
12788 /* Stopping FW LLDP engine is supported on XL710 and X722
12789 * starting from FW versions determined in i40e_init_adminq.
12790 * Stopping the FW LLDP engine is not supported on XL710
12791 * if NPAR is functioning, so unset this hw flag in this case.
12792 */
12793 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12794 pf->hw.func_caps.npar_enable &&
12795 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12796 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12798 #ifdef CONFIG_PCI_IOV
12799 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12800 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12801 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12802 pf->num_req_vfs = min_t(int,
12803 pf->hw.func_caps.num_vfs,
12804 I40E_MAX_VF_COUNT);
12805 }
12806 #endif /* CONFIG_PCI_IOV */
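/* User-space reference (standard sysfs SR-IOV API; eth0 and the count
 * of 4 are placeholders):
 *
 *   echo 4 > /sys/class/net/eth0/device/sriov_numvfs
 *
 * This is the usual way VFs are requested from a PF whose capability
 * limits were just computed above.
 */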
12807 pf->eeprom_version = 0xDEAD;
12808 pf->lan_veb = I40E_NO_VEB;
12809 pf->lan_vsi = I40E_NO_VSI;
12811 /* By default FW has this off for performance reasons */
12812 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12814 /* set up queue assignment tracking */
12815 size = sizeof(struct i40e_lump_tracking)
12816 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12817 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12818 if (!pf->qp_pile) {
12819 err = -ENOMEM;
12820 goto sw_init_done;
12821 }
12822 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12824 pf->tx_timeout_recovery_level = 1;
12826 if (pf->hw.mac.type != I40E_MAC_X722 &&
12827 i40e_is_total_port_shutdown_enabled(pf)) {
12828 /* Link down on close must be on when total port shutdown
12829 * is enabled for a given port
12830 */
12831 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12832 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12833 dev_info(&pf->pdev->dev,
12834 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12836 mutex_init(&pf->switch_mutex);
12843 * i40e_set_ntuple - set the ntuple feature flag and take action
12844 * @pf: board private structure to initialize
12845 * @features: the feature set that the stack is suggesting
12847 * returns a bool to indicate if reset needs to happen
12849 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12850 {
12851 bool need_reset = false;
12853 /* Check if Flow Director n-tuple support was enabled or disabled. If
12854 * the state changed, we need to reset.
12855 */
12856 if (features & NETIF_F_NTUPLE) {
12857 /* Enable filters and mark for reset */
12858 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12859 need_reset = true;
12860 /* enable FD_SB only if there is an MSI-X vector and no cloud
12861 * filters exist
12862 */
12863 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12864 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12865 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12866 }
12867 } else {
12868 /* turn off filters, mark for reset and clear SW filter list */
12869 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12870 need_reset = true;
12871 i40e_fdir_filter_exit(pf);
12872 }
12873 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12874 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12875 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12877 /* reset fd counters */
12878 pf->fd_add_err = 0;
12879 pf->fd_atr_cnt = 0;
12880 /* if ATR was auto disabled it can be re-enabled. */
12881 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12882 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12883 (I40E_DEBUG_FD & pf->hw.debug_mask))
12884 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12885 }
12886 return need_reset;
12887 }
12890 * i40e_clear_rss_lut - clear the rx hash lookup table
12891 * @vsi: the VSI being configured
12893 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12894 {
12895 struct i40e_pf *pf = vsi->back;
12896 struct i40e_hw *hw = &pf->hw;
12897 u16 vf_id = vsi->vf_id;
12898 u8 i;
12900 if (vsi->type == I40E_VSI_MAIN) {
12901 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12902 wr32(hw, I40E_PFQF_HLUT(i), 0);
12903 } else if (vsi->type == I40E_VSI_SRIOV) {
12904 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12905 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12907 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12912 * i40e_set_features - set the netdev feature flags
12913 * @netdev: ptr to the netdev being adjusted
12914 * @features: the feature set that the stack is suggesting
12915 * Note: expects to be called while under rtnl_lock()
12917 static int i40e_set_features(struct net_device *netdev,
12918 netdev_features_t features)
12919 {
12920 struct i40e_netdev_priv *np = netdev_priv(netdev);
12921 struct i40e_vsi *vsi = np->vsi;
12922 struct i40e_pf *pf = vsi->back;
12923 bool need_reset;
12925 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12926 i40e_pf_config_rss(pf);
12927 else if (!(features & NETIF_F_RXHASH) &&
12928 netdev->features & NETIF_F_RXHASH)
12929 i40e_clear_rss_lut(vsi);
12931 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12932 i40e_vlan_stripping_enable(vsi);
12933 else
12934 i40e_vlan_stripping_disable(vsi);
12936 if (!(features & NETIF_F_HW_TC) &&
12937 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12938 dev_err(&pf->pdev->dev,
12939 "Offloaded tc filters active, can't turn hw_tc_offload off");
12940 return -EINVAL;
12941 }
12943 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12944 i40e_del_all_macvlans(vsi);
12946 need_reset = i40e_set_ntuple(pf, features);
12948 if (need_reset)
12949 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12951 return 0;
12952 }
12954 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12955 unsigned int table, unsigned int idx,
12956 struct udp_tunnel_info *ti)
12957 {
12958 struct i40e_netdev_priv *np = netdev_priv(netdev);
12959 struct i40e_hw *hw = &np->vsi->back->hw;
12960 u8 type, filter_index;
12961 i40e_status ret;
12963 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12964 I40E_AQC_TUNNEL_TYPE_NGE;
12966 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12967 NULL);
12968 if (ret) {
12969 netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12970 i40e_stat_str(hw, ret),
12971 i40e_aq_str(hw, hw->aq.asq_last_status));
12972 return -EIO;
12973 }
12975 udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
12976 return 0;
12977 }
12979 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12980 unsigned int table, unsigned int idx,
12981 struct udp_tunnel_info *ti)
12982 {
12983 struct i40e_netdev_priv *np = netdev_priv(netdev);
12984 struct i40e_hw *hw = &np->vsi->back->hw;
12985 i40e_status ret;
12987 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12988 if (ret) {
12989 netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
12990 i40e_stat_str(hw, ret),
12991 i40e_aq_str(hw, hw->aq.asq_last_status));
12992 return -EIO;
12993 }
12995 return 0;
12996 }
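/* These two callbacks are wired up through the udp_tunnel_nic
 * infrastructure; a sketch of the registration done elsewhere in this
 * driver (entry count shown here is illustrative):
 *
 *	pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
 *	pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
 *	pf->udp_tunnel_nic.tables[0].n_entries = 16;
 *	pf->udp_tunnel_nic.tables[0].tunnel_types =
 *		UDP_TUNNEL_TYPE_VXLAN | UDP_TUNNEL_TYPE_GENEVE;
 *
 * The netdev side is hooked via netdev->udp_tunnel_nic_info in
 * i40e_config_netdev() below.
 */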
12998 static int i40e_get_phys_port_id(struct net_device *netdev,
12999 struct netdev_phys_item_id *ppid)
13000 {
13001 struct i40e_netdev_priv *np = netdev_priv(netdev);
13002 struct i40e_pf *pf = np->vsi->back;
13003 struct i40e_hw *hw = &pf->hw;
13005 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
13006 return -EOPNOTSUPP;
13008 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
13009 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
13011 return 0;
13012 }
13015 * i40e_ndo_fdb_add - add an entry to the hardware database
13016 * @ndm: the input from the stack
13017 * @tb: pointer to array of nladdr (unused)
13018 * @dev: the net device pointer
13019 * @addr: the MAC address entry being added
13020 * @vid: VLAN ID
13021 * @flags: instructions from stack about fdb operation
13022 * @extack: netlink extended ack, unused currently
13024 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
13025 struct net_device *dev,
13026 const unsigned char *addr, u16 vid,
13027 u16 flags,
13028 struct netlink_ext_ack *extack)
13029 {
13030 struct i40e_netdev_priv *np = netdev_priv(dev);
13031 struct i40e_pf *pf = np->vsi->back;
13032 int err = 0;
13034 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
13035 return -EOPNOTSUPP;
13037 if (vid) {
13038 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
13039 return -EINVAL;
13040 }
13042 /* Hardware does not support aging addresses, so if an
13043 * ndm_state is given only allow permanent addresses
13044 */
13045 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
13046 netdev_info(dev, "FDB only supports static addresses\n");
13047 return -EINVAL;
13048 }
13050 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
13051 err = dev_uc_add_excl(dev, addr);
13052 else if (is_multicast_ether_addr(addr))
13053 err = dev_mc_add_excl(dev, addr);
13054 else
13055 err = -EINVAL;
13057 /* Only return duplicate errors if NLM_F_EXCL is set */
13058 if (err == -EEXIST && !(flags & NLM_F_EXCL))
13059 err = 0;
13061 return err;
13062 }
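/* User-space reference (iproute2 bridge tool; device and MAC are
 * placeholders):
 *
 *   bridge fdb add 00:11:22:33:44:55 dev eth0 permanent
 *
 * Only NUD_PERMANENT entries are accepted above, since the hardware
 * cannot age addresses out on its own.
 */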
13065 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
13066 * @dev: the netdev being configured
13067 * @nlh: RTNL message
13068 * @flags: bridge flags
13069 * @extack: netlink extended ack
13071 * Inserts a new hardware bridge if not already created and
13072 * enables the bridging mode requested (VEB or VEPA). If the
13073 * hardware bridge has already been inserted and the request
13074 * is to change the mode then that requires a PF reset to
13075 * allow rebuild of the components with required hardware
13076 * bridge mode enabled.
13078 * Note: expects to be called while under rtnl_lock()
13080 static int i40e_ndo_bridge_setlink(struct net_device *dev,
13081 struct nlmsghdr *nlh,
13082 u16 flags,
13083 struct netlink_ext_ack *extack)
13084 {
13085 struct i40e_netdev_priv *np = netdev_priv(dev);
13086 struct i40e_vsi *vsi = np->vsi;
13087 struct i40e_pf *pf = vsi->back;
13088 struct i40e_veb *veb = NULL;
13089 struct nlattr *attr, *br_spec;
13090 int i, rem, ret = 0;
13092 /* Only for PF VSI for now */
13093 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13094 return -EOPNOTSUPP;
13096 /* Find the HW bridge for PF VSI */
13097 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13098 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13099 veb = pf->veb[i];
13100 }
13102 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13104 nla_for_each_nested(attr, br_spec, rem) {
13107 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13108 continue;
13110 mode = nla_get_u16(attr);
13111 if ((mode != BRIDGE_MODE_VEPA) &&
13112 (mode != BRIDGE_MODE_VEB))
13113 return -EINVAL;
13115 /* Insert a new HW bridge */
13116 if (!veb) {
13117 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13118 vsi->tc_config.enabled_tc);
13119 if (veb) {
13120 veb->bridge_mode = mode;
13121 i40e_config_bridge_mode(veb);
13122 } else {
13123 /* No Bridge HW offload available */
13124 return -ENOENT;
13125 }
13126 break;
13127 } else if (mode != veb->bridge_mode) {
13128 /* Existing HW bridge but different mode needs reset */
13129 veb->bridge_mode = mode;
13130 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
13131 if (mode == BRIDGE_MODE_VEB)
13132 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
13133 else
13134 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
13135 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
13136 break;
13137 }
13138 }
13140 return ret;
13141 }
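/* User-space reference (iproute2; eth0 is a placeholder for the PF
 * netdev):
 *
 *   bridge link set dev eth0 hwmode veb    # embedded switching
 *   bridge link set dev eth0 hwmode vepa   # hairpin to external bridge
 *
 * Changing the mode on an existing bridge takes the PF-reset path
 * above so the switch components are rebuilt in the new mode.
 */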
13144 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
13145 * @skb: skb buff
13146 * @pid: process id
13147 * @seq: RTNL message seq #
13148 * @dev: the netdev being configured
13149 * @filter_mask: unused
13150 * @nlflags: netlink flags passed in
13152 * Return the mode in which the hardware bridge is operating:
13153 * VEB or VEPA mode.
13154 **/
13155 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13156 struct net_device *dev,
13157 u32 __always_unused filter_mask,
13158 int nlflags)
13159 {
13160 struct i40e_netdev_priv *np = netdev_priv(dev);
13161 struct i40e_vsi *vsi = np->vsi;
13162 struct i40e_pf *pf = vsi->back;
13163 struct i40e_veb *veb = NULL;
13164 int i;
13166 /* Only for PF VSI for now */
13167 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13168 return -EOPNOTSUPP;
13170 /* Find the HW bridge for the PF VSI */
13171 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13172 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13173 veb = pf->veb[i];
13174 }
13176 if (!veb)
13177 return 0;
13179 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
13180 0, 0, nlflags, filter_mask, NULL);
13181 }
13184 * i40e_features_check - Validate encapsulated packet conforms to limits
13185 * @skb: The socket buffer being sent
13186 * @dev: This physical port's netdev
13187 * @features: Offload features that the stack believes apply
13189 static netdev_features_t i40e_features_check(struct sk_buff *skb,
13190 struct net_device *dev,
13191 netdev_features_t features)
13192 {
13193 size_t len;
13195 /* No point in doing any of this if neither checksum nor GSO are
13196 * being requested for this frame. We can rule out both by just
13197 * checking for CHECKSUM_PARTIAL
13198 */
13199 if (skb->ip_summed != CHECKSUM_PARTIAL)
13200 return features;
13202 /* We cannot support GSO if the MSS is going to be less than
13203 * 64 bytes. If it is then we need to drop support for GSO.
13204 */
13205 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
13206 features &= ~NETIF_F_GSO_MASK;
13208 /* MACLEN can support at most 63 words */
13209 len = skb_network_header(skb) - skb->data;
13210 if (len & ~(63 * 2))
13211 goto out_err;
13213 /* IPLEN and EIPLEN can support at most 127 dwords */
13214 len = skb_transport_header(skb) - skb_network_header(skb);
13215 if (len & ~(127 * 4))
13216 goto out_err;
13218 if (skb->encapsulation) {
13219 /* L4TUNLEN can support 127 words */
13220 len = skb_inner_network_header(skb) - skb_transport_header(skb);
13221 if (len & ~(127 * 2))
13222 goto out_err;
13224 /* IPLEN can support at most 127 dwords */
13225 len = skb_inner_transport_header(skb) -
13226 skb_inner_network_header(skb);
13227 if (len & ~(127 * 4))
13228 goto out_err;
13229 }
13231 /* No need to validate L4LEN as TCP is the only protocol with a
13232 * flexible value and we support all possible values supported
13233 * by TCP, which is at most 15 dwords
13234 */
13236 return features;
13237 out_err:
13238 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13239 }
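/* Worked example of the header-length masks above (illustrative):
 * 63 * 2 = 126 = 0b01111110, so "len & ~(63 * 2)" is non-zero when the
 * L2 header length is odd or exceeds 126 bytes, i.e. MACLEN must be an
 * even byte count expressible in 63 two-byte words. The 127 * 4 dword
 * checks follow the same pattern for the IP and tunnel headers.
 */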
13242 * i40e_xdp_setup - add/remove an XDP program
13243 * @vsi: the VSI being changed
13244 * @prog: XDP program
13245 * @extack: netlink extended ack
13247 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
13248 struct netlink_ext_ack *extack)
13249 {
13250 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
13251 struct i40e_pf *pf = vsi->back;
13252 struct bpf_prog *old_prog;
13253 bool need_reset;
13254 int i;
13256 /* Don't allow frames that span over multiple buffers */
13257 if (frame_size > vsi->rx_buf_len) {
13258 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
13259 return -EINVAL;
13260 }
13262 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
13263 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
13265 if (need_reset)
13266 i40e_prep_for_reset(pf);
13268 /* VSI shall be deleted in a moment, just return EINVAL */
13269 if (test_bit(__I40E_IN_REMOVE, pf->state))
13270 return -EINVAL;
13272 old_prog = xchg(&vsi->xdp_prog, prog);
13274 if (need_reset) {
13275 if (!prog)
13276 /* Wait until ndo_xsk_wakeup completes. */
13277 synchronize_rcu();
13278 i40e_reset_and_rebuild(pf, true, true);
13279 }
13281 for (i = 0; i < vsi->num_queue_pairs; i++)
13282 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
13284 if (old_prog)
13285 bpf_prog_put(old_prog);
13287 /* Kick start the NAPI context if there is an AF_XDP socket open
13288 * on that queue id. This is so that receiving will start.
13289 */
13290 if (need_reset && prog)
13291 for (i = 0; i < vsi->num_queue_pairs; i++)
13292 if (vsi->xdp_rings[i]->xsk_pool)
13293 (void)i40e_xsk_wakeup(vsi->netdev, i,
13294 XDP_WAKEUP_RX);
13296 return 0;
13297 }
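/* User-space reference (iproute2; names are placeholders):
 *
 *   ip link set dev eth0 xdp obj xdp_prog.o sec xdp   # attach
 *   ip link set dev eth0 xdp off                      # detach
 *
 * Either transition flips i40e_enabled_xdp_vsi() and therefore takes
 * the reset-and-rebuild path above.
 */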
13300 * i40e_enter_busy_conf - Enters busy config state
13301 * @vsi: vsi
13303 * Returns 0 on success, <0 for failure.
13304 **/
13305 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
13306 {
13307 struct i40e_pf *pf = vsi->back;
13308 int timeout = 50;
13310 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13311 timeout--;
13312 if (!timeout)
13313 return -EBUSY;
13314 usleep_range(1000, 2000);
13315 }
13317 return 0;
13318 }
13321 * i40e_exit_busy_conf - Exits busy config state
13324 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13325 {
13326 struct i40e_pf *pf = vsi->back;
13328 clear_bit(__I40E_CONFIG_BUSY, pf->state);
13329 }
13332 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13334 * @queue_pair: queue pair
13336 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13337 {
13338 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13339 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13340 memset(&vsi->tx_rings[queue_pair]->stats, 0,
13341 sizeof(vsi->tx_rings[queue_pair]->stats));
13342 if (i40e_enabled_xdp_vsi(vsi)) {
13343 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13344 sizeof(vsi->xdp_rings[queue_pair]->stats));
13345 }
13346 }
13349 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13351 * @queue_pair: queue pair
13353 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13354 {
13355 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13356 if (i40e_enabled_xdp_vsi(vsi)) {
13357 /* Make sure that in-progress ndo_xdp_xmit calls are
13358 * completed.
13359 */
13360 synchronize_rcu();
13361 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13362 }
13363 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13364 }
13367 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13369 * @queue_pair: queue pair
13370 * @enable: true for enable, false for disable
13372 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13373 bool enable)
13374 {
13375 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13376 struct i40e_q_vector *q_vector = rxr->q_vector;
13378 if (!vsi->netdev)
13379 return;
13381 /* All rings in a qp belong to the same qvector. */
13382 if (q_vector->rx.ring || q_vector->tx.ring) {
13383 if (enable)
13384 napi_enable(&q_vector->napi);
13385 else
13386 napi_disable(&q_vector->napi);
13387 }
13388 }
13391 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13393 * @queue_pair: queue pair
13394 * @enable: true for enable, false for disable
13396 * Returns 0 on success, <0 on failure.
13398 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13399 bool enable)
13400 {
13401 struct i40e_pf *pf = vsi->back;
13402 int pf_q, ret = 0;
13404 pf_q = vsi->base_queue + queue_pair;
13405 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13406 false /*is xdp*/, enable);
13407 if (ret) {
13408 dev_info(&pf->pdev->dev,
13409 "VSI seid %d Tx ring %d %sable timeout\n",
13410 vsi->seid, pf_q, (enable ? "en" : "dis"));
13411 return ret;
13412 }
13414 i40e_control_rx_q(pf, pf_q, enable);
13415 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13416 if (ret) {
13417 dev_info(&pf->pdev->dev,
13418 "VSI seid %d Rx ring %d %sable timeout\n",
13419 vsi->seid, pf_q, (enable ? "en" : "dis"));
13420 return ret;
13421 }
13423 /* Due to HW errata, on Rx disable only, the register can
13424 * indicate done before it really is. Needs 50ms to be sure
13425 */
13426 if (!enable)
13427 mdelay(50);
13429 if (!i40e_enabled_xdp_vsi(vsi))
13430 return ret;
13432 ret = i40e_control_wait_tx_q(vsi->seid, pf,
13433 pf_q + vsi->alloc_queue_pairs,
13434 true /*is xdp*/, enable);
13435 if (ret)
13436 dev_info(&pf->pdev->dev,
13437 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13438 vsi->seid, pf_q, (enable ? "en" : "dis"));
13440 return ret;
13441 }
13445 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13446 * @vsi: vsi
13447 * @queue_pair: queue pair
13449 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13450 {
13451 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13452 struct i40e_pf *pf = vsi->back;
13453 struct i40e_hw *hw = &pf->hw;
13455 /* All rings in a qp belong to the same qvector. */
13456 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
13457 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13458 else
13459 i40e_irq_dynamic_enable_icr0(pf);
13461 i40e_flush(hw);
13462 }
13465 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13466 * @vsi: vsi
13467 * @queue_pair: queue pair
13469 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13470 {
13471 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13472 struct i40e_pf *pf = vsi->back;
13473 struct i40e_hw *hw = &pf->hw;
13475 /* For simplicity, instead of removing the qp interrupt causes
13476 * from the interrupt linked list, we simply disable the interrupt, and
13477 * leave the list intact.
13479 * All rings in a qp belong to the same qvector.
13481 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13482 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13484 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13485 i40e_flush(hw);
13486 synchronize_irq(pf->msix_entries[intpf].vector);
13487 } else {
13488 /* Legacy and MSI mode - this stops all interrupt handling */
13489 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13490 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13491 i40e_flush(hw);
13492 synchronize_irq(pf->pdev->irq);
13493 }
13494 }
13497 * i40e_queue_pair_disable - Disables a queue pair
13499 * @queue_pair: queue pair
13501 * Returns 0 on success, <0 on failure.
13503 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13504 {
13505 int err;
13507 err = i40e_enter_busy_conf(vsi);
13508 if (err)
13509 return err;
13511 i40e_queue_pair_disable_irq(vsi, queue_pair);
13512 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
13513 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13514 i40e_queue_pair_clean_rings(vsi, queue_pair);
13515 i40e_queue_pair_reset_stats(vsi, queue_pair);
13517 return err;
13518 }
13521 * i40e_queue_pair_enable - Enables a queue pair
13523 * @queue_pair: queue pair
13525 * Returns 0 on success, <0 on failure.
13527 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13528 {
13529 int err;
13531 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13532 if (err)
13533 return err;
13535 if (i40e_enabled_xdp_vsi(vsi)) {
13536 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13537 if (err)
13538 return err;
13539 }
13541 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13542 if (err)
13543 return err;
13545 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
13546 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13547 i40e_queue_pair_enable_irq(vsi, queue_pair);
13549 i40e_exit_busy_conf(vsi);
13551 return err;
13552 }
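/* A sketch of the intended caller pattern (this is how the AF_XDP pool
 * setup path elsewhere in the driver bounces one queue pair without a
 * full VSI rebuild; qid is a placeholder queue id):
 *
 *	if (netif_running(vsi->netdev)) {
 *		err = i40e_queue_pair_disable(vsi, qid);
 *		if (err)
 *			return err;
 *	}
 *	... swap the AF_XDP pool on the rings ...
 *	if (netif_running(vsi->netdev))
 *		err = i40e_queue_pair_enable(vsi, qid);
 */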
13555 * i40e_xdp - implements ndo_bpf for i40e
13556 * @dev: netdevice
13557 * @xdp: XDP command
13559 static int i40e_xdp(struct net_device *dev,
13560 struct netdev_bpf *xdp)
13561 {
13562 struct i40e_netdev_priv *np = netdev_priv(dev);
13563 struct i40e_vsi *vsi = np->vsi;
13565 if (vsi->type != I40E_VSI_MAIN)
13566 return -EINVAL;
13568 switch (xdp->command) {
13569 case XDP_SETUP_PROG:
13570 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13571 case XDP_SETUP_XSK_POOL:
13572 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13573 xdp->xsk.queue_id);
13574 default:
13575 return -EINVAL;
13576 }
13577 }
13579 static const struct net_device_ops i40e_netdev_ops = {
13580 .ndo_open = i40e_open,
13581 .ndo_stop = i40e_close,
13582 .ndo_start_xmit = i40e_lan_xmit_frame,
13583 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
13584 .ndo_set_rx_mode = i40e_set_rx_mode,
13585 .ndo_validate_addr = eth_validate_addr,
13586 .ndo_set_mac_address = i40e_set_mac,
13587 .ndo_change_mtu = i40e_change_mtu,
13588 .ndo_eth_ioctl = i40e_ioctl,
13589 .ndo_tx_timeout = i40e_tx_timeout,
13590 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
13591 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
13592 #ifdef CONFIG_NET_POLL_CONTROLLER
13593 .ndo_poll_controller = i40e_netpoll,
13594 #endif
13595 .ndo_setup_tc = __i40e_setup_tc,
13596 .ndo_select_queue = i40e_lan_select_queue,
13597 .ndo_set_features = i40e_set_features,
13598 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
13599 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
13600 .ndo_get_vf_stats = i40e_get_vf_stats,
13601 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
13602 .ndo_get_vf_config = i40e_ndo_get_vf_config,
13603 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
13604 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
13605 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
13606 .ndo_get_phys_port_id = i40e_get_phys_port_id,
13607 .ndo_fdb_add = i40e_ndo_fdb_add,
13608 .ndo_features_check = i40e_features_check,
13609 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
13610 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
13611 .ndo_bpf = i40e_xdp,
13612 .ndo_xdp_xmit = i40e_xdp_xmit,
13613 .ndo_xsk_wakeup = i40e_xsk_wakeup,
13614 .ndo_dfwd_add_station = i40e_fwd_add,
13615 .ndo_dfwd_del_station = i40e_fwd_del,
13616 };
13619 * i40e_config_netdev - Setup the netdev flags
13620 * @vsi: the VSI being configured
13622 * Returns 0 on success, negative value on failure
13624 static int i40e_config_netdev(struct i40e_vsi *vsi)
13625 {
13626 struct i40e_pf *pf = vsi->back;
13627 struct i40e_hw *hw = &pf->hw;
13628 struct i40e_netdev_priv *np;
13629 struct net_device *netdev;
13630 u8 broadcast[ETH_ALEN];
13631 u8 mac_addr[ETH_ALEN];
13632 int etherdev_size;
13633 netdev_features_t hw_enc_features;
13634 netdev_features_t hw_features;
13636 etherdev_size = sizeof(struct i40e_netdev_priv);
13637 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13638 if (!netdev)
13639 return -ENOMEM;
13641 vsi->netdev = netdev;
13642 np = netdev_priv(netdev);
13643 np->vsi = vsi;
13645 hw_enc_features = NETIF_F_SG |
13646 NETIF_F_HW_CSUM |
13647 NETIF_F_HIGHDMA |
13648 NETIF_F_SOFT_FEATURES |
13649 NETIF_F_TSO |
13650 NETIF_F_TSO_ECN |
13651 NETIF_F_TSO6 |
13652 NETIF_F_GSO_GRE |
13653 NETIF_F_GSO_GRE_CSUM |
13654 NETIF_F_GSO_PARTIAL |
13655 NETIF_F_GSO_IPXIP4 |
13656 NETIF_F_GSO_IPXIP6 |
13657 NETIF_F_GSO_UDP_TUNNEL |
13658 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13659 NETIF_F_GSO_UDP_L4 |
13660 NETIF_F_SCTP_CRC |
13661 NETIF_F_RXHASH |
13662 NETIF_F_RXCSUM |
13663 0;
13665 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13666 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13668 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13670 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13672 netdev->hw_enc_features |= hw_enc_features;
13674 /* record features VLANs can make use of */
13675 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13677 #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
13678 NETIF_F_GSO_GRE_CSUM | \
13679 NETIF_F_GSO_IPXIP4 | \
13680 NETIF_F_GSO_IPXIP6 | \
13681 NETIF_F_GSO_UDP_TUNNEL | \
13682 NETIF_F_GSO_UDP_TUNNEL_CSUM)
13684 netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
13685 netdev->features |= NETIF_F_GSO_PARTIAL |
13686 I40E_GSO_PARTIAL_FEATURES;
13688 netdev->mpls_features |= NETIF_F_SG;
13689 netdev->mpls_features |= NETIF_F_HW_CSUM;
13690 netdev->mpls_features |= NETIF_F_TSO;
13691 netdev->mpls_features |= NETIF_F_TSO6;
13692 netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
13694 /* enable macvlan offloads */
13695 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13697 hw_features = hw_enc_features |
13698 NETIF_F_HW_VLAN_CTAG_TX |
13699 NETIF_F_HW_VLAN_CTAG_RX;
13701 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13702 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13704 netdev->hw_features |= hw_features;
13706 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13707 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13709 netdev->features &= ~NETIF_F_HW_TC;
13711 if (vsi->type == I40E_VSI_MAIN) {
13712 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13713 ether_addr_copy(mac_addr, hw->mac.perm_addr);
13714 /* The following steps are necessary for two reasons. First,
13715 * some older NVM configurations load a default MAC-VLAN
13716 * filter that will accept any tagged packet, and we want to
13717 * replace this with a normal filter. Additionally, it is
13718 * possible our MAC address was provided by the platform using
13719 * Open Firmware or similar.
13721 * Thus, we need to remove the default filter and install one
13722 * specific to the MAC address.
13724 i40e_rm_default_mac_filter(vsi, mac_addr);
13725 spin_lock_bh(&vsi->mac_filter_hash_lock);
13726 i40e_add_mac_filter(vsi, mac_addr);
13727 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13728 } else {
13729 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13730 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13731 * the end, which is 4 bytes long, so force truncation of the
13732 * original name by IFNAMSIZ - 4
13733 */
13734 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13735 IFNAMSIZ - 4,
13736 pf->vsi[pf->lan_vsi]->netdev->name);
13737 eth_random_addr(mac_addr);
13739 spin_lock_bh(&vsi->mac_filter_hash_lock);
13740 i40e_add_mac_filter(vsi, mac_addr);
13741 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13742 }
13744 /* Add the broadcast filter so that we initially will receive
13745 * broadcast packets. Note that when a new VLAN is first added the
13746 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13747 * specific filters as part of transitioning into "vlan" operation.
13748 * When more VLANs are added, the driver will copy each existing MAC
13749 * filter and add it for the new VLAN.
13751 * Broadcast filters are handled specially by
13752 * i40e_sync_filters_subtask, as the driver must set the broadcast
13753 * promiscuous bit instead of adding this directly as a MAC/VLAN
13754 * filter. The subtask will update the correct broadcast promiscuous
13755 * bits as VLANs become active or inactive.
13757 eth_broadcast_addr(broadcast);
13758 spin_lock_bh(&vsi->mac_filter_hash_lock);
13759 i40e_add_mac_filter(vsi, broadcast);
13760 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13762 eth_hw_addr_set(netdev, mac_addr);
13763 ether_addr_copy(netdev->perm_addr, mac_addr);
13765 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13766 netdev->neigh_priv_len = sizeof(u32) * 4;
13768 netdev->priv_flags |= IFF_UNICAST_FLT;
13769 netdev->priv_flags |= IFF_SUPP_NOFCS;
13770 /* Setup netdev TC information */
13771 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13773 netdev->netdev_ops = &i40e_netdev_ops;
13774 netdev->watchdog_timeo = 5 * HZ;
13775 i40e_set_ethtool_ops(netdev);
13777 /* MTU range: 68 - 9706 */
13778 netdev->min_mtu = ETH_MIN_MTU;
13779 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13781 return 0;
13782 }
13785 * i40e_vsi_delete - Delete a VSI from the switch
13786 * @vsi: the VSI being removed
13789 **/
13790 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13791 {
13792 /* removing the default VSI is not allowed */
13793 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13794 return;
13796 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13797 }
13800 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13801 * @vsi: the VSI being queried
13803 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
13804 **/
13805 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13806 {
13807 struct i40e_veb *veb;
13808 struct i40e_pf *pf = vsi->back;
13810 /* Uplink is not a bridge so default to VEB */
13811 if (vsi->veb_idx >= I40E_MAX_VEB)
13812 return 1;
13814 veb = pf->veb[vsi->veb_idx];
13815 if (!veb) {
13816 dev_info(&pf->pdev->dev,
13817 "There is no veb associated with the bridge\n");
13818 return 0;
13819 }
13821 /* Uplink is a bridge in VEPA mode */
13822 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13823 return 0;
13824 } else {
13825 /* Uplink is a bridge in VEB mode */
13826 return 1;
13827 }
13829 /* VEPA is now default bridge, so return 0 */
13830 return 0;
13831 }
13834 * i40e_add_vsi - Add a VSI to the switch
13835 * @vsi: the VSI being configured
13837 * This initializes a VSI context depending on the VSI type to be added and
13838 * passes it down to the add_vsi aq command.
13840 static int i40e_add_vsi(struct i40e_vsi *vsi)
13841 {
13842 int ret = -ENODEV;
13843 struct i40e_pf *pf = vsi->back;
13844 struct i40e_hw *hw = &pf->hw;
13845 struct i40e_vsi_context ctxt;
13846 struct i40e_mac_filter *f;
13847 struct hlist_node *h;
13848 int bkt;
13850 u8 enabled_tc = 0x1; /* TC0 enabled */
13851 int f_count = 0;
13853 memset(&ctxt, 0, sizeof(ctxt));
13854 switch (vsi->type) {
13855 case I40E_VSI_MAIN:
13856 /* The PF's main VSI is already setup as part of the
13857 * device initialization, so we'll not bother with
13858 * the add_vsi call, but we will retrieve the current
13859 * VSI context.
13860 */
13861 ctxt.seid = pf->main_vsi_seid;
13862 ctxt.pf_num = pf->hw.pf_id;
13863 ctxt.vf_num = 0;
13864 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13865 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13866 if (ret) {
13867 dev_info(&pf->pdev->dev,
13868 "couldn't get PF vsi config, err %s aq_err %s\n",
13869 i40e_stat_str(&pf->hw, ret),
13870 i40e_aq_str(&pf->hw,
13871 pf->hw.aq.asq_last_status));
13872 return -ENOENT;
13873 }
13874 vsi->info = ctxt.info;
13875 vsi->info.valid_sections = 0;
13877 vsi->seid = ctxt.seid;
13878 vsi->id = ctxt.vsi_number;
13880 enabled_tc = i40e_pf_get_tc_map(pf);
13882 /* Source pruning is enabled by default, so the flag is
13883 * negative logic - if it's set, we need to fiddle with
13884 * the VSI to disable source pruning.
13886 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13887 memset(&ctxt, 0, sizeof(ctxt));
13888 ctxt.seid = pf->main_vsi_seid;
13889 ctxt.pf_num = pf->hw.pf_id;
13891 ctxt.info.valid_sections |=
13892 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13893 ctxt.info.switch_id =
13894 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13895 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13897 dev_info(&pf->pdev->dev,
13898 "update vsi failed, err %s aq_err %s\n",
13899 i40e_stat_str(&pf->hw, ret),
13900 i40e_aq_str(&pf->hw,
13901 pf->hw.aq.asq_last_status));
13902 ret = -ENOENT;
13903 goto err;
13904 }
13905 }
13907 /* MFP mode setup queue map and update VSI */
13908 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13909 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13910 memset(&ctxt, 0, sizeof(ctxt));
13911 ctxt.seid = pf->main_vsi_seid;
13912 ctxt.pf_num = pf->hw.pf_id;
13913 ctxt.vf_num = 0;
13914 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13915 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13917 dev_info(&pf->pdev->dev,
13918 "update vsi failed, err %s aq_err %s\n",
13919 i40e_stat_str(&pf->hw, ret),
13920 i40e_aq_str(&pf->hw,
13921 pf->hw.aq.asq_last_status));
13922 ret = -ENOENT;
13923 goto err;
13924 }
13925 /* update the local VSI info queue map */
13926 i40e_vsi_update_queue_map(vsi, &ctxt);
13927 vsi->info.valid_sections = 0;
13929 /* Default/Main VSI is only enabled for TC0
13930 * reconfigure it to enable all TCs that are
13931 * available on the port in SFP mode.
13932 * For MFP case the iSCSI PF would use this
13933 * flow to enable LAN+iSCSI TC.
13935 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13936 if (ret) {
13937 /* Single TC condition is not fatal,
13938 * message and continue
13939 */
13940 dev_info(&pf->pdev->dev,
13941 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13942 enabled_tc,
13943 i40e_stat_str(&pf->hw, ret),
13944 i40e_aq_str(&pf->hw,
13945 pf->hw.aq.asq_last_status));
13946 }
13947 }
13948 break;
13950 case I40E_VSI_FDIR:
13951 ctxt.pf_num = hw->pf_id;
13952 ctxt.vf_num = 0;
13953 ctxt.uplink_seid = vsi->uplink_seid;
13954 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13955 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13956 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13957 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13958 ctxt.info.valid_sections |=
13959 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13960 ctxt.info.switch_id =
13961 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13962 }
13963 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13964 break;
13966 case I40E_VSI_VMDQ2:
13967 ctxt.pf_num = hw->pf_id;
13968 ctxt.vf_num = 0;
13969 ctxt.uplink_seid = vsi->uplink_seid;
13970 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13971 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13973 /* This VSI is connected to VEB so the switch_id
13974 * should be set to zero by default.
13976 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13977 ctxt.info.valid_sections |=
13978 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13979 ctxt.info.switch_id =
13980 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13981 }
13983 /* Setup the VSI tx/rx queue map for TC0 only for now */
13984 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13985 break;
13987 case I40E_VSI_SRIOV:
13988 ctxt.pf_num = hw->pf_id;
13989 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13990 ctxt.uplink_seid = vsi->uplink_seid;
13991 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13992 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13994 /* This VSI is connected to VEB so the switch_id
13995 * should be set to zero by default.
13997 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13998 ctxt.info.valid_sections |=
13999 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14000 ctxt.info.switch_id =
14001 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14002 }
14004 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
14005 ctxt.info.valid_sections |=
14006 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
14007 ctxt.info.queueing_opt_flags |=
14008 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
14009 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
14010 }
14012 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
14013 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
14014 if (pf->vf[vsi->vf_id].spoofchk) {
14015 ctxt.info.valid_sections |=
14016 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
14017 ctxt.info.sec_flags |=
14018 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
14019 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
14020 }
14021 /* Setup the VSI tx/rx queue map for TC0 only for now */
14022 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14023 break;
14025 case I40E_VSI_IWARP:
14026 /* send down message to iWARP */
14027 break;
14029 default:
14030 return -ENODEV;
14031 }
14033 if (vsi->type != I40E_VSI_MAIN) {
14034 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
14035 if (ret) {
14036 dev_info(&vsi->back->pdev->dev,
14037 "add vsi failed, err %s aq_err %s\n",
14038 i40e_stat_str(&pf->hw, ret),
14039 i40e_aq_str(&pf->hw,
14040 pf->hw.aq.asq_last_status));
14041 ret = -ENOENT;
14042 goto err;
14043 }
14044 vsi->info = ctxt.info;
14045 vsi->info.valid_sections = 0;
14046 vsi->seid = ctxt.seid;
14047 vsi->id = ctxt.vsi_number;
14048 }
14050 vsi->active_filters = 0;
14051 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
14052 spin_lock_bh(&vsi->mac_filter_hash_lock);
14053 /* If macvlan filters already exist, force them to get loaded */
14054 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
14055 f->state = I40E_FILTER_NEW;
14056 f_count++;
14057 }
14058 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14060 if (f_count) {
14061 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
14062 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
14063 }
14065 /* Update VSI BW information */
14066 ret = i40e_vsi_get_bw_info(vsi);
14068 dev_info(&pf->pdev->dev,
14069 "couldn't get vsi bw info, err %s aq_err %s\n",
14070 i40e_stat_str(&pf->hw, ret),
14071 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14072 /* VSI is already added so not tearing that up */
14073 ret = 0;
14074 }
14076 err:
14077 return ret;
14078 }
14081 * i40e_vsi_release - Delete a VSI and free its resources
14082 * @vsi: the VSI being removed
14084 * Returns 0 on success or < 0 on error
14086 int i40e_vsi_release(struct i40e_vsi *vsi)
14087 {
14088 struct i40e_mac_filter *f;
14089 struct hlist_node *h;
14090 struct i40e_veb *veb = NULL;
14091 struct i40e_pf *pf;
14092 u16 uplink_seid;
14093 int i, n, bkt;
14095 pf = vsi->back;
14097 /* release of a VEB-owner or last VSI is not allowed */
14098 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
14099 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
14100 vsi->seid, vsi->uplink_seid);
14101 return -ENODEV;
14102 }
14103 if (vsi == pf->vsi[pf->lan_vsi] &&
14104 !test_bit(__I40E_DOWN, pf->state)) {
14105 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
14108 set_bit(__I40E_VSI_RELEASING, vsi->state);
14109 uplink_seid = vsi->uplink_seid;
14110 if (vsi->type != I40E_VSI_SRIOV) {
14111 if (vsi->netdev_registered) {
14112 vsi->netdev_registered = false;
14113 if (vsi->netdev) {
14114 /* results in a call to i40e_close() */
14115 unregister_netdev(vsi->netdev);
14116 }
14117 } else {
14118 i40e_vsi_close(vsi);
14119 }
14120 i40e_vsi_disable_irq(vsi);
14121 }
14123 spin_lock_bh(&vsi->mac_filter_hash_lock);
14125 /* clear the sync flag on all filters */
14126 if (vsi->netdev) {
14127 __dev_uc_unsync(vsi->netdev, NULL);
14128 __dev_mc_unsync(vsi->netdev, NULL);
14129 }
14131 /* make sure any remaining filters are marked for deletion */
14132 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
14133 __i40e_del_filter(vsi, f);
14135 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14137 i40e_sync_vsi_filters(vsi);
14139 i40e_vsi_delete(vsi);
14140 i40e_vsi_free_q_vectors(vsi);
14141 if (vsi->netdev) {
14142 free_netdev(vsi->netdev);
14143 vsi->netdev = NULL;
14144 }
14145 i40e_vsi_clear_rings(vsi);
14146 i40e_vsi_clear(vsi);
14148 /* If this was the last thing on the VEB, except for the
14149 * controlling VSI, remove the VEB, which puts the controlling
14150 * VSI onto the next level down in the switch.
14152 * Well, okay, there's one more exception here: don't remove
14153 * the orphan VEBs yet. We'll wait for an explicit remove request
14154 * from up the network stack.
14156 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
14157 if (pf->vsi[i] &&
14158 pf->vsi[i]->uplink_seid == uplink_seid &&
14159 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14160 n++; /* count the VSIs */
14161 }
14162 }
14163 for (i = 0; i < I40E_MAX_VEB; i++) {
14164 if (!pf->veb[i])
14165 continue;
14166 if (pf->veb[i]->uplink_seid == uplink_seid)
14167 n++; /* count the VEBs */
14168 if (pf->veb[i]->seid == uplink_seid)
14169 veb = pf->veb[i];
14170 }
14171 if (n == 0 && veb && veb->uplink_seid != 0)
14172 i40e_veb_release(veb);
14174 return 0;
14175 }
14178 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
14179 * @vsi: ptr to the VSI
14181 * This should only be called after i40e_vsi_mem_alloc() which allocates the
14182 * corresponding SW VSI structure and initializes num_queue_pairs for the
14183 * newly allocated VSI.
14185 * Returns 0 on success or negative on failure
14187 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
14188 {
14189 int ret = -ENOENT;
14190 struct i40e_pf *pf = vsi->back;
14192 if (vsi->q_vectors[0]) {
14193 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
14194 vsi->seid);
14195 return -EEXIST;
14196 }
14198 if (vsi->base_vector) {
14199 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
14200 vsi->seid, vsi->base_vector);
14201 return -EEXIST;
14202 }
14204 ret = i40e_vsi_alloc_q_vectors(vsi);
14205 if (ret) {
14206 dev_info(&pf->pdev->dev,
14207 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
14208 vsi->num_q_vectors, vsi->seid, ret);
14209 vsi->num_q_vectors = 0;
14210 goto vector_setup_out;
14211 }
14213 /* In Legacy mode, we do not have to get any other vector since we
14214 * piggyback on the misc/ICR0 for queue interrupts.
14215 */
14216 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
14217 return ret;
14218 if (vsi->num_q_vectors)
14219 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
14220 vsi->num_q_vectors, vsi->idx);
14221 if (vsi->base_vector < 0) {
14222 dev_info(&pf->pdev->dev,
14223 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
14224 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
14225 i40e_vsi_free_q_vectors(vsi);
14226 ret = -ENOENT;
14227 goto vector_setup_out;
14228 }
14230 vector_setup_out:
14231 return ret;
14232 }
14235 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
14236 * @vsi: pointer to the vsi.
14238 * This re-allocates a vsi's queue resources.
14240 * Returns pointer to the successfully allocated and configured VSI sw struct
14241 * on success, otherwise returns NULL on failure.
14243 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
14244 {
14245 u16 alloc_queue_pairs;
14246 struct i40e_pf *pf;
14247 u8 enabled_tc;
14248 int ret;
14250 if (!vsi)
14251 return NULL;
14253 pf = vsi->back;
14255 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
14256 i40e_vsi_clear_rings(vsi);
14258 i40e_vsi_free_arrays(vsi, false);
14259 i40e_set_num_rings_in_vsi(vsi);
14260 ret = i40e_vsi_alloc_arrays(vsi, false);
14261 if (ret)
14262 goto err_vsi;
14264 alloc_queue_pairs = vsi->alloc_queue_pairs *
14265 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14267 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14268 if (ret < 0) {
14269 dev_info(&pf->pdev->dev,
14270 "failed to get tracking for %d queues for VSI %d err %d\n",
14271 alloc_queue_pairs, vsi->seid, ret);
14272 goto err_vsi;
14273 }
14274 vsi->base_queue = ret;
14276 /* Update the FW view of the VSI. Force a reset of TC and queue
14277 * layout configurations.
14279 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14280 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14281 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14282 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14283 if (vsi->type == I40E_VSI_MAIN)
14284 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
14286 /* assign it some queues */
14287 ret = i40e_alloc_rings(vsi);
14288 if (ret)
14289 goto err_rings;
14291 /* map all of the rings to the q_vectors */
14292 i40e_vsi_map_rings_to_vectors(vsi);
14293 return vsi;
14295 err_rings:
14296 i40e_vsi_free_q_vectors(vsi);
14297 if (vsi->netdev_registered) {
14298 vsi->netdev_registered = false;
14299 unregister_netdev(vsi->netdev);
14300 free_netdev(vsi->netdev);
14301 vsi->netdev = NULL;
14303 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14304 err_vsi:
14305 i40e_vsi_clear(vsi);
14306 return NULL;
14307 }
14310 * i40e_vsi_setup - Set up a VSI by a given type
14311 * @pf: board private structure
14312 * @type: VSI type
14313 * @uplink_seid: the switch element to link to
14314 * @param1: usage depends upon VSI type. For VF types, indicates VF id
14316 * This allocates the sw VSI structure and its queue resources, then adds a VSI
14317 * to the identified VEB.
14319 * Returns pointer to the successfully allocated and configured VSI sw struct on
14320 * success, otherwise returns NULL on failure.
14322 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
14323 u16 uplink_seid, u32 param1)
14324 {
14325 struct i40e_vsi *vsi = NULL;
14326 struct i40e_veb *veb = NULL;
14327 u16 alloc_queue_pairs;
14328 int ret, i;
14329 int v_idx;
14331 /* The requested uplink_seid must be either
14332 * - the PF's port seid
14333 * no VEB is needed because this is the PF
14334 * or this is a Flow Director special case VSI
14335 * - seid of an existing VEB
14336 * - seid of a VSI that owns an existing VEB
14337 * - seid of a VSI that doesn't own a VEB
14338 * a new VEB is created and the VSI becomes the owner
14339 * - seid of the PF VSI, which is what creates the first VEB
14340 * this is a special case of the previous
14342 * Find which uplink_seid we were given and create a new VEB if needed
14344 for (i = 0; i < I40E_MAX_VEB; i++) {
14345 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
14346 veb = pf->veb[i];
14347 break;
14348 }
14349 }
14351 if (!veb && uplink_seid != pf->mac_seid) {
14353 for (i = 0; i < pf->num_alloc_vsi; i++) {
14354 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
14355 vsi = pf->vsi[i];
14356 break;
14357 }
14358 }
14359 if (!vsi) {
14360 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14361 uplink_seid);
14362 return NULL;
14363 }
14365 if (vsi->uplink_seid == pf->mac_seid)
14366 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14367 vsi->tc_config.enabled_tc);
14368 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14369 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14370 vsi->tc_config.enabled_tc);
14371 if (veb) {
14372 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14373 dev_info(&vsi->back->pdev->dev,
14374 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14375 return NULL;
14376 }
14377 /* We come up by default in VEPA mode if SRIOV is not
14378 * already enabled, in which case we can't force VEPA
14379 * mode.
14380 */
14381 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
14382 veb->bridge_mode = BRIDGE_MODE_VEPA;
14383 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
14384 }
14385 i40e_config_bridge_mode(veb);
14386 }
14387 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14388 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14392 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14396 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14397 uplink_seid = veb->seid;
14398 }
14400 /* get vsi sw struct */
14401 v_idx = i40e_vsi_mem_alloc(pf, type);
14402 if (v_idx < 0)
14403 goto err_alloc;
14404 vsi = pf->vsi[v_idx];
14405 if (!vsi)
14406 goto err_alloc;
14407 vsi->type = type;
14408 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14410 if (type == I40E_VSI_MAIN)
14411 pf->lan_vsi = v_idx;
14412 else if (type == I40E_VSI_SRIOV)
14413 vsi->vf_id = param1;
14414 /* assign it some queues */
14415 alloc_queue_pairs = vsi->alloc_queue_pairs *
14416 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14418 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14419 if (ret < 0) {
14420 dev_info(&pf->pdev->dev,
14421 "failed to get tracking for %d queues for VSI %d err=%d\n",
14422 alloc_queue_pairs, vsi->seid, ret);
14423 goto err_vsi;
14424 }
14425 vsi->base_queue = ret;
14427 /* get a VSI from the hardware */
14428 vsi->uplink_seid = uplink_seid;
14429 ret = i40e_add_vsi(vsi);
14430 if (ret)
14431 goto err_vsi;
14433 switch (vsi->type) {
14434 /* setup the netdev if needed */
14435 case I40E_VSI_MAIN:
14436 case I40E_VSI_VMDQ2:
14437 ret = i40e_config_netdev(vsi);
14438 if (ret)
14439 goto err_netdev;
14440 ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
14441 if (ret)
14442 goto err_netdev;
14443 ret = register_netdev(vsi->netdev);
14444 if (ret)
14445 goto err_netdev;
14446 vsi->netdev_registered = true;
14447 netif_carrier_off(vsi->netdev);
14448 #ifdef CONFIG_I40E_DCB
14449 /* Setup DCB netlink interface */
14450 i40e_dcbnl_setup(vsi);
14451 #endif /* CONFIG_I40E_DCB */
14452 fallthrough;
14453 case I40E_VSI_FDIR:
14454 /* set up vectors and rings if needed */
14455 ret = i40e_vsi_setup_vectors(vsi);
14456 if (ret)
14457 goto err_msix;
14459 ret = i40e_alloc_rings(vsi);
14460 if (ret)
14461 goto err_rings;
14463 /* map all of the rings to the q_vectors */
14464 i40e_vsi_map_rings_to_vectors(vsi);
14466 i40e_vsi_reset_stats(vsi);
14467 break;
14468 default:
14469 /* no netdev or rings for the other VSI types */
14470 break;
14471 }
14473 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
14474 (vsi->type == I40E_VSI_VMDQ2)) {
14475 ret = i40e_vsi_config_rss(vsi);
14480 i40e_vsi_free_q_vectors(vsi);
14482 if (vsi->netdev_registered) {
14483 vsi->netdev_registered = false;
14484 unregister_netdev(vsi->netdev);
14485 free_netdev(vsi->netdev);
14486 vsi->netdev = NULL;
14487 }
14488 err_netdev:
14489 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14490 err_vsi:
14491 i40e_vsi_clear(vsi);
14492 err_alloc:
14493 return NULL;
14494 }
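/* A sketch of how this is typically called from elsewhere in the
 * driver, e.g. when creating a VMDq channel under the main VSI
 * (arguments shown are illustrative):
 *
 *	struct i40e_vsi *main_vsi = pf->vsi[pf->lan_vsi];
 *	struct i40e_vsi *vsi;
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, main_vsi->seid, 0);
 *	if (!vsi)
 *		return -ENOMEM;
 */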
14497 * i40e_veb_get_bw_info - Query VEB BW information
14498 * @veb: the veb to query
14500 * Query the Tx scheduler BW configuration data for given VEB
14502 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14503 {
14504 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14505 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14506 struct i40e_pf *pf = veb->pf;
14507 struct i40e_hw *hw = &pf->hw;
14508 u32 tc_bw_max;
14509 int ret = 0;
14510 int i;
14512 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14513 &bw_data, NULL);
14514 if (ret) {
14515 dev_info(&pf->pdev->dev,
14516 "query veb bw config failed, err %s aq_err %s\n",
14517 i40e_stat_str(&pf->hw, ret),
14518 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14519 goto out;
14520 }
14522 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14523 &ets_data, NULL);
14524 if (ret) {
14525 dev_info(&pf->pdev->dev,
14526 "query veb bw ets config failed, err %s aq_err %s\n",
14527 i40e_stat_str(&pf->hw, ret),
14528 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14529 goto out;
14530 }
14532 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14533 veb->bw_max_quanta = ets_data.tc_bw_max;
14534 veb->is_abs_credits = bw_data.absolute_credits_enable;
14535 veb->enabled_tc = ets_data.tc_valid_bits;
14536 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14537 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14538 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14539 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14540 veb->bw_tc_limit_credits[i] =
14541 le16_to_cpu(bw_data.tc_bw_limits[i]);
14542 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
14543 }
14545 out:
14546 return ret;
14547 }
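/* Worked example of the unpacking above (illustrative): tc_bw_max is
 * two little-endian u16s combined into one u32, four bits per traffic
 * class. For tc_bw_max = 0x3210, TC0's quanta is (0x3210 >> 0) & 0x7
 * = 0, TC1's is (0x3210 >> 4) & 0x7 = 1, and so on: each TC reads its
 * own nibble, masked to the low three bits.
 */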
14550 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14551 * @pf: board private structure
14553 * On error: returns error code (negative)
14554 * On success: returns vsi index in PF (positive)
14556 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14557 {
14558 int ret = -ENOENT;
14559 struct i40e_veb *veb;
14560 int i;
14562 /* Need to protect the allocation of switch elements at the PF level */
14563 mutex_lock(&pf->switch_mutex);
14565 /* VEB list may be fragmented if VEB creation/destruction has
14566 * been happening. We can afford to do a quick scan to look
14567 * for any free slots in the list.
14569 * find next empty veb slot, looping back around if necessary
14570 */
14571 i = 0;
14572 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14573 i++;
14574 if (i >= I40E_MAX_VEB) {
14575 ret = -ENOMEM;
14576 goto err_alloc_veb; /* out of VEB slots! */
14577 }
14579 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14580 if (!veb) {
14581 ret = -ENOMEM;
14582 goto err_alloc_veb;
14583 }
14584 veb->pf = pf;
14585 veb->idx = i;
14586 veb->enabled_tc = 1;
14587 pf->veb[i] = veb;
14588 ret = i;
14590 err_alloc_veb:
14591 mutex_unlock(&pf->switch_mutex);
14592 return ret;
14593 }
14596 * i40e_switch_branch_release - Delete a branch of the switch tree
14597 * @branch: where to start deleting
14599 * This uses recursion to find the tips of the branch to be
14600 * removed, deleting until we get back to and can delete this VEB.
14602 static void i40e_switch_branch_release(struct i40e_veb *branch)
14603 {
14604 struct i40e_pf *pf = branch->pf;
14605 u16 branch_seid = branch->seid;
14606 u16 veb_idx = branch->idx;
14607 int i;
14609 /* release any VEBs on this VEB - RECURSION */
14610 for (i = 0; i < I40E_MAX_VEB; i++) {
14611 if (!pf->veb[i])
14612 continue;
14613 if (pf->veb[i]->uplink_seid == branch->seid)
14614 i40e_switch_branch_release(pf->veb[i]);
14615 }
14617 /* Release the VSIs on this VEB, but not the owner VSI.
14619 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
14620 * the VEB itself, so don't use (*branch) after this loop.
14622 for (i = 0; i < pf->num_alloc_vsi; i++) {
14623 if (!pf->vsi[i])
14624 continue;
14625 if (pf->vsi[i]->uplink_seid == branch_seid &&
14626 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14627 i40e_vsi_release(pf->vsi[i]);
14628 }
14629 }
14631 /* There's one corner case where the VEB might not have been
14632 * removed, so double check it here and remove it if needed.
14633 * This case happens if the veb was created from the debugfs
14634 * commands and no VSIs were added to it.
14636 if (pf->veb[veb_idx])
14637 i40e_veb_release(pf->veb[veb_idx]);
14638 }
14641 * i40e_veb_clear - remove veb struct
14642 * @veb: the veb to remove
14644 static void i40e_veb_clear(struct i40e_veb *veb)
14645 {
14646 if (!veb)
14647 return;
14649 if (veb->pf) {
14650 struct i40e_pf *pf = veb->pf;
14652 mutex_lock(&pf->switch_mutex);
14653 if (pf->veb[veb->idx] == veb)
14654 pf->veb[veb->idx] = NULL;
14655 mutex_unlock(&pf->switch_mutex);
14656 }
14658 kfree(veb);
14659 }
14662 * i40e_veb_release - Delete a VEB and free its resources
14663 * @veb: the VEB being removed
14665 void i40e_veb_release(struct i40e_veb *veb)
14666 {
14667 struct i40e_vsi *vsi = NULL;
14668 struct i40e_pf *pf;
14669 int i, n = 0;
14671 pf = veb->pf;
14673 /* find the remaining VSI and check for extras */
14674 for (i = 0; i < pf->num_alloc_vsi; i++) {
14675 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
14676 n++;
14677 vsi = pf->vsi[i];
14678 }
14679 }
14680 if (n != 1) {
14681 dev_info(&pf->pdev->dev,
14682 "can't remove VEB %d with %d VSIs left\n",
14683 veb->seid, n);
14684 return;
14685 }
14687 /* move the remaining VSI to uplink veb */
14688 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14689 if (veb->uplink_seid) {
14690 vsi->uplink_seid = veb->uplink_seid;
14691 if (veb->uplink_seid == pf->mac_seid)
14692 vsi->veb_idx = I40E_NO_VEB;
14693 else
14694 vsi->veb_idx = veb->veb_idx;
14695 } else {
14697 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14698 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14699 }
14701 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14702 i40e_veb_clear(veb);
14703 }
14706 * i40e_add_veb - create the VEB in the switch
14707 * @veb: the VEB to be instantiated
14708 * @vsi: the controlling VSI
14710 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14711 {
14712 struct i40e_pf *pf = veb->pf;
14713 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
14714 int ret;
14716 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
14717 veb->enabled_tc, false,
14718 &veb->seid, enable_stats, NULL);
14720 /* get a VEB from the hardware */
14721 if (ret) {
14722 dev_info(&pf->pdev->dev,
14723 "couldn't add VEB, err %s aq_err %s\n",
14724 i40e_stat_str(&pf->hw, ret),
14725 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14726 return -EPERM;
14727 }
14729 /* get statistics counter */
14730 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14731 &veb->stats_idx, NULL, NULL, NULL);
14732 if (ret) {
14733 dev_info(&pf->pdev->dev,
14734 "couldn't get VEB statistics idx, err %s aq_err %s\n",
14735 i40e_stat_str(&pf->hw, ret),
14736 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14737 return -EPERM;
14738 }
14739 ret = i40e_veb_get_bw_info(veb);
14740 if (ret) {
14741 dev_info(&pf->pdev->dev,
14742 "couldn't get VEB bw info, err %s aq_err %s\n",
14743 i40e_stat_str(&pf->hw, ret),
14744 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14745 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14746 return -ENOENT;
14747 }
14749 vsi->uplink_seid = veb->seid;
14750 vsi->veb_idx = veb->idx;
14751 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14753 return 0;
14754 }
14757 * i40e_veb_setup - Set up a VEB
14758 * @pf: board private structure
14759 * @flags: VEB setup flags
14760 * @uplink_seid: the switch element to link to
14761 * @vsi_seid: the initial VSI seid
14762 * @enabled_tc: Enabled TC bit-map
14764 * This allocates the sw VEB structure and links it into the switch
14765 * It is possible and legal for this to be a duplicate of an already
14766 * existing VEB. It is also possible for both uplink and vsi seids
14767 * to be zero, in order to create a floating VEB.
14769 * Returns pointer to the successfully allocated VEB sw struct on
14770 * success, otherwise returns NULL on failure.
14772 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14773 u16 uplink_seid, u16 vsi_seid,
14774 u8 enabled_tc)
14775 {
14776 struct i40e_veb *veb, *uplink_veb = NULL;
14777 int vsi_idx, veb_idx;
14778 int ret;
14780 /* if one seid is 0, the other must be 0 to create a floating relay */
14781 if ((uplink_seid == 0 || vsi_seid == 0) &&
14782 (uplink_seid + vsi_seid != 0)) {
14783 dev_info(&pf->pdev->dev,
14784 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14785 uplink_seid, vsi_seid);
14786 return NULL;
14787 }
14789 /* make sure there is such a vsi and uplink */
14790 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14791 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14792 break;
14793 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14794 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14799 if (uplink_seid && uplink_seid != pf->mac_seid) {
14800 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14801 if (pf->veb[veb_idx] &&
14802 pf->veb[veb_idx]->seid == uplink_seid) {
14803 uplink_veb = pf->veb[veb_idx];
14804 break;
14805 }
14806 }
14807 if (!uplink_veb) {
14808 dev_info(&pf->pdev->dev,
14809 "uplink seid %d not found\n", uplink_seid);
14810 return NULL;
14811 }
14812 }
14814 /* get veb sw struct */
14815 veb_idx = i40e_veb_mem_alloc(pf);
14816 if (veb_idx < 0)
14817 goto err_alloc;
14818 veb = pf->veb[veb_idx];
14819 veb->flags = flags;
14820 veb->uplink_seid = uplink_seid;
14821 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14822 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14824 /* create the VEB in the switch */
14825 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14826 if (ret)
14827 goto err_veb;
14828 if (vsi_idx == pf->lan_vsi)
14829 pf->lan_veb = veb->idx;
14831 return veb;
14833 err_veb:
14834 i40e_veb_clear(veb);
14835 err_alloc:
14836 return NULL;
14837 }
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb >= I40E_MAX_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb >= I40E_MAX_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}
		if (pf->lan_veb >= I40E_MAX_VEB)
			break;

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
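
/* Note on the loop above: the get-switch-config AQ command pages its
 * results. Each call fills at most one I40E_AQ_LARGE_BUF worth of
 * elements and updates next_seid with the continuation point; firmware
 * reports zero once the final page has been returned, which terminates
 * the do/while.
 */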
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	if (!lock_acquired)
		rtnl_lock();

	/* repopulate tunnel port filters */
	udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);

	if (!lock_acquired)
		rtnl_unlock();

	return ret;
}
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big chunk of queues as well as traffic in general
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
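
/* Worked example of the distribution above (hypothetical numbers): with
 * num_tx_qp = 64, MSI-X enabled, RSS and FD sideband on, rss_size_max = 8,
 * 16 online CPUs, 64 MSI-X vectors, and 4 requested VFs at 4 qps each:
 *
 *	q_max       = min(max(8, 16), 64, 64) = 16  -> num_lan_qps = 16
 *	queues_left = 64 - 16 = 48
 *	FD sideband reserves 1                      -> 47 left
 *	VFs: min(4, 47 / 4) = 4 VFs use 16 queues   -> 31 left
 *
 * The remainder is recorded in pf->queues_left and reported by the
 * dev_dbg summary.
 */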
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the PF.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		       pf->hw.func_caps.num_vsis,
		       pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
		i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += scnprintf(&buf[i], REMAIN(i), " DCB");
	i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
	i += scnprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += scnprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += scnprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
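
/* Note: eth_platform_get_mac_address() returns 0 only when a MAC was
 * found via the platform (e.g. Open Firmware / device tree or an arch
 * specific source), so the i40e_get_mac_addr() NVM fallback above runs
 * only when that lookup fails.
 */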
/**
 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
 * @fec_cfg: FEC option to set in flags
 * @flags: ptr to flags in which we set FEC option
 **/
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
{
	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
		*flags |= I40E_FLAG_RS_FEC;
		*flags &= ~I40E_FLAG_BASE_R_FEC;
	}
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
		*flags |= I40E_FLAG_BASE_R_FEC;
		*flags &= ~I40E_FLAG_RS_FEC;
	}
	if (fec_cfg == 0)
		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}
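
/* Resulting flag state for a few representative fec_cfg values, as
 * derived from the logic above:
 *
 *	I40E_AQ_SET_FEC_AUTO            -> RS_FEC | BASE_R_FEC
 *	REQUEST_RS and/or ABILITY_RS    -> RS_FEC only
 *	REQUEST_KR and/or ABILITY_KR    -> BASE_R_FEC only
 *	0                               -> both flags cleared (no FEC)
 */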
/**
 * i40e_check_recovery_mode - check if we are running transition firmware
 * @pf: board private structure
 *
 * Check registers indicating the firmware runs in recovery mode. Sets the
 * appropriate driver state.
 *
 * Returns true if the recovery mode was detected, false otherwise
 **/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);

	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		set_bit(__I40E_RECOVERY_MODE, pf->state);

		return true;
	}
	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");

	return false;
}
/**
 * i40e_pf_loop_reset - perform reset in a loop
 * @pf: board private structure
 *
 * This function is useful when a NIC is about to enter recovery mode.
 * When a NIC's internal data structures are corrupted, its firmware will
 * eventually enter recovery mode. Right after a POR it takes about
 * 7 minutes for the firmware to get there; until then the NIC sits in an
 * intermediate state. The only way for a driver to detect that
 * intermediate state is to issue a series of PF resets and check the
 * return values. A successful PF reset does not rule out recovery mode,
 * so the caller still needs to check for it afterwards. There is also a
 * small chance that the firmware hangs in the intermediate state forever.
 * Since waiting the full 7 minutes is impractical, this function gives up
 * after 10 seconds and returns an error.
 *
 * Return 0 on success, negative on failure.
 **/
static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
{
	/* wait max 10 seconds for PF reset to succeed */
	const unsigned long time_end = jiffies + 10 * HZ;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
		usleep_range(10000, 20000);
		ret = i40e_pf_reset(hw);
	}

	if (ret == I40E_SUCCESS)
		pf->pfr_count++;
	else
		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);

	return ret;
}
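
/* The pattern above is a simple bounded retry: i40e_pf_reset() is
 * re-issued every 10-20 ms until it succeeds or the 10 second deadline
 * passes. As the comment block explains, success here still does not
 * rule out recovery mode, so callers must check for it separately.
 */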
/**
 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
 * @pf: board private structure
 *
 * Check FW registers to determine if FW issued unexpected EMP Reset.
 * Each time an unexpected EMP Reset occurs the FW increments a counter
 * of unexpected EMP Resets. When the counter reaches 10 the FW should
 * enter the Recovery mode.
 *
 * Returns true if FW issued unexpected EMP Reset
 **/
static bool i40e_check_fw_empr(struct i40e_pf *pf)
{
	const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
			   I40E_GL_FWSTS_FWS1B_MASK;
	return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
	       (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
}
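
/* In other words: FWS1B values strictly above I40E_GL_FWSTS_FWS1B_EMPR_0
 * and up to I40E_GL_FWSTS_FWS1B_EMPR_10 correspond to 1..10 unexpected
 * EMP resets having been counted by the firmware.
 */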
/**
 * i40e_handle_resets - handle EMP resets and PF resets
 * @pf: board private structure
 *
 * Handle both EMP resets and PF resets and conclude whether there are
 * any issues regarding these resets. If there are any issues then
 * generate a log entry.
 *
 * Return 0 if NIC is healthy or negative value when there are issues
 * with resets
 **/
static i40e_status i40e_handle_resets(struct i40e_pf *pf)
{
	const i40e_status pfr = i40e_pf_loop_reset(pf);
	const bool is_empr = i40e_check_fw_empr(pf);

	if (is_empr || pfr != I40E_SUCCESS)
		dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");

	return is_empr ? I40E_ERR_RESET_FAILED : pfr;
}
/**
 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
 * @pf: board private structure
 * @hw: ptr to the hardware info
 *
 * This function does a minimal setup of all subsystems needed for running
 * recovery mode.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
{
	struct i40e_vsi *vsi;
	int err;
	int v_idx;

	pci_save_state(pf->pdev);

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

	/* We allocate one VSI which is needed as absolute minimum
	 * in order to register the netdev
	 */
	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
	if (v_idx < 0) {
		err = v_idx;
		goto err_switch_setup;
	}
	pf->lan_vsi = v_idx;
	vsi = pf->vsi[v_idx];
	if (!vsi) {
		err = -EFAULT;
		goto err_switch_setup;
	}
	vsi->alloc_queue_pairs = 1;
	err = i40e_config_netdev(vsi);
	if (err)
		goto err_switch_setup;
	err = register_netdev(vsi->netdev);
	if (err)
		goto err_switch_setup;
	vsi->netdev_registered = true;
	i40e_dbg_pf_init(pf);

	err = i40e_setup_misc_vector_for_recovery_mode(pf);
	if (err)
		goto err_switch_setup;

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;

err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
	i40e_shutdown_adminq(hw);
	iounmap(hw->hw_addr);
	pci_disable_pcie_error_reporting(pf->pdev);
	pci_release_mem_regions(pf->pdev);
	pci_disable_device(pf->pdev);
	kfree(pf);

	return err;
}
/**
 * i40e_set_subsystem_device_id - set subsystem device id
 * @hw: pointer to the hardware info
 *
 * Set PCI subsystem device id either from a pci_dev structure or
 * a specific FW register.
 **/
static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
{
	struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;

	hw->subsystem_device_id = pdev->subsystem_device ?
		pdev->subsystem_device :
		(ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
}
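
/* Some platforms report a zero PCI subsystem device id in config space;
 * the ternary above then falls back to the value firmware exposes in
 * the I40E_PFPCI_SUBSYSID register, masked down to the 16 bits a
 * subsystem id can hold.
 */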
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
#ifdef CONFIG_I40E_DCB
	enum i40e_get_fw_lldp_status_resp lldp_status;
	i40e_status status;
#endif /* CONFIG_I40E_DCB */
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: 0x%x\n", err);
		goto err_dma;
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);
	/* We believe that the highest register to read is
	 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
	 * is not less than that before mapping to prevent a
	 * kernel panic.
	 */
	if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
		dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
			pf->ioremap_len);
		err = -ENOMEM;
		goto err_ioremap;
	}
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	i40e_set_subsystem_device_id(hw);
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
	INIT_LIST_HEAD(&pf->ddp_old_prof);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}
	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);

	err = i40e_set_mac_type(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	err = i40e_handle_resets(pf);
	if (err)
		goto err_pf_reset;

	i40e_check_recovery_mode(pf);

	if (is_kdump_kernel()) {
		hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
		hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
	} else {
		hw->aq.num_arq_entries = I40E_AQ_LEN;
		hw->aq.num_asq_entries = I40E_AQ_LEN;
	}
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
				 hw->aq.api_maj_ver,
				 hw->aq.api_min_ver,
				 I40E_FW_API_VERSION_MAJOR,
				 I40E_FW_MINOR_VERSION(hw));
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
		 hw->subsystem_vendor_id, hw->subsystem_device_id);

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_dbg(&pdev->dev,
			"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
			hw->aq.api_maj_ver,
			hw->aq.api_min_ver,
			I40E_FW_API_VERSION_MAJOR,
			I40E_FW_MINOR_VERSION(hw));
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
			 I40E_FW_MINOR_VERSION(hw));

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);

	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		return i40e_init_recovery_mode(pf, hw);
	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, false, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	i40e_ptp_alloc_pins(pf);
	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

#ifdef CONFIG_I40E_DCB
	status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
	(!status &&
	 lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
		(pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) :
		(pf->flags |= I40E_FLAG_DISABLE_FW_LLDP);
	dev_info(&pdev->dev,
		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
			"FW LLDP is disabled\n" :
			"FW LLDP is enabled\n");

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* Reduce Tx and Rx pairs for kdump
	 * When MSI-X is enabled, it's not allowed to use more TC queue
	 * pairs than MSI-X vectors (pf->num_lan_msix) exist. Thus
	 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
	 */
	if (is_kdump_kernel())
		pf->num_lan_msix = 1;

	pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
	pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
	pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
	pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
	pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
						    UDP_TUNNEL_TYPE_GENEVE;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
		dev_warn(&pf->pdev->dev,
			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
	}

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* set the FEC config due to the board capabilities */
	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
	if (val < MAX_FRAME_SIZE_DEFAULT)
		/* report the physical port, not a stale loop index */
		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
			 pf->hw.port, val);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;
	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
	 * flags, once they are set, i40e_rebuild should not be called as
	 * i40e_prep_for_reset always returns early.
	 */
	while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		usleep_range(1000, 2000);
	set_bit(__I40E_IN_REMOVE, pf->state);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}
	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		struct i40e_vsi *vsi = pf->vsi[0];

		/* We know that we have allocated only one vsi for this PF,
		 * it was just for registering netdevice, so the interface
		 * could be visible in the 'ifconfig' output
		 */
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);

		goto unmap;
	}
	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

unmap:
	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
				i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						pci_channel_state_t error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}
/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf);
}

/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (test_bit(__I40E_IN_REMOVE, pf->state))
		return;

	i40e_reset_and_rebuild(pf, false, false);
}
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up\n");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * flow.
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * flow.
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}
/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
	.driver   = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
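
/* Lifecycle sketch (illustrative): the PCI core matches a device against
 * i40e_pci_tbl and calls i40e_probe(); i40e_remove() runs on unbind or
 * module unload; i40e_shutdown() runs at system shutdown; i40e_pm_ops
 * routes suspend/resume; and the err_handler callbacks drive AER
 * recovery (error_detected -> slot_reset -> resume), with
 * reset_prepare/reset_done used around function-level resets.
 */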
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit.  We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	ida_destroy(&i40e_client_ida);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);