// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock_drv.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
/* a few forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *i40e_wq;
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
				  struct net_device *netdev, int delta)
{
	struct netdev_hw_addr *ha;

	if (!f || !netdev)
		return;

	netdev_for_each_mc_addr(ha, netdev) {
		if (ether_addr_equal(ha->addr, f->macaddr)) {
			ha->refcount += delta;
			if (ha->refcount <= 0)
				ha->refcount = 1;
			break;
		}
	}
}
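/* Illustrative note (an editor's reading, not from the original source):
 * clamping the refcount to a floor of 1 keeps the kernel's multicast list
 * entry alive even if our delta would drive it negative, so the entry is
 * not torn down while an i40e filter may still reference that address.
 */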
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
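/* Illustrative note (not in the original source): ALIGN() rounds the request
 * up to the next multiple of the alignment, e.g. ALIGN(1000, 128) == 1024,
 * so the buffer handed back to the shared code always satisfies the
 * requested alignment constraint.
 */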
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* Allocate last queue in the pile for FDIR VSI queue
	 * so it doesn't fragment the qp_pile
	 */
	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
			dev_err(&pf->pdev->dev,
				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
				pile->num_entries - 1);
			return -ENOMEM;
		}
		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
		return pile->num_entries - 1;
	}

	i = 0;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i + j) < pile->num_entries); j++) {
			if (pile->list[i + j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i + j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
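/* Illustrative example (not in the original source): with needed = 3 and
 * id = 2, a pile whose list begins [V, V, 0, 0, 0, ...] (V = an entry that
 * already has I40E_PILE_VALID_BIT set) becomes
 * [V, V, 2|VALID, 2|VALID, 2|VALID, ...] and the function returns base
 * index 2.
 */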
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	u16 i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	return count;
}
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	      test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
		set_bit(__I40E_DOWN_REQUESTED, pf->state);
		set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
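/* Recovery escalates across repeated timeouts inside the 20 second window
 * above: level 1 requests a PF reset, level 2 a CORE reset, level 3 a
 * GLOBAL reset; anything beyond that gives up and requests that the device
 * be brought down.
 */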
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes   += bytes;
}
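/* The u64_stats_fetch_begin()/u64_stats_fetch_retry() pair re-reads the
 * counters whenever the Tx hot path bumped the ring's syncp sequence in the
 * middle of our read, keeping 64-bit counters consistent on 32-bit kernels
 * without taking a lock.
 */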
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			packets = ring->stats.packets;
			bytes   = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
/**
 * i40e_compute_pci_to_hw_id - compute index from PCI function.
 * @vsi: ptr to the VSI to read from.
 * @hw: ptr to the hardware info.
 **/
static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
{
	int pf_count = i40e_get_pf_count(hw);

	if (vsi->type == I40E_VSI_SRIOV)
		return (hw->port * BIT(7)) / pf_count + vsi->vf_id;

	return hw->port + BIT(7);
}
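/* Worked example (assumed values, not from the original source): with
 * BIT(7) = 128 stat indexes per port and pf_count = 4, a VF with vf_id = 5
 * on port 1 maps to (1 * 128) / 4 + 5 = 37, while non-SRIOV traffic on
 * port 1 maps to 1 + 128 = 129.
 */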
/**
 * i40e_stat_update64 - read and update a 64 bit stat from the chip.
 * @hw: ptr to the hardware info.
 * @hireg: the high 32 bit reg to read.
 * @loreg: the low 32 bit reg to read.
 * @offset_loaded: has the initial offset been loaded yet.
 * @offset: ptr to current offset value.
 * @stat: ptr to the stat.
 *
 * Since the device stats are not reset at PFReset, they will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/
static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	new_data = rd64(hw, loreg);

	if (!offset_loaded || new_data < *offset)
		*offset = new_data;
	*stat = new_data - *offset;
}
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
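/* Rollover example (not in the original source): if *offset is
 * 0xFFFFFFFFF000 and the 48-bit counter wraps to new_data = 0x100, then
 * (new_data + BIT_ULL(48)) - *offset = 0x1100, which the final 48-bit mask
 * leaves intact -- the true delta despite the wrap.
 */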
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
/**
 * i40e_stats_update_rx_discards - update rx_discards.
 * @vsi: ptr to the VSI to be updated.
 * @hw: ptr to the hardware info.
 * @stat_idx: VSI's stat_counter_idx.
 * @offset_loaded: indicates whether the initial offsets were loaded.
 * @stat_offset: ptr to stat_offset to store first read of specific register.
 * @stat: ptr to VSI's stat to be updated.
 **/
static void
i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
			      int stat_idx, bool offset_loaded,
			      struct i40e_eth_stats *stat_offset,
			      struct i40e_eth_stats *stat)
{
	u64 rx_rdpc, rx_rxerr;

	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
			   &stat_offset->rx_discards, &rx_rdpc);
	i40e_stat_update64(hw,
			   I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
			   I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
			   offset_loaded, &stat_offset->rx_discards_other,
			   &rx_rxerr);

	stat->rx_discards = rx_rdpc + rx_rxerr;
}
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);

	i40e_stats_update_rx_discards(vsi, hw, stat_idx,
				      vsi->stat_offsets_loaded, oes, es);

	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;	/* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	u64 tx_restart, tx_busy;
	struct i40e_ring *p;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_stopped = 0;
	rx_page = 0;
	rx_buf = 0;
	rx_reuse = 0;
	rx_alloc = 0;
	rx_waive = 0;
	rx_busy = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_stopped += p->tx_stats.tx_stopped;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
		rx_reuse += p->rx_stats.page_reuse_count;
		rx_alloc += p->rx_stats.page_alloc_count;
		rx_waive += p->rx_stats.page_waive_count;
		rx_busy += p->rx_stats.page_busy_count;

		if (i40e_enabled_xdp_vsi(vsi)) {
			/* locate XDP ring */
			p = READ_ONCE(vsi->xdp_rings[q]);
			if (!p)
				continue;

			do {
				start = u64_stats_fetch_begin(&p->syncp);
				packets = p->stats.packets;
				bytes = p->stats.bytes;
			} while (u64_stats_fetch_retry(&p->syncp, start));
			tx_b += bytes;
			tx_p += packets;
			tx_restart += p->tx_stats.restart_queue;
			tx_busy += p->tx_stats.tx_busy;
			tx_linearize += p->tx_stats.tx_linearize;
			tx_force_wb += p->tx_stats.tx_force_wb;
		}
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_stopped = tx_stopped;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;
	vsi->rx_page_reuse = rx_reuse;
	vsi->rx_page_alloc = rx_alloc;
	vsi->rx_page_waive = rx_waive;
	vsi->rx_page_busy = rx_busy;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;
	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);
	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}
/**
 * i40e_count_filters - counts VSI mac filters
 * @vsi: the VSI to be searched
 *
 * Returns count of mac filters
 **/
int i40e_count_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;
	int cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		++cnt;

	return cnt;
}
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filter, which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
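/* Example of rule (b) in the comment above (not in the original source): a
 * VSI holding {MAC A, VLAN=-1} that gains its first VLAN filter has that
 * entry rewritten to {MAC A, VLAN=0}, matching only untagged frames; once
 * the last VLAN filter is removed, rule (c) rewrites it back to VLAN=-1.
 */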
/**
 * i40e_get_vf_new_vlan - Get new vlan id on a vf
 * @vsi: the vsi to configure
 * @new_mac: new mac filter to be added
 * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
 * @vlan_filters: the number of active VLAN filters
 * @trusted: flag if the VF is trusted
 *
 * Get new VLAN id based on current VLAN filters, trust, PVID
 * and vf-vlan-prune-disable flag.
 *
 * Returns the value of the new vlan filter or
 * the old value if no new filter is needed.
 **/
static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
				struct i40e_new_mac_filter *new_mac,
				struct i40e_mac_filter *f,
				int vlan_filters,
				bool trusted)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_pf *pf = vsi->back;
	bool is_any;

	if (new_mac)
		f = new_mac->f;

	if (pvid && f->vlan != pvid)
		return pvid;

	is_any = (trusted ||
		  !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));

	if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
	    (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
	    (is_any && !vlan_filters && f->vlan == 0)) {
		if (is_any)
			return I40E_VLAN_ANY;
		else
			return 0;
	}

	return f->vlan;
}
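/* Example (not in the original source): for a trusted VF with no active
 * VLAN filters, a filter at VLAN=0 is promoted to VLAN=-1 (match any);
 * for an untrusted VF with VLAN pruning enabled, a filter at VLAN=-1 is
 * demoted to VLAN=0 (untagged only).
 */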
/**
 * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
 * @vsi: the vsi to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 * @trusted: flag if the VF is trusted
 *
 * Correct VF VLAN filters based on current VLAN filters, trust, PVID
 * and vf-vlan-prune-disable flag.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
					    struct hlist_head *tmp_add_list,
					    struct hlist_head *tmp_del_list,
					    int vlan_filters,
					    bool trusted)
{
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new_mac;
	struct hlist_node *h;
	int bkt, new_vlan;

	hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
		new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
							vlan_filters, trusted);
	}

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
						trusted);
		if (new_vlan != f->vlan) {
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
			if (!new_mac)
				return -ENOMEM;
			new_mac->f = add_head;
			new_mac->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new_mac->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place.
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
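/* Filter life cycle recap (drawn from the surrounding code, not a comment in
 * the original source): entries are created NEW here, promoted to ACTIVE or
 * FAILED by the sync task, and marked REMOVE by __i40e_del_filter(); the
 * REMOVE -> ACTIVE shortcut above simply cancels a pending delete without
 * touching firmware.
 */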
/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}
/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	eth_hw_addr_set(netdev, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		int ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
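	/* e.g. num_qps = 6 gives pow = 3 (ilog2(6) == 2, incremented because
	 * 6 is not a power of two), so the qmap advertises 2^3 = 8 queues
	 * for TC0.
	 */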
1947 /* Setup queue offset/count for all TCs for given VSI */
1948 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1949 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1950 /* See if the given TC is enabled for the given VSI */
1951 if (vsi->tc_config.enabled_tc & BIT(i)) {
1952 offset = vsi->mqprio_qopt.qopt.offset[i];
1953 qcount = vsi->mqprio_qopt.qopt.count[i];
1954 if (qcount > max_qcount)
1955 max_qcount = qcount;
1956 vsi->tc_config.tc_info[i].qoffset = offset;
1957 vsi->tc_config.tc_info[i].qcount = qcount;
1958 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1960 /* TC is not enabled so set the offset to
1961 * default queue and allocate one queue
1964 vsi->tc_config.tc_info[i].qoffset = 0;
1965 vsi->tc_config.tc_info[i].qcount = 1;
1966 vsi->tc_config.tc_info[i].netdev_tc = 0;
1970 /* Set actual Tx/Rx queue pairs */
1971 vsi->num_queue_pairs = offset + qcount;
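/* mqprio TC ranges are contiguous, so the last enabled TC's offset plus
 * its count is the total number of queue pairs in use.
 */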
1973 /* Setup queue TC[0].qmap for given VSI context */
1974 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1975 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1976 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1977 ctxt->info.valid_sections |= cpu_to_le16(sections);
1979 /* Reconfigure RSS for main VSI with max queue count */
1980 vsi->rss_size = max_qcount;
1981 ret = i40e_vsi_config_rss(vsi);
1983 dev_info(&vsi->back->pdev->dev,
1984 "Failed to reconfig rss for num_queues (%u)\n",
1988 vsi->reconfig_rss = true;
1989 dev_dbg(&vsi->back->pdev->dev,
1990 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1992 /* Find queue count available for channel VSIs and starting offset
1995 override_q = vsi->mqprio_qopt.qopt.count[0];
1996 if (override_q && override_q < vsi->num_queue_pairs) {
1997 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1998 vsi->next_base_queue = override_q;
2004 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
2005 * @vsi: the VSI being setup
2006 * @ctxt: VSI context structure
2007 * @enabled_tc: Enabled TCs bitmap
2008 * @is_add: True if called before Add VSI
2010 * Setup VSI queue mapping for enabled traffic classes.
2012 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
2013 struct i40e_vsi_context *ctxt,
2017 struct i40e_pf *pf = vsi->back;
2027 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2029 /* zero out the queue mapping; it will get updated at the end of the function */
2030 memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
2032 if (vsi->type == I40E_VSI_MAIN) {
2033 /* This code helps add more queues to the VSI if we have
2034 * more cores than RSS can support, the higher cores will
2035 * be served by ATR or other filters. Furthermore, a
2036 * non-zero req_queue_pairs says that the user requested a new
2037 * queue count via ethtool's set_channels, so use this
2038 * value for queue distribution across traffic classes.
2039 * We need at least one queue pair for the interface
2040 * to be usable, as we see in the else statement below.
2042 if (vsi->req_queue_pairs > 0)
2043 vsi->num_queue_pairs = vsi->req_queue_pairs;
2044 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2045 vsi->num_queue_pairs = pf->num_lan_msix;
2047 vsi->num_queue_pairs = 1;
2050 /* Number of queues per enabled TC */
2051 if (vsi->type == I40E_VSI_MAIN ||
2052 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
2053 num_tc_qps = vsi->num_queue_pairs;
2055 num_tc_qps = vsi->alloc_queue_pairs;
2057 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2058 /* Find numtc from enabled TC bitmap */
2059 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2060 if (enabled_tc & BIT(i)) /* TC is enabled */
2064 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
2067 num_tc_qps = num_tc_qps / numtc;
2068 num_tc_qps = min_t(int, num_tc_qps,
2069 i40e_pf_get_max_q_per_tc(pf));
2072 vsi->tc_config.numtc = numtc;
2073 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
2075 /* Do not allow more TC queue pairs than there are MSI-X vectors */
2076 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2077 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
2079 /* Setup queue offset/count for all TCs for given VSI */
2080 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2081 /* See if the given TC is enabled for the given VSI */
2082 if (vsi->tc_config.enabled_tc & BIT(i)) {
2086 switch (vsi->type) {
2088 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
2089 I40E_FLAG_FD_ATR_ENABLED)) ||
2090 vsi->tc_config.enabled_tc != 1) {
2091 qcount = min_t(int, pf->alloc_rss_size,
2097 case I40E_VSI_SRIOV:
2098 case I40E_VSI_VMDQ2:
2100 qcount = num_tc_qps;
2104 vsi->tc_config.tc_info[i].qoffset = offset;
2105 vsi->tc_config.tc_info[i].qcount = qcount;
2107 /* find the next higher power-of-2 of num queue pairs */
2110 while (num_qps && (BIT_ULL(pow) < qcount)) {
2115 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
2117 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2118 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
2122 /* TC is not enabled so set the offset to
2123 * default queue and allocate one queue
2126 vsi->tc_config.tc_info[i].qoffset = 0;
2127 vsi->tc_config.tc_info[i].qcount = 1;
2128 vsi->tc_config.tc_info[i].netdev_tc = 0;
2132 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
2134 /* Do not change previously set num_queue_pairs for PFs and VFs */
2135 if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
2136 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
2137 (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
2138 vsi->num_queue_pairs = offset;
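/* offset was advanced past each enabled TC's queues above, so it now
 * holds the total number of queue pairs that were mapped.
 */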
2140 /* Scheduler section valid can only be set for ADD VSI */
2142 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
2144 ctxt->info.up_enable_bits = enabled_tc;
2146 if (vsi->type == I40E_VSI_SRIOV) {
2147 ctxt->info.mapping_flags |=
2148 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2149 for (i = 0; i < vsi->num_queue_pairs; i++)
2150 ctxt->info.queue_mapping[i] =
2151 cpu_to_le16(vsi->base_queue + i);
2153 ctxt->info.mapping_flags |=
2154 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2155 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
2157 ctxt->info.valid_sections |= cpu_to_le16(sections);
2161 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
2162 * @netdev: the netdevice
2163 * @addr: address to add
2165 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
2166 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
2168 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
2170 struct i40e_netdev_priv *np = netdev_priv(netdev);
2171 struct i40e_vsi *vsi = np->vsi;
2173 if (i40e_add_mac_filter(vsi, addr))
2180 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
2181 * @netdev: the netdevice
2182 * @addr: address to remove
2184 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
2185 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
2187 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
2189 struct i40e_netdev_priv *np = netdev_priv(netdev);
2190 struct i40e_vsi *vsi = np->vsi;
2192 /* Under some circumstances, we might receive a request to delete
2193 * our own device address from our uc list. Because we store the
2194 * device address in the VSI's MAC/VLAN filter list, we need to ignore
2195 * such requests and not delete our device address from this list.
2197 if (ether_addr_equal(addr, netdev->dev_addr))
2200 i40e_del_mac_filter(vsi, addr);
2206 * i40e_set_rx_mode - NDO callback to set the netdev filters
2207 * @netdev: network interface device structure
2209 static void i40e_set_rx_mode(struct net_device *netdev)
2211 struct i40e_netdev_priv *np = netdev_priv(netdev);
2212 struct i40e_vsi *vsi = np->vsi;
2214 spin_lock_bh(&vsi->mac_filter_hash_lock);
2216 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2217 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2219 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2221 /* check for other flag changes */
2222 if (vsi->current_netdev_flags != vsi->netdev->flags) {
2223 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2224 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
2229 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2230 * @vsi: Pointer to VSI struct
2231 * @from: Pointer to list which contains MAC filter entries - changes to
2232 * those entries need to be undone.
2234 * MAC filter entries from this list were slated for deletion.
2236 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2237 struct hlist_head *from)
2239 struct i40e_mac_filter *f;
2240 struct hlist_node *h;
2242 hlist_for_each_entry_safe(f, h, from, hlist) {
2243 u64 key = i40e_addr_to_hkey(f->macaddr);
2245 /* Move the element back into the MAC filter list */
2246 hlist_del(&f->hlist);
2247 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2252 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2253 * @vsi: Pointer to vsi struct
2254 * @from: Pointer to list which contains MAC filter entries - changes to
2255 * those entries need to be undone.
2257 * MAC filter entries from this list were slated for addition.
2259 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2260 struct hlist_head *from)
2262 struct i40e_new_mac_filter *new;
2263 struct hlist_node *h;
2265 hlist_for_each_entry_safe(new, h, from, hlist) {
2266 /* We can simply free the wrapper structure */
2267 hlist_del(&new->hlist);
2268 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2274 * i40e_next_filter - Get the next non-broadcast filter from a list
2275 * @next: pointer to filter in list
2277 * Returns the next non-broadcast filter in the list. Required so that we
2278 * ignore broadcast filters within the list, since these are not handled via
2279 * the normal firmware update path.
2282 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2284 hlist_for_each_entry_continue(next, hlist) {
2285 if (!is_broadcast_ether_addr(next->f->macaddr))
2293 * i40e_update_filter_state - Update filter state based on return data
2295 * @count: Number of filters added
2296 * @add_list: return data from fw
2297 * @add_head: pointer to first filter in current batch
2299 * MAC filter entries from the list were slated to be added to the device.
2300 * Returns the number of successful filters. Note that 0 does NOT mean success!
2303 i40e_update_filter_state(int count,
2304 struct i40e_aqc_add_macvlan_element_data *add_list,
2305 struct i40e_new_mac_filter *add_head)
2310 for (i = 0; i < count; i++) {
2311 /* Always check status of each filter. We don't need to check
2312 * the firmware return status because we pre-set the filter
2313 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2314 * request to the adminq. Thus, if it no longer matches then
2315 * we know the filter is active.
2317 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2318 add_head->state = I40E_FILTER_FAILED;
2320 add_head->state = I40E_FILTER_ACTIVE;
2324 add_head = i40e_next_filter(add_head);
2333 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2334 * @vsi: ptr to the VSI
2335 * @vsi_name: name to display in messages
2336 * @list: the list of filters to send to firmware
2337 * @num_del: the number of filters to delete
2338 * @retval: Set to -EIO on failure to delete
2340 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2341 * *retval instead of a return value so that success does not force *retval to
2342 * be set to 0. This ensures that a sequence of calls to this function
2343 * preserves the previous value of *retval on successful delete.
2346 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2347 struct i40e_aqc_remove_macvlan_element_data *list,
2348 int num_del, int *retval)
2350 struct i40e_hw *hw = &vsi->back->hw;
2351 enum i40e_admin_queue_err aq_status;
2354 aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
2357 /* Explicitly ignore and do not report when firmware returns ENOENT */
2358 if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
2360 dev_info(&vsi->back->pdev->dev,
2361 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2362 vsi_name, i40e_stat_str(hw, aq_ret),
2363 i40e_aq_str(hw, aq_status));
2368 * i40e_aqc_add_filters - Request firmware to add a set of filters
2369 * @vsi: ptr to the VSI
2370 * @vsi_name: name to display in messages
2371 * @list: the list of filters to send to firmware
2372 * @add_head: Position in the add hlist
2373 * @num_add: the number of filters to add
2375 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2376 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2377 * space for more filters.
2380 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2381 struct i40e_aqc_add_macvlan_element_data *list,
2382 struct i40e_new_mac_filter *add_head,
2385 struct i40e_hw *hw = &vsi->back->hw;
2386 enum i40e_admin_queue_err aq_status;
2389 i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
2390 fcnt = i40e_update_filter_state(num_add, list, add_head);
2392 if (fcnt != num_add) {
2393 if (vsi->type == I40E_VSI_MAIN) {
2394 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2395 dev_warn(&vsi->back->pdev->dev,
2396 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2397 i40e_aq_str(hw, aq_status), vsi_name);
2398 } else if (vsi->type == I40E_VSI_SRIOV ||
2399 vsi->type == I40E_VSI_VMDQ1 ||
2400 vsi->type == I40E_VSI_VMDQ2) {
2401 dev_warn(&vsi->back->pdev->dev,
2402 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2403 i40e_aq_str(hw, aq_status), vsi_name,
2406 dev_warn(&vsi->back->pdev->dev,
2407 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2408 i40e_aq_str(hw, aq_status), vsi_name,
2415 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2416 * @vsi: pointer to the VSI
2417 * @vsi_name: the VSI name
2420 * This function sets or clears the promiscuous broadcast flags for VLAN
2421 * filters in order to properly receive broadcast frames. Assumes that only
2422 * broadcast filters are passed.
2424 * Returns status indicating success or failure.
2427 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2428 struct i40e_mac_filter *f)
2430 bool enable = f->state == I40E_FILTER_NEW;
2431 struct i40e_hw *hw = &vsi->back->hw;
2434 if (f->vlan == I40E_VLAN_ANY) {
2435 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2440 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2448 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2449 dev_warn(&vsi->back->pdev->dev,
2450 "Error %s, forcing overflow promiscuous on %s\n",
2451 i40e_aq_str(hw, hw->aq.asq_last_status),
2459 * i40e_set_promiscuous - set promiscuous mode
2460 * @pf: board private structure
2461 * @promisc: promisc on or off
2463 * There are different ways of setting promiscuous mode on a PF depending on
2464 * what state/environment we're in. This identifies and sets it appropriately.
2465 * Returns 0 on success.
2467 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2469 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2470 struct i40e_hw *hw = &pf->hw;
2473 if (vsi->type == I40E_VSI_MAIN &&
2474 pf->lan_veb != I40E_NO_VEB &&
2475 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2476 /* set defport ON for Main VSI instead of true promisc
2477 * this way we will get all unicast/multicast and VLAN
2478 * promisc behavior but will not get VF or VMDq traffic
2479 * replicated on the Main VSI.
2482 aq_ret = i40e_aq_set_default_vsi(hw,
2486 aq_ret = i40e_aq_clear_default_vsi(hw,
2490 dev_info(&pf->pdev->dev,
2491 "Set default VSI failed, err %s, aq_err %s\n",
2492 i40e_stat_str(hw, aq_ret),
2493 i40e_aq_str(hw, hw->aq.asq_last_status));
2496 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2502 dev_info(&pf->pdev->dev,
2503 "set unicast promisc failed, err %s, aq_err %s\n",
2504 i40e_stat_str(hw, aq_ret),
2505 i40e_aq_str(hw, hw->aq.asq_last_status));
2507 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2512 dev_info(&pf->pdev->dev,
2513 "set multicast promisc failed, err %s, aq_err %s\n",
2514 i40e_stat_str(hw, aq_ret),
2515 i40e_aq_str(hw, hw->aq.asq_last_status));
2520 pf->cur_promisc = promisc;
2526 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2527 * @vsi: ptr to the VSI
2529 * Push any outstanding VSI filter changes through the AdminQ.
2531 * Returns 0 or error value
2533 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2535 struct hlist_head tmp_add_list, tmp_del_list;
2536 struct i40e_mac_filter *f;
2537 struct i40e_new_mac_filter *new, *add_head = NULL;
2538 struct i40e_hw *hw = &vsi->back->hw;
2539 bool old_overflow, new_overflow;
2540 unsigned int failed_filters = 0;
2541 unsigned int vlan_filters = 0;
2542 char vsi_name[16] = "PF";
2543 int filter_list_len = 0;
2544 i40e_status aq_ret = 0;
2545 u32 changed_flags = 0;
2546 struct hlist_node *h;
2555 /* empty array-typed pointers, kzalloc'd later */
2556 struct i40e_aqc_add_macvlan_element_data *add_list;
2557 struct i40e_aqc_remove_macvlan_element_data *del_list;
2559 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2560 usleep_range(1000, 2000);
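/* Only one sync may run at a time; sleep-poll on the SYNCING_FILTERS
 * bit until this caller owns it.
 */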
2563 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2566 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2567 vsi->current_netdev_flags = vsi->netdev->flags;
2570 INIT_HLIST_HEAD(&tmp_add_list);
2571 INIT_HLIST_HEAD(&tmp_del_list);
2573 if (vsi->type == I40E_VSI_SRIOV)
2574 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2575 else if (vsi->type != I40E_VSI_MAIN)
2576 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2578 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2579 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2581 spin_lock_bh(&vsi->mac_filter_hash_lock);
2582 /* Create a list of filters to delete. */
2583 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2584 if (f->state == I40E_FILTER_REMOVE) {
2585 /* Move the element into temporary del_list */
2586 hash_del(&f->hlist);
2587 hlist_add_head(&f->hlist, &tmp_del_list);
2589 /* Avoid counting removed filters */
2592 if (f->state == I40E_FILTER_NEW) {
2593 /* Create a temporary i40e_new_mac_filter */
2594 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2596 goto err_no_memory_locked;
2598 /* Store pointer to the real filter */
2600 new->state = f->state;
2602 /* Add it to the hash list */
2603 hlist_add_head(&new->hlist, &tmp_add_list);
2606 /* Count the number of active (current and new) VLAN
2607 * filters we have now. Does not count filters which
2608 * are marked for deletion.
2614 if (vsi->type != I40E_VSI_SRIOV)
2615 retval = i40e_correct_mac_vlan_filters
2616 (vsi, &tmp_add_list, &tmp_del_list,
2619 retval = i40e_correct_vf_mac_vlan_filters
2620 (vsi, &tmp_add_list, &tmp_del_list,
2621 vlan_filters, pf->vf[vsi->vf_id].trusted);
2623 hlist_for_each_entry(new, &tmp_add_list, hlist)
2624 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2627 goto err_no_memory_locked;
2629 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2632 /* Now process 'del_list' outside the lock */
2633 if (!hlist_empty(&tmp_del_list)) {
2634 filter_list_len = hw->aq.asq_buf_size /
2635 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2636 list_size = filter_list_len *
2637 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2638 del_list = kzalloc(list_size, GFP_ATOMIC);
2642 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2645 /* handle broadcast filters by updating the broadcast
2646 * promiscuous flag and releasing the filter.
2648 if (is_broadcast_ether_addr(f->macaddr)) {
2649 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2651 hlist_del(&f->hlist);
2656 /* add to delete list */
2657 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2658 if (f->vlan == I40E_VLAN_ANY) {
2659 del_list[num_del].vlan_tag = 0;
2660 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2662 del_list[num_del].vlan_tag =
2663 cpu_to_le16((u16)(f->vlan));
2666 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2667 del_list[num_del].flags = cmd_flags;
2670 /* flush a full buffer */
2671 if (num_del == filter_list_len) {
2672 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2674 memset(del_list, 0, list_size);
2677 /* Release memory for MAC filter entries which were
2678 * synced up with HW.
2680 hlist_del(&f->hlist);
2685 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2693 if (!hlist_empty(&tmp_add_list)) {
2694 /* Do all the adds now. */
2695 filter_list_len = hw->aq.asq_buf_size /
2696 sizeof(struct i40e_aqc_add_macvlan_element_data);
2697 list_size = filter_list_len *
2698 sizeof(struct i40e_aqc_add_macvlan_element_data);
2699 add_list = kzalloc(list_size, GFP_ATOMIC);
2704 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2705 /* handle broadcast filters by updating the broadcast
2706 * promiscuous flag instead of adding a MAC filter.
2708 if (is_broadcast_ether_addr(new->f->macaddr)) {
2709 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2711 new->state = I40E_FILTER_FAILED;
2713 new->state = I40E_FILTER_ACTIVE;
2717 /* add to add array */
2721 ether_addr_copy(add_list[num_add].mac_addr,
2723 if (new->f->vlan == I40E_VLAN_ANY) {
2724 add_list[num_add].vlan_tag = 0;
2725 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2727 add_list[num_add].vlan_tag =
2728 cpu_to_le16((u16)(new->f->vlan));
2730 add_list[num_add].queue_number = 0;
2731 /* set invalid match method for later detection */
2732 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2733 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2734 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2737 /* flush a full buffer */
2738 if (num_add == filter_list_len) {
2739 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2741 memset(add_list, 0, list_size);
2746 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2749 /* Now move all of the filters from the temp add list back to
2752 spin_lock_bh(&vsi->mac_filter_hash_lock);
2753 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2754 /* Only update the state if we're still NEW */
2755 if (new->f->state == I40E_FILTER_NEW)
2756 new->f->state = new->state;
2757 hlist_del(&new->hlist);
2758 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2761 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2766 /* Determine the number of active and failed filters. */
2767 spin_lock_bh(&vsi->mac_filter_hash_lock);
2768 vsi->active_filters = 0;
2769 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2770 if (f->state == I40E_FILTER_ACTIVE)
2771 vsi->active_filters++;
2772 else if (f->state == I40E_FILTER_FAILED)
2775 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2777 /* Check if we are able to exit overflow promiscuous mode. We can
2778 * safely exit if we didn't just enter, we no longer have any failed
2779 * filters, and we have reduced filters below the threshold value.
2781 if (old_overflow && !failed_filters &&
2782 vsi->active_filters < vsi->promisc_threshold) {
2783 dev_info(&pf->pdev->dev,
2784 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2786 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2787 vsi->promisc_threshold = 0;
2790 /* if the VF is not trusted do not do promisc */
2791 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2792 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2796 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2798 /* If we are entering overflow promiscuous, we need to calculate a new
2799 * threshold for when we are safe to exit
2801 if (!old_overflow && new_overflow)
2802 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
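/* i.e. three quarters of the filters active at overflow time;
 * e.g. overflowing with 400 active filters sets the threshold at 300.
 */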
2804 /* check for changes in promiscuous modes */
2805 if (changed_flags & IFF_ALLMULTI) {
2806 bool cur_multipromisc;
2808 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2809 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2814 retval = i40e_aq_rc_to_posix(aq_ret,
2815 hw->aq.asq_last_status);
2816 dev_info(&pf->pdev->dev,
2817 "set multi promisc failed on %s, err %s aq_err %s\n",
2819 i40e_stat_str(hw, aq_ret),
2820 i40e_aq_str(hw, hw->aq.asq_last_status));
2822 dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2823 cur_multipromisc ? "entering" : "leaving");
2827 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2830 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2832 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2834 retval = i40e_aq_rc_to_posix(aq_ret,
2835 hw->aq.asq_last_status);
2836 dev_info(&pf->pdev->dev,
2837 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2838 cur_promisc ? "on" : "off",
2840 i40e_stat_str(hw, aq_ret),
2841 i40e_aq_str(hw, hw->aq.asq_last_status));
2845 /* if something went wrong then set the changed flag so we try again */
2847 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2849 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2853 /* Restore elements on the temporary add and delete lists */
2854 spin_lock_bh(&vsi->mac_filter_hash_lock);
2855 err_no_memory_locked:
2856 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2857 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2858 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2860 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2861 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2866 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2867 * @pf: board private structure
2869 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2875 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2877 if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2878 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2882 for (v = 0; v < pf->num_alloc_vsi; v++) {
2884 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2885 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2886 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2889 /* come back and try again later */
2890 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2899 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2902 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2904 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2905 return I40E_RXBUFFER_2048;
2907 return I40E_RXBUFFER_3072;
2911 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2912 * @netdev: network interface device structure
2913 * @new_mtu: new value for maximum frame size
2915 * Returns 0 on success, negative on failure
2917 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2919 struct i40e_netdev_priv *np = netdev_priv(netdev);
2920 struct i40e_vsi *vsi = np->vsi;
2921 struct i40e_pf *pf = vsi->back;
2923 if (i40e_enabled_xdp_vsi(vsi)) {
2924 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
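/* e.g. new_mtu = 1500 gives 1500 + 14 (ETH_HLEN) + 4 (FCS) + 4 (VLAN)
 * = 1522 bytes, which must fit within a single XDP buffer.
 */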
2926 if (frame_size > i40e_max_xdp_frame_size(vsi))
2930 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2931 netdev->mtu, new_mtu);
2932 netdev->mtu = new_mtu;
2933 if (netif_running(netdev))
2934 i40e_vsi_reinit_locked(vsi);
2935 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2936 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2941 * i40e_ioctl - Access the hwtstamp interface
2942 * @netdev: network interface device structure
2943 * @ifr: interface request data
2944 * @cmd: ioctl command
2946 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2948 struct i40e_netdev_priv *np = netdev_priv(netdev);
2949 struct i40e_pf *pf = np->vsi->back;
2953 return i40e_ptp_get_ts_config(pf, ifr);
2955 return i40e_ptp_set_ts_config(pf, ifr);
2962 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2963 * @vsi: the vsi being adjusted
2965 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2967 struct i40e_vsi_context ctxt;
2970 /* Don't modify stripping options if a port VLAN is active */
2974 if ((vsi->info.valid_sections &
2975 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2976 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2977 return; /* already enabled */
2979 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2980 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2981 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2983 ctxt.seid = vsi->seid;
2984 ctxt.info = vsi->info;
2985 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2987 dev_info(&vsi->back->pdev->dev,
2988 "update vlan stripping failed, err %s aq_err %s\n",
2989 i40e_stat_str(&vsi->back->hw, ret),
2990 i40e_aq_str(&vsi->back->hw,
2991 vsi->back->hw.aq.asq_last_status));
2996 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2997 * @vsi: the vsi being adjusted
2999 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
3001 struct i40e_vsi_context ctxt;
3004 /* Don't modify stripping options if a port VLAN is active */
3008 if ((vsi->info.valid_sections &
3009 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
3010 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3011 I40E_AQ_VSI_PVLAN_EMOD_MASK))
3012 return; /* already disabled */
3014 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3015 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3016 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3018 ctxt.seid = vsi->seid;
3019 ctxt.info = vsi->info;
3020 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3022 dev_info(&vsi->back->pdev->dev,
3023 "update vlan stripping failed, err %s aq_err %s\n",
3024 i40e_stat_str(&vsi->back->hw, ret),
3025 i40e_aq_str(&vsi->back->hw,
3026 vsi->back->hw.aq.asq_last_status));
3031 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
3032 * @vsi: the vsi being configured
3033 * @vid: vlan id to be added (0 = untagged only, -1 = any)
3035 * This is a helper function for adding a new MAC/VLAN filter with the
3036 * specified VLAN for each existing MAC address already in the hash table.
3037 * This function does *not* perform any accounting to update filters based on
3040 * NOTE: this function expects to be called while under the
3041 * mac_filter_hash_lock
3043 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3045 struct i40e_mac_filter *f, *add_f;
3046 struct hlist_node *h;
3049 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3050 /* If we're asked to add a filter that has been marked for
3051 * removal, it is safe to simply restore it to active state.
3052 * __i40e_del_filter will have simply deleted any filters which
3053 * were previously marked NEW or FAILED, so if it is currently
3054 * marked REMOVE it must have previously been ACTIVE. Since we
3055 * haven't yet run the sync filters task, just restore this
3056 * filter to the ACTIVE state so that the sync task leaves it
3059 if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
3060 f->state = I40E_FILTER_ACTIVE;
3062 } else if (f->state == I40E_FILTER_REMOVE) {
3065 add_f = i40e_add_filter(vsi, f->macaddr, vid);
3067 dev_info(&vsi->back->pdev->dev,
3068 "Could not add vlan filter %d for %pM\n",
3078 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
3079 * @vsi: the VSI being configured
3080 * @vid: VLAN id to be added
3082 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
3089 /* The network stack will attempt to add VID=0, with the intention to
3090 * receive priority tagged packets with a VLAN of 0. Our HW receives
3091 * these packets by default when configured to receive untagged
3092 * packets, so we don't need to add a filter for this case.
3093 * Additionally, HW interprets adding a VID=0 filter as meaning to
3094 * receive *only* tagged traffic and stops receiving untagged traffic.
3095 * Thus, we do not want to actually add a filter for VID=0
3100 /* Lock once because all functions invoked below iterate the list */
3101 spin_lock_bh(&vsi->mac_filter_hash_lock);
3102 err = i40e_add_vlan_all_mac(vsi, vid);
3103 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3107 /* schedule our worker thread which will take care of
3108 * applying the new filter changes
3110 i40e_service_event_schedule(vsi->back);
3115 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
3116 * @vsi: the vsi being configured
3117 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
3119 * This function should be used to remove all VLAN filters which match the
3120 * given VID. It does not schedule the service event and does not take the
3121 * mac_filter_hash_lock, so it may be combined with other operations
3122 * under a single hold of the mac_filter_hash_lock.
3124 * NOTE: this function expects to be called while under the
3125 * mac_filter_hash_lock
3127 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3129 struct i40e_mac_filter *f;
3130 struct hlist_node *h;
3133 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3135 __i40e_del_filter(vsi, f);
3140 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
3141 * @vsi: the VSI being configured
3142 * @vid: VLAN id to be removed
3144 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
3146 if (!vid || vsi->info.pvid)
3149 spin_lock_bh(&vsi->mac_filter_hash_lock);
3150 i40e_rm_vlan_all_mac(vsi, vid);
3151 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3153 /* schedule our worker thread which will take care of
3154 * applying the new filter changes
3156 i40e_service_event_schedule(vsi->back);
3160 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
3161 * @netdev: network interface to be adjusted
3162 * @proto: unused protocol value
3163 * @vid: vlan id to be added
3165 * net_device_ops implementation for adding vlan ids
3167 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
3168 __always_unused __be16 proto, u16 vid)
3170 struct i40e_netdev_priv *np = netdev_priv(netdev);
3171 struct i40e_vsi *vsi = np->vsi;
3174 if (vid >= VLAN_N_VID)
3177 ret = i40e_vsi_add_vlan(vsi, vid);
3179 set_bit(vid, vsi->active_vlans);
3185 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
3186 * @netdev: network interface to be adjusted
3187 * @proto: unused protocol value
3188 * @vid: vlan id to be added
3190 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
3191 __always_unused __be16 proto, u16 vid)
3193 struct i40e_netdev_priv *np = netdev_priv(netdev);
3194 struct i40e_vsi *vsi = np->vsi;
3196 if (vid >= VLAN_N_VID)
3198 set_bit(vid, vsi->active_vlans);
3202 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
3203 * @netdev: network interface to be adjusted
3204 * @proto: unused protocol value
3205 * @vid: vlan id to be removed
3207 * net_device_ops implementation for removing vlan ids
3209 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3210 __always_unused __be16 proto, u16 vid)
3212 struct i40e_netdev_priv *np = netdev_priv(netdev);
3213 struct i40e_vsi *vsi = np->vsi;
3215 /* return code is ignored as there is nothing a user
3216 * can do about failure to remove, and a log message was
3217 * already printed by the other function
3219 i40e_vsi_kill_vlan(vsi, vid);
3221 clear_bit(vid, vsi->active_vlans);
3227 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3228 * @vsi: the vsi being brought back up
3230 static void i40e_restore_vlan(struct i40e_vsi *vsi)
3237 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3238 i40e_vlan_stripping_enable(vsi);
3240 i40e_vlan_stripping_disable(vsi);
3242 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3243 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
3248 * i40e_vsi_add_pvid - Add pvid for the VSI
3249 * @vsi: the vsi being adjusted
3250 * @vid: the vlan id to set as a PVID
3252 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3254 struct i40e_vsi_context ctxt;
3257 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3258 vsi->info.pvid = cpu_to_le16(vid);
3259 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
3260 I40E_AQ_VSI_PVLAN_INSERT_PVID |
3261 I40E_AQ_VSI_PVLAN_EMOD_STR;
3263 ctxt.seid = vsi->seid;
3264 ctxt.info = vsi->info;
3265 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3267 dev_info(&vsi->back->pdev->dev,
3268 "add pvid failed, err %s aq_err %s\n",
3269 i40e_stat_str(&vsi->back->hw, ret),
3270 i40e_aq_str(&vsi->back->hw,
3271 vsi->back->hw.aq.asq_last_status));
3279 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3280 * @vsi: the vsi being adjusted
3282 * Just use the vlan_rx_register() service to put it back to normal
3284 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3288 i40e_vlan_stripping_disable(vsi);
3292 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3293 * @vsi: ptr to the VSI
3295 * If this function returns with an error, then it's possible one or
3296 * more of the rings is populated (while the rest are not). It is the
3297 * caller's duty to clean those orphaned rings.
3299 * Return 0 on success, negative on failure
3301 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3305 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3306 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3308 if (!i40e_enabled_xdp_vsi(vsi))
3311 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3312 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3318 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3319 * @vsi: ptr to the VSI
3321 * Free VSI's transmit software resources
3323 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3327 if (vsi->tx_rings) {
3328 for (i = 0; i < vsi->num_queue_pairs; i++)
3329 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3330 i40e_free_tx_resources(vsi->tx_rings[i]);
3333 if (vsi->xdp_rings) {
3334 for (i = 0; i < vsi->num_queue_pairs; i++)
3335 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3336 i40e_free_tx_resources(vsi->xdp_rings[i]);
3341 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3342 * @vsi: ptr to the VSI
3344 * If this function returns with an error, then it's possible one or
3345 * more of the rings is populated (while the rest are not). It is the
3346 * caller's duty to clean those orphaned rings.
3348 * Return 0 on success, negative on failure
3350 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3354 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3355 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3360 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3361 * @vsi: ptr to the VSI
3363 * Free all receive software resources
3365 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3372 for (i = 0; i < vsi->num_queue_pairs; i++)
3373 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3374 i40e_free_rx_resources(vsi->rx_rings[i]);
3378 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3379 * @ring: The Tx ring to configure
3381 * This enables/disables XPS for a given Tx descriptor ring
3382 * based on the TCs enabled for the VSI that ring belongs to.
3384 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3388 if (!ring->q_vector || !ring->netdev || ring->ch)
3391 /* We only initialize XPS once, so as not to overwrite user settings */
3392 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3395 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3396 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3401 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
3402 * @ring: The Tx or Rx ring
3404 * Returns the AF_XDP buffer pool or NULL.
3406 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3408 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3409 int qid = ring->queue_index;
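/* XDP Tx rings are stored after the regular rings, so fold an XDP
 * ring's index back onto its queue pair id before the pool lookup.
 */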
3411 if (ring_is_xdp(ring))
3412 qid -= ring->vsi->alloc_queue_pairs;
3414 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3417 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
3421 * i40e_configure_tx_ring - Configure a transmit ring context and related registers
3422 * @ring: The Tx ring to configure
3424 * Configure the Tx descriptor ring in the HMC context.
3426 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3428 struct i40e_vsi *vsi = ring->vsi;
3429 u16 pf_q = vsi->base_queue + ring->queue_index;
3430 struct i40e_hw *hw = &vsi->back->hw;
3431 struct i40e_hmc_obj_txq tx_ctx;
3432 i40e_status err = 0;
3435 if (ring_is_xdp(ring))
3436 ring->xsk_pool = i40e_xsk_pool(ring);
3438 /* some ATR related tx ring init */
3439 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3440 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3441 ring->atr_count = 0;
3443 ring->atr_sample_rate = 0;
3447 i40e_config_xps_tx_ring(ring);
3449 /* clear the context structure first */
3450 memset(&tx_ctx, 0, sizeof(tx_ctx));
3452 tx_ctx.new_context = 1;
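/* the HMC queue context stores the ring base address in 128-byte units */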
3453 tx_ctx.base = (ring->dma / 128);
3454 tx_ctx.qlen = ring->count;
3455 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3456 I40E_FLAG_FD_ATR_ENABLED));
3457 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3458 /* FDIR VSI tx ring can still use RS bit and writebacks */
3459 if (vsi->type != I40E_VSI_FDIR)
3460 tx_ctx.head_wb_ena = 1;
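/* place the head writeback area immediately after the descriptor ring */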
3461 tx_ctx.head_wb_addr = ring->dma +
3462 (ring->count * sizeof(struct i40e_tx_desc));
3464 /* As part of VSI creation/update, FW allocates certain
3465 * Tx arbitration queue sets for each TC enabled for
3466 * the VSI. The FW returns the handles to these queue
3467 * sets as part of the response buffer to Add VSI,
3468 * Update VSI, etc. AQ commands. It is expected that
3469 * these queue set handles be associated with the Tx
3470 * queues by the driver as part of the TX queue context
3471 * initialization. This has to be done regardless of
3472 * DCB as by default everything is mapped to TC0.
3477 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3480 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3482 tx_ctx.rdylist_act = 0;
3484 /* clear the context in the HMC */
3485 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3487 dev_info(&vsi->back->pdev->dev,
3488 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3489 ring->queue_index, pf_q, err);
3493 /* set the context in the HMC */
3494 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3496 dev_info(&vsi->back->pdev->dev,
3497 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3498 ring->queue_index, pf_q, err);
3502 /* Now associate this queue with this PCI function */
3504 if (ring->ch->type == I40E_VSI_VMDQ2)
3505 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3509 qtx_ctl |= (ring->ch->vsi_number <<
3510 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3511 I40E_QTX_CTL_VFVM_INDX_MASK;
3513 if (vsi->type == I40E_VSI_VMDQ2) {
3514 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3515 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3516 I40E_QTX_CTL_VFVM_INDX_MASK;
3518 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3522 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3523 I40E_QTX_CTL_PF_INDX_MASK);
3524 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3527 /* cache the tail register address for easier writes later */
3528 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3534 * i40e_rx_offset - Return expected offset into page to access data
3535 * @rx_ring: Ring we are requesting offset of
3537 * Returns the offset value for ring into the data buffer.
3539 static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
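/* build_skb needs headroom in front of the packet data so the stack can
 * push headers without reallocating; legacy Rx places data at offset 0.
 */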
3541 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3545 * i40e_configure_rx_ring - Configure a receive ring context
3546 * @ring: The Rx ring to configure
3548 * Configure the Rx descriptor ring in the HMC context.
3550 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3552 struct i40e_vsi *vsi = ring->vsi;
3553 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3554 u16 pf_q = vsi->base_queue + ring->queue_index;
3555 struct i40e_hw *hw = &vsi->back->hw;
3556 struct i40e_hmc_obj_rxq rx_ctx;
3557 i40e_status err = 0;
3561 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3563 /* clear the context structure first */
3564 memset(&rx_ctx, 0, sizeof(rx_ctx));
3566 if (ring->vsi->type == I40E_VSI_MAIN)
3567 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3569 ring->xsk_pool = i40e_xsk_pool(ring);
3570 if (ring->xsk_pool) {
3572 xsk_pool_get_rx_frame_size(ring->xsk_pool);
3573 /* For AF_XDP ZC, we disallow packets spanning
3574 * multiple buffers, thus letting us skip that
3575 * handling in the fast path.
3578 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3579 MEM_TYPE_XSK_BUFF_POOL,
3583 dev_info(&vsi->back->pdev->dev,
3584 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3588 ring->rx_buf_len = vsi->rx_buf_len;
3589 if (ring->vsi->type == I40E_VSI_MAIN) {
3590 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3591 MEM_TYPE_PAGE_SHARED,
3598 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3599 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
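/* i.e. the Rx buffer length is programmed into the context in 128-byte
 * units (1 << I40E_RXQ_CTX_DBUFF_SHIFT).
 */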
3601 rx_ctx.base = (ring->dma / 128);
3602 rx_ctx.qlen = ring->count;
3604 /* use 16 byte descriptors */
3607 /* descriptor type is always zero
3610 rx_ctx.hsplit_0 = 0;
3612 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3613 if (hw->revision_id == 0)
3614 rx_ctx.lrxqthresh = 0;
3616 rx_ctx.lrxqthresh = 1;
3617 rx_ctx.crcstrip = 1;
3619 /* this controls whether VLAN is stripped from inner headers */
3621 /* set the prefena field to 1 because the manual says to */
3624 /* clear the context in the HMC */
3625 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3627 dev_info(&vsi->back->pdev->dev,
3628 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3629 ring->queue_index, pf_q, err);
3633 /* set the context in the HMC */
3634 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3636 dev_info(&vsi->back->pdev->dev,
3637 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3638 ring->queue_index, pf_q, err);
3642 /* configure Rx buffer alignment */
3643 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3644 clear_ring_build_skb_enabled(ring);
3646 set_ring_build_skb_enabled(ring);
3648 ring->rx_offset = i40e_rx_offset(ring);
3650 /* cache tail for quicker writes, and clear the reg before use */
3651 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3652 writel(0, ring->tail);
3654 if (ring->xsk_pool) {
3655 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3656 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3658 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3661 /* Log this in case the user has forgotten to give the kernel
3662 * any buffers; the application may still provide them later.
3664 dev_info(&vsi->back->pdev->dev,
3665 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3666 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3667 ring->queue_index, pf_q);
3674 * i40e_vsi_configure_tx - Configure the VSI for Tx
3675 * @vsi: VSI structure describing this set of rings and resources
3677 * Configure the Tx VSI for operation.
3679 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3684 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3685 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3687 if (err || !i40e_enabled_xdp_vsi(vsi))
3690 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3691 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3697 * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
3699 * @vsi: VSI to calculate rx_buf_len from
3701 static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
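/* In short: legacy Rx or no netdev gets 2048-byte buffers; on sub-8K
 * pages a standard 1500-byte MTU (when 2K-with-padding is viable) gets
 * 1536 - NET_IP_ALIGN; anything else gets 3072 on sub-8K pages and
 * 2048 otherwise.
 */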
3703 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3704 return I40E_RXBUFFER_2048;
3706 #if (PAGE_SIZE < 8192)
3707 if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN)
3708 return I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3711 return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
3715 * i40e_vsi_configure_rx - Configure the VSI for Rx
3716 * @vsi: the VSI being configured
3718 * Configure the Rx VSI for operation.
3720 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3725 vsi->max_frame = I40E_MAX_RXBUFFER;
3726 vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
3728 #if (PAGE_SIZE < 8192)
3729 if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
3730 vsi->netdev->mtu <= ETH_DATA_LEN)
3731 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3734 /* set up individual rings */
3735 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3736 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3742 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3743 * @vsi: ptr to the VSI
3745 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3747 struct i40e_ring *tx_ring, *rx_ring;
3748 u16 qoffset, qcount;
3751 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3752 /* Reset the TC information */
3753 for (i = 0; i < vsi->num_queue_pairs; i++) {
3754 rx_ring = vsi->rx_rings[i];
3755 tx_ring = vsi->tx_rings[i];
3756 rx_ring->dcb_tc = 0;
3757 tx_ring->dcb_tc = 0;
3762 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3763 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3766 qoffset = vsi->tc_config.tc_info[n].qoffset;
3767 qcount = vsi->tc_config.tc_info[n].qcount;
3768 for (i = qoffset; i < (qoffset + qcount); i++) {
3769 rx_ring = vsi->rx_rings[i];
3770 tx_ring = vsi->tx_rings[i];
3771 rx_ring->dcb_tc = n;
3772 tx_ring->dcb_tc = n;
3778 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3779 * @vsi: ptr to the VSI
3781 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3784 i40e_set_rx_mode(vsi->netdev);
3788 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3789 * @pf: Pointer to the targeted PF
3791 * Set all flow director counters to 0.
3793 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3795 pf->fd_tcp4_filter_cnt = 0;
3796 pf->fd_udp4_filter_cnt = 0;
3797 pf->fd_sctp4_filter_cnt = 0;
3798 pf->fd_ip4_filter_cnt = 0;
3799 pf->fd_tcp6_filter_cnt = 0;
3800 pf->fd_udp6_filter_cnt = 0;
3801 pf->fd_sctp6_filter_cnt = 0;
3802 pf->fd_ip6_filter_cnt = 0;
3806 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3807 * @vsi: Pointer to the targeted VSI
3809 * This function replays the hlist in which all the SB Flow Director
3810 * filters were saved, programming them back into the hw.
3812 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3814 struct i40e_fdir_filter *filter;
3815 struct i40e_pf *pf = vsi->back;
3816 struct hlist_node *node;
3818 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3821 /* Reset FDir counters as we're replaying all existing filters */
3822 i40e_reset_fdir_filter_cnt(pf);
3824 hlist_for_each_entry_safe(filter, node,
3825 &pf->fdir_filter_list, fdir_node) {
3826 i40e_add_del_fdir(vsi, filter, true);
3831 * i40e_vsi_configure - Set up the VSI for action
3832 * @vsi: the VSI being configured
3834 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3838 i40e_set_vsi_rx_mode(vsi);
3839 i40e_restore_vlan(vsi);
3840 i40e_vsi_config_dcb_rings(vsi);
3841 err = i40e_vsi_configure_tx(vsi);
3843 err = i40e_vsi_configure_rx(vsi);
3849 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3850 * @vsi: the VSI being configured
3852 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3854 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3855 struct i40e_pf *pf = vsi->back;
3856 struct i40e_hw *hw = &pf->hw;
3861 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3862 * and PFINT_LNKLSTn registers, e.g.:
3863 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3865 qp = vsi->base_queue;
3866 vector = vsi->base_vector;
3867 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3868 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3870 q_vector->rx.next_update = jiffies + 1;
3871 q_vector->rx.target_itr =
3872 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3873 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3874 q_vector->rx.target_itr >> 1);
3875 q_vector->rx.current_itr = q_vector->rx.target_itr;
3877 q_vector->tx.next_update = jiffies + 1;
3878 q_vector->tx.target_itr =
3879 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3880 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3881 q_vector->tx.target_itr >> 1);
3882 q_vector->tx.current_itr = q_vector->tx.target_itr;
3884 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3885 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3887 /* begin of linked list for RX queue assigned to this vector */
3888 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3889 for (q = 0; q < q_vector->num_ringpairs; q++) {
3890 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
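/* with XDP on, each vector's linked list visits the XDP Tx queue
 * (qp + alloc_queue_pairs) between this pair's Rx and regular Tx
 * entries.
 */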
3893 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3894 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3895 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3896 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3897 (I40E_QUEUE_TYPE_TX <<
3898 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3900 wr32(hw, I40E_QINT_RQCTL(qp), val);
3903 /* TX queue with next queue set to TX */
3904 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3905 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3906 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3907 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3908 (I40E_QUEUE_TYPE_TX <<
3909 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3911 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3913 /* TX queue with next RX or end of linked list */
3914 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3915 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3916 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3917 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3918 (I40E_QUEUE_TYPE_RX <<
3919 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3921 /* Terminate the linked list */
3922 if (q == (q_vector->num_ringpairs - 1))
3923 val |= (I40E_QUEUE_END_OF_LIST <<
3924 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3926 wr32(hw, I40E_QINT_TQCTL(qp), val);
3935 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3936 * @pf: pointer to private device data structure
3938 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3940 struct i40e_hw *hw = &pf->hw;
3943 /* clear things first */
3944 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3945 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3947 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3948 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3949 I40E_PFINT_ICR0_ENA_GRST_MASK |
3950 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3951 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3952 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3953 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3954 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3956 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3957 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3959 if (pf->flags & I40E_FLAG_PTP)
3960 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3962 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3964 /* SW_ITR_IDX = 0, but don't change INTENA */
3965 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3966 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3968 /* OTHER_ITR_IDX = 0 */
3969 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3973 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3974 * @vsi: the VSI being configured
3976 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3978 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3979 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3980 struct i40e_pf *pf = vsi->back;
3981 struct i40e_hw *hw = &pf->hw;
3983 /* set the ITR configuration */
3984 q_vector->rx.next_update = jiffies + 1;
3985 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3986 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3987 q_vector->rx.current_itr = q_vector->rx.target_itr;
3988 q_vector->tx.next_update = jiffies + 1;
3989 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3990 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3991 q_vector->tx.current_itr = q_vector->tx.target_itr;
3993 i40e_enable_misc_int_causes(pf);
3995 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3996 wr32(hw, I40E_PFINT_LNKLST0, 0);
3998 /* Associate the queue pair with the vector and enable the queue
3999 * interrupt: RX queue in a linked list with the next queue set to TX
4001 wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
4003 if (i40e_enabled_xdp_vsi(vsi)) {
4004 /* TX queue in linked list with next queue set to TX */
4005 wr32(hw, I40E_QINT_TQCTL(nextqp),
4006 I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
4009 /* last TX queue so the next RX queue doesn't matter */
4010 wr32(hw, I40E_QINT_TQCTL(0),
4011 I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
4016 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
4017 * @pf: board private structure
4019 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
4021 struct i40e_hw *hw = &pf->hw;
4023 wr32(hw, I40E_PFINT_DYN_CTL0,
4024 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4029 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
4030 * @pf: board private structure
4032 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
4034 struct i40e_hw *hw = &pf->hw;
4037 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4038 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4039 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4041 wr32(hw, I40E_PFINT_DYN_CTL0, val);
4046 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
4047 * @irq: interrupt number
4048 * @data: pointer to a q_vector
4050 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
4052 struct i40e_q_vector *q_vector = data;
4054 if (!q_vector->tx.ring && !q_vector->rx.ring)
4057 napi_schedule_irqoff(&q_vector->napi);
4063 * i40e_irq_affinity_notify - Callback for affinity changes
4064 * @notify: context as to what irq was changed
4065 * @mask: the new affinity mask
4067 * This is a callback function used by the irq_set_affinity_notifier function
4068 * so that we may register to receive changes to the irq affinity masks.
4070 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
4071 const cpumask_t *mask)
4073 struct i40e_q_vector *q_vector =
4074 container_of(notify, struct i40e_q_vector, affinity_notify);
4076 cpumask_copy(&q_vector->affinity_mask, mask);
4080 * i40e_irq_affinity_release - Callback for affinity notifier release
4081 * @ref: internal core kernel usage
4083 * This is a callback function used by the irq_set_affinity_notifier function
4084 * to inform the current notification subscriber that they will no longer
4085 * receive notifications.
4087 static void i40e_irq_affinity_release(struct kref *ref) {}
4090 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
4091 * @vsi: the VSI being configured
4092 * @basename: name for the vector
4094 * Allocates MSI-X vectors and requests interrupts from the kernel.
4096 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
4098 int q_vectors = vsi->num_q_vectors;
4099 struct i40e_pf *pf = vsi->back;
4100 int base = vsi->base_vector;
4107 for (vector = 0; vector < q_vectors; vector++) {
4108 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
4110 irq_num = pf->msix_entries[base + vector].vector;
4112 if (q_vector->tx.ring && q_vector->rx.ring) {
4113 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4114 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
4116 } else if (q_vector->rx.ring) {
4117 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4118 "%s-%s-%d", basename, "rx", rx_int_idx++);
4119 } else if (q_vector->tx.ring) {
4120 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4121 "%s-%s-%d", basename, "tx", tx_int_idx++);
4122 } else {
4123 /* skip this unused q_vector */
4124 continue;
4126 err = request_irq(irq_num,
4127 vsi->irq_handler,
4128 0,
4129 q_vector->name,
4130 q_vector);
4131 if (err) {
4132 dev_info(&pf->pdev->dev,
4133 "MSIX request_irq failed, error: %d\n", err);
4134 goto free_queue_irqs;
4137 /* register for affinity change notifications */
4138 q_vector->irq_num = irq_num;
4139 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
4140 q_vector->affinity_notify.release = i40e_irq_affinity_release;
4141 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
4142 /* Spread affinity hints out across online CPUs.
4144 * get_cpu_mask returns a static constant mask with
4145 * a permanent lifetime so it's ok to pass to
4146 * irq_update_affinity_hint without making a copy.
4148 cpu = cpumask_local_spread(q_vector->v_idx, -1);
4149 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
4152 vsi->irqs_ready = true;
4153 return 0;
4155 free_queue_irqs:
4156 while (vector) {
4157 vector--;
4158 irq_num = pf->msix_entries[base + vector].vector;
4159 irq_set_affinity_notifier(irq_num, NULL);
4160 irq_update_affinity_hint(irq_num, NULL);
4161 free_irq(irq_num, &vsi->q_vectors[vector]);
4163 return err;
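/* Editor's illustration (not part of the driver source): for basename
 * "i40e-eth0" the loop above yields vector names such as "i40e-eth0-TxRx-0",
 * "i40e-eth0-TxRx-1", ..., which is what appears in /proc/interrupts.
 * The TxRx case reduces to:
 */
static void example_vector_name(char *buf, size_t len,
				const char *basename, int idx)
{
	snprintf(buf, len - 1, "%s-%s-%d", basename, "TxRx", idx);
}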
4167 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
4168 * @vsi: the VSI being un-configured
4170 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
4172 struct i40e_pf *pf = vsi->back;
4173 struct i40e_hw *hw = &pf->hw;
4174 int base = vsi->base_vector;
4177 /* disable interrupt causation from each queue */
4178 for (i = 0; i < vsi->num_queue_pairs; i++) {
4181 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
4182 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
4183 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
4185 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
4186 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
4187 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
4189 if (!i40e_enabled_xdp_vsi(vsi))
4191 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
4194 /* disable each interrupt */
4195 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4196 for (i = vsi->base_vector;
4197 i < (vsi->num_q_vectors + vsi->base_vector); i++)
4198 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
4199 i40e_flush(hw);
4201 for (i = 0; i < vsi->num_q_vectors; i++)
4202 synchronize_irq(pf->msix_entries[i + base].vector);
4203 } else {
4204 /* Legacy and MSI mode - this stops all interrupt handling */
4205 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
4206 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
4207 i40e_flush(hw);
4208 synchronize_irq(pf->pdev->irq);
4213 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4214 * @vsi: the VSI being configured
4216 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
4218 struct i40e_pf *pf = vsi->back;
4221 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4222 for (i = 0; i < vsi->num_q_vectors; i++)
4223 i40e_irq_dynamic_enable(vsi, i);
4224 } else {
4225 i40e_irq_dynamic_enable_icr0(pf);
4228 i40e_flush(&pf->hw);
4230 return 0;
4233 * i40e_free_misc_vector - Free the vector that handles non-queue events
4234 * @pf: board private structure
4236 static void i40e_free_misc_vector(struct i40e_pf *pf)
4239 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
4240 i40e_flush(&pf->hw);
4242 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4243 free_irq(pf->msix_entries[0].vector, pf);
4244 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
4249 * i40e_intr - MSI/Legacy and non-queue interrupt handler
4250 * @irq: interrupt number
4251 * @data: pointer to a q_vector
4253 * This is the handler used for all MSI/Legacy interrupts, and deals
4254 * with both queue and non-queue interrupts. This is also used in
4255 * MSIX mode to handle the non-queue interrupts.
4257 static irqreturn_t i40e_intr(int irq, void *data)
4259 struct i40e_pf *pf = (struct i40e_pf *)data;
4260 struct i40e_hw *hw = &pf->hw;
4261 irqreturn_t ret = IRQ_NONE;
4262 u32 icr0, icr0_remaining;
4265 icr0 = rd32(hw, I40E_PFINT_ICR0);
4266 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
4268 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
4269 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4270 goto enable_intr;
4272 /* if interrupt but no bits showing, must be SWINT */
4273 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4274 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4275 pf->sw_int_count++;
4277 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
4278 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4279 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4280 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4281 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4284 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
4285 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4286 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4287 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4289 /* We do not have a way to disarm queue causes while leaving
4290 * the interrupt enabled for all other causes. Ideally the
4291 * interrupt would be disabled while we are in NAPI, but
4292 * this is not a performance path and napi_schedule()
4293 * can deal with rescheduling.
4295 if (!test_bit(__I40E_DOWN, pf->state))
4296 napi_schedule_irqoff(&q_vector->napi);
4299 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4300 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4301 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4302 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4305 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4306 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4307 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4310 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4311 /* disable any further VFLR event notifications */
4312 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4313 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4315 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4316 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4318 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4319 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4323 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4324 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4325 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4326 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4327 val = rd32(hw, I40E_GLGEN_RSTAT);
4328 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4329 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4330 if (val == I40E_RESET_CORER) {
4331 pf->corer_count++;
4332 } else if (val == I40E_RESET_GLOBR) {
4333 pf->globr_count++;
4334 } else if (val == I40E_RESET_EMPR) {
4335 pf->empr_count++;
4336 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4340 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4341 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4342 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4343 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4344 rd32(hw, I40E_PFHMC_ERRORINFO),
4345 rd32(hw, I40E_PFHMC_ERRORDATA));
4348 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4349 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4351 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
4352 schedule_work(&pf->ptp_extts0_work);
4354 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
4355 i40e_ptp_tx_hwtstamp(pf);
4357 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4360 /* If a critical error is pending we have no choice but to reset the
4361 * device.
4362 * Report and mask out any remaining unexpected interrupts.
4364 icr0_remaining = icr0 & ena_mask;
4365 if (icr0_remaining) {
4366 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4367 icr0_remaining);
4368 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4369 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4370 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4371 dev_info(&pf->pdev->dev, "device will be reset\n");
4372 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4373 i40e_service_event_schedule(pf);
4375 ena_mask &= ~icr0_remaining;
4379 enable_intr:
4380 /* re-enable interrupt causes */
4381 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4382 if (!test_bit(__I40E_DOWN, pf->state) ||
4383 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4384 i40e_service_event_schedule(pf);
4385 i40e_irq_dynamic_enable_icr0(pf);
4388 return ret;
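/* Editor's illustration (not part of the driver source): every cause handled
 * above follows the same three-step pattern - test the ICR0 bit, drop it from
 * ena_mask so it stays masked until the service task runs, and latch a
 * pf->state flag. The AdminQ case, in isolation:
 */
static void example_handle_adminq_cause(struct i40e_pf *pf, u32 icr0,
					u32 *ena_mask)
{
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		*ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
	}
}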
4392 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4393 * @tx_ring: tx ring to clean
4394 * @budget: how many cleans we're allowed
4396 * Returns true if there's any budget left (i.e. the clean is finished)
4398 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4400 struct i40e_vsi *vsi = tx_ring->vsi;
4401 u16 i = tx_ring->next_to_clean;
4402 struct i40e_tx_buffer *tx_buf;
4403 struct i40e_tx_desc *tx_desc;
4405 tx_buf = &tx_ring->tx_bi[i];
4406 tx_desc = I40E_TX_DESC(tx_ring, i);
4407 i -= tx_ring->count;
4409 do {
4410 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4412 /* if next_to_watch is not set then there is no work pending */
4413 if (!eop_desc)
4414 break;
4416 /* prevent any other reads prior to eop_desc */
4417 smp_rmb();
4419 /* if the descriptor isn't done, no work yet to do */
4420 if (!(eop_desc->cmd_type_offset_bsz &
4421 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4424 /* clear next_to_watch to prevent false hangs */
4425 tx_buf->next_to_watch = NULL;
4427 tx_desc->buffer_addr = 0;
4428 tx_desc->cmd_type_offset_bsz = 0;
4429 /* move past filter desc */
4430 tx_buf++;
4431 tx_desc++;
4432 i++;
4433 if (unlikely(!i)) {
4434 i -= tx_ring->count;
4435 tx_buf = tx_ring->tx_bi;
4436 tx_desc = I40E_TX_DESC(tx_ring, 0);
4438 /* unmap skb header data */
4439 dma_unmap_single(tx_ring->dev,
4440 dma_unmap_addr(tx_buf, dma),
4441 dma_unmap_len(tx_buf, len),
4442 DMA_TO_DEVICE);
4443 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4444 kfree(tx_buf->raw_buf);
4446 tx_buf->raw_buf = NULL;
4447 tx_buf->tx_flags = 0;
4448 tx_buf->next_to_watch = NULL;
4449 dma_unmap_len_set(tx_buf, len, 0);
4450 tx_desc->buffer_addr = 0;
4451 tx_desc->cmd_type_offset_bsz = 0;
4453 /* move us past the eop_desc for start of next FD desc */
4454 tx_buf++;
4455 tx_desc++;
4456 i++;
4457 if (unlikely(!i)) {
4458 i -= tx_ring->count;
4459 tx_buf = tx_ring->tx_bi;
4460 tx_desc = I40E_TX_DESC(tx_ring, 0);
4463 /* update budget accounting */
4464 budget--;
4465 } while (likely(budget));
4467 i += tx_ring->count;
4468 tx_ring->next_to_clean = i;
4470 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4471 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4473 return budget > 0;
4477 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4478 * @irq: interrupt number
4479 * @data: pointer to a q_vector
4481 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4483 struct i40e_q_vector *q_vector = data;
4484 struct i40e_vsi *vsi;
4486 if (!q_vector->tx.ring)
4487 return IRQ_HANDLED;
4489 vsi = q_vector->tx.ring->vsi;
4490 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4492 return IRQ_HANDLED;
4496 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4497 * @vsi: the VSI being configured
4498 * @v_idx: vector index
4499 * @qp_idx: queue pair index
4501 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4503 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4504 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4505 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4507 tx_ring->q_vector = q_vector;
4508 tx_ring->next = q_vector->tx.ring;
4509 q_vector->tx.ring = tx_ring;
4510 q_vector->tx.count++;
4512 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4513 if (i40e_enabled_xdp_vsi(vsi)) {
4514 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4516 xdp_ring->q_vector = q_vector;
4517 xdp_ring->next = q_vector->tx.ring;
4518 q_vector->tx.ring = xdp_ring;
4519 q_vector->tx.count++;
4522 rx_ring->q_vector = q_vector;
4523 rx_ring->next = q_vector->rx.ring;
4524 q_vector->rx.ring = rx_ring;
4525 q_vector->rx.count++;
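/* Editor's illustration (not part of the driver source): rings are pushed
 * onto the q_vector lists head-first, so after mapping qps 0 and 1 to one
 * vector the Tx list reads tx_rings[1] -> tx_rings[0] (XDP rings interleave
 * when enabled). Walking such a list:
 */
static unsigned int example_count_tx_rings(struct i40e_q_vector *q_vector)
{
	struct i40e_ring *ring;
	unsigned int n = 0;

	for (ring = q_vector->tx.ring; ring; ring = ring->next)
		n++;
	return n;
}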
4529 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4530 * @vsi: the VSI being configured
4532 * This function maps descriptor rings to the queue-specific vectors
4533 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4534 * one vector per queue pair, but on a constrained vector budget, we
4535 * group the queue pairs as "efficiently" as possible.
4537 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4539 int qp_remaining = vsi->num_queue_pairs;
4540 int q_vectors = vsi->num_q_vectors;
4545 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4546 * group them so there are multiple queues per vector.
4547 * It is also important to go through all the vectors available, so
4548 * that, if we don't use all of them, the remaining vectors are
4549 * cleared. This is especially important when decreasing the
4550 * number of queues in use.
4552 for (; v_start < q_vectors; v_start++) {
4553 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4555 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4557 q_vector->num_ringpairs = num_ringpairs;
4558 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4560 q_vector->rx.count = 0;
4561 q_vector->tx.count = 0;
4562 q_vector->rx.ring = NULL;
4563 q_vector->tx.ring = NULL;
4565 while (num_ringpairs--) {
4566 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4567 qp_idx++;
4568 qp_remaining--;
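/* Editor's worked example (not part of the driver source): 10 queue pairs on
 * 4 vectors. DIV_ROUND_UP(qp_remaining, q_vectors - v_start) hands out
 * 3, 3, 2, 2 ringpairs as v_start advances, so earlier vectors absorb the
 * remainder and no vector is skipped:
 */
static int example_ringpairs_for_vector(int qp_total, int q_vectors, int v)
{
	int qp_remaining = qp_total;
	int i;

	for (i = 0; i < v; i++)
		qp_remaining -= DIV_ROUND_UP(qp_remaining, q_vectors - i);
	return DIV_ROUND_UP(qp_remaining, q_vectors - v);
}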
4574 * i40e_vsi_request_irq - Request IRQ from the OS
4575 * @vsi: the VSI being configured
4576 * @basename: name for the vector
4578 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4580 struct i40e_pf *pf = vsi->back;
4583 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4584 err = i40e_vsi_request_irq_msix(vsi, basename);
4585 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4586 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4587 pf->int_name, pf);
4588 else
4589 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4590 pf->int_name, pf);
4592 if (err)
4593 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4595 return err;
4598 #ifdef CONFIG_NET_POLL_CONTROLLER
4600 * i40e_netpoll - A Polling 'interrupt' handler
4601 * @netdev: network interface device structure
4603 * This is used by netconsole to send skbs without having to re-enable
4604 * interrupts. It's not called while the normal interrupt routine is executing.
4606 static void i40e_netpoll(struct net_device *netdev)
4608 struct i40e_netdev_priv *np = netdev_priv(netdev);
4609 struct i40e_vsi *vsi = np->vsi;
4610 struct i40e_pf *pf = vsi->back;
4613 /* if interface is down do nothing */
4614 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4615 return;
4617 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4618 for (i = 0; i < vsi->num_q_vectors; i++)
4619 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4620 } else {
4621 i40e_intr(pf->pdev->irq, netdev);
4626 #define I40E_QTX_ENA_WAIT_COUNT 50
4629 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4630 * @pf: the PF being configured
4631 * @pf_q: the PF queue
4632 * @enable: enable or disable state of the queue
4634 * This routine will wait for the given Tx queue of the PF to reach the
4635 * enabled or disabled state.
4636 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4637 * multiple retries; else will return 0 in case of success.
4639 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4644 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4645 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4646 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4649 usleep_range(10, 20);
4651 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4652 return -ETIMEDOUT;
4654 return 0;
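/* Editor's illustration (not part of the driver source): the poll above
 * checks QTX_ENA every 10-20 us, so the worst case is roughly
 * I40E_QUEUE_WAIT_RETRY_LIMIT * 20 us before -ETIMEDOUT. Typical caller
 * pattern, once the QENA_REQ bit has been cleared via I40E_QTX_ENA:
 */
static int example_wait_tx_disable(struct i40e_pf *pf, int pf_q)
{
	return i40e_pf_txq_wait(pf, pf_q, false);
}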
4658 * i40e_control_tx_q - Start or stop a particular Tx queue
4659 * @pf: the PF structure
4660 * @pf_q: the PF queue to configure
4661 * @enable: start or stop the queue
4663 * This function enables or disables a single queue. Note that any delay
4664 * required after the operation is expected to be handled by the caller of
4665 * this function.
4667 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4669 struct i40e_hw *hw = &pf->hw;
4673 /* warn the TX unit of coming changes */
4674 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4675 if (!enable)
4676 usleep_range(10, 20);
4678 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4679 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4680 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4681 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4683 usleep_range(1000, 2000);
4686 /* Skip if the queue is already in the requested state */
4687 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4690 /* turn on/off the queue */
4691 if (enable) {
4692 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4693 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4694 } else {
4695 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4698 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4702 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4704 * @pf: the PF structure
4705 * @pf_q: the PF queue to configure
4706 * @is_xdp: true if the queue is used for XDP
4707 * @enable: start or stop the queue
4709 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4710 bool is_xdp, bool enable)
4714 i40e_control_tx_q(pf, pf_q, enable);
4716 /* wait for the change to finish */
4717 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4718 if (ret) {
4719 dev_info(&pf->pdev->dev,
4720 "VSI seid %d %sTx ring %d %sable timeout\n",
4721 seid, (is_xdp ? "XDP " : ""), pf_q,
4722 (enable ? "en" : "dis"));
4725 return ret;
4729 * i40e_vsi_enable_tx - Start a VSI's rings
4730 * @vsi: the VSI being configured
4732 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4734 struct i40e_pf *pf = vsi->back;
4735 int i, pf_q, ret = 0;
4737 pf_q = vsi->base_queue;
4738 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4739 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4740 pf_q,
4741 false /*is xdp*/, true);
4742 if (ret)
4743 break;
4745 if (!i40e_enabled_xdp_vsi(vsi))
4746 continue;
4748 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4749 pf_q + vsi->alloc_queue_pairs,
4750 true /*is xdp*/, true);
4751 if (ret)
4752 break;
4755 return ret;
4758 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4759 * @pf: the PF being configured
4760 * @pf_q: the PF queue
4761 * @enable: enable or disable state of the queue
4763 * This routine will wait for the given Rx queue of the PF to reach the
4764 * enabled or disabled state.
4765 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4766 * multiple retries; else will return 0 in case of success.
4768 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4773 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4774 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4775 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4778 usleep_range(10, 20);
4780 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4781 return -ETIMEDOUT;
4783 return 0;
4787 * i40e_control_rx_q - Start or stop a particular Rx queue
4788 * @pf: the PF structure
4789 * @pf_q: the PF queue to configure
4790 * @enable: start or stop the queue
4792 * This function enables or disables a single queue. Note that
4793 * any delay required after the operation is expected to be
4794 * handled by the caller of this function.
4796 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4798 struct i40e_hw *hw = &pf->hw;
4802 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4803 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4804 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4805 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4807 usleep_range(1000, 2000);
4810 /* Skip if the queue is already in the requested state */
4811 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4814 /* turn on/off the queue */
4815 if (enable)
4816 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4817 else
4818 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4820 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4824 * i40e_control_wait_rx_q
4825 * @pf: the PF structure
4826 * @pf_q: queue being configured
4827 * @enable: start or stop the rings
4829 * This function enables or disables a single queue along with waiting
4830 * for the change to finish. The caller of this function should handle
4831 * the delays needed in the case of disabling queues.
4833 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4837 i40e_control_rx_q(pf, pf_q, enable);
4839 /* wait for the change to finish */
4840 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4842 return ret;
4848 * i40e_vsi_enable_rx - Start a VSI's rings
4849 * @vsi: the VSI being configured
4851 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4853 struct i40e_pf *pf = vsi->back;
4854 int i, pf_q, ret = 0;
4856 pf_q = vsi->base_queue;
4857 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4858 ret = i40e_control_wait_rx_q(pf, pf_q, true);
4859 if (ret) {
4860 dev_info(&pf->pdev->dev,
4861 "VSI seid %d Rx ring %d enable timeout\n",
4862 vsi->seid, pf_q);
4863 break;
4867 return ret;
4871 * i40e_vsi_start_rings - Start a VSI's rings
4872 * @vsi: the VSI being configured
4874 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4878 /* do rx first for enable and last for disable */
4879 ret = i40e_vsi_enable_rx(vsi);
4880 if (ret)
4881 return ret;
4882 ret = i40e_vsi_enable_tx(vsi);
4884 return ret;
4887 #define I40E_DISABLE_TX_GAP_MSEC 50
4890 * i40e_vsi_stop_rings - Stop a VSI's rings
4891 * @vsi: the VSI being configured
4893 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4895 struct i40e_pf *pf = vsi->back;
4896 int pf_q, err, q_end;
4898 /* When port TX is suspended, don't wait */
4899 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4900 return i40e_vsi_stop_rings_no_wait(vsi);
4902 q_end = vsi->base_queue + vsi->num_queue_pairs;
4903 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4904 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4906 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4907 err = i40e_control_wait_rx_q(pf, pf_q, false);
4908 if (err)
4909 dev_info(&pf->pdev->dev,
4910 "VSI seid %d Rx ring %d disable timeout\n",
4911 vsi->seid, pf_q);
4914 msleep(I40E_DISABLE_TX_GAP_MSEC);
4916 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4917 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4919 i40e_vsi_wait_queues_disabled(vsi);
4923 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4924 * @vsi: the VSI being shutdown
4926 * This function stops all the rings for a VSI but does not delay to verify
4927 * that rings have been disabled. It is expected that the caller is shutting
4928 * down multiple VSIs at once and will delay together for all the VSIs after
4929 * initiating the shutdown. This is particularly useful for shutting down lots
4930 * of VFs together. Otherwise, a large delay can be incurred while configuring
4931 * each VSI in serial.
4933 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4935 struct i40e_pf *pf = vsi->back;
4938 pf_q = vsi->base_queue;
4939 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4940 i40e_control_tx_q(pf, pf_q, false);
4941 i40e_control_rx_q(pf, pf_q, false);
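/* Editor's illustration (not part of the driver source): the intended caller
 * pattern is to stop a whole batch of VSIs without waiting and then pay the
 * settle delay once, e.g. when tearing down many VFs:
 */
static void example_stop_vsi_batch(struct i40e_vsi **vsis, int n)
{
	int v;

	for (v = 0; v < n; v++)
		i40e_vsi_stop_rings_no_wait(vsis[v]);

	/* one shared delay instead of one per VSI */
	msleep(I40E_DISABLE_TX_GAP_MSEC);
}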
4946 * i40e_vsi_free_irq - Free the irq association with the OS
4947 * @vsi: the VSI being configured
4949 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4951 struct i40e_pf *pf = vsi->back;
4952 struct i40e_hw *hw = &pf->hw;
4953 int base = vsi->base_vector;
4957 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4958 if (!vsi->q_vectors)
4961 if (!vsi->irqs_ready)
4964 vsi->irqs_ready = false;
4965 for (i = 0; i < vsi->num_q_vectors; i++) {
4969 vector = i + base;
4970 irq_num = pf->msix_entries[vector].vector;
4972 /* free only the irqs that were actually requested */
4973 if (!vsi->q_vectors[i] ||
4974 !vsi->q_vectors[i]->num_ringpairs)
4977 /* clear the affinity notifier in the IRQ descriptor */
4978 irq_set_affinity_notifier(irq_num, NULL);
4979 /* remove our suggested affinity mask for this IRQ */
4980 irq_update_affinity_hint(irq_num, NULL);
4981 free_irq(irq_num, vsi->q_vectors[i]);
4983 /* Tear down the interrupt queue link list
4985 * We know that they come in pairs and always
4986 * the Rx first, then the Tx. To clear the
4987 * link list, stick the EOL value into the
4988 * next_q field of the registers.
4990 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4991 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4992 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4993 val |= I40E_QUEUE_END_OF_LIST
4994 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4995 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4997 while (qp != I40E_QUEUE_END_OF_LIST) {
5000 val = rd32(hw, I40E_QINT_RQCTL(qp));
5002 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5003 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5004 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5005 I40E_QINT_RQCTL_INTEVENT_MASK);
5007 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5008 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5010 wr32(hw, I40E_QINT_RQCTL(qp), val);
5012 val = rd32(hw, I40E_QINT_TQCTL(qp));
5014 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
5015 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
5017 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5018 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5019 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5020 I40E_QINT_TQCTL_INTEVENT_MASK);
5022 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5023 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5025 wr32(hw, I40E_QINT_TQCTL(qp), val);
5027 qp = next;
5029 } else {
5030 free_irq(pf->pdev->irq, pf);
5032 val = rd32(hw, I40E_PFINT_LNKLST0);
5033 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
5034 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
5035 val |= I40E_QUEUE_END_OF_LIST
5036 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5037 wr32(hw, I40E_PFINT_LNKLST0, val);
5039 val = rd32(hw, I40E_QINT_RQCTL(qp));
5040 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5041 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5042 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5043 I40E_QINT_RQCTL_INTEVENT_MASK);
5045 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5046 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5048 wr32(hw, I40E_QINT_RQCTL(qp), val);
5050 val = rd32(hw, I40E_QINT_TQCTL(qp));
5052 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5053 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5054 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5055 I40E_QINT_TQCTL_INTEVENT_MASK);
5057 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5058 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5060 wr32(hw, I40E_QINT_TQCTL(qp), val);
5065 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
5066 * @vsi: the VSI being configured
5067 * @v_idx: Index of vector to be freed
5069 * This function frees the memory allocated to the q_vector. In addition if
5070 * NAPI is enabled it will delete any references to the NAPI struct prior
5071 * to freeing the q_vector.
5073 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
5075 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
5076 struct i40e_ring *ring;
5078 if (!q_vector)
5079 return;
5081 /* disassociate q_vector from rings */
5082 i40e_for_each_ring(ring, q_vector->tx)
5083 ring->q_vector = NULL;
5085 i40e_for_each_ring(ring, q_vector->rx)
5086 ring->q_vector = NULL;
5088 /* only VSI w/ an associated netdev is set up w/ NAPI */
5089 if (vsi->netdev)
5090 netif_napi_del(&q_vector->napi);
5092 vsi->q_vectors[v_idx] = NULL;
5094 kfree_rcu(q_vector, rcu);
5098 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
5099 * @vsi: the VSI being un-configured
5101 * This frees the memory allocated to the q_vectors and
5102 * deletes references to the NAPI struct.
5104 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
5108 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
5109 i40e_free_q_vector(vsi, v_idx);
5113 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
5114 * @pf: board private structure
5116 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
5118 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
5119 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5120 pci_disable_msix(pf->pdev);
5121 kfree(pf->msix_entries);
5122 pf->msix_entries = NULL;
5123 kfree(pf->irq_pile);
5124 pf->irq_pile = NULL;
5125 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
5126 pci_disable_msi(pf->pdev);
5128 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
5132 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
5133 * @pf: board private structure
5135 * We go through and clear interrupt specific resources and reset the structure
5136 * to pre-load conditions
5138 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
5142 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
5143 i40e_free_misc_vector(pf);
5145 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
5146 I40E_IWARP_IRQ_PILE_ID);
5148 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
5149 for (i = 0; i < pf->num_alloc_vsi; i++)
5150 if (pf->vsi[i])
5151 i40e_vsi_free_q_vectors(pf->vsi[i]);
5152 i40e_reset_interrupt_capability(pf);
5156 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5157 * @vsi: the VSI being configured
5159 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
5163 if (!vsi->netdev)
5164 return;
5166 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5167 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5169 if (q_vector->rx.ring || q_vector->tx.ring)
5170 napi_enable(&q_vector->napi);
5175 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5176 * @vsi: the VSI being configured
5178 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
5182 if (!vsi->netdev)
5183 return;
5185 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5186 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5188 if (q_vector->rx.ring || q_vector->tx.ring)
5189 napi_disable(&q_vector->napi);
5194 * i40e_vsi_close - Shut down a VSI
5195 * @vsi: the vsi to be quelled
5197 static void i40e_vsi_close(struct i40e_vsi *vsi)
5199 struct i40e_pf *pf = vsi->back;
5200 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
5201 i40e_down(vsi);
5202 i40e_vsi_free_irq(vsi);
5203 i40e_vsi_free_tx_resources(vsi);
5204 i40e_vsi_free_rx_resources(vsi);
5205 vsi->current_netdev_flags = 0;
5206 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
5207 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5208 set_bit(__I40E_CLIENT_RESET, pf->state);
5212 * i40e_quiesce_vsi - Pause a given VSI
5213 * @vsi: the VSI being paused
5215 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
5217 if (test_bit(__I40E_VSI_DOWN, vsi->state))
5218 return;
5220 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
5221 if (vsi->netdev && netif_running(vsi->netdev))
5222 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5223 else
5224 i40e_vsi_close(vsi);
5228 * i40e_unquiesce_vsi - Resume a given VSI
5229 * @vsi: the VSI being resumed
5231 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
5233 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
5236 if (vsi->netdev && netif_running(vsi->netdev))
5237 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5238 else
5239 i40e_vsi_open(vsi); /* this clears the DOWN bit */
5243 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5246 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
5250 for (v = 0; v < pf->num_alloc_vsi; v++) {
5251 if (pf->vsi[v])
5252 i40e_quiesce_vsi(pf->vsi[v]);
5257 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5260 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
5264 for (v = 0; v < pf->num_alloc_vsi; v++) {
5265 if (pf->vsi[v])
5266 i40e_unquiesce_vsi(pf->vsi[v]);
5271 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5272 * @vsi: the VSI being configured
5274 * Wait until all queues on a given VSI have been disabled.
5276 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
5278 struct i40e_pf *pf = vsi->back;
5281 pf_q = vsi->base_queue;
5282 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5283 /* Check and wait for the Tx queue */
5284 ret = i40e_pf_txq_wait(pf, pf_q, false);
5285 if (ret) {
5286 dev_info(&pf->pdev->dev,
5287 "VSI seid %d Tx ring %d disable timeout\n",
5288 vsi->seid, pf_q);
5289 return ret;
5292 if (!i40e_enabled_xdp_vsi(vsi))
5293 goto wait_rx;
5295 /* Check and wait for the XDP Tx queue */
5296 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5297 false);
5298 if (ret) {
5299 dev_info(&pf->pdev->dev,
5300 "VSI seid %d XDP Tx ring %d disable timeout\n",
5301 vsi->seid, pf_q);
5302 return ret;
5304 wait_rx:
5305 /* Check and wait for the Rx queue */
5306 ret = i40e_pf_rxq_wait(pf, pf_q, false);
5307 if (ret) {
5308 dev_info(&pf->pdev->dev,
5309 "VSI seid %d Rx ring %d disable timeout\n",
5310 vsi->seid, pf_q);
5311 return ret;
5315 return 0;
5318 #ifdef CONFIG_I40E_DCB
5320 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5323 * This function waits for the queues to be in disabled state for all the
5324 * VSIs that are managed by this PF.
5326 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5330 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5331 if (pf->vsi[v]) {
5332 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5333 if (ret)
5334 break;
5344 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5345 * @pf: pointer to PF
5347 * Get TC map for iSCSI PF type that will include iSCSI TC
5350 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5352 struct i40e_dcb_app_priority_table app;
5353 struct i40e_hw *hw = &pf->hw;
5354 u8 enabled_tc = 1; /* TC0 is always enabled */
5356 /* Get the iSCSI APP TLV */
5357 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5359 for (i = 0; i < dcbcfg->numapps; i++) {
5360 app = dcbcfg->app[i];
5361 if (app.selector == I40E_APP_SEL_TCPIP &&
5362 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5363 tc = dcbcfg->etscfg.prioritytable[app.priority];
5364 enabled_tc |= BIT(tc);
5365 break;
5370 return enabled_tc;
5373 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5374 * @dcbcfg: the corresponding DCBx configuration structure
5376 * Return the number of TCs from given DCBx configuration
5378 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5380 int i, tc_unused = 0;
5384 /* Scan the ETS Config Priority Table to find
5385 * traffic class enabled for a given priority
5386 * and create a bitmask of enabled TCs
5388 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5389 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5391 /* Now scan the bitmask to check for
5392 * contiguous TCs starting with TC0
5394 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5395 if (num_tc & BIT(i)) {
5396 if (!tc_unused) {
5397 ret++;
5398 } else {
5399 pr_err("Non-contiguous TC - Disabling DCB\n");
5400 return 1;
5402 } else {
5403 tc_unused = 1;
5407 /* There is always at least TC0 */
5408 if (!ret)
5409 ret = 1;
5411 return ret;
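/* Editor's worked example (not part of the driver source): a priority table
 * of {0, 0, 1, 1, 0, 0, 0, 0} sets bits 0 and 1, the scan counts two
 * contiguous TCs and returns 2; a table mapping some priority to TC2 while
 * TC1 is unused would trip the non-contiguous check and fall back to 1 TC.
 * The bitmask build step alone:
 */
static u8 example_tc_mask_from_prio_table(const u8 *prio_table, int n)
{
	u8 mask = 0;
	int i;

	for (i = 0; i < n; i++)
		mask |= BIT(prio_table[i]);
	return mask;
}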
5415 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5416 * @dcbcfg: the corresponding DCBx configuration structure
5418 * Query the current DCB configuration and return the number of
5419 * traffic classes enabled from the given DCBX config
5421 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5423 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5427 for (i = 0; i < num_tc; i++)
5428 enabled_tc |= BIT(i);
5430 return enabled_tc;
5434 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5435 * @pf: PF being queried
5437 * Query the current MQPRIO configuration and return the number of
5438 * traffic classes enabled.
5440 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5442 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5443 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5444 u8 enabled_tc = 1, i;
5446 for (i = 1; i < num_tc; i++)
5447 enabled_tc |= BIT(i);
5449 return enabled_tc;
5452 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5453 * @pf: PF being queried
5455 * Return number of traffic classes enabled for the given PF
5457 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5459 struct i40e_hw *hw = &pf->hw;
5460 u8 i, enabled_tc = 1;
5462 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5464 if (i40e_is_tc_mqprio_enabled(pf))
5465 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5467 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5468 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5469 return 1;
5471 /* SFP mode will be enabled for all TCs on port */
5472 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5473 return i40e_dcb_get_num_tc(dcbcfg);
5475 /* MFP mode return count of enabled TCs for this PF */
5476 if (pf->hw.func_caps.iscsi)
5477 enabled_tc = i40e_get_iscsi_tc_map(pf);
5478 else
5479 return 1; /* Only TC0 */
5481 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5482 if (enabled_tc & BIT(i))
5483 num_tc++;
5486 return num_tc;
5489 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5490 * @pf: PF being queried
5492 * Return a bitmap for enabled traffic classes for this PF.
5494 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5496 if (i40e_is_tc_mqprio_enabled(pf))
5497 return i40e_mqprio_get_enabled_tc(pf);
5499 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5502 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5503 return I40E_DEFAULT_TRAFFIC_CLASS;
5505 /* SFP mode we want PF to be enabled for all TCs */
5506 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5507 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5509 /* MFP enabled and iSCSI PF type */
5510 if (pf->hw.func_caps.iscsi)
5511 return i40e_get_iscsi_tc_map(pf);
5513 return I40E_DEFAULT_TRAFFIC_CLASS;
5517 * i40e_vsi_get_bw_info - Query VSI BW Information
5518 * @vsi: the VSI being queried
5520 * Returns 0 on success, negative value on failure
5522 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5524 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5525 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5526 struct i40e_pf *pf = vsi->back;
5527 struct i40e_hw *hw = &pf->hw;
5532 /* Get the VSI level BW configuration */
5533 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5534 if (ret) {
5535 dev_info(&pf->pdev->dev,
5536 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5537 i40e_stat_str(&pf->hw, ret),
5538 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5539 return -EINVAL;
5542 /* Get the VSI level BW configuration per TC */
5543 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5544 NULL);
5545 if (ret) {
5546 dev_info(&pf->pdev->dev,
5547 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5548 i40e_stat_str(&pf->hw, ret),
5549 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5550 return -EINVAL;
5553 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5554 dev_info(&pf->pdev->dev,
5555 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5556 bw_config.tc_valid_bits,
5557 bw_ets_config.tc_valid_bits);
5558 /* Still continuing */
5561 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5562 vsi->bw_max_quanta = bw_config.max_bw;
5563 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5564 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5565 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5566 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5567 vsi->bw_ets_limit_credits[i] =
5568 le16_to_cpu(bw_ets_config.credits[i]);
5569 /* 3 bits out of 4 for each TC */
5570 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5573 return 0;
5577 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5578 * @vsi: the VSI being configured
5579 * @enabled_tc: TC bitmap
5580 * @bw_share: BW shared credits per TC
5582 * Returns 0 on success, negative value on failure
5584 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5587 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5588 struct i40e_pf *pf = vsi->back;
5592 /* There is no need to reset BW when mqprio mode is on. */
5593 if (i40e_is_tc_mqprio_enabled(pf))
5594 return 0;
5595 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5596 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5597 if (ret) {
5598 dev_info(&pf->pdev->dev,
5599 "Failed to reset tx rate for vsi->seid %u\n",
5600 vsi->seid);
5601 return ret;
5603 memset(&bw_data, 0, sizeof(bw_data));
5604 bw_data.tc_valid_bits = enabled_tc;
5605 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5606 bw_data.tc_bw_credits[i] = bw_share[i];
5608 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5609 if (ret) {
5610 dev_info(&pf->pdev->dev,
5611 "AQ command Config VSI BW allocation per TC failed = %d\n",
5612 pf->hw.aq.asq_last_status);
5613 return -EINVAL;
5616 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5617 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5619 return 0;
5623 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5624 * @vsi: the VSI being configured
5625 * @enabled_tc: TC map to be enabled
5628 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5630 struct net_device *netdev = vsi->netdev;
5631 struct i40e_pf *pf = vsi->back;
5632 struct i40e_hw *hw = &pf->hw;
5635 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5637 if (!netdev)
5638 return;
5640 if (!enabled_tc) {
5641 netdev_reset_tc(netdev);
5642 return;
5645 /* Set up actual enabled TCs on the VSI */
5646 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5647 return;
5649 /* set per TC queues for the VSI */
5650 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5651 /* Only set TC queues for enabled tcs
5653 * e.g. For a VSI that has TC0 and TC3 enabled the
5654 * enabled_tc bitmap would be 0b1001; the driver
5655 * will set numtc for the netdev to 2, and the netdev
5656 * layer will reference them as TC 0 and 1.
5658 if (vsi->tc_config.enabled_tc & BIT(i))
5659 netdev_set_tc_queue(netdev,
5660 vsi->tc_config.tc_info[i].netdev_tc,
5661 vsi->tc_config.tc_info[i].qcount,
5662 vsi->tc_config.tc_info[i].qoffset);
5665 if (i40e_is_tc_mqprio_enabled(pf))
5666 return;
5668 /* Assign UP2TC map for the VSI */
5669 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5670 /* Get the actual TC# for the UP */
5671 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5672 /* Get the mapped netdev TC# for the UP */
5673 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5674 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5679 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5680 * @vsi: the VSI being configured
5681 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5683 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5684 struct i40e_vsi_context *ctxt)
5686 /* copy just the sections touched not the entire info
5687 * since not all sections are valid as returned by
5688 * update vsi params
5690 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5691 memcpy(&vsi->info.queue_mapping,
5692 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5693 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5694 sizeof(vsi->info.tc_mapping));
5698 * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5699 * @vsi: the VSI being reconfigured
5700 * @vsi_offset: offset from main VF VSI
5702 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
5704 struct i40e_vsi_context ctxt = {};
5709 if (!vsi)
5710 return I40E_ERR_PARAM;
5711 pf = vsi->back;
5712 hw = &pf->hw;
5714 ctxt.seid = vsi->seid;
5715 ctxt.pf_num = hw->pf_id;
5716 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
5717 ctxt.uplink_seid = vsi->uplink_seid;
5718 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5719 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5720 ctxt.info = vsi->info;
5722 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
5723 false);
5724 if (vsi->reconfig_rss) {
5725 vsi->rss_size = min_t(int, pf->alloc_rss_size,
5726 vsi->num_queue_pairs);
5727 ret = i40e_vsi_config_rss(vsi);
5728 if (ret) {
5729 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
5730 return ret;
5732 vsi->reconfig_rss = false;
5735 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5736 if (ret) {
5737 dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
5738 i40e_stat_str(hw, ret),
5739 i40e_aq_str(hw, hw->aq.asq_last_status));
5740 return ret;
5742 /* update the local VSI info with updated queue map */
5743 i40e_vsi_update_queue_map(vsi, &ctxt);
5744 vsi->info.valid_sections = 0;
5746 return ret;
5750 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5751 * @vsi: VSI to be configured
5752 * @enabled_tc: TC bitmap
5754 * This configures a particular VSI for TCs that are mapped to the
5755 * given TC bitmap. It uses default bandwidth share for TCs across
5756 * VSIs to configure TC for a particular VSI.
5759 * It is expected that the VSI queues have been quiesced before calling
5760 * this function.
5762 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5764 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5765 struct i40e_pf *pf = vsi->back;
5766 struct i40e_hw *hw = &pf->hw;
5767 struct i40e_vsi_context ctxt;
5771 /* Check if enabled_tc is same as existing or new TCs */
5772 if (vsi->tc_config.enabled_tc == enabled_tc &&
5773 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5774 return ret;
5776 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5777 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5778 if (enabled_tc & BIT(i))
5779 bw_share[i] = 1;
5782 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5784 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5786 dev_info(&pf->pdev->dev,
5787 "Failed configuring TC map %d for VSI %d\n",
5788 enabled_tc, vsi->seid);
5789 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5790 &bw_config, NULL);
5791 if (ret) {
5792 dev_info(&pf->pdev->dev,
5793 "Failed querying vsi bw info, err %s aq_err %s\n",
5794 i40e_stat_str(hw, ret),
5795 i40e_aq_str(hw, hw->aq.asq_last_status));
5796 goto out;
5798 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5799 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5801 if (!valid_tc)
5802 valid_tc = bw_config.tc_valid_bits;
5803 /* Always enable TC0, no matter what */
5804 valid_tc |= BIT(0);
5805 dev_info(&pf->pdev->dev,
5806 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5807 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5808 enabled_tc = valid_tc;
5811 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5812 if (ret) {
5813 dev_err(&pf->pdev->dev,
5814 "Unable to configure TC map %d for VSI %d\n",
5815 enabled_tc, vsi->seid);
5816 goto out;
5820 /* Update Queue Pairs Mapping for currently enabled UPs */
5821 ctxt.seid = vsi->seid;
5822 ctxt.pf_num = vsi->back->hw.pf_id;
5823 ctxt.vf_num = 0;
5824 ctxt.uplink_seid = vsi->uplink_seid;
5825 ctxt.info = vsi->info;
5826 if (i40e_is_tc_mqprio_enabled(pf)) {
5827 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5828 if (ret)
5829 goto out;
5830 } else {
5831 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5834 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5835 * queues changed.
5837 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5838 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5839 vsi->num_queue_pairs);
5840 ret = i40e_vsi_config_rss(vsi);
5841 if (ret) {
5842 dev_info(&vsi->back->pdev->dev,
5843 "Failed to reconfig rss for num_queues\n");
5844 return ret;
5846 vsi->reconfig_rss = false;
5848 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5849 ctxt.info.valid_sections |=
5850 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5851 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5854 /* Update the VSI after updating the VSI queue-mapping
5855 * information
5857 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5858 if (ret) {
5859 dev_info(&pf->pdev->dev,
5860 "Update vsi tc config failed, err %s aq_err %s\n",
5861 i40e_stat_str(hw, ret),
5862 i40e_aq_str(hw, hw->aq.asq_last_status));
5863 goto out;
5865 /* update the local VSI info with updated queue map */
5866 i40e_vsi_update_queue_map(vsi, &ctxt);
5867 vsi->info.valid_sections = 0;
5869 /* Update current VSI BW information */
5870 ret = i40e_vsi_get_bw_info(vsi);
5871 if (ret) {
5872 dev_info(&pf->pdev->dev,
5873 "Failed updating vsi bw info, err %s aq_err %s\n",
5874 i40e_stat_str(hw, ret),
5875 i40e_aq_str(hw, hw->aq.asq_last_status));
5876 goto out;
5879 /* Update the netdev TC setup */
5880 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5881 out:
5882 return ret;
5886 * i40e_get_link_speed - Returns link speed for the interface
5887 * @vsi: VSI to be configured
5890 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5892 struct i40e_pf *pf = vsi->back;
5894 switch (pf->hw.phy.link_info.link_speed) {
5895 case I40E_LINK_SPEED_40GB:
5896 return 40000;
5897 case I40E_LINK_SPEED_25GB:
5898 return 25000;
5899 case I40E_LINK_SPEED_20GB:
5900 return 20000;
5901 case I40E_LINK_SPEED_10GB:
5902 return 10000;
5903 case I40E_LINK_SPEED_1GB:
5904 return 1000;
5905 default:
5906 return -EINVAL;
5911 * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
5912 * @vsi: Pointer to vsi structure
5913 * @max_tx_rate: max TX rate in bytes to be converted into Mbits
5915 * Helper function to convert units before send to set BW limit
5917 static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
5919 if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
5920 dev_warn(&vsi->back->pdev->dev,
5921 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5922 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5923 } else {
5924 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
5926 return max_tx_rate;
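/* Editor's worked example (assuming I40E_BW_MBPS_DIVISOR is the 125000
 * bytes-per-second-per-Mbps conversion factor, which is not shown in this
 * excerpt): a request of 1,250,000,000 bytes/s divides down to 10,000 Mbps,
 * while anything under one quantum is clamped to the 50 Mbps minimum warned
 * about above. A hypothetical standalone version of the conversion:
 */
static u64 example_bytes_to_mbits(u64 rate_bytes)
{
	do_div(rate_bytes, I40E_BW_MBPS_DIVISOR);
	return rate_bytes ? rate_bytes : I40E_BW_CREDIT_DIVISOR;
}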
5931 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5932 * @vsi: VSI to be configured
5933 * @seid: seid of the channel/VSI
5934 * @max_tx_rate: max TX rate to be configured as BW limit
5936 * Helper function to set BW limit for a given VSI
5938 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5940 struct i40e_pf *pf = vsi->back;
5945 speed = i40e_get_link_speed(vsi);
5946 if (max_tx_rate > speed) {
5947 dev_err(&pf->pdev->dev,
5948 "Invalid max tx rate %llu specified for VSI seid %d.",
5952 if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
5953 dev_warn(&pf->pdev->dev,
5954 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5955 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5958 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5959 credits = max_tx_rate;
5960 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5961 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5962 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5963 if (ret)
5964 dev_err(&pf->pdev->dev,
5965 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5966 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5967 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5968 return ret;
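/* Editor's worked example (not part of the driver source): credits are
 * 50 Mbps quanta per the comment above, so a 300 Mbps cap becomes
 * 300 / I40E_BW_CREDIT_DIVISOR = 6 credits, and a non-zero request below
 * 50 Mbps is first raised to one full credit:
 */
static u64 example_rate_to_credits(u64 max_tx_rate_mbps)
{
	do_div(max_tx_rate_mbps, I40E_BW_CREDIT_DIVISOR);
	return max_tx_rate_mbps;
}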
5972 * i40e_remove_queue_channels - Remove queue channels for the TCs
5973 * @vsi: VSI to be configured
5975 * Remove queue channels for the TCs
5977 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5979 enum i40e_admin_queue_err last_aq_status;
5980 struct i40e_cloud_filter *cfilter;
5981 struct i40e_channel *ch, *ch_tmp;
5982 struct i40e_pf *pf = vsi->back;
5983 struct hlist_node *node;
5986 /* Reset rss size that was stored when reconfiguring rss for
5987 * channel VSIs with non-power-of-2 queue count.
5989 vsi->current_rss_size = 0;
5991 /* perform cleanup for channels if they exist */
5992 if (list_empty(&vsi->ch_list))
5995 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5996 struct i40e_vsi *p_vsi;
5998 list_del(&ch->list);
5999 p_vsi = ch->parent_vsi;
6000 if (!p_vsi || !ch->initialized) {
6001 kfree(ch);
6002 continue;
6004 /* Reset queue contexts */
6005 for (i = 0; i < ch->num_queue_pairs; i++) {
6006 struct i40e_ring *tx_ring, *rx_ring;
6009 pf_q = ch->base_queue + i;
6010 tx_ring = vsi->tx_rings[pf_q];
6011 tx_ring->ch = NULL;
6013 rx_ring = vsi->rx_rings[pf_q];
6014 rx_ring->ch = NULL;
6017 /* Reset BW configured for this VSI via mqprio */
6018 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
6019 if (ret)
6020 dev_info(&vsi->back->pdev->dev,
6021 "Failed to reset tx rate for ch->seid %u\n",
6022 ch->seid);
6024 /* delete cloud filters associated with this channel */
6025 hlist_for_each_entry_safe(cfilter, node,
6026 &pf->cloud_filter_list, cloud_node) {
6027 if (cfilter->seid != ch->seid)
6028 continue;
6030 hash_del(&cfilter->cloud_node);
6031 if (cfilter->dst_port)
6032 ret = i40e_add_del_cloud_filter_big_buf(vsi,
6033 cfilter,
6034 false);
6035 else
6036 ret = i40e_add_del_cloud_filter(vsi, cfilter,
6037 false);
6038 last_aq_status = pf->hw.aq.asq_last_status;
6039 if (ret)
6040 dev_info(&pf->pdev->dev,
6041 "Failed to delete cloud filter, err %s aq_err %s\n",
6042 i40e_stat_str(&pf->hw, ret),
6043 i40e_aq_str(&pf->hw, last_aq_status));
6044 kfree(cfilter);
6047 /* delete VSI from FW */
6048 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
6049 NULL);
6050 if (ret)
6051 dev_err(&vsi->back->pdev->dev,
6052 "unable to remove channel (%d) for parent VSI(%d)\n",
6053 ch->seid, p_vsi->seid);
6054 kfree(ch);
6056 INIT_LIST_HEAD(&vsi->ch_list);
6060 * i40e_get_max_queues_for_channel
6061 * @vsi: ptr to VSI to which channels are associated with
6063 * Helper function which returns max value among the queue counts set on the
6064 * channels/TCs created.
6066 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
6068 struct i40e_channel *ch, *ch_tmp;
6071 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
6072 if (!ch->initialized)
6073 continue;
6074 if (ch->num_queue_pairs > max)
6075 max = ch->num_queue_pairs;
6078 return max;
6082 * i40e_validate_num_queues - validate num_queues w.r.t channel
6083 * @pf: ptr to PF device
6084 * @num_queues: number of queues
6085 * @vsi: the parent VSI
6086 * @reconfig_rss: indicates whether RSS should be reconfigured or not
6088 * This function validates number of queues in the context of new channel
6089 * which is being established and determines if RSS should be reconfigured
6090 * or not for parent VSI.
6092 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
6093 struct i40e_vsi *vsi, bool *reconfig_rss)
6100 *reconfig_rss = false;
6101 if (vsi->current_rss_size) {
6102 if (num_queues > vsi->current_rss_size) {
6103 dev_dbg(&pf->pdev->dev,
6104 "Error: num_queues (%d) > vsi's current_size(%d)\n",
6105 num_queues, vsi->current_rss_size);
6106 return -EINVAL;
6107 } else if ((num_queues < vsi->current_rss_size) &&
6108 (!is_power_of_2(num_queues))) {
6109 dev_dbg(&pf->pdev->dev,
6110 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
6111 num_queues, vsi->current_rss_size);
6112 return -EINVAL;
6116 if (!is_power_of_2(num_queues)) {
6117 /* Find the max num_queues configured for any existing
6119 * channel; if channels exist, enforce 'num_queues' to be
6120 * at least the max ever configured for a channel.
6122 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
6123 if (num_queues < max_ch_queues) {
6124 dev_dbg(&pf->pdev->dev,
6125 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
6126 num_queues, max_ch_queues);
6127 return -EINVAL;
6129 *reconfig_rss = true;
6133 return 0;
6136 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
6137 * @vsi: the VSI being setup
6138 * @rss_size: size of RSS; the LUT is reprogrammed accordingly
6140 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
6142 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
6144 struct i40e_pf *pf = vsi->back;
6145 u8 seed[I40E_HKEY_ARRAY_SIZE];
6146 struct i40e_hw *hw = &pf->hw;
6154 if (rss_size > vsi->rss_size)
6155 return -EINVAL;
6157 local_rss_size = min_t(int, vsi->rss_size, rss_size);
6158 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
6159 if (!lut)
6160 return -ENOMEM;
6162 /* Ignoring user configured lut if there is one */
6163 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
6165 /* Use user configured hash key if there is one, otherwise
6168 if (vsi->rss_hkey_user)
6169 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
6171 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
6173 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
6174 if (ret) {
6175 dev_info(&pf->pdev->dev,
6176 "Cannot set RSS lut, err %s aq_err %s\n",
6177 i40e_stat_str(hw, ret),
6178 i40e_aq_str(hw, hw->aq.asq_last_status));
6179 kfree(lut);
6180 return ret;
6182 kfree(lut);
6184 /* Do the update w.r.t. storing rss_size */
6185 if (!vsi->orig_rss_size)
6186 vsi->orig_rss_size = vsi->rss_size;
6187 vsi->current_rss_size = local_rss_size;
6189 return 0;
6193 * i40e_channel_setup_queue_map - Setup a channel queue map
6194 * @pf: ptr to PF device
6195 * @ctxt: VSI context structure
6196 * @ch: ptr to channel structure
6198 * Setup queue map for a specific channel
6200 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
6201 struct i40e_vsi_context *ctxt,
6202 struct i40e_channel *ch)
6204 u16 qcount, qmap, sections = 0;
6208 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
6209 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
6211 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
6212 ch->num_queue_pairs = qcount;
6214 /* find the next higher power-of-2 of num queue pairs */
6215 pow = ilog2(qcount);
6216 if (!is_power_of_2(qcount))
6217 pow++;
6219 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
6220 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
6222 /* Setup queue TC[0].qmap for given VSI context */
6223 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
6225 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
6226 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
6227 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
6228 ctxt->info.valid_sections |= cpu_to_le16(sections);
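/* Editor's worked example (not part of the driver source): a channel asking
 * for 6 queue pairs keeps qcount = 6 (if pf->num_lan_msix allows), ilog2(6)
 * is 2 and 6 is not a power of two, so pow becomes 3 and TC0's qmap
 * advertises a contiguous block of 2^3 = 8 queues at offset 0:
 */
static u16 example_tc0_qmap(u16 qcount)
{
	int pow = ilog2(qcount);

	if (!is_power_of_2(qcount))
		pow++;
	return (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
}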
6232 * i40e_add_channel - add a channel by adding VSI
6233 * @pf: ptr to PF device
6234 * @uplink_seid: underlying HW switching element (VEB) ID
6235 * @ch: ptr to channel structure
6237 * Add a channel (VSI) using add_vsi and queue_map
6239 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
6240 struct i40e_channel *ch)
6242 struct i40e_hw *hw = &pf->hw;
6243 struct i40e_vsi_context ctxt;
6244 u8 enabled_tc = 0x1; /* TC0 enabled */
6247 if (ch->type != I40E_VSI_VMDQ2) {
6248 dev_info(&pf->pdev->dev,
6249 "add new vsi failed, ch->type %d\n", ch->type);
6253 memset(&ctxt, 0, sizeof(ctxt));
6254 ctxt.pf_num = hw->pf_id;
6255 ctxt.vf_num = 0;
6256 ctxt.uplink_seid = uplink_seid;
6257 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
6258 if (ch->type == I40E_VSI_VMDQ2)
6259 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6261 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
6262 ctxt.info.valid_sections |=
6263 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6264 ctxt.info.switch_id =
6265 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6268 /* Set queue map for a given VSI context */
6269 i40e_channel_setup_queue_map(pf, &ctxt, ch);
6271 /* Now time to create VSI */
6272 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6273 if (ret) {
6274 dev_info(&pf->pdev->dev,
6275 "add new vsi failed, err %s aq_err %s\n",
6276 i40e_stat_str(&pf->hw, ret),
6277 i40e_aq_str(&pf->hw,
6278 pf->hw.aq.asq_last_status));
6279 return -ENOENT;
6282 /* Success, update channel, set enabled_tc only if the channel
6285 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
6286 ch->seid = ctxt.seid;
6287 ch->vsi_number = ctxt.vsi_number;
6288 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
/* copy just the sections touched, not the entire info,
 * since not all sections are valid as returned by
 * the add VSI AQ command
 */
6294 ch->info.mapping_flags = ctxt.info.mapping_flags;
6295 memcpy(&ch->info.queue_mapping,
6296 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
6297 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
6298 sizeof(ctxt.info.tc_mapping));
6303 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
6306 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
6310 memset(&bw_data, 0, sizeof(bw_data));
6311 bw_data.tc_valid_bits = ch->enabled_tc;
6312 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6313 bw_data.tc_bw_credits[i] = bw_share[i];
6315 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
6318 dev_info(&vsi->back->pdev->dev,
6319 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
6320 vsi->back->hw.aq.asq_last_status, ch->seid);
6324 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6325 ch->info.qs_handle[i] = bw_data.qs_handles[i];
6331 * i40e_channel_config_tx_ring - config TX ring associated with new channel
6332 * @pf: ptr to PF device
6333 * @vsi: the VSI being setup
6334 * @ch: ptr to channel structure
 * Configure the TX rings associated with the channel (VSI), since its
 * queues are carved out of the parent VSI
 */
6339 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6340 struct i40e_vsi *vsi,
6341 struct i40e_channel *ch)
6345 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6347 /* Enable ETS TCs with equal BW Share for now across all VSIs */
6348 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6349 if (ch->enabled_tc & BIT(i))
6353 /* configure BW for new VSI */
6354 ret = i40e_channel_config_bw(vsi, ch, bw_share);
6356 dev_info(&vsi->back->pdev->dev,
6357 "Failed configuring TC map %d for channel (seid %u)\n",
6358 ch->enabled_tc, ch->seid);
6362 for (i = 0; i < ch->num_queue_pairs; i++) {
6363 struct i40e_ring *tx_ring, *rx_ring;
6366 pf_q = ch->base_queue + i;
/* Get the TX ring pointer of the main VSI, in order to re-set up the
 * TX queue context
 */
6371 tx_ring = vsi->tx_rings[pf_q];
6374 /* Get the RX ring ptr */
6375 rx_ring = vsi->rx_rings[pf_q];
6383 * i40e_setup_hw_channel - setup new channel
6384 * @pf: ptr to PF device
6385 * @vsi: the VSI being setup
6386 * @ch: ptr to channel structure
6387 * @uplink_seid: underlying HW switching element (VEB) ID
6388 * @type: type of channel to be created (VMDq2/VF)
 * Set up a new channel (VSI) based on the specified type (VMDq2/VF)
 * and configure its TX rings accordingly
 */
6393 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6394 struct i40e_vsi *vsi,
6395 struct i40e_channel *ch,
6396 u16 uplink_seid, u8 type)
6400 ch->initialized = false;
6401 ch->base_queue = vsi->next_base_queue;
6404 /* Proceed with creation of channel (VMDq2) VSI */
6405 ret = i40e_add_channel(pf, uplink_seid, ch);
6407 dev_info(&pf->pdev->dev,
6408 "failed to add_channel using uplink_seid %u\n",
6413 /* Mark the successful creation of channel */
6414 ch->initialized = true;
6416 /* Reconfigure TX queues using QTX_CTL register */
6417 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6419 dev_info(&pf->pdev->dev,
6420 "failed to configure TX rings for channel %u\n",
6425 /* update 'next_base_queue' */
vsi->next_base_queue += ch->num_queue_pairs;
6427 dev_dbg(&pf->pdev->dev,
"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, vsi->next_base_queue %d\n",
6429 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6430 ch->num_queue_pairs,
6431 vsi->next_base_queue);
6436 * i40e_setup_channel - setup new channel using uplink element
6437 * @pf: ptr to PF device
6438 * @vsi: pointer to the VSI to set up the channel within
6439 * @ch: ptr to channel structure
 * Set up a new channel (VSI) based on the specified type (VMDq2/VF)
 * and the uplink switching element (uplink_seid)
 */
6444 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6445 struct i40e_channel *ch)
6451 if (vsi->type == I40E_VSI_MAIN) {
6452 vsi_type = I40E_VSI_VMDQ2;
6454 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6459 /* underlying switching element */
6460 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6462 /* create channel (VSI), configure TX rings */
6463 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6465 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
return ch->initialized;
6473 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6474 * @vsi: ptr to VSI which has PF backing
 * Sets up the switch mode if it needs to be changed, restricting it to
 * the set of allowed modes.
 */
6479 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6482 struct i40e_pf *pf = vsi->back;
6483 struct i40e_hw *hw = &pf->hw;
6486 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6490 if (hw->dev_caps.switch_mode) {
6491 /* if switch mode is set, support mode2 (non-tunneled for
6492 * cloud filter) for now
6494 u32 switch_mode = hw->dev_caps.switch_mode &
6495 I40E_SWITCH_MODE_MASK;
6496 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6497 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6499 dev_err(&pf->pdev->dev,
6500 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6501 hw->dev_caps.switch_mode);
6506 /* Set Bit 7 to be valid */
6507 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6509 /* Set L4type for TCP support */
6510 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6512 /* Set cloud filter mode */
6513 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6515 /* Prep mode field for set_switch_config */
6516 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6517 pf->last_sw_conf_valid_flags,
6519 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6520 dev_err(&pf->pdev->dev,
6521 "couldn't set switch config bits, err %s aq_err %s\n",
6522 i40e_stat_str(hw, ret),
6524 hw->aq.asq_last_status));
6530 * i40e_create_queue_channel - function to create channel
6531 * @vsi: VSI to be configured
6532 * @ch: ptr to channel (it contains channel specific params)
 * This function creates a channel (VSI) using the num_queues specified by
 * the user and reconfigures RSS if needed.
 */
6537 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6538 struct i40e_channel *ch)
6540 struct i40e_pf *pf = vsi->back;
6547 if (!ch->num_queue_pairs) {
6548 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6549 ch->num_queue_pairs);
6553 /* validate user requested num_queues for channel */
6554 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6557 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6558 ch->num_queue_pairs);
/* By default we are in VEPA mode; if this is the first VF/VMDq
 * VSI to be added, switch to VEB mode.
 */
6566 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6567 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6569 if (vsi->type == I40E_VSI_MAIN) {
6570 if (i40e_is_tc_mqprio_enabled(pf))
6571 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
6573 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
/* From now on, the main VSI's number of queues is the value
 * of TC0's queue count
 */
/* By this time vsi->cnt_q_avail should be non-zero and
 * at least num_queues
 */
6583 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6584 dev_dbg(&pf->pdev->dev,
6585 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6586 vsi->cnt_q_avail, ch->num_queue_pairs);
6590 /* reconfig_rss only if vsi type is MAIN_VSI */
6591 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6592 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6594 dev_info(&pf->pdev->dev,
6595 "Error: unable to reconfig rss for num_queues (%u)\n",
6596 ch->num_queue_pairs);
6601 if (!i40e_setup_channel(pf, vsi, ch)) {
6602 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6606 dev_info(&pf->pdev->dev,
6607 "Setup channel (id:%u) utilizing num_queues %d\n",
6608 ch->seid, ch->num_queue_pairs);
6610 /* configure VSI for BW limit */
6611 if (ch->max_tx_rate) {
6612 u64 credits = ch->max_tx_rate;
6614 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6617 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6618 dev_dbg(&pf->pdev->dev,
6619 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6625 /* in case of VF, this will be main SRIOV VSI */
6626 ch->parent_vsi = vsi;
/* and update the main VSI's available queue count */
6629 vsi->cnt_q_avail -= ch->num_queue_pairs;
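/* Worked example for the conversion above, using the driver's 50 Mbps
 * credit granularity: a 500 Mbps cap becomes
 * 500 / I40E_BW_CREDIT_DIVISOR = 10 scheduler credits, and any rate
 * below 50 Mbps rounds down to zero credits.
 */
static inline u64 i40e_example_bw_credits(u64 max_tx_rate)
{
	u64 credits = max_tx_rate;	/* rate in Mbps */

	do_div(credits, I40E_BW_CREDIT_DIVISOR);
	return credits;
}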
6635 * i40e_configure_queue_channels - Add queue channel for the given TCs
6636 * @vsi: VSI to be configured
6638 * Configures queue channel mapping to the given TCs
6640 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6642 struct i40e_channel *ch;
6646 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6647 vsi->tc_seid_map[0] = vsi->seid;
6648 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6649 if (vsi->tc_config.enabled_tc & BIT(i)) {
6650 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6656 INIT_LIST_HEAD(&ch->list);
6657 ch->num_queue_pairs =
6658 vsi->tc_config.tc_info[i].qcount;
6660 vsi->tc_config.tc_info[i].qoffset;
/* Bandwidth limit through the tc interface is in bytes/s;
 * convert it to Mbps
 */
6665 max_rate = vsi->mqprio_qopt.max_rate[i];
6666 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6667 ch->max_tx_rate = max_rate;
6669 list_add_tail(&ch->list, &vsi->ch_list);
6671 ret = i40e_create_queue_channel(vsi, ch);
6673 dev_err(&vsi->back->pdev->dev,
6674 "Failed creating queue channel with TC%d: queues %d\n",
6675 i, ch->num_queue_pairs);
6678 vsi->tc_seid_map[i] = ch->seid;
6682 /* reset to reconfigure TX queue contexts */
6683 i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
6687 i40e_remove_queue_channels(vsi);
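/* Sketch of the mqprio rate conversion used above: the tc interface
 * reports bandwidth in bytes per second while the firmware expects
 * Mbps, so the divisor folds in both the *8 bit conversion and the
 * 10^6 scaling (e.g. 125000000 B/s comes out as 1000 Mbps).
 */
static inline u64 i40e_example_bytes_to_mbps(u64 bytes_per_sec)
{
	do_div(bytes_per_sec, I40E_BW_MBPS_DIVISOR);
	return bytes_per_sec;
}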
6692 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: pointer to the VEB being configured
 * @enabled_tc: TC bitmap
6696 * Configures given TC bitmap for VEB (switching) element
6698 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6700 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6701 struct i40e_pf *pf = veb->pf;
/* If there are no TCs or the TCs are already enabled, just return */
6706 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6709 bw_data.tc_valid_bits = enabled_tc;
6710 /* bw_data.absolute_credits is not set (relative) */
6712 /* Enable ETS TCs with equal BW Share for now */
6713 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6714 if (enabled_tc & BIT(i))
6715 bw_data.tc_bw_share_credits[i] = 1;
6718 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6721 dev_info(&pf->pdev->dev,
6722 "VEB bw config failed, err %s aq_err %s\n",
6723 i40e_stat_str(&pf->hw, ret),
6724 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6728 /* Update the BW information */
6729 ret = i40e_veb_get_bw_info(veb);
6731 dev_info(&pf->pdev->dev,
6732 "Failed getting veb bw config, err %s aq_err %s\n",
6733 i40e_stat_str(&pf->hw, ret),
6734 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6741 #ifdef CONFIG_I40E_DCB
6743 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * Reconfigure VEBs/VSIs on a given PF. It is assumed that
 * the caller has already quiesced all the VSIs before calling
 * this function.
 */
6750 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6756 /* Enable the TCs available on PF to all VEBs */
6757 tc_map = i40e_pf_get_tc_map(pf);
6758 if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6761 for (v = 0; v < I40E_MAX_VEB; v++) {
6764 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6766 dev_info(&pf->pdev->dev,
6767 "Failed configuring TC for VEB seid=%d\n",
/* Keep going and try to configure as many components as possible */
6773 /* Update each VSI */
6774 for (v = 0; v < pf->num_alloc_vsi; v++) {
6778 /* - Enable all TCs for the LAN VSI
6779 * - For all others keep them at TC0 for now
6781 if (v == pf->lan_vsi)
6782 tc_map = i40e_pf_get_tc_map(pf);
6784 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6786 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6788 dev_info(&pf->pdev->dev,
6789 "Failed configuring TC for VSI seid=%d\n",
/* Keep going and try to configure as many components as possible */
6793 /* Re-configure VSI vectors based on updated TC map */
6794 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6795 if (pf->vsi[v]->netdev)
6796 i40e_dcbnl_set_all(pf->vsi[v]);
6802 * i40e_resume_port_tx - Resume port Tx
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 */
6808 static int i40e_resume_port_tx(struct i40e_pf *pf)
6810 struct i40e_hw *hw = &pf->hw;
6813 ret = i40e_aq_resume_port_tx(hw, NULL);
6815 dev_info(&pf->pdev->dev,
6816 "Resume Port Tx failed, err %s aq_err %s\n",
6817 i40e_stat_str(&pf->hw, ret),
6818 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6819 /* Schedule PF reset to recover */
6820 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6821 i40e_service_event_schedule(pf);
6828 * i40e_suspend_port_tx - Suspend port Tx
6831 * Suspend a port's Tx and issue a PF reset in case of failure.
6833 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6835 struct i40e_hw *hw = &pf->hw;
6838 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6840 dev_info(&pf->pdev->dev,
6841 "Suspend Port Tx failed, err %s aq_err %s\n",
6842 i40e_stat_str(&pf->hw, ret),
6843 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6844 /* Schedule PF reset to recover */
6845 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6846 i40e_service_event_schedule(pf);
6853 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6854 * @pf: PF being configured
6855 * @new_cfg: New DCBX configuration
6857 * Program DCB settings into HW and reconfigure VEB/VSIs on
6858 * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6860 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6861 struct i40e_dcbx_config *new_cfg)
6863 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6866 /* Check if need reconfiguration */
if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6868 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
/* The config is changing; disable all VSIs */
6873 i40e_pf_quiesce_all_vsi(pf);
6875 /* Copy the new config to the current config */
6876 *old_cfg = *new_cfg;
6877 old_cfg->etsrec = old_cfg->etscfg;
6878 ret = i40e_set_dcb_config(&pf->hw);
6880 dev_info(&pf->pdev->dev,
6881 "Set DCB Config failed, err %s aq_err %s\n",
6882 i40e_stat_str(&pf->hw, ret),
6883 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6887 /* Changes in configuration update VEB/VSI */
6888 i40e_dcb_reconfigure(pf);
6890 /* In case of reset do not try to resume anything */
6891 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6892 /* Re-start the VSIs if disabled */
6893 ret = i40e_resume_port_tx(pf);
6894 /* In case of error no point in resuming VSIs */
6897 i40e_pf_unquiesce_all_vsi(pf);
6904 * i40e_hw_dcb_config - Program new DCBX settings into HW
6905 * @pf: PF being configured
6906 * @new_cfg: New DCBX configuration
6908 * Program DCB settings into HW and reconfigure VEB/VSIs on
6911 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6913 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6914 u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6915 u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6916 struct i40e_dcbx_config *old_cfg;
6917 u8 mode[I40E_MAX_TRAFFIC_CLASS];
6918 struct i40e_rx_pb_config pb_cfg;
6919 struct i40e_hw *hw = &pf->hw;
6920 u8 num_ports = hw->num_ports;
6928 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
/* Un-pack the information needed to program the ETS HW via the shared API:
6932 * ETS/NON-ETS arbiter mode
6933 * max exponent (credit refills)
6934 * Total number of ports
6935 * PFC priority bit-map
6938 * Arbiter mode between UPs sharing same TC
6939 * TSA table (ETS or non-ETS)
6940 * EEE enabled or not
6944 new_numtc = i40e_dcb_get_num_tc(new_cfg);
6946 memset(&ets_data, 0, sizeof(ets_data));
6947 for (i = 0; i < new_numtc; i++) {
6949 switch (new_cfg->etscfg.tsatable[i]) {
6950 case I40E_IEEE_TSA_ETS:
6951 prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6952 ets_data.tc_bw_share_credits[i] =
6953 new_cfg->etscfg.tcbwtable[i];
6955 case I40E_IEEE_TSA_STRICT:
6956 prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6958 ets_data.tc_bw_share_credits[i] =
6959 I40E_DCB_STRICT_PRIO_CREDITS;
6962 /* Invalid TSA type */
6963 need_reconfig = false;
6968 old_cfg = &hw->local_dcbx_config;
6969 /* Check if need reconfiguration */
6970 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6972 /* If needed, enable/disable frame tagging, disable all VSIs
6973 * and suspend port tx
6975 if (need_reconfig) {
6976 /* Enable DCB tagging only when more than one TC */
6978 pf->flags |= I40E_FLAG_DCB_ENABLED;
6980 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6982 set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed: quiesce all VSIs */
6984 i40e_pf_quiesce_all_vsi(pf);
6985 ret = i40e_suspend_port_tx(pf);
6990 /* Configure Port ETS Tx Scheduler */
6991 ets_data.tc_valid_bits = tc_map;
6992 ets_data.tc_strict_priority_flags = lltc_map;
6993 ret = i40e_aq_config_switch_comp_ets
6994 (hw, pf->mac_seid, &ets_data,
6995 i40e_aqc_opc_modify_switching_comp_ets, NULL);
6997 dev_info(&pf->pdev->dev,
6998 "Modify Port ETS failed, err %s aq_err %s\n",
6999 i40e_stat_str(&pf->hw, ret),
7000 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7004 /* Configure Rx ETS HW */
7005 memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
7006 i40e_dcb_hw_set_num_tc(hw, new_numtc);
7007 i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
7008 I40E_DCB_ARB_MODE_STRICT_PRIORITY,
7009 I40E_DCB_DEFAULT_MAX_EXPONENT,
7011 i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
7012 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
7014 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
7015 new_cfg->etscfg.prioritytable);
7016 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
7018 /* Configure Rx Packet Buffers in HW */
7019 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7020 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
7021 mfs_tc[i] += I40E_PACKET_HDR_PAD;
7024 i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
7025 false, new_cfg->pfc.pfcenable,
7027 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
7029 /* Update the local Rx Packet buffer config */
7030 pf->pb_cfg = pb_cfg;
7032 /* Inform the FW about changes to DCB configuration */
7033 ret = i40e_aq_dcb_updated(&pf->hw, NULL);
7035 dev_info(&pf->pdev->dev,
7036 "DCB Updated failed, err %s aq_err %s\n",
7037 i40e_stat_str(&pf->hw, ret),
7038 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7042 /* Update the port DCBx configuration */
7043 *old_cfg = *new_cfg;
7045 /* Changes in configuration update VEB/VSI */
7046 i40e_dcb_reconfigure(pf);
7048 /* Re-start the VSIs if disabled */
7049 if (need_reconfig) {
7050 ret = i40e_resume_port_tx(pf);
7052 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
7053 /* In case of error no point in resuming VSIs */
7057 /* Wait for the PF's queues to be disabled */
7058 ret = i40e_pf_wait_queues_disabled(pf);
7060 /* Schedule PF reset to recover */
7061 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
7062 i40e_service_event_schedule(pf);
7065 i40e_pf_unquiesce_all_vsi(pf);
7066 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7067 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
/* registers are set, let's apply */
7070 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
7071 ret = i40e_hw_set_dcb_config(pf, new_cfg);
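/* Worked example for the MFS table above: every TC uses the LAN VSI's
 * MTU plus I40E_PACKET_HDR_PAD, so e.g. a 1500 byte MTU becomes a max
 * frame size of 1500 + I40E_PACKET_HDR_PAD bytes (the pad accounting
 * for L2 framing overhead).
 */
static inline u32 i40e_example_mfs(u32 mtu)
{
	return mtu + I40E_PACKET_HDR_PAD;
}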
7079 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
7080 * @pf: PF being queried
7082 * Set default DCB configuration in case DCB is to be done in SW.
7084 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
7086 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
7087 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
7088 struct i40e_hw *hw = &pf->hw;
7091 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
7092 /* Update the local cached instance with TC0 ETS */
7093 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
7094 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7095 pf->tmp_cfg.etscfg.maxtcs = 0;
7096 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7097 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
7098 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
7099 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
7100 /* FW needs one App to configure HW */
7101 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
7102 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
7103 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
7104 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
7106 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
7109 memset(&ets_data, 0, sizeof(ets_data));
7110 ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
7111 ets_data.tc_strict_priority_flags = 0; /* ETS */
7112 ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
7114 /* Enable ETS on the Physical port */
7115 err = i40e_aq_config_switch_comp_ets
7116 (hw, pf->mac_seid, &ets_data,
7117 i40e_aqc_opc_enable_switching_comp_ets, NULL);
7119 dev_info(&pf->pdev->dev,
7120 "Enable Port ETS failed, err %s aq_err %s\n",
7121 i40e_stat_str(&pf->hw, err),
7122 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7127 /* Update the local cached instance with TC0 ETS */
7128 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7129 dcb_cfg->etscfg.cbs = 0;
7130 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
7131 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7138 * i40e_init_pf_dcb - Initialize DCB configuration
7139 * @pf: PF being configured
7141 * Query the current DCB configuration and cache it
7142 * in the hardware structure
7144 static int i40e_init_pf_dcb(struct i40e_pf *pf)
7146 struct i40e_hw *hw = &pf->hw;
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
 * Also do not enable DCBX if the FW LLDP agent is disabled.
 */
7152 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
7153 dev_info(&pf->pdev->dev, "DCB is not supported.\n");
7154 err = I40E_NOT_SUPPORTED;
7157 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
7158 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
7159 err = i40e_dcb_sw_default_config(pf);
7161 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
7164 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
7165 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
7166 DCB_CAP_DCBX_VER_IEEE;
7167 /* at init capable but disabled */
7168 pf->flags |= I40E_FLAG_DCB_CAPABLE;
7169 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7172 err = i40e_init_dcb(hw, true);
7174 /* Device/Function is not DCBX capable */
7175 if ((!hw->func_caps.dcb) ||
7176 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
7177 dev_info(&pf->pdev->dev,
7178 "DCBX offload is not supported or is disabled for this PF.\n");
/* When the status is not DISABLED, DCBX is managed in FW */
7181 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
7182 DCB_CAP_DCBX_VER_IEEE;
7184 pf->flags |= I40E_FLAG_DCB_CAPABLE;
/* Enable DCB tagging only when more than one TC,
 * or explicitly disable it if there is only one TC
 */
7188 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
7189 pf->flags |= I40E_FLAG_DCB_ENABLED;
7191 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7192 dev_dbg(&pf->pdev->dev,
7193 "DCBX offload is supported for this PF.\n");
7195 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
7196 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
7197 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
7199 dev_info(&pf->pdev->dev,
7200 "Query for DCB configuration failed, err %s aq_err %s\n",
7201 i40e_stat_str(&pf->hw, err),
7202 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7208 #endif /* CONFIG_I40E_DCB */
7211 * i40e_print_link_message - print link up or down
7212 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
7215 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
7217 enum i40e_aq_link_speed new_speed;
7218 struct i40e_pf *pf = vsi->back;
7219 char *speed = "Unknown";
7220 char *fc = "Unknown";
7226 new_speed = pf->hw.phy.link_info.link_speed;
7228 new_speed = I40E_LINK_SPEED_UNKNOWN;
7230 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
7232 vsi->current_isup = isup;
7233 vsi->current_speed = new_speed;
7235 netdev_info(vsi->netdev, "NIC Link is Down\n");
/* Warn the user if the link speed on an NPAR-enabled partition is not
 * at least 10Gbps
 */
7242 if (pf->hw.func_caps.npar_enable &&
7243 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
7244 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
7245 netdev_warn(vsi->netdev,
7246 "The partition detected link speed that is less than 10Gbps\n");
7248 switch (pf->hw.phy.link_info.link_speed) {
7249 case I40E_LINK_SPEED_40GB:
7252 case I40E_LINK_SPEED_20GB:
7255 case I40E_LINK_SPEED_25GB:
7258 case I40E_LINK_SPEED_10GB:
7261 case I40E_LINK_SPEED_5GB:
7264 case I40E_LINK_SPEED_2_5GB:
7267 case I40E_LINK_SPEED_1GB:
7270 case I40E_LINK_SPEED_100MB:
7277 switch (pf->hw.fc.current_mode) {
7281 case I40E_FC_TX_PAUSE:
7284 case I40E_FC_RX_PAUSE:
7292 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
7297 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7300 if (pf->hw.phy.link_info.fec_info &
7301 I40E_AQ_CONFIG_FEC_KR_ENA)
7302 fec = "CL74 FC-FEC/BASE-R";
7303 else if (pf->hw.phy.link_info.fec_info &
7304 I40E_AQ_CONFIG_FEC_RS_ENA)
7305 fec = "CL108 RS-FEC";
7307 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
7308 * both RS and FC are requested
7310 if (vsi->back->hw.phy.link_info.req_fec_info &
7311 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7312 if (vsi->back->hw.phy.link_info.req_fec_info &
7313 I40E_AQ_REQUEST_FEC_RS)
7314 req_fec = "CL108 RS-FEC";
7316 req_fec = "CL74 FC-FEC/BASE-R";
7318 netdev_info(vsi->netdev,
7319 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7320 speed, req_fec, fec, an, fc);
7321 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7326 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7329 if (pf->hw.phy.link_info.fec_info &
7330 I40E_AQ_CONFIG_FEC_KR_ENA)
7331 fec = "CL74 FC-FEC/BASE-R";
7333 if (pf->hw.phy.link_info.req_fec_info &
7334 I40E_AQ_REQUEST_FEC_KR)
7335 req_fec = "CL74 FC-FEC/BASE-R";
7337 netdev_info(vsi->netdev,
7338 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7339 speed, req_fec, fec, an, fc);
7341 netdev_info(vsi->netdev,
7342 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7349 * i40e_up_complete - Finish the last steps of bringing up a connection
7350 * @vsi: the VSI being configured
7352 static int i40e_up_complete(struct i40e_vsi *vsi)
7354 struct i40e_pf *pf = vsi->back;
7357 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7358 i40e_vsi_configure_msix(vsi);
7360 i40e_configure_msi_and_legacy(vsi);
7363 err = i40e_vsi_start_rings(vsi);
7367 clear_bit(__I40E_VSI_DOWN, vsi->state);
7368 i40e_napi_enable_all(vsi);
7369 i40e_vsi_enable_irq(vsi);
7371 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7373 i40e_print_link_message(vsi, true);
7374 netif_tx_start_all_queues(vsi->netdev);
7375 netif_carrier_on(vsi->netdev);
7378 /* replay FDIR SB filters */
7379 if (vsi->type == I40E_VSI_FDIR) {
7380 /* reset fd counters */
7383 i40e_fdir_filter_restore(vsi);
7386 /* On the next run of the service_task, notify any clients of the new
7389 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7390 i40e_service_event_schedule(pf);
7396 * i40e_vsi_reinit_locked - Reset the VSI
7397 * @vsi: the VSI being configured
7399 * Rebuild the ring structs after some configuration
7400 * has changed, e.g. MTU size.
7402 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7404 struct i40e_pf *pf = vsi->back;
7406 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7407 usleep_range(1000, 2000);
7411 clear_bit(__I40E_CONFIG_BUSY, pf->state);
7415 * i40e_force_link_state - Force the link status
7416 * @pf: board private structure
7417 * @is_up: whether the link state should be forced up or down
7419 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7421 struct i40e_aq_get_phy_abilities_resp abilities;
7422 struct i40e_aq_set_phy_config config = {0};
7423 bool non_zero_phy_type = is_up;
7424 struct i40e_hw *hw = &pf->hw;
7429 /* Card might've been put in an unstable state by other drivers
 * and applications, which can cause incorrect speed values to be
 * set on startup. In order to clear the speed registers, we call
7432 * get_phy_capabilities twice, once to get initial state of
7433 * available speeds, and once to get current PHY config.
7435 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7438 dev_err(&pf->pdev->dev,
7439 "failed to get phy cap., ret = %s last_status = %s\n",
7440 i40e_stat_str(hw, err),
7441 i40e_aq_str(hw, hw->aq.asq_last_status));
7444 speed = abilities.link_speed;
7446 /* Get the current phy config */
7447 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7450 dev_err(&pf->pdev->dev,
7451 "failed to get phy cap., ret = %s last_status = %s\n",
7452 i40e_stat_str(hw, err),
7453 i40e_aq_str(hw, hw->aq.asq_last_status));
/* If the link needs to go up, but was not forced to go down,
 * and its speed values are OK, there is no need for a flap;
 * if non_zero_phy_type was set, we still need to force it up.
 */
7461 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
7462 non_zero_phy_type = true;
7463 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7464 return I40E_SUCCESS;
7466 /* To force link we need to set bits for all supported PHY types,
7467 * but there are now more than 32, so we need to split the bitmap
7468 * across two fields.
7470 mask = I40E_PHY_TYPES_BITMASK;
7472 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7473 config.phy_type_ext =
7474 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
/* Copy the old settings, except for phy_type */
7476 config.abilities = abilities.abilities;
7477 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
7479 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7481 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7483 if (abilities.link_speed != 0)
7484 config.link_speed = abilities.link_speed;
7486 config.link_speed = speed;
7487 config.eee_capability = abilities.eee_capability;
7488 config.eeer = abilities.eeer_val;
7489 config.low_power_ctrl = abilities.d3_lpan;
7490 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7491 I40E_AQ_PHY_FEC_CONFIG_MASK;
7492 err = i40e_aq_set_phy_config(hw, &config, NULL);
7495 dev_err(&pf->pdev->dev,
7496 "set phy config ret = %s last_status = %s\n",
7497 i40e_stat_str(&pf->hw, err),
7498 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7502 /* Update the link info */
7503 err = i40e_update_link_info(hw);
7505 /* Wait a little bit (on 40G cards it sometimes takes a really
7506 * long time for link to come back from the atomic reset)
7510 i40e_update_link_info(hw);
7513 i40e_aq_set_link_restart_an(hw, is_up, NULL);
7515 return I40E_SUCCESS;
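/* Minimal sketch of the 64-bit PHY type split used above: the low
 * 32 bits land in config.phy_type and the remaining high bits in
 * phy_type_ext.
 */
static inline void i40e_example_split_phy_mask(u64 mask, __le32 *phy_type,
					       u8 *phy_type_ext)
{
	*phy_type = cpu_to_le32((u32)(mask & 0xffffffff));
	*phy_type_ext = (u8)((mask >> 32) & 0xff);
}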
7519 * i40e_up - Bring the connection back up after being down
7520 * @vsi: the VSI being configured
7522 int i40e_up(struct i40e_vsi *vsi)
7526 if (vsi->type == I40E_VSI_MAIN &&
7527 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7528 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7529 i40e_force_link_state(vsi->back, true);
7531 err = i40e_vsi_configure(vsi);
7533 err = i40e_up_complete(vsi);
7539 * i40e_down - Shutdown the connection processing
7540 * @vsi: the VSI being stopped
7542 void i40e_down(struct i40e_vsi *vsi)
7546 /* It is assumed that the caller of this function
7547 * sets the vsi->state __I40E_VSI_DOWN bit.
7550 netif_carrier_off(vsi->netdev);
7551 netif_tx_disable(vsi->netdev);
7553 i40e_vsi_disable_irq(vsi);
7554 i40e_vsi_stop_rings(vsi);
7555 if (vsi->type == I40E_VSI_MAIN &&
7556 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7557 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7558 i40e_force_link_state(vsi->back, false);
7559 i40e_napi_disable_all(vsi);
7561 for (i = 0; i < vsi->num_queue_pairs; i++) {
7562 i40e_clean_tx_ring(vsi->tx_rings[i]);
7563 if (i40e_enabled_xdp_vsi(vsi)) {
7564 /* Make sure that in-progress ndo_xdp_xmit and
7565 * ndo_xsk_wakeup calls are completed.
7568 i40e_clean_tx_ring(vsi->xdp_rings[i]);
7570 i40e_clean_rx_ring(vsi->rx_rings[i]);
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
7580 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7581 struct tc_mqprio_qopt_offload *mqprio_qopt)
7583 u64 sum_max_rate = 0;
7587 if (mqprio_qopt->qopt.offset[0] != 0 ||
7588 mqprio_qopt->qopt.num_tc < 1 ||
7589 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7591 for (i = 0; ; i++) {
7592 if (!mqprio_qopt->qopt.count[i])
7594 if (mqprio_qopt->min_rate[i]) {
7595 dev_err(&vsi->back->pdev->dev,
7596 "Invalid min tx rate (greater than 0) specified\n");
7599 max_rate = mqprio_qopt->max_rate[i];
7600 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7601 sum_max_rate += max_rate;
7603 if (i >= mqprio_qopt->qopt.num_tc - 1)
7605 if (mqprio_qopt->qopt.offset[i + 1] !=
7606 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7609 if (vsi->num_queue_pairs <
7610 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7611 dev_err(&vsi->back->pdev->dev,
7612 "Failed to create traffic channel, insufficient number of queues.\n");
7615 if (sum_max_rate > i40e_get_link_speed(vsi)) {
7616 dev_err(&vsi->back->pdev->dev,
7617 "Invalid max tx rate specified\n");
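/* A layout that passes the checks above, sketched for a hypothetical
 * 8-queue VSI: two TCs where each TC's offset equals the previous
 * offset plus its count, no min_rate, and max_rate left at zero.
 */
static inline void i40e_example_mqprio_layout(struct tc_mqprio_qopt_offload *q)
{
	q->qopt.num_tc = 2;
	q->qopt.count[0] = 4;
	q->qopt.offset[0] = 0;
	q->qopt.count[1] = 4;
	q->qopt.offset[1] = 4;	/* offset[0] + count[0] */
}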
7624 * i40e_vsi_set_default_tc_config - set default values for tc configuration
7625 * @vsi: the VSI being configured
7627 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7632 /* Only TC0 is enabled */
7633 vsi->tc_config.numtc = 1;
7634 vsi->tc_config.enabled_tc = 1;
7635 qcount = min_t(int, vsi->alloc_queue_pairs,
7636 i40e_pf_get_max_q_per_tc(vsi->back));
7637 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* For TCs that are not enabled, set the offset to the default
 * queue and allocate one queue for the given TC.
 */
7641 vsi->tc_config.tc_info[i].qoffset = 0;
7643 vsi->tc_config.tc_info[i].qcount = qcount;
7645 vsi->tc_config.tc_info[i].qcount = 1;
7646 vsi->tc_config.tc_info[i].netdev_tc = 0;
7651 * i40e_del_macvlan_filter
7652 * @hw: pointer to the HW structure
7653 * @seid: seid of the channel VSI
7654 * @macaddr: the mac address to apply as a filter
7655 * @aq_err: store the admin Q error
7657 * This function deletes a mac filter on the channel VSI which serves as the
7658 * macvlan. Returns 0 on success.
7660 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7661 const u8 *macaddr, int *aq_err)
7663 struct i40e_aqc_remove_macvlan_element_data element;
7666 memset(&element, 0, sizeof(element));
7667 ether_addr_copy(element.mac_addr, macaddr);
7668 element.vlan_tag = 0;
7669 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7670 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7671 *aq_err = hw->aq.asq_last_status;
7677 * i40e_add_macvlan_filter
7678 * @hw: pointer to the HW structure
7679 * @seid: seid of the channel VSI
7680 * @macaddr: the mac address to apply as a filter
7681 * @aq_err: store the admin Q error
7683 * This function adds a mac filter on the channel VSI which serves as the
7684 * macvlan. Returns 0 on success.
7686 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7687 const u8 *macaddr, int *aq_err)
7689 struct i40e_aqc_add_macvlan_element_data element;
7693 ether_addr_copy(element.mac_addr, macaddr);
7694 element.vlan_tag = 0;
7695 element.queue_number = 0;
7696 element.match_method = I40E_AQC_MM_ERR_NO_RES;
7697 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7698 element.flags = cpu_to_le16(cmd_flags);
7699 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7700 *aq_err = hw->aq.asq_last_status;
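/* Minimal usage sketch for the two helpers above, with a hypothetical
 * channel seid: add a perfect-match MAC filter and remove it again;
 * aq_err carries the admin queue status back for the caller's logging.
 */
static inline void i40e_example_macvlan_filter(struct i40e_hw *hw, u16 seid,
					       const u8 *macaddr)
{
	int aq_err;

	if (i40e_add_macvlan_filter(hw, seid, macaddr, &aq_err))
		return;

	i40e_del_macvlan_filter(hw, seid, macaddr, &aq_err);
}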
7706 * i40e_reset_ch_rings - Reset the queue contexts in a channel
7707 * @vsi: the VSI we want to access
7708 * @ch: the channel we want to access
7710 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7712 struct i40e_ring *tx_ring, *rx_ring;
7716 for (i = 0; i < ch->num_queue_pairs; i++) {
7717 pf_q = ch->base_queue + i;
7718 tx_ring = vsi->tx_rings[pf_q];
7720 rx_ring = vsi->rx_rings[pf_q];
7726 * i40e_free_macvlan_channels
7727 * @vsi: the VSI we want to access
7729 * This function frees the Qs of the channel VSI from
7730 * the stack and also deletes the channel VSIs which
7731 * serve as macvlans.
7733 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7735 struct i40e_channel *ch, *ch_tmp;
7738 if (list_empty(&vsi->macvlan_list))
7741 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7742 struct i40e_vsi *parent_vsi;
7744 if (i40e_is_channel_macvlan(ch)) {
7745 i40e_reset_ch_rings(vsi, ch);
7746 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7747 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7748 netdev_set_sb_channel(ch->fwd->netdev, 0);
7753 list_del(&ch->list);
7754 parent_vsi = ch->parent_vsi;
7755 if (!parent_vsi || !ch->initialized) {
7760 /* remove the VSI */
7761 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7764 dev_err(&vsi->back->pdev->dev,
7765 "unable to remove channel (%d) for parent VSI(%d)\n",
7766 ch->seid, parent_vsi->seid);
7769 vsi->macvlan_cnt = 0;
7773 * i40e_fwd_ring_up - bring the macvlan device up
7774 * @vsi: the VSI we want to access
7775 * @vdev: macvlan netdevice
7776 * @fwd: the private fwd structure
7778 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7779 struct i40e_fwd_adapter *fwd)
7781 struct i40e_channel *ch = NULL, *ch_tmp, *iter;
7782 int ret = 0, num_tc = 1, i, aq_err;
7783 struct i40e_pf *pf = vsi->back;
7784 struct i40e_hw *hw = &pf->hw;
7786 /* Go through the list and find an available channel */
7787 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
7788 if (!i40e_is_channel_macvlan(iter)) {
7790 /* record configuration for macvlan interface in vdev */
7791 for (i = 0; i < num_tc; i++)
7792 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7794 iter->num_queue_pairs,
7796 for (i = 0; i < iter->num_queue_pairs; i++) {
7797 struct i40e_ring *tx_ring, *rx_ring;
7800 pf_q = iter->base_queue + i;
7802 /* Get to TX ring ptr */
7803 tx_ring = vsi->tx_rings[pf_q];
7806 /* Get the RX ring ptr */
7807 rx_ring = vsi->rx_rings[pf_q];
7818 /* Guarantee all rings are updated before we update the
7819 * MAC address filter.
7823 /* Add a mac filter */
7824 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7826 /* if we cannot add the MAC rule then disable the offload */
7827 macvlan_release_l2fw_offload(vdev);
7828 for (i = 0; i < ch->num_queue_pairs; i++) {
7829 struct i40e_ring *rx_ring;
7832 pf_q = ch->base_queue + i;
7833 rx_ring = vsi->rx_rings[pf_q];
7834 rx_ring->netdev = NULL;
7836 dev_info(&pf->pdev->dev,
7837 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7838 i40e_stat_str(hw, ret),
7839 i40e_aq_str(hw, aq_err));
netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7847 * i40e_setup_macvlans - create the channels which will be macvlans
7848 * @vsi: the VSI we want to access
 * @macvlan_cnt: no. of macvlans to be set up
 * @qcnt: no. of Qs per macvlan
7851 * @vdev: macvlan netdevice
7853 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7854 struct net_device *vdev)
7856 struct i40e_pf *pf = vsi->back;
7857 struct i40e_hw *hw = &pf->hw;
7858 struct i40e_vsi_context ctxt;
7859 u16 sections, qmap, num_qps;
7860 struct i40e_channel *ch;
7861 int i, pow, ret = 0;
7864 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7867 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7869 /* find the next higher power-of-2 of num queue pairs */
7870 pow = fls(roundup_pow_of_two(num_qps) - 1);
7872 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7873 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7875 /* Setup context bits for the main VSI */
7876 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7877 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7878 memset(&ctxt, 0, sizeof(ctxt));
7879 ctxt.seid = vsi->seid;
7880 ctxt.pf_num = vsi->back->hw.pf_id;
7882 ctxt.uplink_seid = vsi->uplink_seid;
7883 ctxt.info = vsi->info;
7884 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7885 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7886 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7887 ctxt.info.valid_sections |= cpu_to_le16(sections);
7889 /* Reconfigure RSS for main VSI with new max queue count */
7890 vsi->rss_size = max_t(u16, num_qps, qcnt);
7891 ret = i40e_vsi_config_rss(vsi);
7893 dev_info(&pf->pdev->dev,
7894 "Failed to reconfig RSS for num_queues (%u)\n",
7898 vsi->reconfig_rss = true;
7899 dev_dbg(&vsi->back->pdev->dev,
7900 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7901 vsi->next_base_queue = num_qps;
7902 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
/* Update the VSI after updating the VSI queue-mapping
 * information
 */
7907 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7909 dev_info(&pf->pdev->dev,
7910 "Update vsi tc config failed, err %s aq_err %s\n",
7911 i40e_stat_str(hw, ret),
7912 i40e_aq_str(hw, hw->aq.asq_last_status));
7915 /* update the local VSI info with updated queue map */
7916 i40e_vsi_update_queue_map(vsi, &ctxt);
7917 vsi->info.valid_sections = 0;
7919 /* Create channels for macvlans */
7920 INIT_LIST_HEAD(&vsi->macvlan_list);
7921 for (i = 0; i < macvlan_cnt; i++) {
7922 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7927 INIT_LIST_HEAD(&ch->list);
7928 ch->num_queue_pairs = qcnt;
7929 if (!i40e_setup_channel(pf, vsi, ch)) {
7934 ch->parent_vsi = vsi;
7935 vsi->cnt_q_avail -= ch->num_queue_pairs;
7937 list_add_tail(&ch->list, &vsi->macvlan_list);
7943 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7944 i40e_free_macvlan_channels(vsi);
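/* Note on the rounding in i40e_setup_macvlans() above:
 * fls(roundup_pow_of_two(n) - 1) is the exponent of the next power of
 * two, e.g. n = 6 gives roundup_pow_of_two(6) = 8 and fls(7) = 3, the
 * same result as the ilog2()/is_power_of_2() idiom used for channels.
 */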
7950 * i40e_fwd_add - configure macvlans
7951 * @netdev: net device to configure
7952 * @vdev: macvlan netdevice
7954 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7956 struct i40e_netdev_priv *np = netdev_priv(netdev);
7957 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7958 struct i40e_vsi *vsi = np->vsi;
7959 struct i40e_pf *pf = vsi->back;
7960 struct i40e_fwd_adapter *fwd;
7961 int avail_macvlan, ret;
7963 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7964 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7965 return ERR_PTR(-EINVAL);
7967 if (i40e_is_tc_mqprio_enabled(pf)) {
7968 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7969 return ERR_PTR(-EINVAL);
7971 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7972 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7973 return ERR_PTR(-EINVAL);
7976 /* The macvlan device has to be a single Q device so that the
7977 * tc_to_txq field can be reused to pick the tx queue.
7979 if (netif_is_multiqueue(vdev))
7980 return ERR_PTR(-ERANGE);
7982 if (!vsi->macvlan_cnt) {
7983 /* reserve bit 0 for the pf device */
7984 set_bit(0, vsi->fwd_bitmask);
7986 /* Try to reserve as many queues as possible for macvlans. First
7987 * reserve 3/4th of max vectors, then half, then quarter and
7988 * calculate Qs per macvlan as you go
7990 vectors = pf->num_lan_msix;
if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
	/* allocate 4 Qs per macvlan and 32 Qs to the PF */
	q_per_macvlan = 4;
	macvlan_cnt = (vectors - 32) / 4;
} else if (vectors <= 64 && vectors > 32) {
	/* allocate 2 Qs per macvlan and 16 Qs to the PF */
	q_per_macvlan = 2;
	macvlan_cnt = (vectors - 16) / 2;
} else if (vectors <= 32 && vectors > 16) {
	/* allocate 1 Q per macvlan and 16 Qs to the PF */
	q_per_macvlan = 1;
	macvlan_cnt = vectors - 16;
} else if (vectors <= 16 && vectors > 8) {
	/* allocate 1 Q per macvlan and 8 Qs to the PF */
	q_per_macvlan = 1;
	macvlan_cnt = vectors - 8;
} else {
	/* allocate 1 Q per macvlan and 1 Q to the PF */
	q_per_macvlan = 1;
	macvlan_cnt = vectors - 1;
}
8013 if (macvlan_cnt == 0)
8014 return ERR_PTR(-EBUSY);
8016 /* Quiesce VSI queues */
8017 i40e_quiesce_vsi(vsi);
8019 /* sets up the macvlans but does not "enable" them */
8020 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
8023 return ERR_PTR(ret);
8026 i40e_unquiesce_vsi(vsi);
8028 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
8030 if (avail_macvlan >= I40E_MAX_MACVLANS)
8031 return ERR_PTR(-EBUSY);
8033 /* create the fwd struct */
8034 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
8036 return ERR_PTR(-ENOMEM);
8038 set_bit(avail_macvlan, vsi->fwd_bitmask);
8039 fwd->bit_no = avail_macvlan;
8040 netdev_set_sb_channel(vdev, avail_macvlan);
8043 if (!netif_running(netdev))
8046 /* Set fwd ring up */
8047 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
8049 /* unbind the queues and drop the subordinate channel config */
8050 netdev_unbind_sb_channel(netdev, vdev);
8051 netdev_set_sb_channel(vdev, 0);
8054 return ERR_PTR(-EINVAL);
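/* Worked example for the vector carve-up in i40e_fwd_add() above:
 * with pf->num_lan_msix = 48 the second tier applies, so the PF keeps
 * 16 queues and the remaining vectors yield (48 - 16) / 2 = 16
 * macvlans with 2 queues each.
 */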
8061 * i40e_del_all_macvlans - Delete all the mac filters on the channels
8062 * @vsi: the VSI we want to access
8064 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
8066 struct i40e_channel *ch, *ch_tmp;
8067 struct i40e_pf *pf = vsi->back;
8068 struct i40e_hw *hw = &pf->hw;
8069 int aq_err, ret = 0;
8071 if (list_empty(&vsi->macvlan_list))
8074 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8075 if (i40e_is_channel_macvlan(ch)) {
8076 ret = i40e_del_macvlan_filter(hw, ch->seid,
8077 i40e_channel_mac(ch),
8080 /* Reset queue contexts */
8081 i40e_reset_ch_rings(vsi, ch);
8082 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8083 netdev_unbind_sb_channel(vsi->netdev,
8085 netdev_set_sb_channel(ch->fwd->netdev, 0);
8094 * i40e_fwd_del - delete macvlan interfaces
8095 * @netdev: net device to configure
8096 * @vdev: macvlan netdevice
8098 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
8100 struct i40e_netdev_priv *np = netdev_priv(netdev);
8101 struct i40e_fwd_adapter *fwd = vdev;
8102 struct i40e_channel *ch, *ch_tmp;
8103 struct i40e_vsi *vsi = np->vsi;
8104 struct i40e_pf *pf = vsi->back;
8105 struct i40e_hw *hw = &pf->hw;
8106 int aq_err, ret = 0;
8108 /* Find the channel associated with the macvlan and del mac filter */
8109 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8110 if (i40e_is_channel_macvlan(ch) &&
8111 ether_addr_equal(i40e_channel_mac(ch),
8112 fwd->netdev->dev_addr)) {
8113 ret = i40e_del_macvlan_filter(hw, ch->seid,
8114 i40e_channel_mac(ch),
8117 /* Reset queue contexts */
8118 i40e_reset_ch_rings(vsi, ch);
8119 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8120 netdev_unbind_sb_channel(netdev, fwd->netdev);
8121 netdev_set_sb_channel(fwd->netdev, 0);
8125 dev_info(&pf->pdev->dev,
8126 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
8127 i40e_stat_str(hw, ret),
8128 i40e_aq_str(hw, aq_err));
8136 * i40e_setup_tc - configure multiple traffic classes
8137 * @netdev: net device to configure
8138 * @type_data: tc offload data
8140 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
8142 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8143 struct i40e_netdev_priv *np = netdev_priv(netdev);
8144 struct i40e_vsi *vsi = np->vsi;
8145 struct i40e_pf *pf = vsi->back;
8146 u8 enabled_tc = 0, num_tc, hw;
8147 bool need_reset = false;
8148 int old_queue_pairs;
8153 old_queue_pairs = vsi->num_queue_pairs;
8154 num_tc = mqprio_qopt->qopt.num_tc;
8155 hw = mqprio_qopt->qopt.hw;
8156 mode = mqprio_qopt->mode;
8158 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
8159 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8163 /* Check if MFP enabled */
8164 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
8166 "Configuring TC not supported in MFP mode\n");
8170 case TC_MQPRIO_MODE_DCB:
8171 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
8173 /* Check if DCB enabled to continue */
8174 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
8176 "DCB is not enabled for adapter\n");
8180 /* Check whether tc count is within enabled limit */
8181 if (num_tc > i40e_pf_get_num_tc(pf)) {
8183 "TC count greater than enabled on link for adapter\n");
8187 case TC_MQPRIO_MODE_CHANNEL:
8188 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
8190 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
8193 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8195 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
8198 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
8199 sizeof(*mqprio_qopt));
8200 pf->flags |= I40E_FLAG_TC_MQPRIO;
8201 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8208 /* Generate TC map for number of tc requested */
8209 for (i = 0; i < num_tc; i++)
8210 enabled_tc |= BIT(i);
8212 /* Requesting same TC configuration as already enabled */
8213 if (enabled_tc == vsi->tc_config.enabled_tc &&
8214 mode != TC_MQPRIO_MODE_CHANNEL)
8217 /* Quiesce VSI queues */
8218 i40e_quiesce_vsi(vsi);
8220 if (!hw && !i40e_is_tc_mqprio_enabled(pf))
8221 i40e_remove_queue_channels(vsi);
8223 /* Configure VSI for enabled TCs */
8224 ret = i40e_vsi_config_tc(vsi, enabled_tc);
8226 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
8230 } else if (enabled_tc &&
8231 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
8233 "Failed to create channel. Override queues (%u) not power of 2\n",
8234 vsi->tc_config.tc_info[0].qcount);
8240 dev_info(&vsi->back->pdev->dev,
8241 "Setup channel (id:%u) utilizing num_queues %d\n",
8242 vsi->seid, vsi->tc_config.tc_info[0].qcount);
8244 if (i40e_is_tc_mqprio_enabled(pf)) {
8245 if (vsi->mqprio_qopt.max_rate[0]) {
8246 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
8247 vsi->mqprio_qopt.max_rate[0]);
8249 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
8251 u64 credits = max_tx_rate;
8253 do_div(credits, I40E_BW_CREDIT_DIVISOR);
8254 dev_dbg(&vsi->back->pdev->dev,
8255 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
8264 ret = i40e_configure_queue_channels(vsi);
8266 vsi->num_queue_pairs = old_queue_pairs;
8268 "Failed configuring queue channels\n");
8275 /* Reset the configuration data to defaults, only TC0 is enabled */
8277 i40e_vsi_set_default_tc_config(vsi);
8282 i40e_unquiesce_vsi(vsi);
8287 * i40e_set_cld_element - sets cloud filter element data
8288 * @filter: cloud filter rule
8289 * @cld: ptr to cloud filter element data
 * This is a helper function to copy data into the cloud filter element
8294 i40e_set_cld_element(struct i40e_cloud_filter *filter,
8295 struct i40e_aqc_cloud_filters_element_data *cld)
8300 memset(cld, 0, sizeof(*cld));
8301 ether_addr_copy(cld->outer_mac, filter->dst_mac);
8302 ether_addr_copy(cld->inner_mac, filter->src_mac);
8304 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
8307 if (filter->n_proto == ETH_P_IPV6) {
8308 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
8309 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
8310 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
8312 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
8315 ipa = be32_to_cpu(filter->dst_ipv4);
8317 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8320 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
/* tenant_id is not supported by FW now; once the support is enabled,
 * fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
 */
if (filter->tenant_id)
	return;
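/* Example of the IPv6 reordering in i40e_set_cld_element() above: the
 * filter stores dst_ipv6 as four big-endian words, most significant
 * first, while the firmware expects little-endian data starting with
 * the least significant word, so output word i is byte-swapped from
 * input word (IPV6_MAX_INDEX - i).
 */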
8330 * i40e_add_del_cloud_filter - Add/del cloud filter
8331 * @vsi: pointer to VSI
8332 * @filter: cloud filter rule
8333 * @add: if true, add, if false, delete
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added or deleted.
 */
8338 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8339 struct i40e_cloud_filter *filter, bool add)
8341 struct i40e_aqc_cloud_filters_element_data cld_filter;
8342 struct i40e_pf *pf = vsi->back;
8344 static const u16 flag_table[128] = {
8345 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
8346 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8347 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
8348 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8349 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
8350 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8351 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8352 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8353 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8354 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8355 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8356 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8357 [I40E_CLOUD_FILTER_FLAGS_IIP] =
8358 I40E_AQC_ADD_CLOUD_FILTER_IIP,
8361 if (filter->flags >= ARRAY_SIZE(flag_table))
8362 return I40E_ERR_CONFIG;
8364 memset(&cld_filter, 0, sizeof(cld_filter));
8366 /* copy element needed to add cloud filter from filter */
8367 i40e_set_cld_element(filter, &cld_filter);
8369 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8370 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8371 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8373 if (filter->n_proto == ETH_P_IPV6)
8374 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8375 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8377 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8378 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8381 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8384 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8387 dev_dbg(&pf->pdev->dev,
8388 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8389 add ? "add" : "delete", filter->dst_port, ret,
8390 pf->hw.aq.asq_last_status);
8392 dev_info(&pf->pdev->dev,
8393 "%s cloud filter for VSI: %d\n",
8394 add ? "Added" : "Deleted", filter->seid);
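/* Minimal usage sketch (hypothetical values): install and then remove
 * an inner-MAC cloud filter steering to this VSI.
 */
static inline void i40e_example_cloud_filter(struct i40e_vsi *vsi)
{
	struct i40e_cloud_filter filter = {};

	filter.seid = vsi->seid;
	filter.flags = I40E_CLOUD_FILTER_FLAGS_IMAC;
	ether_addr_copy(filter.src_mac, vsi->netdev->dev_addr);

	if (!i40e_add_del_cloud_filter(vsi, &filter, true))
		i40e_add_del_cloud_filter(vsi, &filter, false);
}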
8399 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8400 * @vsi: pointer to VSI
8401 * @filter: cloud filter rule
8402 * @add: if true, add, if false, delete
 * Add or delete a cloud filter for a specific flow spec using the big buffer.
 * Returns 0 if the filter was successfully added or deleted.
 */
8407 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8408 struct i40e_cloud_filter *filter,
8411 struct i40e_aqc_cloud_filters_element_bb cld_filter;
8412 struct i40e_pf *pf = vsi->back;
/* Having both src and dst MAC addresses valid (or both multicast) is not supported */
8416 if ((is_valid_ether_addr(filter->dst_mac) &&
8417 is_valid_ether_addr(filter->src_mac)) ||
8418 (is_multicast_ether_addr(filter->dst_mac) &&
8419 is_multicast_ether_addr(filter->src_mac)))
8422 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
8423 * ports are not supported via big buffer now.
8425 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8428 /* adding filter using src_port/src_ip is not supported at this stage */
8429 if (filter->src_port ||
8430 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8431 !ipv6_addr_any(&filter->ip.v6.src_ip6))
8434 memset(&cld_filter, 0, sizeof(cld_filter));
8436 /* copy element needed to add cloud filter from filter */
8437 i40e_set_cld_element(filter, &cld_filter.element);
8439 if (is_valid_ether_addr(filter->dst_mac) ||
8440 is_valid_ether_addr(filter->src_mac) ||
8441 is_multicast_ether_addr(filter->dst_mac) ||
8442 is_multicast_ether_addr(filter->src_mac)) {
8443 /* MAC + IP : unsupported mode */
8444 if (filter->dst_ipv4)
/* since we have already validated that the L4 port is valid, start
 * with the corresponding "flags" value and update it depending on
 * whether a VLAN is present
 */
8451 cld_filter.element.flags =
8452 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8454 if (filter->vlan_id) {
8455 cld_filter.element.flags =
8456 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8459 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8460 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8461 cld_filter.element.flags =
8462 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8463 if (filter->n_proto == ETH_P_IPV6)
8464 cld_filter.element.flags |=
8465 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8467 cld_filter.element.flags |=
8468 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8470 dev_err(&pf->pdev->dev,
8471 "either mac or ip has to be valid for cloud filter\n");
/* Now copy the L4 port into bytes 6..7 of the general fields */
8476 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8477 be16_to_cpu(filter->dst_port);
8480 /* Validate current device switch mode, change if necessary */
8481 ret = i40e_validate_and_set_switch_mode(vsi);
8483 dev_err(&pf->pdev->dev,
8484 "failed to set switch mode, ret %d\n",
8489 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8492 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8497 dev_dbg(&pf->pdev->dev,
8498 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8499 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8501 dev_info(&pf->pdev->dev,
8502 "%s cloud filter for VSI: %d, L4 port: %d\n",
add ? "Added" : "Deleted", filter->seid,
8504 ntohs(filter->dst_port));
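/* Editor's illustrative sketch (not driver code): callers choose the
* big-buffer variant only when an L4 destination port is part of the match,
* mirroring the dispatch done in i40e_configure_clsflower() below:
*
*	if (filter->dst_port)
*		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
*	else
*		err = i40e_add_del_cloud_filter(vsi, filter, true);
*/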
8509 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8510 * @vsi: Pointer to VSI
8511 * @f: Pointer to struct flow_cls_offload
8512 * @filter: Pointer to cloud filter structure
8515 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8516 struct flow_cls_offload *f,
8517 struct i40e_cloud_filter *filter)
8519 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8520 struct flow_dissector *dissector = rule->match.dissector;
8521 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8522 struct i40e_pf *pf = vsi->back;
8525 if (dissector->used_keys &
8526 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
8527 BIT(FLOW_DISSECTOR_KEY_BASIC) |
8528 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8529 BIT(FLOW_DISSECTOR_KEY_VLAN) |
8530 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8531 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8532 BIT(FLOW_DISSECTOR_KEY_PORTS) |
8533 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8534 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
8535 dissector->used_keys);
8539 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8540 struct flow_match_enc_keyid match;
8542 flow_rule_match_enc_keyid(rule, &match);
8543 if (match.mask->keyid != 0)
8544 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8546 filter->tenant_id = be32_to_cpu(match.key->keyid);
8549 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8550 struct flow_match_basic match;
8552 flow_rule_match_basic(rule, &match);
8553 n_proto_key = ntohs(match.key->n_proto);
8554 n_proto_mask = ntohs(match.mask->n_proto);
8556 if (n_proto_key == ETH_P_ALL) {
8560 filter->n_proto = n_proto_key & n_proto_mask;
8561 filter->ip_proto = match.key->ip_proto;
8564 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8565 struct flow_match_eth_addrs match;
8567 flow_rule_match_eth_addrs(rule, &match);
/* use is_broadcast and is_zero to check for all-0xff or all-zero masks */
8570 if (!is_zero_ether_addr(match.mask->dst)) {
8571 if (is_broadcast_ether_addr(match.mask->dst)) {
8572 field_flags |= I40E_CLOUD_FIELD_OMAC;
8574 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8576 return I40E_ERR_CONFIG;
8580 if (!is_zero_ether_addr(match.mask->src)) {
8581 if (is_broadcast_ether_addr(match.mask->src)) {
8582 field_flags |= I40E_CLOUD_FIELD_IMAC;
8584 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8586 return I40E_ERR_CONFIG;
8589 ether_addr_copy(filter->dst_mac, match.key->dst);
8590 ether_addr_copy(filter->src_mac, match.key->src);
8593 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8594 struct flow_match_vlan match;
8596 flow_rule_match_vlan(rule, &match);
8597 if (match.mask->vlan_id) {
8598 if (match.mask->vlan_id == VLAN_VID_MASK) {
8599 field_flags |= I40E_CLOUD_FIELD_IVLAN;
8602 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8603 match.mask->vlan_id);
8604 return I40E_ERR_CONFIG;
8608 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8611 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8612 struct flow_match_control match;
8614 flow_rule_match_control(rule, &match);
8615 addr_type = match.key->addr_type;
8618 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8619 struct flow_match_ipv4_addrs match;
8621 flow_rule_match_ipv4_addrs(rule, &match);
8622 if (match.mask->dst) {
8623 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8624 field_flags |= I40E_CLOUD_FIELD_IIP;
8626 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8628 return I40E_ERR_CONFIG;
8632 if (match.mask->src) {
8633 if (match.mask->src == cpu_to_be32(0xffffffff)) {
8634 field_flags |= I40E_CLOUD_FIELD_IIP;
8636 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8638 return I40E_ERR_CONFIG;
8642 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8643 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8644 return I40E_ERR_CONFIG;
8646 filter->dst_ipv4 = match.key->dst;
8647 filter->src_ipv4 = match.key->src;
8650 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8651 struct flow_match_ipv6_addrs match;
8653 flow_rule_match_ipv6_addrs(rule, &match);
/* The src and dest IPv6 addresses should not be LOOPBACK
* (0:0:0:0:0:0:0:1), which can be represented as ::1
*/
8658 if (ipv6_addr_loopback(&match.key->dst) ||
8659 ipv6_addr_loopback(&match.key->src)) {
8660 dev_err(&pf->pdev->dev,
8661 "Bad ipv6, addr is LOOPBACK\n");
8662 return I40E_ERR_CONFIG;
8664 if (!ipv6_addr_any(&match.mask->dst) ||
8665 !ipv6_addr_any(&match.mask->src))
8666 field_flags |= I40E_CLOUD_FIELD_IIP;
8668 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8669 sizeof(filter->src_ipv6));
8670 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8671 sizeof(filter->dst_ipv6));
8674 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8675 struct flow_match_ports match;
8677 flow_rule_match_ports(rule, &match);
8678 if (match.mask->src) {
8679 if (match.mask->src == cpu_to_be16(0xffff)) {
8680 field_flags |= I40E_CLOUD_FIELD_IIP;
8682 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8683 be16_to_cpu(match.mask->src));
8684 return I40E_ERR_CONFIG;
8688 if (match.mask->dst) {
8689 if (match.mask->dst == cpu_to_be16(0xffff)) {
8690 field_flags |= I40E_CLOUD_FIELD_IIP;
8692 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8693 be16_to_cpu(match.mask->dst));
8694 return I40E_ERR_CONFIG;
8698 filter->dst_port = match.key->dst;
8699 filter->src_port = match.key->src;
8701 switch (filter->ip_proto) {
8706 dev_err(&pf->pdev->dev,
8707 "Only UDP and TCP transport are supported\n");
8711 filter->flags = field_flags;
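/* Example (editor's illustration, assuming an i40e netdev named eth0 with
* an mqprio offload already configured): a tc-flower rule this parser
* accepts, since its masks are all-ones, and which i40e_handle_tclass()
* below then redirects to traffic class 1:
*
*	tc filter add dev eth0 parent ffff: protocol ip prio 1 flower \
*		ip_proto tcp dst_port 80 skip_sw hw_tc 1
*/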
* i40e_handle_tclass - Forward to a traffic class on the device
8717 * @vsi: Pointer to VSI
8718 * @tc: traffic class index on the device
8719 * @filter: Pointer to cloud filter structure
8722 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8723 struct i40e_cloud_filter *filter)
8725 struct i40e_channel *ch, *ch_tmp;
8727 /* direct to a traffic class on the same device */
8729 filter->seid = vsi->seid;
8731 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8732 if (!filter->dst_port) {
8733 dev_err(&vsi->back->pdev->dev,
8734 "Specify destination port to direct to traffic class that is not default\n");
8737 if (list_empty(&vsi->ch_list))
8739 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8741 if (ch->seid == vsi->tc_seid_map[tc])
8742 filter->seid = ch->seid;
8746 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8751 * i40e_configure_clsflower - Configure tc flower filters
8752 * @vsi: Pointer to VSI
8753 * @cls_flower: Pointer to struct flow_cls_offload
8756 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8757 struct flow_cls_offload *cls_flower)
8759 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8760 struct i40e_cloud_filter *filter = NULL;
8761 struct i40e_pf *pf = vsi->back;
8765 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8770 dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
8774 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8775 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8778 if (pf->fdir_pf_active_filters ||
8779 (!hlist_empty(&pf->fdir_filter_list))) {
8780 dev_err(&vsi->back->pdev->dev,
8781 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8785 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8786 dev_err(&vsi->back->pdev->dev,
8787 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8788 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8789 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8792 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8796 filter->cookie = cls_flower->cookie;
8798 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8802 err = i40e_handle_tclass(vsi, tc, filter);
8806 /* Add cloud filter */
8807 if (filter->dst_port)
8808 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8810 err = i40e_add_del_cloud_filter(vsi, filter, true);
8813 dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
8818 /* add filter to the ordered list */
8819 INIT_HLIST_NODE(&filter->cloud_node);
8821 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8823 pf->num_cloud_filters++;
* i40e_find_cloud_filter - Find the cloud filter in the list
8833 * @vsi: Pointer to VSI
8834 * @cookie: filter specific cookie
8837 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8838 unsigned long *cookie)
8840 struct i40e_cloud_filter *filter = NULL;
8841 struct hlist_node *node2;
8843 hlist_for_each_entry_safe(filter, node2,
8844 &vsi->back->cloud_filter_list, cloud_node)
8845 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8851 * i40e_delete_clsflower - Remove tc flower filters
8852 * @vsi: Pointer to VSI
8853 * @cls_flower: Pointer to struct flow_cls_offload
8856 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8857 struct flow_cls_offload *cls_flower)
8859 struct i40e_cloud_filter *filter = NULL;
8860 struct i40e_pf *pf = vsi->back;
8863 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8868 hash_del(&filter->cloud_node);
8870 if (filter->dst_port)
8871 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8873 err = i40e_add_del_cloud_filter(vsi, filter, false);
8877 dev_err(&pf->pdev->dev,
8878 "Failed to delete cloud filter, err %s\n",
8879 i40e_stat_str(&pf->hw, err));
8880 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8883 pf->num_cloud_filters--;
8884 if (!pf->num_cloud_filters)
8885 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8886 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8887 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8888 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8889 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8895 * i40e_setup_tc_cls_flower - flower classifier offloads
8896 * @np: net device to configure
8897 * @cls_flower: offload data
8899 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8900 struct flow_cls_offload *cls_flower)
8902 struct i40e_vsi *vsi = np->vsi;
8904 switch (cls_flower->command) {
8905 case FLOW_CLS_REPLACE:
8906 return i40e_configure_clsflower(vsi, cls_flower);
8907 case FLOW_CLS_DESTROY:
8908 return i40e_delete_clsflower(vsi, cls_flower);
8909 case FLOW_CLS_STATS:
8916 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8919 struct i40e_netdev_priv *np = cb_priv;
8921 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8925 case TC_SETUP_CLSFLOWER:
8926 return i40e_setup_tc_cls_flower(np, type_data);
8933 static LIST_HEAD(i40e_block_cb_list);
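/* __i40e_setup_tc below is installed as the driver's .ndo_setup_tc callback
* (via i40e_netdev_ops), so the offload entry points above are reached as:
*
*	TC_SETUP_QDISC_MQPRIO -> i40e_setup_tc()          (TC/channel config)
*	TC_SETUP_BLOCK        -> i40e_setup_tc_block_cb() (tc-flower filters)
*/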
8935 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8938 struct i40e_netdev_priv *np = netdev_priv(netdev);
8941 case TC_SETUP_QDISC_MQPRIO:
8942 return i40e_setup_tc(netdev, type_data);
8943 case TC_SETUP_BLOCK:
8944 return flow_block_cb_setup_simple(type_data,
8945 &i40e_block_cb_list,
8946 i40e_setup_tc_block_cb,
8954 * i40e_open - Called when a network interface is made active
8955 * @netdev: network interface device structure
8957 * The open entry point is called when a network interface is made
8958 * active by the system (IFF_UP). At this point all resources needed
8959 * for transmit and receive operations are allocated, the interrupt
8960 * handler is registered with the OS, the netdev watchdog subtask is
8961 * enabled, and the stack is notified that the interface is ready.
8963 * Returns 0 on success, negative value on failure
8965 int i40e_open(struct net_device *netdev)
8967 struct i40e_netdev_priv *np = netdev_priv(netdev);
8968 struct i40e_vsi *vsi = np->vsi;
8969 struct i40e_pf *pf = vsi->back;
8972 /* disallow open during test or if eeprom is broken */
8973 if (test_bit(__I40E_TESTING, pf->state) ||
8974 test_bit(__I40E_BAD_EEPROM, pf->state))
8977 netif_carrier_off(netdev);
8979 if (i40e_force_link_state(pf, true))
8982 err = i40e_vsi_open(vsi);
8986 /* configure global TSO hardware offload settings */
8987 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8988 TCP_FLAG_FIN) >> 16);
8989 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8991 TCP_FLAG_CWR) >> 16);
8992 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
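/* Editor's worked example: the TCP_FLAG_* constants are big-endian words
* with the flag bits in the upper 16 bits, so be32_to_cpu(...) >> 16 yields
* the raw mask the hardware expects, e.g.
* be32_to_cpu(TCP_FLAG_CWR) >> 16 == 0x0080 and
* be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN) >> 16 == 0x0009.
*/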
8993 udp_tunnel_get_rx_info(netdev);
8999 * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
9000 * @vsi: vsi structure
9002 * This updates netdev's number of tx/rx queues
9004 * Returns status of setting tx/rx queues
9006 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
9010 ret = netif_set_real_num_rx_queues(vsi->netdev,
9011 vsi->num_queue_pairs);
9015 return netif_set_real_num_tx_queues(vsi->netdev,
9016 vsi->num_queue_pairs);
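/* Editor's note: netif_set_real_num_{tx,rx}_queues() caps the queue indices
* the stack will actually use; e.g. with vsi->num_queue_pairs == 4, core
* transmit queue selection is limited to queues 0..3 even if the netdev was
* allocated with more.
*/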
9021 * @vsi: the VSI to open
9023 * Finish initialization of the VSI.
9025 * Returns 0 on success, negative value on failure
9027 * Note: expects to be called while under rtnl_lock()
9029 int i40e_vsi_open(struct i40e_vsi *vsi)
9031 struct i40e_pf *pf = vsi->back;
9032 char int_name[I40E_INT_NAME_STR_LEN];
9035 /* allocate descriptors */
9036 err = i40e_vsi_setup_tx_resources(vsi);
9039 err = i40e_vsi_setup_rx_resources(vsi);
9043 err = i40e_vsi_configure(vsi);
9048 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
9049 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
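/* e.g. this yields a vector name such as "i40e-eth0" for a netdev named
* eth0, since dev_driver_string() resolves to the driver's name here
* (editor's illustrative example)
*/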
9050 err = i40e_vsi_request_irq(vsi, int_name);
9054 /* Notify the stack of the actual queue counts. */
9055 err = i40e_netif_set_realnum_tx_rx_queues(vsi);
9057 goto err_set_queues;
9059 } else if (vsi->type == I40E_VSI_FDIR) {
9060 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
9061 dev_driver_string(&pf->pdev->dev),
9062 dev_name(&pf->pdev->dev));
9063 err = i40e_vsi_request_irq(vsi, int_name);
9072 err = i40e_up_complete(vsi);
9074 goto err_up_complete;
9081 i40e_vsi_free_irq(vsi);
9083 i40e_vsi_free_rx_resources(vsi);
9085 i40e_vsi_free_tx_resources(vsi);
9086 if (vsi == pf->vsi[pf->lan_vsi])
9087 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
9093 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
9094 * @pf: Pointer to PF
9096 * This function destroys the hlist where all the Flow Director
9097 * filters were saved.
9099 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
9101 struct i40e_fdir_filter *filter;
9102 struct i40e_flex_pit *pit_entry, *tmp;
9103 struct hlist_node *node2;
9105 hlist_for_each_entry_safe(filter, node2,
9106 &pf->fdir_filter_list, fdir_node) {
9107 hlist_del(&filter->fdir_node);
9111 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
9112 list_del(&pit_entry->list);
9115 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
9117 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
9118 list_del(&pit_entry->list);
9121 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
9123 pf->fdir_pf_active_filters = 0;
9124 i40e_reset_fdir_filter_cnt(pf);
9126 /* Reprogram the default input set for TCP/IPv4 */
9127 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9128 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9129 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9131 /* Reprogram the default input set for TCP/IPv6 */
9132 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
9133 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9134 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9136 /* Reprogram the default input set for UDP/IPv4 */
9137 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
9138 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9139 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9141 /* Reprogram the default input set for UDP/IPv6 */
9142 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
9143 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9144 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9146 /* Reprogram the default input set for SCTP/IPv4 */
9147 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
9148 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9149 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9151 /* Reprogram the default input set for SCTP/IPv6 */
9152 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
9153 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9154 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9156 /* Reprogram the default input set for Other/IPv4 */
9157 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
9158 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9160 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
9161 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9163 /* Reprogram the default input set for Other/IPv6 */
9164 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
9165 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9167 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
9168 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9172 * i40e_cloud_filter_exit - Cleans up the cloud filters
9173 * @pf: Pointer to PF
* This function destroys the hlist where all the cloud filters were saved.
9178 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
9180 struct i40e_cloud_filter *cfilter;
9181 struct hlist_node *node;
9183 hlist_for_each_entry_safe(cfilter, node,
9184 &pf->cloud_filter_list, cloud_node) {
9185 hlist_del(&cfilter->cloud_node);
9188 pf->num_cloud_filters = 0;
9190 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
9191 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
9192 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
9193 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
9194 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
9199 * i40e_close - Disables a network interface
9200 * @netdev: network interface device structure
9202 * The close entry point is called when an interface is de-activated
9203 * by the OS. The hardware is still under the driver's control, but
9204 * this netdev interface is disabled.
9206 * Returns 0, this is not allowed to fail
9208 int i40e_close(struct net_device *netdev)
9210 struct i40e_netdev_priv *np = netdev_priv(netdev);
9211 struct i40e_vsi *vsi = np->vsi;
9213 i40e_vsi_close(vsi);
9219 * i40e_do_reset - Start a PF or Core Reset sequence
9220 * @pf: board private structure
9221 * @reset_flags: which reset is requested
9222 * @lock_acquired: indicates whether or not the lock has been acquired
9223 * before this function was called.
9225 * The essential difference in resets is that the PF Reset
9226 * doesn't clear the packet buffers, doesn't reset the PE
9227 * firmware, and doesn't bother the other PFs on the chip.
9229 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
9233 /* do the biggest reset indicated */
9234 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9236 /* Request a Global Reset
9238 * This will start the chip's countdown to the actual full
9239 * chip reset event, and a warning interrupt to be sent
9240 * to all PFs, including the requestor. Our handler
9241 * for the warning interrupt will deal with the shutdown
9242 * and recovery of the switch setup.
9244 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9245 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9246 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9247 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9249 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9251 /* Request a Core Reset
9253 * Same as Global Reset, except does *not* include the MAC/PHY
9255 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9256 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9257 val |= I40E_GLGEN_RTRIG_CORER_MASK;
9258 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9259 i40e_flush(&pf->hw);
9261 } else if (reset_flags & I40E_PF_RESET_FLAG) {
9263 /* Request a PF Reset
9265 * Resets only the PF-specific registers
9267 * This goes directly to the tear-down and rebuild of
9268 * the switch, since we need to do all the recovery as
9269 * for the Core Reset.
9271 dev_dbg(&pf->pdev->dev, "PFR requested\n");
9272 i40e_handle_reset_warning(pf, lock_acquired);
9274 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9275 /* Request a PF Reset
9277 * Resets PF and reinitializes PFs VSI.
9279 i40e_prep_for_reset(pf);
9280 i40e_reset_and_rebuild(pf, true, lock_acquired);
9281 dev_info(&pf->pdev->dev,
9282 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
9283 "FW LLDP is disabled\n" :
9284 "FW LLDP is enabled\n");
9286 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9289 /* Find the VSI(s) that requested a re-init */
9290 dev_info(&pf->pdev->dev,
9291 "VSI reinit requested\n");
9292 for (v = 0; v < pf->num_alloc_vsi; v++) {
9293 struct i40e_vsi *vsi = pf->vsi[v];
9296 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
9298 i40e_vsi_reinit_locked(pf->vsi[v]);
9300 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9303 /* Find the VSI(s) that needs to be brought down */
9304 dev_info(&pf->pdev->dev, "VSI down requested\n");
9305 for (v = 0; v < pf->num_alloc_vsi; v++) {
9306 struct i40e_vsi *vsi = pf->vsi[v];
9309 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
9311 set_bit(__I40E_VSI_DOWN, vsi->state);
9316 dev_info(&pf->pdev->dev,
9317 "bad reset request 0x%08x\n", reset_flags);
9321 #ifdef CONFIG_I40E_DCB
9323 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9324 * @pf: board private structure
9325 * @old_cfg: current DCB config
9326 * @new_cfg: new DCB config
9328 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9329 struct i40e_dcbx_config *old_cfg,
9330 struct i40e_dcbx_config *new_cfg)
9332 bool need_reconfig = false;
9334 /* Check if ETS configuration has changed */
9335 if (memcmp(&new_cfg->etscfg,
9337 sizeof(new_cfg->etscfg))) {
9338 /* If Priority Table has changed reconfig is needed */
9339 if (memcmp(&new_cfg->etscfg.prioritytable,
9340 &old_cfg->etscfg.prioritytable,
9341 sizeof(new_cfg->etscfg.prioritytable))) {
9342 need_reconfig = true;
9343 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9346 if (memcmp(&new_cfg->etscfg.tcbwtable,
9347 &old_cfg->etscfg.tcbwtable,
9348 sizeof(new_cfg->etscfg.tcbwtable)))
9349 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9351 if (memcmp(&new_cfg->etscfg.tsatable,
9352 &old_cfg->etscfg.tsatable,
9353 sizeof(new_cfg->etscfg.tsatable)))
9354 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9357 /* Check if PFC configuration has changed */
9358 if (memcmp(&new_cfg->pfc,
9360 sizeof(new_cfg->pfc))) {
9361 need_reconfig = true;
9362 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9365 /* Check if APP Table has changed */
9366 if (memcmp(&new_cfg->app,
9368 sizeof(new_cfg->app))) {
9369 need_reconfig = true;
9370 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9373 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9374 return need_reconfig;
9378 * i40e_handle_lldp_event - Handle LLDP Change MIB event
9379 * @pf: board private structure
9380 * @e: event info posted on ARQ
9382 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9383 struct i40e_arq_event_info *e)
9385 struct i40e_aqc_lldp_get_mib *mib =
9386 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9387 struct i40e_hw *hw = &pf->hw;
9388 struct i40e_dcbx_config tmp_dcbx_cfg;
9389 bool need_reconfig = false;
9393 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9394 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9395 (hw->phy.link_info.link_speed &
9396 ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9397 !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9398 /* let firmware decide if the DCB should be disabled */
9399 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9401 /* Not DCB capable or capability disabled */
9402 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
9405 /* Ignore if event is not for Nearest Bridge */
9406 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9407 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9408 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9409 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9412 /* Check MIB Type and return if event for Remote MIB update */
9413 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9414 dev_dbg(&pf->pdev->dev,
9415 "LLDP event mib type %s\n", type ? "remote" : "local");
9416 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9417 /* Update the remote cached instance and return */
9418 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9419 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9420 &hw->remote_dcbx_config);
9424 /* Store the old configuration */
9425 tmp_dcbx_cfg = hw->local_dcbx_config;
9427 /* Reset the old DCBx configuration data */
9428 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9429 /* Get updated DCBX data from firmware */
9430 ret = i40e_get_dcb_config(&pf->hw);
9432 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9433 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9434 (hw->phy.link_info.link_speed &
9435 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9436 dev_warn(&pf->pdev->dev,
9437 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9438 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9440 dev_info(&pf->pdev->dev,
9441 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
9442 i40e_stat_str(&pf->hw, ret),
9443 i40e_aq_str(&pf->hw,
9444 pf->hw.aq.asq_last_status));
9449 /* No change detected in DCBX configs */
9450 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9451 sizeof(tmp_dcbx_cfg))) {
9452 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9456 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9457 &hw->local_dcbx_config);
9459 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9464 /* Enable DCB tagging only when more than one TC */
9465 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9466 pf->flags |= I40E_FLAG_DCB_ENABLED;
9468 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9470 set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed; quiesce all VSIs */
9472 i40e_pf_quiesce_all_vsi(pf);
9474 /* Changes in configuration update VEB/VSI */
9475 i40e_dcb_reconfigure(pf);
9477 ret = i40e_resume_port_tx(pf);
9479 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9480 /* In case of error no point in resuming VSIs */
9484 /* Wait for the PF's queues to be disabled */
9485 ret = i40e_pf_wait_queues_disabled(pf);
9487 /* Schedule PF reset to recover */
9488 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9489 i40e_service_event_schedule(pf);
9491 i40e_pf_unquiesce_all_vsi(pf);
9492 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9493 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9499 #endif /* CONFIG_I40E_DCB */
9502 * i40e_do_reset_safe - Protected reset path for userland calls.
9503 * @pf: board private structure
9504 * @reset_flags: which reset is requested
9507 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9510 i40e_do_reset(pf, reset_flags, true);
9515 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9516 * @pf: board private structure
9517 * @e: event info posted on ARQ
9519 * Handler for LAN Queue Overflow Event generated by the firmware for PF
9522 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9523 struct i40e_arq_event_info *e)
9525 struct i40e_aqc_lan_overflow *data =
9526 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9527 u32 queue = le32_to_cpu(data->prtdcb_rupto);
9528 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9529 struct i40e_hw *hw = &pf->hw;
9533 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9536 /* Queue belongs to VF, find the VF and issue VF reset */
9537 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9538 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
9539 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9540 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9541 vf_id -= hw->func_caps.vf_base_id;
9542 vf = &pf->vf[vf_id];
9543 i40e_vc_notify_vf_reset(vf);
9544 /* Allow VF to process pending reset notification */
9546 i40e_reset_vf(vf, false);
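/* Editor's worked example: with hw->func_caps.vf_base_id == 64 and a VFVM
* index of 66 decoded from QTX_CTL, the overflowing queue belongs to
* pf->vf[2], which is then notified and reset.
*/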
9551 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9552 * @pf: board private structure
9554 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9558 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9559 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9564 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9565 * @pf: board private structure
9567 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9571 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9572 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9573 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
9574 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
9579 * i40e_get_global_fd_count - Get total FD filters programmed on device
9580 * @pf: board private structure
9582 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9586 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9587 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9588 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
9589 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
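/* Editor's note on the counters above: the hardware tracks a guaranteed
* bucket and a best-effort bucket. For example, a FDSTAT value decoding to
* GUARANT_CNT == 100 and BEST_CNT == 20 makes i40e_get_current_fd_count()
* return 120, while i40e_get_cur_guaranteed_fd_count() returns only 100.
*/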
9594 * i40e_reenable_fdir_sb - Restore FDir SB capability
9595 * @pf: board private structure
9597 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9599 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9600 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9601 (I40E_DEBUG_FD & pf->hw.debug_mask))
9602 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9606 * i40e_reenable_fdir_atr - Restore FDir ATR capability
9607 * @pf: board private structure
9609 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9611 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9612 /* ATR uses the same filtering logic as SB rules. It only
9613 * functions properly if the input set mask is at the default
9614 * settings. It is safe to restore the default input set
9615 * because there are no active TCPv4 filter rules.
9617 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9618 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9619 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9621 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9622 (I40E_DEBUG_FD & pf->hw.debug_mask))
9623 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9628 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9629 * @pf: board private structure
9630 * @filter: FDir filter to remove
9632 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9633 struct i40e_fdir_filter *filter)
9635 /* Update counters */
9636 pf->fdir_pf_active_filters--;
9639 switch (filter->flow_type) {
9641 pf->fd_tcp4_filter_cnt--;
9644 pf->fd_udp4_filter_cnt--;
9647 pf->fd_sctp4_filter_cnt--;
9650 pf->fd_tcp6_filter_cnt--;
9653 pf->fd_udp6_filter_cnt--;
pf->fd_sctp6_filter_cnt--;
9659 switch (filter->ipl4_proto) {
9661 pf->fd_tcp4_filter_cnt--;
9664 pf->fd_udp4_filter_cnt--;
9667 pf->fd_sctp4_filter_cnt--;
9670 pf->fd_ip4_filter_cnt--;
9674 case IPV6_USER_FLOW:
9675 switch (filter->ipl4_proto) {
9677 pf->fd_tcp6_filter_cnt--;
9680 pf->fd_udp6_filter_cnt--;
9683 pf->fd_sctp6_filter_cnt--;
9686 pf->fd_ip6_filter_cnt--;
9692 /* Remove the filter from the list and free memory */
9693 hlist_del(&filter->fdir_node);
* i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9699 * @pf: board private structure
9701 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9703 struct i40e_fdir_filter *filter;
9704 u32 fcnt_prog, fcnt_avail;
9705 struct hlist_node *node;
9707 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9710 /* Check if we have enough room to re-enable FDir SB capability. */
9711 fcnt_prog = i40e_get_global_fd_count(pf);
9712 fcnt_avail = pf->fdir_pf_filter_count;
9713 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9714 (pf->fd_add_err == 0) ||
9715 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9716 i40e_reenable_fdir_sb(pf);
9718 /* We should wait for even more space before re-enabling ATR.
* Additionally, we cannot enable ATR as long as we still have TCP SB
* filters active.
*/
9722 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9723 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9724 i40e_reenable_fdir_atr(pf);
9726 /* if hw had a problem adding a filter, delete it */
9727 if (pf->fd_inv > 0) {
9728 hlist_for_each_entry_safe(filter, node,
9729 &pf->fdir_filter_list, fdir_node)
9730 if (filter->fd_id == pf->fd_inv)
9731 i40e_delete_invalid_filter(pf, filter);
9735 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9736 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9738 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9739 * @pf: board private structure
9741 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9743 unsigned long min_flush_time;
9744 int flush_wait_retry = 50;
9745 bool disable_atr = false;
9749 if (!time_after(jiffies, pf->fd_flush_timestamp +
9750 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
/* If the flush is happening too quickly and we have mostly SB rules, we
9754 * should not re-enable ATR for some time.
9756 min_flush_time = pf->fd_flush_timestamp +
9757 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9758 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9760 if (!(time_after(jiffies, min_flush_time)) &&
9761 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9762 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9763 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9767 pf->fd_flush_timestamp = jiffies;
9768 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9769 /* flush all filters */
9770 wr32(&pf->hw, I40E_PFQF_CTL_1,
9771 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9772 i40e_flush(&pf->hw);
9776 /* Check FD flush status every 5-6msec */
9777 usleep_range(5000, 6000);
9778 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9779 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9781 } while (flush_wait_retry--);
9782 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9783 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9785 /* replay sideband filters */
9786 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9787 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9788 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9789 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9790 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9791 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9796 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9797 * @pf: board private structure
9799 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9801 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9805 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9806 * @pf: board private structure
9808 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9811 /* if interface is down do nothing */
9812 if (test_bit(__I40E_DOWN, pf->state))
9815 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9816 i40e_fdir_flush_and_replay(pf);
9818 i40e_fdir_check_and_reenable(pf);
9823 * i40e_vsi_link_event - notify VSI of a link event
9824 * @vsi: vsi to be notified
9825 * @link_up: link up or down
9827 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9829 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9832 switch (vsi->type) {
9834 if (!vsi->netdev || !vsi->netdev_registered)
9838 netif_carrier_on(vsi->netdev);
9839 netif_tx_wake_all_queues(vsi->netdev);
9841 netif_carrier_off(vsi->netdev);
9842 netif_tx_stop_all_queues(vsi->netdev);
9846 case I40E_VSI_SRIOV:
9847 case I40E_VSI_VMDQ2:
9849 case I40E_VSI_IWARP:
9850 case I40E_VSI_MIRROR:
9852 /* there is no notification for other VSIs */
9858 * i40e_veb_link_event - notify elements on the veb of a link event
9859 * @veb: veb to be notified
9860 * @link_up: link up or down
9862 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9867 if (!veb || !veb->pf)
9871 /* depth first... */
9872 for (i = 0; i < I40E_MAX_VEB; i++)
9873 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9874 i40e_veb_link_event(pf->veb[i], link_up);
9876 /* ... now the local VSIs */
9877 for (i = 0; i < pf->num_alloc_vsi; i++)
9878 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9879 i40e_vsi_link_event(pf->vsi[i], link_up);
9883 * i40e_link_event - Update netif_carrier status
9884 * @pf: board private structure
9886 static void i40e_link_event(struct i40e_pf *pf)
9888 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9889 u8 new_link_speed, old_link_speed;
9891 bool new_link, old_link;
9892 #ifdef CONFIG_I40E_DCB
9894 #endif /* CONFIG_I40E_DCB */
9896 /* set this to force the get_link_status call to refresh state */
9897 pf->hw.phy.get_link_info = true;
9898 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9899 status = i40e_get_link_status(&pf->hw, &new_link);
9901 /* On success, disable temp link polling */
9902 if (status == I40E_SUCCESS) {
9903 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9905 /* Enable link polling temporarily until i40e_get_link_status
9906 * returns I40E_SUCCESS
9908 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9909 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9914 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9915 new_link_speed = pf->hw.phy.link_info.link_speed;
9917 if (new_link == old_link &&
9918 new_link_speed == old_link_speed &&
9919 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9920 new_link == netif_carrier_ok(vsi->netdev)))
9923 i40e_print_link_message(vsi, new_link);
9925 /* Notify the base of the switch tree connected to
9926 * the link. Floating VEBs are not notified.
9928 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9929 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9931 i40e_vsi_link_event(vsi, new_link);
9934 i40e_vc_notify_link_state(pf);
9936 if (pf->flags & I40E_FLAG_PTP)
9937 i40e_ptp_set_increment(pf);
9938 #ifdef CONFIG_I40E_DCB
9939 if (new_link == old_link)
9941 /* Not SW DCB so firmware will take care of default settings */
9942 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
/* We only handle link down here; after link up, in the SW DCB case, the
* SW LLDP agent will take care of setting it up
*/
9949 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9950 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9951 err = i40e_dcb_sw_default_config(pf);
9953 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
9954 I40E_FLAG_DCB_ENABLED);
9956 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9957 DCB_CAP_DCBX_VER_IEEE;
9958 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9959 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9962 #endif /* CONFIG_I40E_DCB */
9966 * i40e_watchdog_subtask - periodic checks not using event driven response
9967 * @pf: board private structure
9969 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9973 /* if interface is down do nothing */
9974 if (test_bit(__I40E_DOWN, pf->state) ||
9975 test_bit(__I40E_CONFIG_BUSY, pf->state))
9978 /* make sure we don't do these things too often */
9979 if (time_before(jiffies, (pf->service_timer_previous +
9980 pf->service_timer_period)))
9982 pf->service_timer_previous = jiffies;
9984 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9985 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9986 i40e_link_event(pf);
9988 /* Update the stats for active netdevs so the network stack
9989 * can look at updated numbers whenever it cares to
9991 for (i = 0; i < pf->num_alloc_vsi; i++)
9992 if (pf->vsi[i] && pf->vsi[i]->netdev)
9993 i40e_update_stats(pf->vsi[i]);
9995 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9996 /* Update the stats for the active switching components */
9997 for (i = 0; i < I40E_MAX_VEB; i++)
9999 i40e_update_veb_stats(pf->veb[i]);
10002 i40e_ptp_rx_hang(pf);
10003 i40e_ptp_tx_hang(pf);
10007 * i40e_reset_subtask - Set up for resetting the device and driver
10008 * @pf: board private structure
10010 static void i40e_reset_subtask(struct i40e_pf *pf)
10012 u32 reset_flags = 0;
10014 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
10015 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
10016 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
10018 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
10019 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
10020 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
10022 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
10023 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
10024 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
10026 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
10027 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
10028 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
10030 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
10031 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
10032 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
/* If there's a recovery already waiting, it takes
* precedence over starting a new reset sequence.
*/
10038 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
10039 i40e_prep_for_reset(pf);
10041 i40e_rebuild(pf, false, false);
10044 /* If we're already down or resetting, just bail */
10046 !test_bit(__I40E_DOWN, pf->state) &&
10047 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
10048 i40e_do_reset(pf, reset_flags, false);
10053 * i40e_handle_link_event - Handle link event
10054 * @pf: board private structure
10055 * @e: event info posted on ARQ
10057 static void i40e_handle_link_event(struct i40e_pf *pf,
10058 struct i40e_arq_event_info *e)
10060 struct i40e_aqc_get_link_status *status =
10061 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
10063 /* Do a new status request to re-enable LSE reporting
10064 * and load new status information into the hw struct
10065 * This completely ignores any state information
10066 * in the ARQ event info, instead choosing to always
10067 * issue the AQ update link status command.
10069 i40e_link_event(pf);
10071 /* Check if module meets thermal requirements */
10072 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
10073 dev_err(&pf->pdev->dev,
10074 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
10075 dev_err(&pf->pdev->dev,
10076 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10078 /* check for unqualified module, if link is down, suppress
10079 * the message if link was forced to be down.
10081 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
10082 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
10083 (!(status->link_info & I40E_AQ_LINK_UP)) &&
10084 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
10085 dev_err(&pf->pdev->dev,
10086 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
10087 dev_err(&pf->pdev->dev,
10088 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10094 * i40e_clean_adminq_subtask - Clean the AdminQ rings
10095 * @pf: board private structure
10097 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
10099 struct i40e_arq_event_info event;
10100 struct i40e_hw *hw = &pf->hw;
10101 u16 pending, i = 0;
10107 /* Do not run clean AQ when PF reset fails */
10108 if (test_bit(__I40E_RESET_FAILED, pf->state))
10111 /* check for error indications */
10112 val = rd32(&pf->hw, pf->hw.aq.arq.len);
10114 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
10115 if (hw->debug_mask & I40E_DEBUG_AQ)
10116 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
10117 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
10119 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
10120 if (hw->debug_mask & I40E_DEBUG_AQ)
10121 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
10122 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
10123 pf->arq_overflows++;
10125 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
10126 if (hw->debug_mask & I40E_DEBUG_AQ)
10127 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
10128 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
10131 wr32(&pf->hw, pf->hw.aq.arq.len, val);
10133 val = rd32(&pf->hw, pf->hw.aq.asq.len);
10135 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
10136 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10137 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
10138 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
10140 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
10141 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10142 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
10143 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
10145 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
10146 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10147 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
10148 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
10151 wr32(&pf->hw, pf->hw.aq.asq.len, val);
10153 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
10154 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
10155 if (!event.msg_buf)
10159 ret = i40e_clean_arq_element(hw, &event, &pending);
10160 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
10163 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
10167 opcode = le16_to_cpu(event.desc.opcode);
10170 case i40e_aqc_opc_get_link_status:
10172 i40e_handle_link_event(pf, &event);
10175 case i40e_aqc_opc_send_msg_to_pf:
10176 ret = i40e_vc_process_vf_msg(pf,
10177 le16_to_cpu(event.desc.retval),
10178 le32_to_cpu(event.desc.cookie_high),
10179 le32_to_cpu(event.desc.cookie_low),
10183 case i40e_aqc_opc_lldp_update_mib:
10184 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
10185 #ifdef CONFIG_I40E_DCB
10187 i40e_handle_lldp_event(pf, &event);
10189 #endif /* CONFIG_I40E_DCB */
10191 case i40e_aqc_opc_event_lan_overflow:
10192 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
10193 i40e_handle_lan_overflow_event(pf, &event);
10195 case i40e_aqc_opc_send_msg_to_peer:
10196 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
10198 case i40e_aqc_opc_nvm_erase:
10199 case i40e_aqc_opc_nvm_update:
10200 case i40e_aqc_opc_oem_post_update:
10201 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
10202 "ARQ NVM operation 0x%04x completed\n",
10206 dev_info(&pf->pdev->dev,
10207 "ARQ: Unknown event 0x%04x ignored\n",
10211 } while (i++ < pf->adminq_work_limit);
10213 if (i < pf->adminq_work_limit)
10214 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
10216 /* re-enable Admin queue interrupt cause */
10217 val = rd32(hw, I40E_PFINT_ICR0_ENA);
10218 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
10219 wr32(hw, I40E_PFINT_ICR0_ENA, val);
10222 kfree(event.msg_buf);
10226 * i40e_verify_eeprom - make sure eeprom is good to use
10227 * @pf: board private structure
10229 static void i40e_verify_eeprom(struct i40e_pf *pf)
10233 err = i40e_diag_eeprom_test(&pf->hw);
10235 /* retry in case of garbage read */
10236 err = i40e_diag_eeprom_test(&pf->hw);
10238 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
10240 set_bit(__I40E_BAD_EEPROM, pf->state);
10244 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
10245 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
10246 clear_bit(__I40E_BAD_EEPROM, pf->state);
10251 * i40e_enable_pf_switch_lb
10252 * @pf: pointer to the PF structure
* enable switch loopback or die - no point in a return value
10256 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10258 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10259 struct i40e_vsi_context ctxt;
10262 ctxt.seid = pf->main_vsi_seid;
10263 ctxt.pf_num = pf->hw.pf_id;
10265 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10267 dev_info(&pf->pdev->dev,
10268 "couldn't get PF vsi config, err %s aq_err %s\n",
10269 i40e_stat_str(&pf->hw, ret),
10270 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10273 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10274 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10275 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10277 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10279 dev_info(&pf->pdev->dev,
10280 "update vsi switch failed, err %s aq_err %s\n",
10281 i40e_stat_str(&pf->hw, ret),
10282 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10287 * i40e_disable_pf_switch_lb
10288 * @pf: pointer to the PF structure
* disable switch loopback or die - no point in a return value
10292 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10294 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10295 struct i40e_vsi_context ctxt;
10298 ctxt.seid = pf->main_vsi_seid;
10299 ctxt.pf_num = pf->hw.pf_id;
10301 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10303 dev_info(&pf->pdev->dev,
10304 "couldn't get PF vsi config, err %s aq_err %s\n",
10305 i40e_stat_str(&pf->hw, ret),
10306 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10309 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10310 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10311 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10313 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10315 dev_info(&pf->pdev->dev,
10316 "update vsi switch failed, err %s aq_err %s\n",
10317 i40e_stat_str(&pf->hw, ret),
10318 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10323 * i40e_config_bridge_mode - Configure the HW bridge mode
10324 * @veb: pointer to the bridge instance
* Configure the loopback mode for the LAN VSI that is downlink to the
* specified HW bridge instance. This function is expected to be called
* when a new HW bridge is instantiated.
10330 static void i40e_config_bridge_mode(struct i40e_veb *veb)
10332 struct i40e_pf *pf = veb->pf;
10334 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10335 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10336 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10337 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
10338 i40e_disable_pf_switch_lb(pf);
10340 i40e_enable_pf_switch_lb(pf);
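/* The bridge mode itself is selected from userspace through
* ndo_bridge_setlink, e.g. with iproute2 (editor's illustration):
*
*	bridge link set dev eth0 hwmode vepa
*
* VEPA turns off the internal loopback so VM-to-VM traffic hairpins through
* the adjacent switch; VEB keeps the loopback enabled.
*/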
10344 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10345 * @veb: pointer to the VEB instance
10347 * This is a recursive function that first builds the attached VSIs then
10348 * recurses in to build the next layer of VEB. We track the connections
* through our own index numbers because the SEIDs from the HW could
10350 * change across the reset.
10352 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10354 struct i40e_vsi *ctl_vsi = NULL;
10355 struct i40e_pf *pf = veb->pf;
10359 /* build VSI that owns this VEB, temporarily attached to base VEB */
10360 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
10362 pf->vsi[v]->veb_idx == veb->idx &&
10363 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
10364 ctl_vsi = pf->vsi[v];
10369 dev_info(&pf->pdev->dev,
10370 "missing owner VSI for veb_idx %d\n", veb->idx);
10372 goto end_reconstitute;
10374 if (ctl_vsi != pf->vsi[pf->lan_vsi])
10375 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10376 ret = i40e_add_vsi(ctl_vsi);
10378 dev_info(&pf->pdev->dev,
10379 "rebuild of veb_idx %d owner VSI failed: %d\n",
10381 goto end_reconstitute;
10383 i40e_vsi_reset_stats(ctl_vsi);
10385 /* create the VEB in the switch and move the VSI onto the VEB */
10386 ret = i40e_add_veb(veb, ctl_vsi);
10388 goto end_reconstitute;
10390 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10391 veb->bridge_mode = BRIDGE_MODE_VEB;
10393 veb->bridge_mode = BRIDGE_MODE_VEPA;
10394 i40e_config_bridge_mode(veb);
10396 /* create the remaining VSIs attached to this VEB */
10397 for (v = 0; v < pf->num_alloc_vsi; v++) {
10398 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10401 if (pf->vsi[v]->veb_idx == veb->idx) {
10402 struct i40e_vsi *vsi = pf->vsi[v];
10404 vsi->uplink_seid = veb->seid;
10405 ret = i40e_add_vsi(vsi);
10407 dev_info(&pf->pdev->dev,
10408 "rebuild of vsi_idx %d failed: %d\n",
10410 goto end_reconstitute;
10412 i40e_vsi_reset_stats(vsi);
10416 /* create any VEBs attached to this VEB - RECURSION */
10417 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10418 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10419 pf->veb[veb_idx]->uplink_seid = veb->seid;
10420 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10431 * i40e_get_capabilities - get info about the HW
10432 * @pf: the PF struct
10433 * @list_type: AQ capability to be queried
10435 static int i40e_get_capabilities(struct i40e_pf *pf,
10436 enum i40e_admin_queue_opc list_type)
10438 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10443 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
10445 cap_buf = kzalloc(buf_len, GFP_KERNEL);
10449 /* this loads the data into the hw struct for us */
10450 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10451 &data_size, list_type,
10453 /* data loaded, buffer no longer needed */
10456 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10457 /* retry with a larger buffer */
10458 buf_len = data_size;
10459 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10460 dev_info(&pf->pdev->dev,
10461 "capability discovery failed, err %s aq_err %s\n",
10462 i40e_stat_str(&pf->hw, err),
10463 i40e_aq_str(&pf->hw,
10464 pf->hw.aq.asq_last_status));
10469 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10470 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10471 dev_info(&pf->pdev->dev,
10472 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10473 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10474 pf->hw.func_caps.num_msix_vectors,
10475 pf->hw.func_caps.num_msix_vectors_vf,
10476 pf->hw.func_caps.fd_filters_guaranteed,
10477 pf->hw.func_caps.fd_filters_best_effort,
10478 pf->hw.func_caps.num_tx_qp,
10479 pf->hw.func_caps.num_vsis);
10480 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10481 dev_info(&pf->pdev->dev,
10482 "switch_mode=0x%04x, function_valid=0x%08x\n",
10483 pf->hw.dev_caps.switch_mode,
10484 pf->hw.dev_caps.valid_functions);
10485 dev_info(&pf->pdev->dev,
10486 "SR-IOV=%d, num_vfs for all function=%u\n",
10487 pf->hw.dev_caps.sr_iov_1_1,
10488 pf->hw.dev_caps.num_vfs);
10489 dev_info(&pf->pdev->dev,
10490 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10491 pf->hw.dev_caps.num_vsis,
10492 pf->hw.dev_caps.num_rx_qp,
10493 pf->hw.dev_caps.num_tx_qp);
10496 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10497 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10498 + pf->hw.func_caps.num_vfs)
10499 if (pf->hw.revision_id == 0 &&
10500 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10501 dev_info(&pf->pdev->dev,
10502 "got num_vsis %d, setting num_vsis to %d\n",
10503 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10504 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
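/* Editor's worked example: with fcoe == 0 and num_vfs == 32, DEF_NUM_VSI
* evaluates to 1 + 0 + 32 = 33, so a revision_id 0 part reporting fewer
* VSIs is bumped up to 33 here.
*/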
10510 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10513 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10514 * @pf: board private structure
10516 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10518 struct i40e_vsi *vsi;
/* quick workaround for an NVM issue that leaves a critical register
* uninitialized
*/
10523 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10524 static const u32 hkey[] = {
10525 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10526 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10527 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10531 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10532 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10535 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
10538 /* find existing VSI and see if it needs configuring */
10539 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10541 /* create a new VSI if none exists */
10543 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10544 pf->vsi[pf->lan_vsi]->seid, 0);
10546 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10547 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10548 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10553 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10557 * i40e_fdir_teardown - release the Flow Director resources
10558 * @pf: board private structure
10560 static void i40e_fdir_teardown(struct i40e_pf *pf)
10562 struct i40e_vsi *vsi;
10564 i40e_fdir_filter_exit(pf);
10565 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10567 i40e_vsi_release(vsi);
10571 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10572 * @vsi: PF main vsi
10573 * @seid: seid of main or channel VSIs
10575 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10576 * existed before reset
10578 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10580 struct i40e_cloud_filter *cfilter;
10581 struct i40e_pf *pf = vsi->back;
10582 struct hlist_node *node;
10585 /* Add cloud filters back if they exist */
10586 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10588 if (cfilter->seid != seid)
10591 if (cfilter->dst_port)
10592 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10595 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10598 dev_dbg(&pf->pdev->dev,
10599 "Failed to rebuild cloud filter, err %s aq_err %s\n",
10600 i40e_stat_str(&pf->hw, ret),
10601 i40e_aq_str(&pf->hw,
10602 pf->hw.aq.asq_last_status));
/**
 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main vsi
 *
 * Rebuilds channel VSIs if they existed before reset
 **/
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	i40e_status ret;

	if (list_empty(&vsi->ch_list))
		return 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			break;
		/* Proceed with creation of channel (VMDq2) VSI */
		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to rebuild channels using uplink_seid %u\n",
				 vsi->uplink_seid);
			return ret;
		}
		/* Reconfigure TX queues using QTX_CTL register */
		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to configure TX rings for channel %u\n",
				 ch->seid);
			return ret;
		}
		/* update 'next_base_queue' */
		vsi->next_base_queue = vsi->next_base_queue +
							ch->num_queue_pairs;
		if (ch->max_tx_rate) {
			u64 credits = ch->max_tx_rate;

			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

			do_div(credits, I40E_BW_CREDIT_DIVISOR);
			dev_dbg(&vsi->back->pdev->dev,
				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
				ch->max_tx_rate,
				credits,
				ch->seid);
		}
		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
		if (ret) {
			dev_dbg(&vsi->back->pdev->dev,
				"Failed to rebuild cloud filters for channel VSI %u\n",
				ch->seid);
			return ret;
		}
	}
	return 0;
}

/**
 * i40e_clean_xps_state - clean xps state for every tx_ring
 * @vsi: ptr to the VSI
 **/
static void i40e_clean_xps_state(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings)
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i])
				clear_bit(__I40E_TX_XPS_INIT_DONE,
					  vsi->tx_rings[i]->state);
}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v]) {
			i40e_clean_xps_state(pf->vsi[v]);
			pf->vsi[v]->seid = 0;
		}
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}

	/* Save the current PTP time so that we can restore the time after the
	 * reset completes.
	 */
	i40e_ptp_save_hw_time(pf);
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = 0xff;
	dv.minor_version = 0xff;
	dv.build_version = 0xff;
	dv.subbuild_version = 0;
	strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}

/**
 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}

/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
	    is_recovery_mode_reported)
		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);

	if (test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RECOVERY_MODE, pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
		/* The following delay is necessary for firmware update. */
		mdelay(1000);
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	/* if we are going out of or into recovery mode we have to act
	 * accordingly with regard to resources initialization
	 * and deinitialization
	 */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		if (i40e_get_capabilities(pf,
					  i40e_aqc_opc_list_func_capabilities))
			goto end_unlock;

		if (is_recovery_mode_reported) {
			/* we're staying in recovery mode so we'll reinitialize
			 * misc vector here
			 */
			if (i40e_setup_misc_vector_for_recovery_mode(pf))
				goto end_unlock;
		} else {
			if (!lock_acquired)
				rtnl_lock();
			/* we're going out of recovery mode so we'll free
			 * the IRQ allocated specifically for recovery mode
			 * and restore the interrupt scheme
			 */
			free_irq(pf->pdev->irq, pf);
			i40e_clear_interrupt_scheme(pf);
			if (i40e_restore_interrupt_scheme(pf))
				goto end_unlock;
		}

		/* tell the firmware that we're starting */
		i40e_send_version(pf);

		/* bail out in case recovery mode was detected, as there is
		 * no need for further configuration.
		 */
		goto end_unlock;
	}

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	/* Enable FW to write a default DCB config on link-up
	 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
	 * is not supported with new link speed
	 */
	if (i40e_is_tc_mqprio_enabled(pf)) {
		i40e_aq_set_dcb_parameters(hw, false, NULL);
	} else {
		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
		    (hw->phy.link_info.link_speed &
		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
			i40e_aq_set_dcb_parameters(hw, false, NULL);
			dev_warn(&pf->pdev->dev,
				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		} else {
			i40e_aq_set_dcb_parameters(hw, true, NULL);
			ret = i40e_init_pf_dcb(pf);
			if (ret) {
				dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
					 ret);
				pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
				/* Continue without DCB enabled */
			}
		}
	}

#endif /* CONFIG_I40E_DCB */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit, true);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
						  vsi->mqprio_qopt.max_rate[0]);
		u64 credits = 0;

		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate,
			credits,
			vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	/* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}

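/* Overview of the reset path implemented above: i40e_prep_for_reset()
 * quiesces the VSIs and shuts down the AdminQ/HMC, i40e_reset() issues
 * the actual PF reset, and i40e_rebuild() re-creates the AdminQ, HMC,
 * switch configuration, VEBs/VSIs and VF state from the saved software
 * config.  i40e_reset_and_rebuild() below simply chains the last two
 * steps.
 */
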
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;

	if (test_bit(__I40E_IN_REMOVE, pf->state))
		return;
	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

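/* Field decode used above, shown with illustrative numbers: each MDD
 * cause register packs several fields that are extracted as
 * (reg & MASK) >> SHIFT.  For example, a raw I40E_GL_MDET_TX value whose
 * QUEUE field decodes to 40 on a function with base_queue = 32 reports
 * the event on local TX queue 8 (40 - 32); writing 0xffffffff clears the
 * latched event.
 */
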
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_SUSPENDED, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
		i40e_sync_filters_subtask(pf);
		i40e_reset_subtask(pf);
		i40e_handle_mdd_event(pf);
		i40e_vc_process_vflr_event(pf);
		i40e_watchdog_subtask(pf);
		i40e_fdir_reinit_subtask(pf);
		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
			/* Client subtask will reopen next time through. */
			i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
							   true);
		} else {
			i40e_client_subtask(pf);
			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
					       pf->state))
				i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
		}
		i40e_sync_filters_subtask(pf);
	} else {
		i40e_reset_subtask(pf);
	}

	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @t: timer list pointer
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	if (is_kdump_kernel()) {
		vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
		vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
	}

	return 0;
}

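/* Sizing example (illustrative): a MAIN VSI on a PF with num_lan_qps = 8
 * and MSI-X enabled gets alloc_queue_pairs = 8 and one q_vector per LAN
 * MSI-X vector, with descriptor counts defaulting to
 * I40E_DEFAULT_NUM_DESCRIPTORS rounded to I40E_REQ_DESCRIPTOR_MULTIPLE;
 * an FDIR VSI is always a single queue pair sized by I40E_FDIR_RING_COUNT.
 */
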
/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

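/* Layout note: the single allocation above holds all ring pointers
 * back-to-back, so for an XDP-enabled VSI with N = alloc_queue_pairs:
 *
 *	tx_rings  -> entries [0,  N)
 *	xdp_rings -> entries [N, 2N)
 *	rx_rings  -> entries [2N, 3N)
 *
 * which is why only tx_rings is ever passed to kfree().
 */
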
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	if (type == I40E_VSI_MAIN) {
		vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
		if (!vsi->af_xdp_zc_qps)
			goto err_rings;
	}

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	bitmap_free(vsi->af_xdp_zc_qps);
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
	vsi->xdp_rings = NULL;
}

/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 */
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
			vsi->idx, vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	bitmap_free(vsi->af_xdp_zc_qps);
	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			WRITE_ONCE(vsi->tx_rings[i], NULL);
			WRITE_ONCE(vsi->rx_rings[i], NULL);
			if (vsi->xdp_rings)
				WRITE_ONCE(vsi->xdp_rings[i], NULL);
		}
	}
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		ring->itr_setting = pf->tx_itr_default;
		WRITE_ONCE(vsi->tx_rings[i], ring++);

		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;

		ring->queue_index = vsi->alloc_queue_pairs + i;
		ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = NULL;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		set_ring_xdp(ring);
		ring->itr_setting = pf->tx_itr_default;
		WRITE_ONCE(vsi->xdp_rings[i], ring++);

setup_rx:
		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_rx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		ring->itr_setting = pf->rx_itr_default;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}

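/* Allocation note: each loop iteration above kcallocs qpv ring structs
 * in one block - Tx first, then the optional XDP Tx ring, then Rx - and
 * advances 'ring' through that block, so freeing the Tx ring pointer
 * (done via kfree_rcu() in i40e_vsi_clear_rings()) releases the whole
 * queue pair.
 */
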
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}

/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *	- Queues being used for RSS.
	 *		We don't need as many as max_rss_size vectors.
	 *		Use rss_size instead in the calculation since that
	 *		is governed by number of cpus in the system.
	 *	- assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 *   - The CPU count within the NUMA node if iWARP is enabled
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve some vectors for the main PF traffic queues. Initially we
	 * only reserve at most 50% of the available vectors, in the case that
	 * the number of online CPUs is large. This ensures that we can enable
	 * extra features as well. Once we've enabled the other features, we
	 * will use any remaining vectors to reach as close as we can to the
	 * number of online CPUs.
	 */
	cpus = num_online_cpus();
	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
	vectors_left -= pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			int vmdq_vecs_wanted =
				pf->num_vmdq_vsis * pf->num_vmdq_qps;
			int vmdq_vecs =
				min_t(int, vectors_left, vmdq_vecs_wanted);

			/* if we're short on vectors for what's desired, we limit
			 * the queues per vmdq.  If this is still more than are
			 * available, the user will need to change the number of
			 * queues/vectors used by the PF later with the ethtool
			 * channels command
			 */
			if (vectors_left < vmdq_vecs_wanted) {
				pf->num_vmdq_qps = 1;
				vmdq_vecs_wanted = pf->num_vmdq_vsis;
				vmdq_vecs = min_t(int,
						  vectors_left,
						  vmdq_vecs_wanted);
			}
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	/* On systems with a large number of SMP cores, we previously limited
	 * the number of vectors for num_lan_msix to be at most 50% of the
	 * available vectors, to allow for other features. Now, we add back
	 * the remaining vectors. However, we ensure that the total
	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
	 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
	pf->num_lan_msix += extra_vectors;
	vectors_left -= extra_vectors;

	WARN(vectors_left < 0,
	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");

	v_budget += pf->num_lan_msix;
	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
			 v_actual, v_budget);
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
							   iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}

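/* Budget walk-through (illustrative numbers): with 16 online CPUs and 64
 * function MSI-X vectors, the code above reserves 1 misc vector, gives
 * the LAN at most half of what remains (min(16, 63 / 2) = 16), then
 * hands single vectors to FD-SB and the requested counts to iWARP and
 * VMDq before returning leftovers to the LAN up to the CPU count.  Only
 * if i40e_reserve_msix_vectors() comes back short is the budget
 * redistributed in the v_actual != v_budget branch.
 */
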
/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err, v_idx, num_q_vectors;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}

/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_IWARP_ENABLED	|
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_DCB_ENABLED	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile)
		return -ENOMEM;

	pf->irq_pile->num_entries = vectors;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}

/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when we suspended the
 * device. This should be called during resume to re-allocate the q_vectors
 * and reacquire IRQs.
 **/
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;

	/* Now that we've re-acquired IRQs, we need to remap the vectors and
	 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;
			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}

	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		i40e_client_update_msix_info(pf);

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}

/**
 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
 * non queue events in recovery mode
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
 * This is handled differently from the non-recovery path since no Tx/Rx
 * resources are being allocated.
 **/
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
{
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI-X misc vector request failed, error %d\n",
				 err);
			return err;
		}
	} else {
		u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;

		err = request_irq(pf->pdev->irq, i40e_intr, flags,
				  pf->int_name, pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI/legacy misc vector request failed, error %d\n",
				 err);
			return err;
		}
		i40e_enable_misc_int_causes(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return 0;
}

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once, the first time through. */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}

/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 */
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}

/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}

/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

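/* Worked example: with rss_table_size = 512 and rss_size = 4 the LUT
 * becomes 0,1,2,3,0,1,2,3,... so receive-hash buckets are spread
 * round-robin over the four enabled queues.
 */
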
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size) {
		u16 qcount;
		/* If the firmware does something weird during VSI init, we
		 * could end up with zero TCs. Check for that to avoid
		 * divide-by-zero. It probably won't pass traffic, but it also
		 * won't panic.
		 */
		qcount = vsi->num_queue_pairs /
			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

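/* Register note: 'hena' is a 64-bit enable mask of packet types to hash,
 * stored across two 32-bit registers - the low word in I40E_PFQF_HENA(0)
 * and the high word in I40E_PFQF_HENA(1) - as the split writes above
 * show.
 */
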
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	queue_count = min_t(int, queue_count, num_online_cpus());
	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf);
		if (test_bit(__I40E_IN_REMOVE, pf->state))
			return pf->alloc_rss_size;

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}

/**
 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
	i40e_status status;
	bool min_valid, max_valid;
	u32 max_bw, min_bw;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->min_bw = min_bw;
		if (max_valid)
			pf->max_bw = max_bw;
	}

	return status;
}

/**
 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
	struct i40e_aqc_configure_partition_bw_data bw_data;
	i40e_status status;

	memset(&bw_data, 0, sizeof(bw_data));

	/* Set the valid bit for this PF */
	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;

	/* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}

/**
 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, 0, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}

/**
 * i40e_is_total_port_shutdown_enabled - read NVM and return value
 * if total port shutdown feature is enabled for this PF
 * @pf: board private structure
 **/
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
{
#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(4)
#define I40E_FEATURES_ENABLE_PTR		0x2A
#define I40E_CURRENT_SETTING_PTR		0x2B
#define I40E_LINK_BEHAVIOR_WORD_OFFSET		0x2D
#define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4
	i40e_status read_status = I40E_SUCCESS;
	u16 sr_emp_sr_settings_ptr = 0;
	u16 features_enable = 0;
	u16 link_behavior = 0;
	bool ret = false;

	read_status = i40e_read_nvm_word(&pf->hw,
					 I40E_SR_EMP_SR_SETTINGS_PTR,
					 &sr_emp_sr_settings_ptr);
	if (read_status)
		goto err_nvm;
	read_status = i40e_read_nvm_word(&pf->hw,
					 sr_emp_sr_settings_ptr +
					 I40E_FEATURES_ENABLE_PTR,
					 &features_enable);
	if (read_status)
		goto err_nvm;
	if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
		read_status = i40e_read_nvm_module_data(&pf->hw,
							I40E_SR_EMP_SR_SETTINGS_PTR,
							I40E_CURRENT_SETTING_PTR,
							I40E_LINK_BEHAVIOR_WORD_OFFSET,
							I40E_LINK_BEHAVIOR_WORD_LENGTH,
							&link_behavior);
		if (read_status)
			goto err_nvm;
		link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
		ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
	}
	return ret;

err_nvm:
	dev_warn(&pf->pdev->dev,
		 "total-port-shutdown feature is off due to read nvm error: %s\n",
		 i40e_stat_str(&pf->hw, read_status));
	return ret;
}

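/* Decode example: the link-behavior word holds one 4-bit field per port,
 * so for pf->hw.port == 2 the shift above is 8 and bit 0 of the result
 * (I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED) selects total-port-shutdown.
 */
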
12688 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12689 * @pf: board private structure to initialize
12691 * i40e_sw_init initializes the Adapter private data structure.
12692 * Fields are initialized based on PCI device information and
12693 * OS network device settings (MTU size).
12695 static int i40e_sw_init(struct i40e_pf *pf)
12701 /* Set default capability flags */
12702 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
12703 I40E_FLAG_MSI_ENABLED |
12704 I40E_FLAG_MSIX_ENABLED;
12706 /* Set default ITR */
12707 pf->rx_itr_default = I40E_ITR_RX_DEF;
12708 pf->tx_itr_default = I40E_ITR_TX_DEF;
12710 /* Depending on PF configurations, it is possible that the RSS
12711 * maximum might end up larger than the available queues
12713 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12714 pf->alloc_rss_size = 1;
12715 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12716 pf->rss_size_max = min_t(int, pf->rss_size_max,
12717 pf->hw.func_caps.num_tx_qp);
12719 /* find the next higher power-of-2 of num cpus */
12720 pow = roundup_pow_of_two(num_online_cpus());
12721 pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
12723 if (pf->hw.func_caps.rss) {
12724 pf->flags |= I40E_FLAG_RSS_ENABLED;
12725 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12726 num_online_cpus());
12729 /* MFP mode enabled */
12730 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12731 pf->flags |= I40E_FLAG_MFP_ENABLED;
12732 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12733 if (i40e_get_partition_bw_setting(pf)) {
12734 dev_warn(&pf->pdev->dev,
"Could not get partition bw settings\n");
} else {
12737 dev_info(&pf->pdev->dev,
12738 "Partition BW Min = %8.8x, Max = %8.8x\n",
12739 pf->min_bw, pf->max_bw);
12741 /* nudge the Tx scheduler */
12742 i40e_set_partition_bw_setting(pf);
12746 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12747 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12748 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
12749 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
12750 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
12751 pf->hw.num_partitions > 1)
12752 dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
else
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12756 pf->fdir_pf_filter_count =
12757 pf->hw.func_caps.fd_filters_guaranteed;
12758 pf->hw.fdir_shared_filter_count =
12759 pf->hw.func_caps.fd_filters_best_effort;
12762 if (pf->hw.mac.type == I40E_MAC_X722) {
12763 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12764 I40E_HW_128_QP_RSS_CAPABLE |
12765 I40E_HW_ATR_EVICT_CAPABLE |
12766 I40E_HW_WB_ON_ITR_CAPABLE |
12767 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12768 I40E_HW_NO_PCI_LINK_CHECK |
12769 I40E_HW_USE_SET_LLDP_MIB |
12770 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12771 I40E_HW_PTP_L4_CAPABLE |
12772 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12773 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12775 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12776 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12777 I40E_FDEVICT_PCTYPE_DEFAULT) {
12778 dev_warn(&pf->pdev->dev,
12779 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12780 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12782 } else if ((pf->hw.aq.api_maj_ver > 1) ||
12783 ((pf->hw.aq.api_maj_ver == 1) &&
12784 (pf->hw.aq.api_min_ver > 4))) {
12785 /* Supported in FW API version higher than 1.4 */
12786 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12789 /* Enable HW ATR eviction if possible */
12790 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12791 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12793 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12794 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12795 (pf->hw.aq.fw_maj_ver < 4))) {
12796 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12797 /* No DCB support for FW < v4.33 */
12798 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12801 /* Disable FW LLDP if FW < v4.3 */
12802 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12803 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12804 (pf->hw.aq.fw_maj_ver < 4)))
12805 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12807 /* Use the FW Set LLDP MIB API if FW > v4.40 */
12808 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12809 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12810 (pf->hw.aq.fw_maj_ver >= 5)))
12811 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12813 /* Enable PTP L4 if FW > v6.0 */
12814 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12815 pf->hw.aq.fw_maj_ver >= 6)
12816 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12818 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12819 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12820 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12821 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12824 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12825 pf->flags |= I40E_FLAG_IWARP_ENABLED;
12826 /* IWARP needs one extra vector for CQP just like MISC.*/
12827 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12829 /* Stopping FW LLDP engine is supported on XL710 and X722
12830 * starting from FW versions determined in i40e_init_adminq.
12831 * Stopping the FW LLDP engine is not supported on XL710
* if NPAR is functioning so unset this hw flag in this case.
*/
12834 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12835 pf->hw.func_caps.npar_enable &&
12836 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12837 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12839 #ifdef CONFIG_PCI_IOV
12840 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12841 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12842 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12843 pf->num_req_vfs = min_t(int,
12844 pf->hw.func_caps.num_vfs,
12845 I40E_MAX_VF_COUNT);
12847 #endif /* CONFIG_PCI_IOV */
12848 pf->eeprom_version = 0xDEAD;
12849 pf->lan_veb = I40E_NO_VEB;
12850 pf->lan_vsi = I40E_NO_VSI;
12852 /* By default FW has this off for performance reasons */
12853 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12855 /* set up queue assignment tracking */
12856 size = sizeof(struct i40e_lump_tracking)
12857 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12858 pf->qp_pile = kzalloc(size, GFP_KERNEL);
if (!pf->qp_pile) {
err = -ENOMEM;
goto sw_init_done;
}
12863 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12865 pf->tx_timeout_recovery_level = 1;
12867 if (pf->hw.mac.type != I40E_MAC_X722 &&
12868 i40e_is_total_port_shutdown_enabled(pf)) {
12869 /* Link down on close must be on when total port shutdown
* is enabled for a given port
*/
12872 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12873 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12874 dev_info(&pf->pdev->dev,
12875 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
}

mutex_init(&pf->switch_mutex);

sw_init_done:
return err;
}
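/* Worked example (illustrative) for the RSS sizing logic above: with
* rss_table_entry_width == 7, num_tx_qp == 128 and 12 online CPUs,
*
* rss_size_max   = min(BIT(7), 128)       = 128
* pow            = roundup_pow_of_two(12) = 16
* rss_size_max   = min(128, 16)           = 16
* alloc_rss_size = min(16, 12)            = 12
*
* so 12 queues are used for RSS even though the HW indirection table
* could spread across 128.
*/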
/**
* i40e_set_ntuple - set the ntuple feature flag and take action
* @pf: board private structure to initialize
* @features: the feature set that the stack is suggesting
*
* Returns a bool to indicate if reset needs to happen
**/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
12892 bool need_reset = false;
12894 /* Check if Flow Director n-tuple support was enabled or disabled. If
12895 * the state changed, we need to reset.
12897 if (features & NETIF_F_NTUPLE) {
12898 /* Enable filters and mark for reset */
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
need_reset = true;
/* enable FD_SB only if there is MSI-X vector and no cloud
* filters exist
*/
12904 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12905 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
}
} else {
12909 /* turn off filters, mark for reset and clear SW filter list */
12910 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
need_reset = true;
i40e_fdir_filter_exit(pf);
}
12914 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12915 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12916 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12918 /* reset fd counters */
12919 pf->fd_add_err = 0;
12920 pf->fd_atr_cnt = 0;
12921 /* if ATR was auto disabled it can be re-enabled. */
12922 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12923 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12924 (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");

return need_reset;
}
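/* Usage sketch: i40e_set_ntuple() only flips flags and reports whether
* a reset is required; the caller owns the reset. The ndo_set_features
* path further down consumes it roughly as:
*
* need_reset = i40e_set_ntuple(pf, features);
* if (need_reset)
*         i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
*/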
12931 * i40e_clear_rss_lut - clear the rx hash lookup table
12932 * @vsi: the VSI being configured
12934 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12936 struct i40e_pf *pf = vsi->back;
12937 struct i40e_hw *hw = &pf->hw;
12938 u16 vf_id = vsi->vf_id;
12941 if (vsi->type == I40E_VSI_MAIN) {
12942 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12943 wr32(hw, I40E_PFQF_HLUT(i), 0);
12944 } else if (vsi->type == I40E_VSI_SRIOV) {
12945 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12946 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
}
}
12953 * i40e_set_loopback - turn on/off loopback mode on underlying PF
12955 * @ena: flag to indicate the on/off setting
12957 static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena)
12959 bool if_running = netif_running(vsi->netdev) &&
!test_and_set_bit(__I40E_VSI_DOWN, vsi->state);
int ret;

if (if_running)
i40e_down(vsi);
12966 ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
if (ret)
netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
if (if_running)
i40e_up(vsi);

return ret;
}
12976 * i40e_set_features - set the netdev feature flags
12977 * @netdev: ptr to the netdev being adjusted
12978 * @features: the feature set that the stack is suggesting
12979 * Note: expects to be called while under rtnl_lock()
12981 static int i40e_set_features(struct net_device *netdev,
12982 netdev_features_t features)
12984 struct i40e_netdev_priv *np = netdev_priv(netdev);
12985 struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
bool need_reset;
12989 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12990 i40e_pf_config_rss(pf);
12991 else if (!(features & NETIF_F_RXHASH) &&
12992 netdev->features & NETIF_F_RXHASH)
12993 i40e_clear_rss_lut(vsi);
12995 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12996 i40e_vlan_stripping_enable(vsi);
else
i40e_vlan_stripping_disable(vsi);
13000 if (!(features & NETIF_F_HW_TC) &&
13001 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
13002 dev_err(&pf->pdev->dev,
"Offloaded tc filters active, can't turn hw_tc_offload off");
return -EINVAL;
}
13007 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
13008 i40e_del_all_macvlans(vsi);
13010 need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
13015 if ((features ^ netdev->features) & NETIF_F_LOOPBACK)
return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));

return 0;
}
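/* For reference, the feature bits handled in i40e_set_features() map to
* ethtool toggles; the commands below are illustrative:
*
* ethtool -K eth0 rxhash on   // NETIF_F_RXHASH -> RSS config
* ethtool -K eth0 ntuple on   // NETIF_F_NTUPLE -> FD sideband
* ethtool -K eth0 rxvlan off  // NETIF_F_HW_VLAN_CTAG_RX
*
* The stack invokes the handler under rtnl_lock.
*/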
13021 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
13022 unsigned int table, unsigned int idx,
13023 struct udp_tunnel_info *ti)
13025 struct i40e_netdev_priv *np = netdev_priv(netdev);
13026 struct i40e_hw *hw = &np->vsi->back->hw;
13027 u8 type, filter_index;
13030 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
13031 I40E_AQC_TUNNEL_TYPE_NGE;
ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
NULL);
if (ret) {
netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
i40e_stat_str(hw, ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
return 0;
}
13046 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
13047 unsigned int table, unsigned int idx,
13048 struct udp_tunnel_info *ti)
13050 struct i40e_netdev_priv *np = netdev_priv(netdev);
13051 struct i40e_hw *hw = &np->vsi->back->hw;
ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
i40e_stat_str(hw, ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}

return 0;
}
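/* Sketch of how the two tunnel-port callbacks are wired up through the
* udp_tunnel_nic core. The actual table lives in pf->udp_tunnel_nic
* (assigned to netdev->udp_tunnel_nic_info in i40e_config_netdev below),
* so the initializer here is an assumption-laden illustration only:
*
* static const struct udp_tunnel_nic_info i40e_udp_tunnels = {
*         .set_port   = i40e_udp_tunnel_set_port,
*         .unset_port = i40e_udp_tunnel_unset_port,
*         .tables     = {
*                 { .n_entries = 16, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
*                 { .n_entries = 16, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
*         },
* };
*/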
13065 static int i40e_get_phys_port_id(struct net_device *netdev,
13066 struct netdev_phys_item_id *ppid)
13068 struct i40e_netdev_priv *np = netdev_priv(netdev);
13069 struct i40e_pf *pf = np->vsi->back;
13070 struct i40e_hw *hw = &pf->hw;
13072 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
13073 return -EOPNOTSUPP;
13075 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
13076 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
13082 * i40e_ndo_fdb_add - add an entry to the hardware database
13083 * @ndm: the input from the stack
13084 * @tb: pointer to array of nladdr (unused)
13085 * @dev: the net device pointer
13086 * @addr: the MAC address entry being added
13088 * @flags: instructions from stack about fdb operation
13089 * @extack: netlink extended ack, unused currently
13091 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
13092 struct net_device *dev,
13093 const unsigned char *addr, u16 vid,
u16 flags,
struct netlink_ext_ack *extack)
13097 struct i40e_netdev_priv *np = netdev_priv(dev);
13098 struct i40e_pf *pf = np->vsi->back;
13101 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
13102 return -EOPNOTSUPP;
13105 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
13109 /* Hardware does not support aging addresses so if a
* ndm_state is given only allow permanent addresses
*/
13112 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
netdev_info(dev, "FDB only supports static addresses\n");
return -EINVAL;
}
13117 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
13118 err = dev_uc_add_excl(dev, addr);
13119 else if (is_multicast_ether_addr(addr))
err = dev_mc_add_excl(dev, addr);
else
err = -EINVAL;
13124 /* Only return duplicate errors if NLM_F_EXCL is set */
if (err == -EEXIST && !(flags & NLM_F_EXCL))
err = 0;

return err;
}
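/* User-space counterpart (illustrative): entries reach this handler via
* an RTM_NEWNEIGH request, e.g.
*
* bridge fdb add 00:11:22:33:44:55 dev eth0 permanent
*
* Non-permanent entries are rejected above because the hardware cannot
* age out addresses.
*/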
13132 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
13133 * @dev: the netdev being configured
13134 * @nlh: RTNL message
13135 * @flags: bridge flags
13136 * @extack: netlink extended ack
13138 * Inserts a new hardware bridge if not already created and
13139 * enables the bridging mode requested (VEB or VEPA). If the
13140 * hardware bridge has already been inserted and the request
13141 * is to change the mode then that requires a PF reset to
13142 * allow rebuild of the components with required hardware
13143 * bridge mode enabled.
13145 * Note: expects to be called while under rtnl_lock()
13147 static int i40e_ndo_bridge_setlink(struct net_device *dev,
13148 struct nlmsghdr *nlh,
u16 flags,
struct netlink_ext_ack *extack)
{
13152 struct i40e_netdev_priv *np = netdev_priv(dev);
13153 struct i40e_vsi *vsi = np->vsi;
13154 struct i40e_pf *pf = vsi->back;
13155 struct i40e_veb *veb = NULL;
13156 struct nlattr *attr, *br_spec;
13159 /* Only for PF VSI for now */
13160 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13161 return -EOPNOTSUPP;
13163 /* Find the HW bridge for PF VSI */
13164 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
veb = pf->veb[i];
}
13169 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13171 nla_for_each_nested(attr, br_spec, rem) {
13174 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13177 mode = nla_get_u16(attr);
13178 if ((mode != BRIDGE_MODE_VEPA) &&
(mode != BRIDGE_MODE_VEB))
return -EINVAL;
/* Insert a new HW bridge if not already created */
if (!veb) {
veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
if (veb) {
veb->bridge_mode = mode;
i40e_config_bridge_mode(veb);
} else {
/* No Bridge HW offload available */
ret = -ENOENT;
break;
}
break;
13194 } else if (mode != veb->bridge_mode) {
13195 /* Existing HW bridge but different mode needs reset */
13196 veb->bridge_mode = mode;
13197 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
13198 if (mode == BRIDGE_MODE_VEB)
13199 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
13201 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
13202 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
13211 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
13214 * @seq: RTNL message seq #
13215 * @dev: the netdev being configured
13216 * @filter_mask: unused
13217 * @nlflags: netlink flags passed in
* Return the mode in which the hardware bridge is operating (VEB or VEPA).
13222 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13223 struct net_device *dev,
13224 u32 __always_unused filter_mask,
int nlflags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
13228 struct i40e_vsi *vsi = np->vsi;
13229 struct i40e_pf *pf = vsi->back;
13230 struct i40e_veb *veb = NULL;
13233 /* Only for PF VSI for now */
13234 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13235 return -EOPNOTSUPP;
13237 /* Find the HW bridge for the PF VSI */
13238 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
veb = pf->veb[i];
}

if (!veb)
return 0;
13246 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
0, 0, nlflags, filter_mask, NULL);
}
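/* Illustrative: the mode reported via ndo_dflt_bridge_getlink() shows
* up in user space as the hwmode attribute, e.g.
*
* $ bridge -d link show dev eth0
* ... hwmode veb
*/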
13251 * i40e_features_check - Validate encapsulated packet conforms to limits
13253 * @dev: This physical port's netdev
13254 * @features: Offload features that the stack believes apply
13256 static netdev_features_t i40e_features_check(struct sk_buff *skb,
13257 struct net_device *dev,
13258 netdev_features_t features)
13262 /* No point in doing any of this if neither checksum nor GSO are
13263 * being requested for this frame. We can rule out both by just
* checking for CHECKSUM_PARTIAL
*/
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
13269 /* We cannot support GSO if the MSS is going to be less than
13270 * 64 bytes. If it is then we need to drop support for GSO.
13272 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
13273 features &= ~NETIF_F_GSO_MASK;
13275 /* MACLEN can support at most 63 words */
13276 len = skb_network_header(skb) - skb->data;
if (len & ~(63 * 2))
goto out_err;
13280 /* IPLEN and EIPLEN can support at most 127 dwords */
13281 len = skb_transport_header(skb) - skb_network_header(skb);
if (len & ~(127 * 4))
goto out_err;
13285 if (skb->encapsulation) {
13286 /* L4TUNLEN can support 127 words */
13287 len = skb_inner_network_header(skb) - skb_transport_header(skb);
if (len & ~(127 * 2))
goto out_err;
13291 /* IPLEN can support at most 127 dwords */
13292 len = skb_inner_transport_header(skb) -
13293 skb_inner_network_header(skb);
if (len & ~(127 * 4))
goto out_err;
}
13298 /* No need to validate L4LEN as TCP is the only protocol with a
13299 * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/

return features;
out_err:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
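/* Worked example for the header-length checks above (illustrative):
* "len & ~(127 * 4)" is non-zero when len is not a multiple of 4 or
* exceeds 508 bytes, since ~(127 * 4) == ~0x1FC keeps bits 0-1 and
* bits 9 and up:
*
* len = 60;  // 60  & ~0x1FC == 0x000 -> OK (15 dwords)
* len = 62;  // 62  & ~0x1FC == 0x002 -> not dword-aligned
* len = 516; // 516 & ~0x1FC == 0x200 -> too long
*
* The MACLEN check "len & ~(63 * 2)" likewise enforces an even length
* of at most 126 bytes.
*/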
13309 * i40e_xdp_setup - add/remove an XDP program
* @vsi: VSI to be changed
13311 * @prog: XDP program
13312 * @extack: netlink extended ack
13314 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
13315 struct netlink_ext_ack *extack)
13317 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
13318 struct i40e_pf *pf = vsi->back;
13319 struct bpf_prog *old_prog;
13323 /* Don't allow frames that span over multiple buffers */
13324 if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {
NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
return -EINVAL;
}
13329 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
13330 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
if (need_reset)
i40e_prep_for_reset(pf);
13335 /* VSI shall be deleted in a moment, just return EINVAL */
if (test_bit(__I40E_IN_REMOVE, pf->state))
return -EINVAL;
13339 old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
if (!prog)
/* Wait until ndo_xsk_wakeup completes. */
synchronize_rcu();
i40e_reset_and_rebuild(pf, true, true);
}
13348 if (!i40e_enabled_xdp_vsi(vsi) && prog) {
if (i40e_realloc_rx_bi_zc(vsi, true))
return -ENOMEM;
13351 } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
if (i40e_realloc_rx_bi_zc(vsi, false))
return -ENOMEM;
}
13356 for (i = 0; i < vsi->num_queue_pairs; i++)
13357 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
if (old_prog)
bpf_prog_put(old_prog);
13362 /* Kick start the NAPI context if there is an AF_XDP socket open
13363 * on that queue id. This so that receiving will start.
13365 if (need_reset && prog)
13366 for (i = 0; i < vsi->num_queue_pairs; i++)
13367 if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);

return 0;
}
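/* Attach-path sketch (illustrative): user space loads a program and the
* kernel routes it through the netdev XDP attach path into ndo_bpf
* (i40e_xdp() below) and then i40e_xdp_setup(). For example, with
* iproute2:
*
* ip link set dev eth0 xdp obj xdp_prog.o sec xdp
*
* Swapping a program on an already-XDP-enabled VSI needs no reset; only
* the on<->off transitions rebuild the rings.
*/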
13375 * i40e_enter_busy_conf - Enters busy config state
13378 * Returns 0 on success, <0 for failure.
13380 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int timeout = 50;

while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
timeout--;
if (!timeout)
return -EBUSY;
usleep_range(1000, 2000);
}

return 0;
}
13396 * i40e_exit_busy_conf - Exits busy config state
13399 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13401 struct i40e_pf *pf = vsi->back;
13403 clear_bit(__I40E_CONFIG_BUSY, pf->state);
13407 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13409 * @queue_pair: queue pair
13411 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13413 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13414 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13415 memset(&vsi->tx_rings[queue_pair]->stats, 0,
13416 sizeof(vsi->tx_rings[queue_pair]->stats));
13417 if (i40e_enabled_xdp_vsi(vsi)) {
13418 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13419 sizeof(vsi->xdp_rings[queue_pair]->stats));
13424 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13426 * @queue_pair: queue pair
13428 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13430 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13431 if (i40e_enabled_xdp_vsi(vsi)) {
/* Make sure that in-progress ndo_xdp_xmit calls are
* completed.
*/
synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
}
13438 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13442 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13444 * @queue_pair: queue pair
13445 * @enable: true for enable, false for disable
13447 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
bool enable)
{
struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13451 struct i40e_q_vector *q_vector = rxr->q_vector;
13456 /* All rings in a qp belong to the same qvector. */
13457 if (q_vector->rx.ring || q_vector->tx.ring) {
if (enable)
napi_enable(&q_vector->napi);
else
napi_disable(&q_vector->napi);
}
}
13466 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13468 * @queue_pair: queue pair
13469 * @enable: true for enable, false for disable
13471 * Returns 0 on success, <0 on failure.
13473 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13476 struct i40e_pf *pf = vsi->back;
13479 pf_q = vsi->base_queue + queue_pair;
13480 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13481 false /*is xdp*/, enable);
if (ret) {
dev_info(&pf->pdev->dev,
"VSI seid %d Tx ring %d %sable timeout\n",
vsi->seid, pf_q, (enable ? "en" : "dis"));
return ret;
}
13489 i40e_control_rx_q(pf, pf_q, enable);
13490 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
if (ret) {
dev_info(&pf->pdev->dev,
"VSI seid %d Rx ring %d %sable timeout\n",
vsi->seid, pf_q, (enable ? "en" : "dis"));
return ret;
}
13498 /* Due to HW errata, on Rx disable only, the register can
* indicate done before it really is. Needs 50ms to be sure
*/
if (!enable)
mdelay(50);

if (!i40e_enabled_xdp_vsi(vsi))
return ret;
13507 ret = i40e_control_wait_tx_q(vsi->seid, pf,
13508 pf_q + vsi->alloc_queue_pairs,
13509 true /*is xdp*/, enable);
if (ret) {
dev_info(&pf->pdev->dev,
"VSI seid %d XDP Tx ring %d %sable timeout\n",
vsi->seid, pf_q, (enable ? "en" : "dis"));
}

return ret;
}
13520 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13522 * @queue_pair: queue_pair
13524 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13526 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13527 struct i40e_pf *pf = vsi->back;
13528 struct i40e_hw *hw = &pf->hw;
13530 /* All rings in a qp belong to the same qvector. */
13531 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
13532 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
else
i40e_irq_dynamic_enable_icr0(pf);

i40e_flush(hw);
}
13540 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13542 * @queue_pair: queue_pair
13544 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13546 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13547 struct i40e_pf *pf = vsi->back;
13548 struct i40e_hw *hw = &pf->hw;
13550 /* For simplicity, instead of removing the qp interrupt causes
13551 * from the interrupt linked list, we simply disable the interrupt, and
13552 * leave the list intact.
13554 * All rings in a qp belong to the same qvector.
13556 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13557 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13559 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13561 synchronize_irq(pf->msix_entries[intpf].vector);
13563 /* Legacy and MSI mode - this stops all interrupt handling */
13564 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13565 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13567 synchronize_irq(pf->pdev->irq);
13572 * i40e_queue_pair_disable - Disables a queue pair
13574 * @queue_pair: queue pair
13576 * Returns 0 on success, <0 on failure.
13578 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13582 err = i40e_enter_busy_conf(vsi);
13586 i40e_queue_pair_disable_irq(vsi, queue_pair);
13587 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
13588 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13589 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13590 i40e_queue_pair_clean_rings(vsi, queue_pair);
13591 i40e_queue_pair_reset_stats(vsi, queue_pair);
13597 * i40e_queue_pair_enable - Enables a queue pair
13599 * @queue_pair: queue pair
13601 * Returns 0 on success, <0 on failure.
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
{
int err;

err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
if (err)
return err;
13611 if (i40e_enabled_xdp_vsi(vsi)) {
13612 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13617 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13621 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
13622 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13623 i40e_queue_pair_enable_irq(vsi, queue_pair);
i40e_exit_busy_conf(vsi);

return err;
}
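/* Note on ordering (summary, not a new mechanism): disable tears things
* down as IRQ -> rings -> NAPI -> ring cleanup, while enable configures
* the rings first and unmasks the interrupt last, so an IRQ can never
* observe a half-configured queue pair. These helpers back the
* per-queue AF_XDP pool setup in i40e_xsk.c.
*/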
13631 * i40e_xdp - implements ndo_bpf for i40e
13633 * @xdp: XDP command
13635 static int i40e_xdp(struct net_device *dev,
13636 struct netdev_bpf *xdp)
13638 struct i40e_netdev_priv *np = netdev_priv(dev);
13639 struct i40e_vsi *vsi = np->vsi;
if (vsi->type != I40E_VSI_MAIN)
return -EINVAL;
13644 switch (xdp->command) {
13645 case XDP_SETUP_PROG:
13646 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13647 case XDP_SETUP_XSK_POOL:
13648 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
}
}
13655 static const struct net_device_ops i40e_netdev_ops = {
13656 .ndo_open = i40e_open,
13657 .ndo_stop = i40e_close,
13658 .ndo_start_xmit = i40e_lan_xmit_frame,
13659 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
13660 .ndo_set_rx_mode = i40e_set_rx_mode,
13661 .ndo_validate_addr = eth_validate_addr,
13662 .ndo_set_mac_address = i40e_set_mac,
13663 .ndo_change_mtu = i40e_change_mtu,
13664 .ndo_eth_ioctl = i40e_ioctl,
13665 .ndo_tx_timeout = i40e_tx_timeout,
13666 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
13667 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
13668 #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40e_netpoll,
#endif
13671 .ndo_setup_tc = __i40e_setup_tc,
13672 .ndo_select_queue = i40e_lan_select_queue,
13673 .ndo_set_features = i40e_set_features,
13674 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
13675 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
13676 .ndo_get_vf_stats = i40e_get_vf_stats,
13677 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
13678 .ndo_get_vf_config = i40e_ndo_get_vf_config,
13679 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
13680 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
13681 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
13682 .ndo_get_phys_port_id = i40e_get_phys_port_id,
13683 .ndo_fdb_add = i40e_ndo_fdb_add,
13684 .ndo_features_check = i40e_features_check,
13685 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
13686 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
13687 .ndo_bpf = i40e_xdp,
13688 .ndo_xdp_xmit = i40e_xdp_xmit,
13689 .ndo_xsk_wakeup = i40e_xsk_wakeup,
13690 .ndo_dfwd_add_station = i40e_fwd_add,
13691 .ndo_dfwd_del_station = i40e_fwd_del,
13695 * i40e_config_netdev - Setup the netdev flags
13696 * @vsi: the VSI being configured
13698 * Returns 0 on success, negative value on failure
13700 static int i40e_config_netdev(struct i40e_vsi *vsi)
13702 struct i40e_pf *pf = vsi->back;
13703 struct i40e_hw *hw = &pf->hw;
13704 struct i40e_netdev_priv *np;
13705 struct net_device *netdev;
13706 u8 broadcast[ETH_ALEN];
13707 u8 mac_addr[ETH_ALEN];
13709 netdev_features_t hw_enc_features;
13710 netdev_features_t hw_features;
13712 etherdev_size = sizeof(struct i40e_netdev_priv);
netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
if (!netdev)
return -ENOMEM;

vsi->netdev = netdev;
np = netdev_priv(netdev);
np->vsi = vsi;
13721 hw_enc_features = NETIF_F_SG |
13724 NETIF_F_SOFT_FEATURES |
13729 NETIF_F_GSO_GRE_CSUM |
13730 NETIF_F_GSO_PARTIAL |
13731 NETIF_F_GSO_IPXIP4 |
13732 NETIF_F_GSO_IPXIP6 |
13733 NETIF_F_GSO_UDP_TUNNEL |
13734 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13735 NETIF_F_GSO_UDP_L4 |
13741 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13742 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13744 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13746 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13748 netdev->hw_enc_features |= hw_enc_features;
13750 /* record features VLANs can make use of */
13751 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13753 #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
13754 NETIF_F_GSO_GRE_CSUM | \
13755 NETIF_F_GSO_IPXIP4 | \
13756 NETIF_F_GSO_IPXIP6 | \
13757 NETIF_F_GSO_UDP_TUNNEL | \
13758 NETIF_F_GSO_UDP_TUNNEL_CSUM)
13760 netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
13761 netdev->features |= NETIF_F_GSO_PARTIAL |
13762 I40E_GSO_PARTIAL_FEATURES;
13764 netdev->mpls_features |= NETIF_F_SG;
13765 netdev->mpls_features |= NETIF_F_HW_CSUM;
13766 netdev->mpls_features |= NETIF_F_TSO;
13767 netdev->mpls_features |= NETIF_F_TSO6;
13768 netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
13770 /* enable macvlan offloads */
13771 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13773 hw_features = hw_enc_features |
13774 NETIF_F_HW_VLAN_CTAG_TX |
13775 NETIF_F_HW_VLAN_CTAG_RX;
13777 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13778 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13780 netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;
13782 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13783 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13785 netdev->features &= ~NETIF_F_HW_TC;
13787 if (vsi->type == I40E_VSI_MAIN) {
13788 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13789 ether_addr_copy(mac_addr, hw->mac.perm_addr);
13790 /* The following steps are necessary for two reasons. First,
13791 * some older NVM configurations load a default MAC-VLAN
13792 * filter that will accept any tagged packet, and we want to
13793 * replace this with a normal filter. Additionally, it is
13794 * possible our MAC address was provided by the platform using
13795 * Open Firmware or similar.
13797 * Thus, we need to remove the default filter and install one
13798 * specific to the MAC address.
13800 i40e_rm_default_mac_filter(vsi, mac_addr);
13801 spin_lock_bh(&vsi->mac_filter_hash_lock);
13802 i40e_add_mac_filter(vsi, mac_addr);
13803 spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else {
/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13806 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13807 * the end, which is 4 bytes long, so force truncation of the
* original name by IFNAMSIZ - 4
*/
13810 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
IFNAMSIZ - 4,
pf->vsi[pf->lan_vsi]->netdev->name);
13813 eth_random_addr(mac_addr);
13815 spin_lock_bh(&vsi->mac_filter_hash_lock);
13816 i40e_add_mac_filter(vsi, mac_addr);
13817 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13820 /* Add the broadcast filter so that we initially will receive
13821 * broadcast packets. Note that when a new VLAN is first added the
13822 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13823 * specific filters as part of transitioning into "vlan" operation.
13824 * When more VLANs are added, the driver will copy each existing MAC
13825 * filter and add it for the new VLAN.
13827 * Broadcast filters are handled specially by
* i40e_sync_filters_subtask, as the driver must set the broadcast
13829 * promiscuous bit instead of adding this directly as a MAC/VLAN
13830 * filter. The subtask will update the correct broadcast promiscuous
13831 * bits as VLANs become active or inactive.
13833 eth_broadcast_addr(broadcast);
13834 spin_lock_bh(&vsi->mac_filter_hash_lock);
13835 i40e_add_mac_filter(vsi, broadcast);
13836 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13838 eth_hw_addr_set(netdev, mac_addr);
13839 ether_addr_copy(netdev->perm_addr, mac_addr);
13841 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13842 netdev->neigh_priv_len = sizeof(u32) * 4;
13844 netdev->priv_flags |= IFF_UNICAST_FLT;
13845 netdev->priv_flags |= IFF_SUPP_NOFCS;
13846 /* Setup netdev TC information */
13847 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13849 netdev->netdev_ops = &i40e_netdev_ops;
13850 netdev->watchdog_timeo = 5 * HZ;
13851 i40e_set_ethtool_ops(netdev);
13853 /* MTU range: 68 - 9706 */
13854 netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;

return 0;
}
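/* Illustrative arithmetic for the MTU bound above: the advertised
* "68 - 9706" range implies I40E_PACKET_HDR_PAD == 22 bytes
* (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) when I40E_MAX_RXBUFFER is 9728:
*
* max_mtu = 9728 - 22 = 9706
*
* The exact macro values live in the driver headers; the numbers here
* are inferred from the comment, not restated from those headers.
*/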
13861 * i40e_vsi_delete - Delete a VSI from the switch
13862 * @vsi: the VSI being removed
**/
13866 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13868 /* remove default VSI is not allowed */
if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
return;
13872 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13876 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13877 * @vsi: the VSI being queried
* Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
13881 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13883 struct i40e_veb *veb;
13884 struct i40e_pf *pf = vsi->back;
13886 /* Uplink is not a bridge so default to VEB */
13887 if (vsi->veb_idx >= I40E_MAX_VEB)
13890 veb = pf->veb[vsi->veb_idx];
if (!veb) {
dev_info(&pf->pdev->dev,
"There is no veb associated with the bridge\n");
return 0;
}
13897 /* Uplink is a bridge in VEPA mode */
if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
return 0;
} else {
/* Uplink is a bridge in VEB mode */
return 1;
}

/* VEPA is now default bridge, so return 0 */
return 0;
}
13910 * i40e_add_vsi - Add a VSI to the switch
13911 * @vsi: the VSI being configured
13913 * This initializes a VSI context depending on the VSI type to be added and
13914 * passes it down to the add_vsi aq command.
13916 static int i40e_add_vsi(struct i40e_vsi *vsi)
13919 struct i40e_pf *pf = vsi->back;
13920 struct i40e_hw *hw = &pf->hw;
13921 struct i40e_vsi_context ctxt;
13922 struct i40e_mac_filter *f;
13923 struct hlist_node *h;
13926 u8 enabled_tc = 0x1; /* TC0 enabled */
13929 memset(&ctxt, 0, sizeof(ctxt));
13930 switch (vsi->type) {
13931 case I40E_VSI_MAIN:
13932 /* The PF's main VSI is already setup as part of the
13933 * device initialization, so we'll not bother with
13934 * the add_vsi call, but we will retrieve the current
13937 ctxt.seid = pf->main_vsi_seid;
13938 ctxt.pf_num = pf->hw.pf_id;
13940 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13941 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
13944 "couldn't get PF vsi config, err %s aq_err %s\n",
13945 i40e_stat_str(&pf->hw, ret),
13946 i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
}
13950 vsi->info = ctxt.info;
13951 vsi->info.valid_sections = 0;
13953 vsi->seid = ctxt.seid;
13954 vsi->id = ctxt.vsi_number;
13956 enabled_tc = i40e_pf_get_tc_map(pf);
13958 /* Source pruning is enabled by default, so the flag is
13959 * negative logic - if it's set, we need to fiddle with
13960 * the VSI to disable source pruning.
13962 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13963 memset(&ctxt, 0, sizeof(ctxt));
13964 ctxt.seid = pf->main_vsi_seid;
13965 ctxt.pf_num = pf->hw.pf_id;
13967 ctxt.info.valid_sections |=
13968 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13969 ctxt.info.switch_id =
13970 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13971 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13973 dev_info(&pf->pdev->dev,
13974 "update vsi failed, err %s aq_err %s\n",
13975 i40e_stat_str(&pf->hw, ret),
13976 i40e_aq_str(&pf->hw,
13977 pf->hw.aq.asq_last_status));
13983 /* MFP mode setup queue map and update VSI */
13984 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13985 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13986 memset(&ctxt, 0, sizeof(ctxt));
13987 ctxt.seid = pf->main_vsi_seid;
13988 ctxt.pf_num = pf->hw.pf_id;
13990 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13991 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13993 dev_info(&pf->pdev->dev,
13994 "update vsi failed, err %s aq_err %s\n",
13995 i40e_stat_str(&pf->hw, ret),
13996 i40e_aq_str(&pf->hw,
13997 pf->hw.aq.asq_last_status));
14001 /* update the local VSI info queue map */
14002 i40e_vsi_update_queue_map(vsi, &ctxt);
14003 vsi->info.valid_sections = 0;
14005 /* Default/Main VSI is only enabled for TC0
14006 * reconfigure it to enable all TCs that are
14007 * available on the port in SFP mode.
14008 * For MFP case the iSCSI PF would use this
14009 * flow to enable LAN+iSCSI TC.
14011 ret = i40e_vsi_config_tc(vsi, enabled_tc);
if (ret) {
/* Single TC condition is not fatal,
* message and continue
*/
14016 dev_info(&pf->pdev->dev,
14017 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
enabled_tc,
i40e_stat_str(&pf->hw, ret),
14020 i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
}
break;
14026 case I40E_VSI_FDIR:
14027 ctxt.pf_num = hw->pf_id;
14029 ctxt.uplink_seid = vsi->uplink_seid;
14030 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
14031 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
14032 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
14033 (i40e_is_vsi_uplink_mode_veb(vsi))) {
14034 ctxt.info.valid_sections |=
14035 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14036 ctxt.info.switch_id =
14037 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14039 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14042 case I40E_VSI_VMDQ2:
14043 ctxt.pf_num = hw->pf_id;
14045 ctxt.uplink_seid = vsi->uplink_seid;
14046 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
14047 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
14049 /* This VSI is connected to VEB so the switch_id
14050 * should be set to zero by default.
14052 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
14053 ctxt.info.valid_sections |=
14054 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14055 ctxt.info.switch_id =
14056 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14059 /* Setup the VSI tx/rx queue map for TC0 only for now */
14060 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14063 case I40E_VSI_SRIOV:
14064 ctxt.pf_num = hw->pf_id;
14065 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
14066 ctxt.uplink_seid = vsi->uplink_seid;
14067 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
14068 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
14070 /* This VSI is connected to VEB so the switch_id
14071 * should be set to zero by default.
14073 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
14074 ctxt.info.valid_sections |=
14075 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14076 ctxt.info.switch_id =
14077 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14080 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
14081 ctxt.info.valid_sections |=
14082 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
14083 ctxt.info.queueing_opt_flags |=
14084 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
14085 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
14088 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
14089 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
14090 if (pf->vf[vsi->vf_id].spoofchk) {
14091 ctxt.info.valid_sections |=
14092 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
14093 ctxt.info.sec_flags |=
14094 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
14095 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
14097 /* Setup the VSI tx/rx queue map for TC0 only for now */
14098 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14101 case I40E_VSI_IWARP:
/* send down message to iWARP */
break;

default:
return -ENODEV;
}
14109 if (vsi->type != I40E_VSI_MAIN) {
14110 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
14113 "add vsi failed, err %s aq_err %s\n",
14114 i40e_stat_str(&pf->hw, ret),
14115 i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
14120 vsi->info = ctxt.info;
14121 vsi->info.valid_sections = 0;
14122 vsi->seid = ctxt.seid;
14123 vsi->id = ctxt.vsi_number;
14126 vsi->active_filters = 0;
14127 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
14128 spin_lock_bh(&vsi->mac_filter_hash_lock);
14129 /* If macvlan filters already exist, force them to get loaded */
14130 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
14131 f->state = I40E_FILTER_NEW;
14134 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14137 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
14138 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
14141 /* Update VSI BW information */
14142 ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
14145 "couldn't get vsi bw info, err %s aq_err %s\n",
14146 i40e_stat_str(&pf->hw, ret),
14147 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
}

err:
return ret;
}
14157 * i40e_vsi_release - Delete a VSI and free its resources
14158 * @vsi: the VSI being removed
14160 * Returns 0 on success or < 0 on error
14162 int i40e_vsi_release(struct i40e_vsi *vsi)
14164 struct i40e_mac_filter *f;
14165 struct hlist_node *h;
14166 struct i40e_veb *veb = NULL;
struct i40e_pf *pf;
u16 uplink_seid;
int i, n, bkt;

pf = vsi->back;
14173 /* release of a VEB-owner or last VSI is not allowed */
14174 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
14175 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
vsi->seid, vsi->uplink_seid);
return -ENODEV;
}
14179 if (vsi == pf->vsi[pf->lan_vsi] &&
14180 !test_bit(__I40E_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
14184 set_bit(__I40E_VSI_RELEASING, vsi->state);
14185 uplink_seid = vsi->uplink_seid;
14186 if (vsi->type != I40E_VSI_SRIOV) {
14187 if (vsi->netdev_registered) {
14188 vsi->netdev_registered = false;
14190 /* results in a call to i40e_close() */
14191 unregister_netdev(vsi->netdev);
14194 i40e_vsi_close(vsi);
14196 i40e_vsi_disable_irq(vsi);
14199 spin_lock_bh(&vsi->mac_filter_hash_lock);
14201 /* clear the sync flag on all filters */
14203 __dev_uc_unsync(vsi->netdev, NULL);
14204 __dev_mc_unsync(vsi->netdev, NULL);
14207 /* make sure any remaining filters are marked for deletion */
14208 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
14209 __i40e_del_filter(vsi, f);
14211 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14213 i40e_sync_vsi_filters(vsi);
14215 i40e_vsi_delete(vsi);
14216 i40e_vsi_free_q_vectors(vsi);
14218 free_netdev(vsi->netdev);
14219 vsi->netdev = NULL;
14221 i40e_vsi_clear_rings(vsi);
14222 i40e_vsi_clear(vsi);
14224 /* If this was the last thing on the VEB, except for the
14225 * controlling VSI, remove the VEB, which puts the controlling
14226 * VSI onto the next level down in the switch.
14228 * Well, okay, there's one more exception here: don't remove
14229 * the orphan VEBs yet. We'll wait for an explicit remove request
14230 * from up the network stack.
14232 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
14234 pf->vsi[i]->uplink_seid == uplink_seid &&
14235 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14236 n++; /* count the VSIs */
14239 for (i = 0; i < I40E_MAX_VEB; i++) {
14242 if (pf->veb[i]->uplink_seid == uplink_seid)
14243 n++; /* count the VEBs */
14244 if (pf->veb[i]->seid == uplink_seid)
14247 if (n == 0 && veb && veb->uplink_seid != 0)
14248 i40e_veb_release(veb);
14254 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
14255 * @vsi: ptr to the VSI
14257 * This should only be called after i40e_vsi_mem_alloc() which allocates the
14258 * corresponding SW VSI structure and initializes num_queue_pairs for the
14259 * newly allocated VSI.
14261 * Returns 0 on success or negative on failure
14263 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
14266 struct i40e_pf *pf = vsi->back;
14268 if (vsi->q_vectors[0]) {
dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
vsi->seid);
return -EEXIST;
}
14274 if (vsi->base_vector) {
14275 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
14276 vsi->seid, vsi->base_vector);
14280 ret = i40e_vsi_alloc_q_vectors(vsi);
14282 dev_info(&pf->pdev->dev,
14283 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
14284 vsi->num_q_vectors, vsi->seid, ret);
14285 vsi->num_q_vectors = 0;
14286 goto vector_setup_out;
14289 /* In Legacy mode, we do not have to get any other vector since we
14290 * piggyback on the misc/ICR0 for queue interrupts.
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
return ret;
14294 if (vsi->num_q_vectors)
14295 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
14296 vsi->num_q_vectors, vsi->idx);
14297 if (vsi->base_vector < 0) {
14298 dev_info(&pf->pdev->dev,
14299 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
14300 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
14301 i40e_vsi_free_q_vectors(vsi);
14303 goto vector_setup_out;
* i40e_vsi_reinit_setup - tear down and reallocate resources for a VSI
14312 * @vsi: pointer to the vsi.
14314 * This re-allocates a vsi's queue resources.
14316 * Returns pointer to the successfully allocated and configured VSI sw struct
14317 * on success, otherwise returns NULL on failure.
14319 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
14321 u16 alloc_queue_pairs;
14322 struct i40e_pf *pf;
14331 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
14332 i40e_vsi_clear_rings(vsi);
14334 i40e_vsi_free_arrays(vsi, false);
14335 i40e_set_num_rings_in_vsi(vsi);
ret = i40e_vsi_alloc_arrays(vsi, false);
if (ret)
goto err_vsi;
14340 alloc_queue_pairs = vsi->alloc_queue_pairs *
14341 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14343 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14345 dev_info(&pf->pdev->dev,
14346 "failed to get tracking for %d queues for VSI %d err %d\n",
14347 alloc_queue_pairs, vsi->seid, ret);
14350 vsi->base_queue = ret;
14352 /* Update the FW view of the VSI. Force a reset of TC and queue
14353 * layout configurations.
14355 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14356 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14357 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14358 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14359 if (vsi->type == I40E_VSI_MAIN)
14360 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
14362 /* assign it some queues */
ret = i40e_alloc_rings(vsi);
if (ret)
goto err_rings;
14367 /* map all of the rings to the q_vectors */
14368 i40e_vsi_map_rings_to_vectors(vsi);
14372 i40e_vsi_free_q_vectors(vsi);
14373 if (vsi->netdev_registered) {
14374 vsi->netdev_registered = false;
14375 unregister_netdev(vsi->netdev);
14376 free_netdev(vsi->netdev);
14377 vsi->netdev = NULL;
14379 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14381 i40e_vsi_clear(vsi);
14386 * i40e_vsi_setup - Set up a VSI by a given type
14387 * @pf: board private structure
14389 * @uplink_seid: the switch element to link to
14390 * @param1: usage depends upon VSI type. For VF types, indicates VF id
14392 * This allocates the sw VSI structure and its queue resources, then add a VSI
14393 * to the identified VEB.
14395 * Returns pointer to the successfully allocated and configure VSI sw struct on
14396 * success, otherwise returns NULL on failure.
14398 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
14399 u16 uplink_seid, u32 param1)
14401 struct i40e_vsi *vsi = NULL;
14402 struct i40e_veb *veb = NULL;
14403 u16 alloc_queue_pairs;
14407 /* The requested uplink_seid must be either
14408 * - the PF's port seid
14409 * no VEB is needed because this is the PF
14410 * or this is a Flow Director special case VSI
14411 * - seid of an existing VEB
14412 * - seid of a VSI that owns an existing VEB
14413 * - seid of a VSI that doesn't own a VEB
14414 * a new VEB is created and the VSI becomes the owner
14415 * - seid of the PF VSI, which is what creates the first VEB
14416 * this is a special case of the previous
14418 * Find which uplink_seid we were given and create a new VEB if needed
14420 for (i = 0; i < I40E_MAX_VEB; i++) {
14421 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
14427 if (!veb && uplink_seid != pf->mac_seid) {
14429 for (i = 0; i < pf->num_alloc_vsi; i++) {
14430 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
uplink_seid);
return NULL;
}
14441 if (vsi->uplink_seid == pf->mac_seid)
14442 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14443 vsi->tc_config.enabled_tc);
14444 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14445 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14446 vsi->tc_config.enabled_tc);
14448 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14449 dev_info(&vsi->back->pdev->dev,
14450 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14453 /* We come up by default in VEPA mode if SRIOV is not
14454 * already enabled, in which case we can't force VEPA
14457 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
14458 veb->bridge_mode = BRIDGE_MODE_VEPA;
14459 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
14461 i40e_config_bridge_mode(veb);
14463 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14464 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14468 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14472 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14473 uplink_seid = veb->seid;
14476 /* get vsi sw struct */
14477 v_idx = i40e_vsi_mem_alloc(pf, type);
14480 vsi = pf->vsi[v_idx];
14484 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14486 if (type == I40E_VSI_MAIN)
14487 pf->lan_vsi = v_idx;
14488 else if (type == I40E_VSI_SRIOV)
14489 vsi->vf_id = param1;
14490 /* assign it some queues */
14491 alloc_queue_pairs = vsi->alloc_queue_pairs *
14492 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14494 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14496 dev_info(&pf->pdev->dev,
14497 "failed to get tracking for %d queues for VSI %d err=%d\n",
14498 alloc_queue_pairs, vsi->seid, ret);
14501 vsi->base_queue = ret;
14503 /* get a VSI from the hardware */
14504 vsi->uplink_seid = uplink_seid;
ret = i40e_add_vsi(vsi);
if (ret)
goto err_vsi;
14509 switch (vsi->type) {
14510 /* setup the netdev if needed */
14511 case I40E_VSI_MAIN:
14512 case I40E_VSI_VMDQ2:
14513 ret = i40e_config_netdev(vsi);
14516 ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
14519 ret = register_netdev(vsi->netdev);
14522 vsi->netdev_registered = true;
14523 netif_carrier_off(vsi->netdev);
14524 #ifdef CONFIG_I40E_DCB
14525 /* Setup DCB netlink interface */
14526 i40e_dcbnl_setup(vsi);
14527 #endif /* CONFIG_I40E_DCB */
fallthrough;

case I40E_VSI_FDIR:
14530 /* set up vectors and rings if needed */
14531 ret = i40e_vsi_setup_vectors(vsi);
14535 ret = i40e_alloc_rings(vsi);
14539 /* map all of the rings to the q_vectors */
14540 i40e_vsi_map_rings_to_vectors(vsi);
i40e_vsi_reset_stats(vsi);
break;

default:
/* no netdev or rings for the other VSI types */
break;
}
14549 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
14550 (vsi->type == I40E_VSI_VMDQ2)) {
14551 ret = i40e_vsi_config_rss(vsi);
14556 i40e_vsi_free_q_vectors(vsi);
14558 if (vsi->netdev_registered) {
14559 vsi->netdev_registered = false;
14560 unregister_netdev(vsi->netdev);
14561 free_netdev(vsi->netdev);
14562 vsi->netdev = NULL;
14565 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14567 i40e_vsi_clear(vsi);
14573 * i40e_veb_get_bw_info - Query VEB BW information
14574 * @veb: the veb to query
14576 * Query the Tx scheduler BW configuration data for given VEB
14578 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14580 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14581 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14582 struct i40e_pf *pf = veb->pf;
14583 struct i40e_hw *hw = &pf->hw;
ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
&bw_data, NULL);
if (ret) {
14591 dev_info(&pf->pdev->dev,
14592 "query veb bw config failed, err %s aq_err %s\n",
14593 i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
&ets_data, NULL);
if (ret) {
14601 dev_info(&pf->pdev->dev,
14602 "query veb bw ets config failed, err %s aq_err %s\n",
14603 i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
14608 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14609 veb->bw_max_quanta = ets_data.tc_bw_max;
14610 veb->is_abs_credits = bw_data.absolute_credits_enable;
14611 veb->enabled_tc = ets_data.tc_valid_bits;
14612 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14613 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14614 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14615 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14616 veb->bw_tc_limit_credits[i] =
14617 le16_to_cpu(bw_data.tc_bw_limits[i]);
veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
}

out:
return ret;
}
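/* Worked example (illustrative) for the quanta unpacking above:
* tc_bw_max packs eight 4-bit fields, one per TC, into two LE16 words.
* With bw_data.tc_bw_max[0] == 0x4321 and tc_bw_max[1] == 0x8765:
*
* tc_bw_max = 0x87654321
* bw_tc_max_quanta[0] = (0x87654321 >> 0)  & 0x7 = 1
* bw_tc_max_quanta[3] = (0x87654321 >> 12) & 0x7 = 4
*
* Only the low three bits of each nibble are retained.
*/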
14626 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14627 * @pf: board private structure
14629 * On error: returns error code (negative)
* On success: returns veb index in PF (positive)
14632 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14635 struct i40e_veb *veb;
14638 /* Need to protect the allocation of switch elements at the PF level */
14639 mutex_lock(&pf->switch_mutex);
14641 /* VEB list may be fragmented if VEB creation/destruction has
14642 * been happening. We can afford to do a quick scan to look
14643 * for any free slots in the list.
* find next empty veb slot, looping back around if necessary
*/
i = 0;
while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
i++;
14650 if (i >= I40E_MAX_VEB) {
14652 goto err_alloc_veb; /* out of VEB slots! */
14655 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
if (!veb) {
ret = -ENOMEM;
goto err_alloc_veb;
}
14662 veb->enabled_tc = 1;
14667 mutex_unlock(&pf->switch_mutex);
14672 * i40e_switch_branch_release - Delete a branch of the switch tree
14673 * @branch: where to start deleting
14675 * This uses recursion to find the tips of the branch to be
14676 * removed, deleting until we get back to and can delete this VEB.
14678 static void i40e_switch_branch_release(struct i40e_veb *branch)
14680 struct i40e_pf *pf = branch->pf;
14681 u16 branch_seid = branch->seid;
14682 u16 veb_idx = branch->idx;
14685 /* release any VEBs on this VEB - RECURSION */
14686 for (i = 0; i < I40E_MAX_VEB; i++) {
if (!pf->veb[i])
continue;
if (pf->veb[i]->uplink_seid == branch->seid)
14690 i40e_switch_branch_release(pf->veb[i]);
14693 /* Release the VSIs on this VEB, but not the owner VSI.
14695 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
14696 * the VEB itself, so don't use (*branch) after this loop.
14698 for (i = 0; i < pf->num_alloc_vsi; i++) {
if (!pf->vsi[i])
continue;
if (pf->vsi[i]->uplink_seid == branch_seid &&
14702 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14703 i40e_vsi_release(pf->vsi[i]);
14707 /* There's one corner case where the VEB might not have been
14708 * removed, so double check it here and remove it if needed.
14709 * This case happens if the veb was created from the debugfs
14710 * commands and no VSIs were added to it.
14712 if (pf->veb[veb_idx])
14713 i40e_veb_release(pf->veb[veb_idx]);
14717 * i40e_veb_clear - remove veb struct
14718 * @veb: the veb to remove
14720 static void i40e_veb_clear(struct i40e_veb *veb)
14726 struct i40e_pf *pf = veb->pf;
14728 mutex_lock(&pf->switch_mutex);
14729 if (pf->veb[veb->idx] == veb)
14730 pf->veb[veb->idx] = NULL;
14731 mutex_unlock(&pf->switch_mutex);
14738 * i40e_veb_release - Delete a VEB and free its resources
14739 * @veb: the VEB being removed
14741 void i40e_veb_release(struct i40e_veb *veb)
14743 struct i40e_vsi *vsi = NULL;
14744 struct i40e_pf *pf;
14749 /* find the remaining VSI and check for extras */
14750 for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
n++;
vsi = pf->vsi[i];
}
}
if (n != 1) {
14757 dev_info(&pf->pdev->dev,
"can't remove VEB %d with %d VSIs left\n",
veb->seid, n);
return;
}
14763 /* move the remaining VSI to uplink veb */
14764 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14765 if (veb->uplink_seid) {
14766 vsi->uplink_seid = veb->uplink_seid;
14767 if (veb->uplink_seid == pf->mac_seid)
14768 vsi->veb_idx = I40E_NO_VEB;
else
vsi->veb_idx = veb->veb_idx;
14773 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14774 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14777 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14778 i40e_veb_clear(veb);
14782 * i40e_add_veb - create the VEB in the switch
14783 * @veb: the VEB to be instantiated
14784 * @vsi: the controlling VSI
14786 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14788 struct i40e_pf *pf = veb->pf;
14789 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
14792 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
14793 veb->enabled_tc, false,
14794 &veb->seid, enable_stats, NULL);
14796 /* get a VEB from the hardware */
14798 dev_info(&pf->pdev->dev,
14799 "couldn't add VEB, err %s aq_err %s\n",
14800 i40e_stat_str(&pf->hw, ret),
14801 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14805 /* get statistics counter */
14806 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14807 &veb->stats_idx, NULL, NULL, NULL);
14809 dev_info(&pf->pdev->dev,
14810 "couldn't get VEB statistics idx, err %s aq_err %s\n",
14811 i40e_stat_str(&pf->hw, ret),
14812 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14815 ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
14818 "couldn't get VEB bw info, err %s aq_err %s\n",
14819 i40e_stat_str(&pf->hw, ret),
14820 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
}
14825 vsi->uplink_seid = veb->seid;
14826 vsi->veb_idx = veb->idx;
14827 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14833 * i40e_veb_setup - Set up a VEB
14834 * @pf: board private structure
14835 * @flags: VEB setup flags
14836 * @uplink_seid: the switch element to link to
14837 * @vsi_seid: the initial VSI seid
14838 * @enabled_tc: Enabled TC bit-map
14840 * This allocates the sw VEB structure and links it into the switch.
14841 * It is possible and legal for this to be a duplicate of an already
14842 * existing VEB. It is also possible for both uplink and vsi seids
14843 * to be zero, in order to create a floating VEB.
14845 * Returns a pointer to the successfully allocated VEB sw struct on
14846 * success, otherwise NULL.
14848 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14849 u16 uplink_seid, u16 vsi_seid,
14852 struct i40e_veb *veb, *uplink_veb = NULL;
14853 int vsi_idx, veb_idx;
14856 /* if one seid is 0, the other must be 0 to create a floating relay */
14857 if ((uplink_seid == 0 || vsi_seid == 0) &&
14858 (uplink_seid + vsi_seid != 0)) {
14859 dev_info(&pf->pdev->dev,
14860 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14861 uplink_seid, vsi_seid);
14865 /* make sure there is such a vsi and uplink */
14866 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14867 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14869 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14870 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14875 if (uplink_seid && uplink_seid != pf->mac_seid) {
14876 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14877 if (pf->veb[veb_idx] &&
14878 pf->veb[veb_idx]->seid == uplink_seid) {
14879 uplink_veb = pf->veb[veb_idx];
14884 dev_info(&pf->pdev->dev,
14885 "uplink seid %d not found\n", uplink_seid);
14890 /* get veb sw struct */
14891 veb_idx = i40e_veb_mem_alloc(pf);
14894 veb = pf->veb[veb_idx];
14895 veb->flags = flags;
14896 veb->uplink_seid = uplink_seid;
14897 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14898 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14900 /* create the VEB in the switch */
14901 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14904 if (vsi_idx == pf->lan_vsi)
14905 pf->lan_veb = veb->idx;
14910 i40e_veb_clear(veb);
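/* Editor's usage sketch (illustrative only; assumes a valid pf with a main
 * LAN VSI). Passing both seids as zero creates a floating VEB, while a MAC
 * uplink plus the main VSI's seid creates a regular LAN-facing VEB; an
 * enabled_tc of 0 defaults to TC0 (see above). A NULL return means
 * validation or AQ failure.
 */
#if 0
	struct i40e_veb *floating, *lan;

	floating = i40e_veb_setup(pf, 0, 0, 0, 0);
	lan = i40e_veb_setup(pf, 0, pf->mac_seid,
			     pf->vsi[pf->lan_vsi]->seid, 0);
#endif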
14916 * i40e_setup_pf_switch_element - set PF vars based on switch type
14917 * @pf: board private structure
14918 * @ele: element we are building info from
14919 * @num_reported: total number of elements
14920 * @printconfig: should we print the contents
14922 * Helper function for extracting a few useful SEID values.
14924 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14925 struct i40e_aqc_switch_config_element_resp *ele,
14926 u16 num_reported, bool printconfig)
14928 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14929 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14930 u8 element_type = ele->element_type;
14931 u16 seid = le16_to_cpu(ele->seid);
14934 dev_info(&pf->pdev->dev,
14935 "type=%d seid=%d uplink=%d downlink=%d\n",
14936 element_type, seid, uplink_seid, downlink_seid);
14938 switch (element_type) {
14939 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14940 pf->mac_seid = seid;
14942 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14944 if (uplink_seid != pf->mac_seid)
14946 if (pf->lan_veb >= I40E_MAX_VEB) {
14949 /* find existing or else empty VEB */
14950 for (v = 0; v < I40E_MAX_VEB; v++) {
14951 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14956 if (pf->lan_veb >= I40E_MAX_VEB) {
14957 v = i40e_veb_mem_alloc(pf);
14963 if (pf->lan_veb >= I40E_MAX_VEB)
14966 pf->veb[pf->lan_veb]->seid = seid;
14967 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14968 pf->veb[pf->lan_veb]->pf = pf;
14969 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14971 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14972 if (num_reported != 1)
14974 /* This is immediately after a reset so we can assume this is the PF's VSI.
14977 pf->mac_seid = uplink_seid;
14978 pf->pf_seid = downlink_seid;
14979 pf->main_vsi_seid = seid;
14981 dev_info(&pf->pdev->dev,
14982 "pf_seid=%d main_vsi_seid=%d\n",
14983 pf->pf_seid, pf->main_vsi_seid);
14985 case I40E_SWITCH_ELEMENT_TYPE_PF:
14986 case I40E_SWITCH_ELEMENT_TYPE_VF:
14987 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14988 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14989 case I40E_SWITCH_ELEMENT_TYPE_PE:
14990 case I40E_SWITCH_ELEMENT_TYPE_PA:
14991 /* ignore these for now */
14994 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14995 element_type, seid);
15001 * i40e_fetch_switch_configuration - Get switch config from firmware
15002 * @pf: board private structure
15003 * @printconfig: should we print the contents
15005 * Get the current switch configuration from the device and
15006 * extract a few useful SEID values.
15008 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
15010 struct i40e_aqc_get_switch_config_resp *sw_config;
15016 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
15020 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
15022 u16 num_reported, num_total;
15024 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
15028 dev_info(&pf->pdev->dev,
15029 "get switch config failed err %s aq_err %s\n",
15030 i40e_stat_str(&pf->hw, ret),
15031 i40e_aq_str(&pf->hw,
15032 pf->hw.aq.asq_last_status));
15037 num_reported = le16_to_cpu(sw_config->header.num_reported);
15038 num_total = le16_to_cpu(sw_config->header.num_total);
15041 dev_info(&pf->pdev->dev,
15042 "header: %d reported %d total\n",
15043 num_reported, num_total);
15045 for (i = 0; i < num_reported; i++) {
15046 struct i40e_aqc_switch_config_element_resp *ele =
15047 &sw_config->element[i];
15049 i40e_setup_pf_switch_element(pf, ele, num_reported,
15052 } while (next_seid != 0);
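/* Editor's note: skeleton of the paging idiom used above (illustrative
 * only; assumes the elided arguments are the buffer size and a next_seid
 * cursor). Each i40e_aq_get_switch_config() call returns one buffer worth
 * of elements plus the cursor; iteration stops once the cursor comes back
 * as 0.
 */
#if 0
	u16 next_seid = 0;
	int ret;

	do {
		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret)
			break;
		/* consume sw_config->element[0 .. num_reported - 1] */
	} while (next_seid != 0);
#endif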
15059 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
15060 * @pf: board private structure
15061 * @reinit: if the Main VSI needs to be re-initialized.
15062 * @lock_acquired: indicates whether or not the RTNL lock has been acquired
15064 * Returns 0 on success, negative value on failure
15066 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
15071 /* find out what's out there already */
15072 ret = i40e_fetch_switch_configuration(pf, false);
15074 dev_info(&pf->pdev->dev,
15075 "couldn't fetch switch config, err %s aq_err %s\n",
15076 i40e_stat_str(&pf->hw, ret),
15077 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15080 i40e_pf_reset_stats(pf);
15082 /* set the switch config bit for the whole device to
15083 * support limited promisc or true promisc
15084 * when user requests promisc. The default is limited promisc.
15088 if ((pf->hw.pf_id == 0) &&
15089 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
15090 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
15091 pf->last_sw_conf_flags = flags;
15094 if (pf->hw.pf_id == 0) {
15097 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
15098 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
15100 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
15101 dev_info(&pf->pdev->dev,
15102 "couldn't set switch config bits, err %s aq_err %s\n",
15103 i40e_stat_str(&pf->hw, ret),
15104 i40e_aq_str(&pf->hw,
15105 pf->hw.aq.asq_last_status));
15106 /* not a fatal problem, just keep going */
15108 pf->last_sw_conf_valid_flags = valid_flags;
15111 /* first time setup */
15112 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
15113 struct i40e_vsi *vsi = NULL;
15116 /* Set up the PF VSI associated with the PF's main VSI
15117 * that is already in the HW switch
15119 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
15120 uplink_seid = pf->veb[pf->lan_veb]->seid;
15122 uplink_seid = pf->mac_seid;
15123 if (pf->lan_vsi == I40E_NO_VSI)
15124 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
15126 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
15128 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
15129 i40e_cloud_filter_exit(pf);
15130 i40e_fdir_teardown(pf);
15134 /* force a reset of TC and queue layout configurations */
15135 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
15137 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
15138 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
15139 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
15141 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
15143 i40e_fdir_sb_setup(pf);
15145 /* Setup static PF queue filter control settings */
15146 ret = i40e_setup_pf_filter_control(pf);
15148 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
15150 /* Failure here should not stop continuing other steps */
15153 /* enable RSS in the HW, even for only one queue, as the stack can use the hash.
15156 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
15157 i40e_pf_config_rss(pf);
15159 /* fill in link information and enable LSE reporting */
15160 i40e_link_event(pf);
15162 /* Initialize user-specific link properties */
15163 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
15164 I40E_AQ_AN_COMPLETED) ? true : false);
15168 if (!lock_acquired)
15171 /* repopulate tunnel port filters */
15172 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
15174 if (!lock_acquired)
15181 * i40e_determine_queue_usage - Work out queue distribution
15182 * @pf: board private structure
15184 static void i40e_determine_queue_usage(struct i40e_pf *pf)
15189 pf->num_lan_qps = 0;
15191 /* Find the max queues to be put into basic use. We'll always be
15192 * using TC0, whether or not DCB is running, and TC0 will get the bulk of the queues.
15195 queues_left = pf->hw.func_caps.num_tx_qp;
15197 if ((queues_left == 1) ||
15198 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
15199 /* one qp for PF, no queues for anything else */
15201 pf->alloc_rss_size = pf->num_lan_qps = 1;
15203 /* make sure all the fancies are disabled */
15204 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
15205 I40E_FLAG_IWARP_ENABLED |
15206 I40E_FLAG_FD_SB_ENABLED |
15207 I40E_FLAG_FD_ATR_ENABLED |
15208 I40E_FLAG_DCB_CAPABLE |
15209 I40E_FLAG_DCB_ENABLED |
15210 I40E_FLAG_SRIOV_ENABLED |
15211 I40E_FLAG_VMDQ_ENABLED);
15212 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
15213 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
15214 I40E_FLAG_FD_SB_ENABLED |
15215 I40E_FLAG_FD_ATR_ENABLED |
15216 I40E_FLAG_DCB_CAPABLE))) {
15217 /* one qp for PF */
15218 pf->alloc_rss_size = pf->num_lan_qps = 1;
15219 queues_left -= pf->num_lan_qps;
15221 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
15222 I40E_FLAG_IWARP_ENABLED |
15223 I40E_FLAG_FD_SB_ENABLED |
15224 I40E_FLAG_FD_ATR_ENABLED |
15225 I40E_FLAG_DCB_ENABLED |
15226 I40E_FLAG_VMDQ_ENABLED);
15227 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
15229 /* Not enough queues for all TCs */
15230 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
15231 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
15232 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
15233 I40E_FLAG_DCB_ENABLED);
15234 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
15237 /* limit lan qps to min(num_tx_qp, msix), starting from max(rss_size_max, cpus) */
15238 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
15239 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
15240 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
15241 pf->num_lan_qps = q_max;
15243 queues_left -= pf->num_lan_qps;
15246 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
15247 if (queues_left > 1) {
15248 queues_left -= 1; /* save 1 queue for FD */
15250 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
15251 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
15252 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
15256 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15257 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
15258 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
15259 (queues_left / pf->num_vf_qps));
15260 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
15263 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
15264 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
15265 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
15266 (queues_left / pf->num_vmdq_qps));
15267 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
15270 pf->queues_left = queues_left;
15271 dev_dbg(&pf->pdev->dev,
15272 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
15273 pf->hw.func_caps.num_tx_qp,
15274 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
15275 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
15276 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
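/* Editor's worked example (assumed capabilities, for illustration only):
 * with num_tx_qp = 64, MSI-X enabled, RSS + FD_SB on, 8 requested VFs at
 * 4 qps each and no VMDq: num_lan_qps = min(max(rss_size_max, cpus),
 * num_tx_qp, msix), say 16; 64 - 16 = 48 left; 1 reserved for the FD
 * sideband queue leaves 47; VFs take min(8, 47 / 4) * 4 = 32; so
 * queues_left ends at 15.
 */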
15281 * i40e_setup_pf_filter_control - Setup PF static filter control
15282 * @pf: PF to be setup
15284 * i40e_setup_pf_filter_control sets up a PF's initial filter control
15285 * settings. If PE/FCoE are enabled then it will also set the per PF
15286 * based filter sizes required for them. It also enables Flow director,
15287 * ethertype and macvlan type filter settings for the pf.
15289 * Returns 0 on success, negative on failure
15291 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
15293 struct i40e_filter_control_settings *settings = &pf->filter_settings;
15295 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
15297 /* Flow Director is enabled */
15298 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
15299 settings->enable_fdir = true;
15301 /* Ethtype and MACVLAN filters enabled for PF */
15302 settings->enable_ethtype = true;
15303 settings->enable_macvlan = true;
15305 if (i40e_set_filter_control(&pf->hw, settings))
15311 #define INFO_STRING_LEN 255
15312 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
15313 static void i40e_print_features(struct i40e_pf *pf)
15315 struct i40e_hw *hw = &pf->hw;
15319 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
15323 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
15324 #ifdef CONFIG_PCI_IOV
15325 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
15327 i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
15328 pf->hw.func_caps.num_vsis,
15329 pf->vsi[pf->lan_vsi]->num_queue_pairs);
15330 if (pf->flags & I40E_FLAG_RSS_ENABLED)
15331 i += scnprintf(&buf[i], REMAIN(i), " RSS");
15332 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
15333 i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
15334 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
15335 i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
15336 i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
15338 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
15339 i += scnprintf(&buf[i], REMAIN(i), " DCB");
15340 i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
15341 i += scnprintf(&buf[i], REMAIN(i), " Geneve");
15342 if (pf->flags & I40E_FLAG_PTP)
15343 i += scnprintf(&buf[i], REMAIN(i), " PTP");
15344 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
15345 i += scnprintf(&buf[i], REMAIN(i), " VEB");
15347 i += scnprintf(&buf[i], REMAIN(i), " VEPA");
15349 dev_info(&pf->pdev->dev, "%s\n", buf);
15351 WARN_ON(i > INFO_STRING_LEN);
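/* Editor's note: a hypothetical example of the resulting log line, pieced
 * together from the format strings above (all values made up):
 *
 *   i40e 0000:3b:00.0: Features: PF-id[0] VFs: 8 VSIs: 66 QP: 16 RSS
 *   FD_ATR FD_SB NTUPLE VxLAN Geneve PTP VEPA
 */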
15355 * i40e_get_platform_mac_addr - get platform-specific MAC address
15356 * @pdev: PCI device information struct
15357 * @pf: board private structure
15359 * Look up the MAC address for the device. First we'll try
15360 * eth_platform_get_mac_address, which will check Open Firmware, or arch
15361 * specific fallback. Otherwise, we'll default to the stored value in firmware.
15364 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
15366 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
15367 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
15371 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
15372 * @fec_cfg: FEC option to set in flags
15373 * @flags: ptr to flags in which we set FEC option
15375 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
15377 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
15378 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
15379 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
15380 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
15381 *flags |= I40E_FLAG_RS_FEC;
15382 *flags &= ~I40E_FLAG_BASE_R_FEC;
15384 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
15385 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
15386 *flags |= I40E_FLAG_BASE_R_FEC;
15387 *flags &= ~I40E_FLAG_RS_FEC;
15390 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
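/* Editor's summary of the mapping above; the checks apply in order, so a
 * later match overrides an earlier one (assuming the elided final check
 * guards on no FEC bits being set):
 *   AUTO                    -> RS_FEC | BASE_R_FEC
 *   REQUEST_RS / ABILITY_RS -> RS_FEC (BASE_R_FEC cleared)
 *   REQUEST_KR / ABILITY_KR -> BASE_R_FEC (RS_FEC cleared)
 *   no FEC bits set         -> both flags cleared
 */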
15394 * i40e_check_recovery_mode - check if we are running transition firmware
15395 * @pf: board private structure
15397 * Check registers indicating the firmware runs in recovery mode. Sets the
15398 * appropriate driver state.
15400 * Returns true if the recovery mode was detected, false otherwise
15402 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
15404 u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
15406 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
15407 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
15408 dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
15409 set_bit(__I40E_RECOVERY_MODE, pf->state);
15413 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15414 dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
15420 * i40e_pf_loop_reset - perform reset in a loop.
15421 * @pf: board private structure
15423 * This function is useful when a NIC is about to enter recovery mode.
15424 * When a NIC's internal data structures are corrupted, its firmware
15425 * will eventually enter recovery mode.
15426 * Right after a POR it takes about 7 minutes for the firmware to enter
15427 * recovery mode; until then the NIC is in an intermediate state. After
15428 * that period the NIC almost surely enters recovery mode. The only way
15429 * for the driver to detect the intermediate state is to issue a series
15430 * of PF resets and check the return values.
15431 * A successful PF reset does not rule out recovery mode, so the caller
15432 * still needs to check for recovery mode whenever this function
15433 * returns success. There is also a small chance that the firmware
15434 * hangs in the intermediate state forever.
15435 * Since waiting the full 7 minutes is impractical, this function
15436 * retries for only 10 seconds and then gives up by returning an error.
15438 * Return 0 on success, negative on failure.
15440 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
15442 /* wait max 10 seconds for PF reset to succeed */
15443 const unsigned long time_end = jiffies + 10 * HZ;
15445 struct i40e_hw *hw = &pf->hw;
15448 ret = i40e_pf_reset(hw);
15449 while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
15450 usleep_range(10000, 20000);
15451 ret = i40e_pf_reset(hw);
15454 if (ret == I40E_SUCCESS)
15457 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
15463 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15464 * @pf: board private structure
15466 * Check FW registers to determine if the FW issued an unexpected EMP Reset.
15467 * Each time an unexpected EMP Reset occurs, the FW increments a counter
15468 * of unexpected EMP Resets. When that counter reaches 10, the FW is
15469 * expected to enter Recovery mode.
15471 * Returns true if FW issued unexpected EMP Reset
15473 static bool i40e_check_fw_empr(struct i40e_pf *pf)
15475 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
15476 I40E_GL_FWSTS_FWS1B_MASK;
15477 return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
15478 (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
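/* Editor's note: FWS1B encodes the unexpected-EMPR counter, so the
 * half-open range check above, (EMPR_0, EMPR_10], is true once at least
 * one unexpected EMP Reset has been counted, up to the threshold at which
 * the FW enters Recovery mode.
 */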
15482 * i40e_handle_resets - handle EMP resets and PF resets
15483 * @pf: board private structure
15485 * Handle both EMP resets and PF resets and conclude whether there are
15486 * any issues regarding these resets. If there are any issues, then
15487 * generate a log entry.
15489 * Return 0 if NIC is healthy or negative value when there are issues
15492 static i40e_status i40e_handle_resets(struct i40e_pf *pf)
15494 const i40e_status pfr = i40e_pf_loop_reset(pf);
15495 const bool is_empr = i40e_check_fw_empr(pf);
15497 if (is_empr || pfr != I40E_SUCCESS)
15498 dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
15500 return is_empr ? I40E_ERR_RESET_FAILED : pfr;
15504 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15505 * @pf: board private structure
15506 * @hw: ptr to the hardware info
15508 * This function does a minimal setup of all subsystems needed for running in recovery mode.
15511 * Returns 0 on success, negative on failure
15513 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
15515 struct i40e_vsi *vsi;
15519 pci_save_state(pf->pdev);
15521 /* set up periodic task facility */
15522 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15523 pf->service_timer_period = HZ;
15525 INIT_WORK(&pf->service_task, i40e_service_task);
15526 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15528 err = i40e_init_interrupt_scheme(pf);
15530 goto err_switch_setup;
15532 /* The number of VSIs reported by the FW is the minimum guaranteed
15533 * to us; HW supports far more and we share the remaining pool with
15534 * the other PFs. We allocate space for more than the guarantee with
15535 * the understanding that we might not get them all later.
15537 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15538 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15540 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15542 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
15543 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15547 goto err_switch_setup;
15550 /* We allocate one VSI, which is the absolute minimum needed
15551 * to register the netdev
15553 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
15556 goto err_switch_setup;
15558 pf->lan_vsi = v_idx;
15559 vsi = pf->vsi[v_idx];
15562 goto err_switch_setup;
15564 vsi->alloc_queue_pairs = 1;
15565 err = i40e_config_netdev(vsi);
15567 goto err_switch_setup;
15568 err = register_netdev(vsi->netdev);
15570 goto err_switch_setup;
15571 vsi->netdev_registered = true;
15572 i40e_dbg_pf_init(pf);
15574 err = i40e_setup_misc_vector_for_recovery_mode(pf);
15576 goto err_switch_setup;
15578 /* tell the firmware that we're starting */
15579 i40e_send_version(pf);
15581 /* since everything's happy, start the service_task timer */
15582 mod_timer(&pf->service_timer,
15583 round_jiffies(jiffies + pf->service_timer_period));
15588 i40e_reset_interrupt_capability(pf);
15589 timer_shutdown_sync(&pf->service_timer);
15590 i40e_shutdown_adminq(hw);
15591 iounmap(hw->hw_addr);
15592 pci_disable_pcie_error_reporting(pf->pdev);
15593 pci_release_mem_regions(pf->pdev);
15594 pci_disable_device(pf->pdev);
15601 * i40e_set_subsystem_device_id - set subsystem device id
15602 * @hw: pointer to the hardware info
15604 * Set PCI subsystem device id either from a pci_dev structure or
15605 * a specific FW register.
15607 static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
15609 struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
15611 hw->subsystem_device_id = pdev->subsystem_device ?
15612 pdev->subsystem_device :
15613 (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
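/* Editor's note: an equivalent explicit form of the ternary above
 * (illustrative only). The register fallback presumably covers parts or
 * platforms that report a zero PCI subsystem device ID.
 */
#if 0
	if (pdev->subsystem_device)
		hw->subsystem_device_id = pdev->subsystem_device;
	else
		hw->subsystem_device_id =
			(ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
#endif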
15617 * i40e_probe - Device initialization routine
15618 * @pdev: PCI device information struct
15619 * @ent: entry in i40e_pci_tbl
15621 * i40e_probe initializes a PF identified by a pci_dev structure.
15622 * The OS initialization, configuring of the PF private structure,
15623 * and a hardware reset occur.
15625 * Returns 0 on success, negative on failure
15627 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15629 struct i40e_aq_get_phy_abilities_resp abilities;
15630 #ifdef CONFIG_I40E_DCB
15631 enum i40e_get_fw_lldp_status_resp lldp_status;
15632 i40e_status status;
15633 #endif /* CONFIG_I40E_DCB */
15634 struct i40e_pf *pf;
15635 struct i40e_hw *hw;
15636 static u16 pfs_found;
15643 err = pci_enable_device_mem(pdev);
15647 /* set up for high or low dma */
15648 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
15650 dev_err(&pdev->dev,
15651 "DMA configuration failed: 0x%x\n", err);
15655 /* set up pci connections */
15656 err = pci_request_mem_regions(pdev, i40e_driver_name);
15658 dev_info(&pdev->dev,
15659 "pci_request_selected_regions failed %d\n", err);
15663 pci_enable_pcie_error_reporting(pdev);
15664 pci_set_master(pdev);
15666 /* Now that we have a PCI connection, we need to do the
15667 * low level device setup. This is primarily setting up
15668 * the Admin Queue structures and then querying for the
15669 * device's current profile information.
15671 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
15678 set_bit(__I40E_DOWN, pf->state);
15683 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
15684 I40E_MAX_CSR_SPACE);
15685 /* We believe that the highest register to read is
15686 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
15687 * is not less than that before mapping, to prevent a kernel panic.
15690 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
15691 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
15696 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
15697 if (!hw->hw_addr) {
15699 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
15700 (unsigned int)pci_resource_start(pdev, 0),
15701 pf->ioremap_len, err);
15704 hw->vendor_id = pdev->vendor;
15705 hw->device_id = pdev->device;
15706 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
15707 hw->subsystem_vendor_id = pdev->subsystem_vendor;
15708 i40e_set_subsystem_device_id(hw);
15709 hw->bus.device = PCI_SLOT(pdev->devfn);
15710 hw->bus.func = PCI_FUNC(pdev->devfn);
15711 hw->bus.bus_id = pdev->bus->number;
15712 pf->instance = pfs_found;
15714 /* Select something other than the 802.1ad ethertype for the
15715 * switch to use internally and drop on ingress.
15717 hw->switch_tag = 0xffff;
15718 hw->first_tag = ETH_P_8021AD;
15719 hw->second_tag = ETH_P_8021Q;
15721 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
15722 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
15723 INIT_LIST_HEAD(&pf->ddp_old_prof);
15725 /* set up the locks for the AQ, do this only once in probe
15726 * and destroy them only once in remove
15728 mutex_init(&hw->aq.asq_mutex);
15729 mutex_init(&hw->aq.arq_mutex);
15731 pf->msg_enable = netif_msg_init(debug,
15736 pf->hw.debug_mask = debug;
15738 /* do a special CORER for clearing PXE mode once at init */
15739 if (hw->revision_id == 0 &&
15740 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
15741 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
15746 i40e_clear_pxe_mode(hw);
15749 /* Reset here to make sure all is clean and to define PF 'n' */
15752 err = i40e_set_mac_type(hw);
15754 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15759 err = i40e_handle_resets(pf);
15763 i40e_check_recovery_mode(pf);
15765 if (is_kdump_kernel()) {
15766 hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
15767 hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
15769 hw->aq.num_arq_entries = I40E_AQ_LEN;
15770 hw->aq.num_asq_entries = I40E_AQ_LEN;
15772 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15773 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15774 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
15776 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
15778 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
15780 err = i40e_init_shared_code(hw);
15782 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15787 /* set up a default setting for link flow control */
15788 pf->hw.fc.requested_mode = I40E_FC_NONE;
15790 err = i40e_init_adminq(hw);
15792 if (err == I40E_ERR_FIRMWARE_API_VERSION)
15793 dev_info(&pdev->dev,
15794 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
15795 hw->aq.api_maj_ver,
15796 hw->aq.api_min_ver,
15797 I40E_FW_API_VERSION_MAJOR,
15798 I40E_FW_MINOR_VERSION(hw));
15800 dev_info(&pdev->dev,
15801 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
15805 i40e_get_oem_version(hw);
15807 /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
15808 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
15809 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
15810 hw->aq.api_maj_ver, hw->aq.api_min_ver,
15811 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
15812 hw->subsystem_vendor_id, hw->subsystem_device_id);
15814 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
15815 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
15816 dev_dbg(&pdev->dev,
15817 "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
15818 hw->aq.api_maj_ver,
15819 hw->aq.api_min_ver,
15820 I40E_FW_API_VERSION_MAJOR,
15821 I40E_FW_MINOR_VERSION(hw));
15822 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
15823 dev_info(&pdev->dev,
15824 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
15825 hw->aq.api_maj_ver,
15826 hw->aq.api_min_ver,
15827 I40E_FW_API_VERSION_MAJOR,
15828 I40E_FW_MINOR_VERSION(hw));
15830 i40e_verify_eeprom(pf);
15832 /* Rev 0 hardware was never productized */
15833 if (hw->revision_id < 1)
15834 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
15836 i40e_clear_pxe_mode(hw);
15838 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
15840 goto err_adminq_setup;
15842 err = i40e_sw_init(pf);
15844 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
15848 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15849 return i40e_init_recovery_mode(pf, hw);
15851 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
15852 hw->func_caps.num_rx_qp, 0, 0);
15854 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
15855 goto err_init_lan_hmc;
15858 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
15860 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
15862 goto err_configure_lan_hmc;
15865 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
15866 * Ignore error return codes, because if LLDP was already disabled
15867 * via hardware settings, this call will fail.
15869 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
15870 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
15871 i40e_aq_stop_lldp(hw, true, false, NULL);
15874 /* allow a platform config to override the HW addr */
15875 i40e_get_platform_mac_addr(pdev, pf);
15877 if (!is_valid_ether_addr(hw->mac.addr)) {
15878 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
15882 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
15883 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
15884 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
15885 if (is_valid_ether_addr(hw->mac.port_addr))
15886 pf->hw_features |= I40E_HW_PORT_ID_VALID;
15888 i40e_ptp_alloc_pins(pf);
15889 pci_set_drvdata(pdev, pf);
15890 pci_save_state(pdev);
15892 #ifdef CONFIG_I40E_DCB
15893 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
15895 lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
15896 (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) :
15897 (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP);
15898 dev_info(&pdev->dev,
15899 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
15900 "FW LLDP is disabled\n" :
15901 "FW LLDP is enabled\n");
15903 /* Enable FW to write default DCB config on link-up */
15904 i40e_aq_set_dcb_parameters(hw, true, NULL);
15906 err = i40e_init_pf_dcb(pf);
15908 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15909 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
15910 /* Continue without DCB enabled */
15912 #endif /* CONFIG_I40E_DCB */
15914 /* set up periodic task facility */
15915 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15916 pf->service_timer_period = HZ;
15918 INIT_WORK(&pf->service_task, i40e_service_task);
15919 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15921 /* NVM bit on means WoL disabled for the port */
15922 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
15923 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
15924 pf->wol_en = false;
15927 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15929 /* set up the main switch operations */
15930 i40e_determine_queue_usage(pf);
15931 err = i40e_init_interrupt_scheme(pf);
15933 goto err_switch_setup;
15935 /* Reduce Tx and Rx queue pairs for kdump.
15936 * When MSI-X is enabled, it is not allowed to use more TC queue
15937 * pairs than there are MSI-X vectors (pf->num_lan_msix). Thus
15938 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
15940 if (is_kdump_kernel())
15941 pf->num_lan_msix = 1;
15943 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15944 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15945 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15946 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15947 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15948 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15949 UDP_TUNNEL_TYPE_GENEVE;
15951 /* The number of VSIs reported by the FW is the minimum guaranteed
15952 * to us; HW supports far more and we share the remaining pool with
15953 * the other PFs. We allocate space for more than the guarantee with
15954 * the understanding that we might not get them all later.
15956 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15957 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15959 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15960 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15961 dev_warn(&pf->pdev->dev,
15962 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15963 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15964 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15967 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
15968 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15972 goto err_switch_setup;
15975 #ifdef CONFIG_PCI_IOV
15976 /* prep for VF support */
15977 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15978 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15979 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15980 if (pci_num_vf(pdev))
15981 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15984 err = i40e_setup_pf_switch(pf, false, false);
15986 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15989 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15991 /* if FDIR VSI was set up, start it now */
15992 for (i = 0; i < pf->num_alloc_vsi; i++) {
15993 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15994 i40e_vsi_open(pf->vsi[i]);
15999 /* The driver only wants link up/down and module qualification
16000 * reports from firmware. Note the negative logic.
16002 err = i40e_aq_set_phy_int_mask(&pf->hw,
16003 ~(I40E_AQ_EVENT_LINK_UPDOWN |
16004 I40E_AQ_EVENT_MEDIA_NA |
16005 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
16007 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
16008 i40e_stat_str(&pf->hw, err),
16009 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16011 /* Reconfigure the hardware to allow a smaller MSS in the case
16012 * of TSO, so that a small MSS+TSO frame does not fire the MDD
16013 * and cause a reset.
16015 val = rd32(hw, I40E_REG_MSS);
16016 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
16017 val &= ~I40E_REG_MSS_MIN_MASK;
16018 val |= I40E_64BYTE_MSS;
16019 wr32(hw, I40E_REG_MSS, val);
16022 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
16024 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
16026 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
16027 i40e_stat_str(&pf->hw, err),
16028 i40e_aq_str(&pf->hw,
16029 pf->hw.aq.asq_last_status));
16031 /* The main driver is (mostly) up and happy. We need to set this state
16032 * before setting up the misc vector or we get a race and the vector
16033 * ends up disabled forever.
16035 clear_bit(__I40E_DOWN, pf->state);
16037 /* In case of MSIX we are going to setup the misc vector right here
16038 * to handle admin queue events etc. In case of legacy and MSI
16039 * the misc functionality and queue processing is combined in
16040 * the same vector and that gets setup at open.
16042 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
16043 err = i40e_setup_misc_vector(pf);
16045 dev_info(&pdev->dev,
16046 "setup of misc vector failed: %d\n", err);
16047 i40e_cloud_filter_exit(pf);
16048 i40e_fdir_teardown(pf);
16053 #ifdef CONFIG_PCI_IOV
16054 /* prep for VF support */
16055 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
16056 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
16057 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
16058 /* disable link interrupts for VFs */
16059 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
16060 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
16061 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
16064 if (pci_num_vf(pdev)) {
16065 dev_info(&pdev->dev,
16066 "Active VFs found, allocating resources.\n");
16067 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
16069 dev_info(&pdev->dev,
16070 "Error %d allocating resources for existing VFs\n",
16074 #endif /* CONFIG_PCI_IOV */
16076 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
16077 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
16078 pf->num_iwarp_msix,
16079 I40E_IWARP_IRQ_PILE_ID);
16080 if (pf->iwarp_base_vector < 0) {
16081 dev_info(&pdev->dev,
16082 "failed to get tracking for %d vectors for IWARP err=%d\n",
16083 pf->num_iwarp_msix, pf->iwarp_base_vector);
16084 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
16088 i40e_dbg_pf_init(pf);
16090 /* tell the firmware that we're starting */
16091 i40e_send_version(pf);
16093 /* since everything's happy, start the service_task timer */
16094 mod_timer(&pf->service_timer,
16095 round_jiffies(jiffies + pf->service_timer_period));
16097 /* add this PF to client device list and launch a client service task */
16098 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
16099 err = i40e_lan_add_device(pf);
16101 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
16105 #define PCI_SPEED_SIZE 8
16106 #define PCI_WIDTH_SIZE 8
16107 /* Devices on the IOSF bus do not have this information
16108 * and will report PCI Gen 1 x 1 by default, so don't bother checking.
16111 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
16112 char speed[PCI_SPEED_SIZE] = "Unknown";
16113 char width[PCI_WIDTH_SIZE] = "Unknown";
16115 /* Get the negotiated link width and speed from PCI config space.
16118 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
16121 i40e_set_pci_config_data(hw, link_status);
16123 switch (hw->bus.speed) {
16124 case i40e_bus_speed_8000:
16125 strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
16126 case i40e_bus_speed_5000:
16127 strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
16128 case i40e_bus_speed_2500:
16129 strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
16133 switch (hw->bus.width) {
16134 case i40e_bus_width_pcie_x8:
16135 strscpy(width, "8", PCI_WIDTH_SIZE); break;
16136 case i40e_bus_width_pcie_x4:
16137 strscpy(width, "4", PCI_WIDTH_SIZE); break;
16138 case i40e_bus_width_pcie_x2:
16139 strscpy(width, "2", PCI_WIDTH_SIZE); break;
16140 case i40e_bus_width_pcie_x1:
16141 strscpy(width, "1", PCI_WIDTH_SIZE); break;
16146 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
16149 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
16150 hw->bus.speed < i40e_bus_speed_8000) {
16151 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
16152 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
16156 /* get the requested speeds from the fw */
16157 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
16159 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
16160 i40e_stat_str(&pf->hw, err),
16161 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16162 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
16164 /* set the FEC config due to the board capabilities */
16165 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
16167 /* get the supported phy types from the fw */
16168 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
16170 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
16171 i40e_stat_str(&pf->hw, err),
16172 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16174 /* make sure the MFS hasn't been set lower than the default */
16175 #define MAX_FRAME_SIZE_DEFAULT 0x2600
16176 val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
16177 I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
16178 if (val < MAX_FRAME_SIZE_DEFAULT)
16179 dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
16182 /* Add a filter that drops all Flow control frames transmitted from
16183 * any VSI. By doing so we stop a malicious VF from sending out
16184 * PAUSE or PFC frames and potentially controlling traffic for other
16185 * PF/VF VSIs.
16186 * The FW can still send Flow control frames if enabled.
16188 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
16189 pf->main_vsi_seid);
16191 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
16192 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
16193 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
16194 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
16195 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
16196 /* print a string summarizing features */
16197 i40e_print_features(pf);
16201 /* Unwind what we've done if something failed in the setup */
16203 set_bit(__I40E_DOWN, pf->state);
16204 i40e_clear_interrupt_scheme(pf);
16207 i40e_reset_interrupt_capability(pf);
16208 timer_shutdown_sync(&pf->service_timer);
16210 err_configure_lan_hmc:
16211 (void)i40e_shutdown_lan_hmc(hw);
16213 kfree(pf->qp_pile);
16217 iounmap(hw->hw_addr);
16221 pci_disable_pcie_error_reporting(pdev);
16222 pci_release_mem_regions(pdev);
16225 pci_disable_device(pdev);
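/* Editor's note: the error labels above unwind probe in reverse order of
 * setup (interrupt scheme and service timer, then LAN HMC, then adminq and
 * the register mapping, and finally the PCI resources), the usual
 * goto-based unwind idiom for kernel probe functions.
 */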
16230 * i40e_remove - Device removal routine
16231 * @pdev: PCI device information struct
16233 * i40e_remove is called by the PCI subsystem to alert the driver
16234 * that it should release a PCI device. This could be caused by a
16235 * Hot-Plug event, or because the driver is going to be removed from
16238 static void i40e_remove(struct pci_dev *pdev)
16240 struct i40e_pf *pf = pci_get_drvdata(pdev);
16241 struct i40e_hw *hw = &pf->hw;
16242 i40e_status ret_code;
16245 i40e_dbg_pf_exit(pf);
16249 /* Disable RSS in hw */
16250 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
16251 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
16253 /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
16254 * flags; once they are set, i40e_rebuild should not be called, as
16255 * i40e_prep_for_reset always returns early.
16257 while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
16258 usleep_range(1000, 2000);
16259 set_bit(__I40E_IN_REMOVE, pf->state);
16261 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
16262 set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
16264 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
16266 /* no more scheduling of any task */
16267 set_bit(__I40E_SUSPENDED, pf->state);
16268 set_bit(__I40E_DOWN, pf->state);
16269 if (pf->service_timer.function)
16270 timer_shutdown_sync(&pf->service_timer);
16271 if (pf->service_task.func)
16272 cancel_work_sync(&pf->service_task);
16274 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
16275 struct i40e_vsi *vsi = pf->vsi[0];
16277 /* We know that we have allocated only one VSI for this PF;
16278 * it was allocated just for registering the netdevice, so that
16279 * the interface is visible in the 'ifconfig' output
16281 unregister_netdev(vsi->netdev);
16282 free_netdev(vsi->netdev);
16287 /* Client close must be called explicitly here because the timer
16288 * has been stopped.
16290 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16292 i40e_fdir_teardown(pf);
16294 /* If there is a switch structure or any orphans, remove them.
16295 * This will leave only the PF's VSI remaining.
16297 for (i = 0; i < I40E_MAX_VEB; i++) {
16301 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
16302 pf->veb[i]->uplink_seid == 0)
16303 i40e_switch_branch_release(pf->veb[i]);
16306 /* Now we can shutdown the PF's VSI, just before we kill the adminq and hmc.
16309 if (pf->vsi[pf->lan_vsi])
16310 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
16312 i40e_cloud_filter_exit(pf);
16314 /* remove attached clients */
16315 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
16316 ret_code = i40e_lan_del_device(pf);
16318 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
16322 /* shutdown and destroy the HMC */
16323 if (hw->hmc.hmc_obj) {
16324 ret_code = i40e_shutdown_lan_hmc(hw);
16326 dev_warn(&pdev->dev,
16327 "Failed to destroy the HMC resources: %d\n",
16332 /* Free MSI/legacy interrupt 0 when in recovery mode. */
16333 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16334 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16335 free_irq(pf->pdev->irq, pf);
16337 /* shutdown the adminq */
16338 i40e_shutdown_adminq(hw);
16340 /* destroy the locks only once, here */
16341 mutex_destroy(&hw->aq.arq_mutex);
16342 mutex_destroy(&hw->aq.asq_mutex);
16344 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
16346 i40e_clear_interrupt_scheme(pf);
16347 for (i = 0; i < pf->num_alloc_vsi; i++) {
16349 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
16350 i40e_vsi_clear_rings(pf->vsi[i]);
16351 i40e_vsi_clear(pf->vsi[i]);
16357 for (i = 0; i < I40E_MAX_VEB; i++) {
16362 kfree(pf->qp_pile);
16365 iounmap(hw->hw_addr);
16367 pci_release_mem_regions(pdev);
16369 pci_disable_pcie_error_reporting(pdev);
16370 pci_disable_device(pdev);
16374 * i40e_pci_error_detected - warning that something funky happened in PCI land
16375 * @pdev: PCI device information struct
16376 * @error: the type of PCI error
16378 * Called to warn that something happened and the error handling steps
16379 * are in progress. Allows the driver to quiesce things, be ready for remediation.
16382 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
16383 pci_channel_state_t error)
16385 struct i40e_pf *pf = pci_get_drvdata(pdev);
16387 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
16390 dev_info(&pdev->dev,
16391 "Cannot recover - error happened during device probe\n");
16392 return PCI_ERS_RESULT_DISCONNECT;
16395 /* shutdown all operations */
16396 if (!test_bit(__I40E_SUSPENDED, pf->state))
16397 i40e_prep_for_reset(pf);
16399 /* Request a slot reset */
16400 return PCI_ERS_RESULT_NEED_RESET;
16404 * i40e_pci_error_slot_reset - a PCI slot reset just happened
16405 * @pdev: PCI device information struct
16407 * Called to find if the driver can work with the device now that
16408 * the pci slot has been reset. If a basic connection seems good
16409 * (registers are readable and have sane content) then return a
16410 * happy little PCI_ERS_RESULT_xxx.
16412 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
16414 struct i40e_pf *pf = pci_get_drvdata(pdev);
16415 pci_ers_result_t result;
16418 dev_dbg(&pdev->dev, "%s\n", __func__);
16419 if (pci_enable_device_mem(pdev)) {
16420 dev_info(&pdev->dev,
16421 "Cannot re-enable PCI device after reset.\n");
16422 result = PCI_ERS_RESULT_DISCONNECT;
16424 pci_set_master(pdev);
16425 pci_restore_state(pdev);
16426 pci_save_state(pdev);
16427 pci_wake_from_d3(pdev, false);
16429 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
16431 result = PCI_ERS_RESULT_RECOVERED;
16433 result = PCI_ERS_RESULT_DISCONNECT;
16440 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16441 * @pdev: PCI device information struct
16443 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
16445 struct i40e_pf *pf = pci_get_drvdata(pdev);
16447 i40e_prep_for_reset(pf);
16451 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16452 * @pdev: PCI device information struct
16454 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
16456 struct i40e_pf *pf = pci_get_drvdata(pdev);
16458 if (test_bit(__I40E_IN_REMOVE, pf->state))
16461 i40e_reset_and_rebuild(pf, false, false);
16465 * i40e_pci_error_resume - restart operations after PCI error recovery
16466 * @pdev: PCI device information struct
16468 * Called to allow the driver to bring things back up after PCI error
16469 * and/or reset recovery has finished.
16471 static void i40e_pci_error_resume(struct pci_dev *pdev)
16473 struct i40e_pf *pf = pci_get_drvdata(pdev);
16475 dev_dbg(&pdev->dev, "%s\n", __func__);
16476 if (test_bit(__I40E_SUSPENDED, pf->state))
16479 i40e_handle_reset_warning(pf, false);
16483 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16484 * using the mac_address_write admin q function
16485 * @pf: pointer to i40e_pf struct
16487 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
16489 struct i40e_hw *hw = &pf->hw;
16494 /* Get current MAC address in case it's an LAA */
16495 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
16496 ether_addr_copy(mac_addr,
16497 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
16499 dev_err(&pf->pdev->dev,
16500 "Failed to retrieve MAC address; using default\n");
16501 ether_addr_copy(mac_addr, hw->mac.addr);
16504 /* The FW expects the mac address write cmd to first be called with
16505 * one of these flags before calling it again with the multicast enable flags.
16508 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
16510 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
16511 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
16513 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16515 dev_err(&pf->pdev->dev,
16516 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
16520 flags = I40E_AQC_MC_MAG_EN
16521 | I40E_AQC_WOL_PRESERVE_ON_PFR
16522 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
16523 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16525 dev_err(&pf->pdev->dev,
16526 "Failed to enable Multicast Magic Packet wake up\n");
16530 * i40e_shutdown - PCI callback for shutting down
16531 * @pdev: PCI device information struct
16533 static void i40e_shutdown(struct pci_dev *pdev)
16535 struct i40e_pf *pf = pci_get_drvdata(pdev);
16536 struct i40e_hw *hw = &pf->hw;
16538 set_bit(__I40E_SUSPENDED, pf->state);
16539 set_bit(__I40E_DOWN, pf->state);
16541 del_timer_sync(&pf->service_timer);
16542 cancel_work_sync(&pf->service_task);
16543 i40e_cloud_filter_exit(pf);
16544 i40e_fdir_teardown(pf);
16546 /* Client close must be called explicitly here because the timer
16547 * has been stopped.
16549 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16551 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16552 i40e_enable_mc_magic_wake(pf);
16554 i40e_prep_for_reset(pf);
16556 wr32(hw, I40E_PFPM_APM,
16557 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16558 wr32(hw, I40E_PFPM_WUFC,
16559 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16561 /* Free MSI/legacy interrupt 0 when in recovery mode. */
16562 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16563 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16564 free_irq(pf->pdev->irq, pf);
16566 /* Since we're going to destroy queues during the
16567 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this flow.
16571 i40e_clear_interrupt_scheme(pf);
16574 if (system_state == SYSTEM_POWER_OFF) {
16575 pci_wake_from_d3(pdev, pf->wol_en);
16576 pci_set_power_state(pdev, PCI_D3hot);
16581 * i40e_suspend - PM callback for moving to D3
16582 * @dev: generic device information structure
16584 static int __maybe_unused i40e_suspend(struct device *dev)
16586 struct i40e_pf *pf = dev_get_drvdata(dev);
16587 struct i40e_hw *hw = &pf->hw;
16589 /* If we're already suspended, then there is nothing to do */
16590 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
16593 set_bit(__I40E_DOWN, pf->state);
16595 /* Ensure service task will not be running */
16596 del_timer_sync(&pf->service_timer);
16597 cancel_work_sync(&pf->service_task);
16599 /* Client close must be called explicitly here because the timer
16600 * has been stopped.
16602 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16604 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16605 i40e_enable_mc_magic_wake(pf);
16607 /* Since we're going to destroy queues during the
16608 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this flow.
16613 i40e_prep_for_reset(pf);
16615 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16616 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16618 /* Clear the interrupt scheme and release our IRQs so that the system
16619 * can safely hibernate even when there are a large number of CPUs.
16620 * Otherwise hibernation might fail when mapping all the vectors back to CPU0.
16623 i40e_clear_interrupt_scheme(pf);
16631 * i40e_resume - PM callback for waking up from D3
16632 * @dev: generic device information structure
16634 static int __maybe_unused i40e_resume(struct device *dev)
16636 struct i40e_pf *pf = dev_get_drvdata(dev);
16639 /* If we're not suspended, then there is nothing to do */
16640 if (!test_bit(__I40E_SUSPENDED, pf->state))
16643 /* We need to hold the RTNL lock prior to restoring interrupt schemes,
16644 * since we're going to be restoring queues
16648 /* We cleared the interrupt scheme when we suspended, so we need to
16649 * restore it now to resume device functionality.
16651 err = i40e_restore_interrupt_scheme(pf);
16653 dev_err(dev, "Cannot restore interrupt scheme: %d\n",
16657 clear_bit(__I40E_DOWN, pf->state);
16658 i40e_reset_and_rebuild(pf, false, true);
16662 /* Clear suspended state last after everything is recovered */
16663 clear_bit(__I40E_SUSPENDED, pf->state);
16665 /* Restart the service task */
16666 mod_timer(&pf->service_timer,
16667 round_jiffies(jiffies + pf->service_timer_period));
16672 static const struct pci_error_handlers i40e_err_handler = {
16673 .error_detected = i40e_pci_error_detected,
16674 .slot_reset = i40e_pci_error_slot_reset,
16675 .reset_prepare = i40e_pci_error_reset_prepare,
16676 .reset_done = i40e_pci_error_reset_done,
16677 .resume = i40e_pci_error_resume,
16680 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
16682 static struct pci_driver i40e_driver = {
16683 .name = i40e_driver_name,
16684 .id_table = i40e_pci_tbl,
16685 .probe = i40e_probe,
16686 .remove = i40e_remove,
16688 .pm = &i40e_pm_ops,
16690 .shutdown = i40e_shutdown,
16691 .err_handler = &i40e_err_handler,
16692 .sriov_configure = i40e_pci_sriov_configure,
16696 * i40e_init_module - Driver registration routine
16698 * i40e_init_module is the first routine called when the driver is
16699 * loaded. All it does is register with the PCI subsystem.
16701 static int __init i40e_init_module(void)
16705 pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
16706 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
16708 /* There is no need to throttle the number of active tasks because
16709 * each device limits its own task using a state bit for scheduling
16710 * the service task, and the device tasks do not interfere with each
16711 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
16712 * since we need to be able to guarantee forward progress even under
16715 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
16717 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
16722 err = pci_register_driver(&i40e_driver);
16724 destroy_workqueue(i40e_wq);
16731 module_init(i40e_init_module);
16734 * i40e_exit_module - Driver exit cleanup routine
16736 * i40e_exit_module is called just before the driver is removed from memory.
16739 static void __exit i40e_exit_module(void)
16741 pci_unregister_driver(&i40e_driver);
16742 destroy_workqueue(i40e_wq);
16743 ida_destroy(&i40e_client_ida);
16746 module_exit(i40e_exit_module);