// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock_drv.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
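/* Illustrative usage, per the parameter description above: a small value
 * such as "modprobe i40e debug=3" selects a message level, while a value
 * with the high bit set, e.g. "debug=0x80000010", is interpreted as a
 * debug mask (the 0x8XXXXXXX form; example values are hypothetical).
 */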
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *i40e_wq;
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
				  struct net_device *netdev, int delta)
{
	struct netdev_hw_addr *ha;

	if (!f || !netdev)
		return;

	netdev_for_each_mc_addr(ha, netdev) {
		if (ether_addr_equal(ha->addr, f->macaddr)) {
			ha->refcount += delta;
			if (ha->refcount <= 0)
				ha->refcount = 1;
			break;
		}
	}
}
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* Allocate last queue in the pile for FDIR VSI queue
	 * so it doesn't fragment the qp_pile
	 */
	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
			dev_err(&pf->pdev->dev,
				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
				pile->num_entries - 1);
			return -ENOMEM;
		}
		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
		return pile->num_entries - 1;
	}

	i = 0;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
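/* Illustrative walk-through of the allocator above: with num_entries = 8
 * and entries 0-2 already stamped valid, i40e_get_lump(pf, pile, 3, id)
 * skips indices 0-2, finds the free run at 3-5, marks each entry with
 * (id | I40E_PILE_VALID_BIT) and returns base index 3; a later
 * i40e_put_lump(pile, 3, id) clears that same run and returns its length.
 */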
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	u16 i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	return count;
}
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	      test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
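/* Worked wrap-around example for the math above: with an offset of
 * 0xFFFFFFFFFFF0 and a raw reading that has rolled over to 0x10,
 * (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since
 * the offset was captured; the final mask keeps the result in 48 bits.
 */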
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u64 tx_restart, tx_busy;
	struct i40e_ring *p;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped = 0;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rx_reuse = 0;
	rx_alloc = 0;
	rx_waive = 0;
	rx_busy = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_stopped += p->tx_stats.tx_stopped;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
		rx_reuse += p->rx_stats.page_reuse_count;
		rx_alloc += p->rx_stats.page_alloc_count;
		rx_waive += p->rx_stats.page_waive_count;
		rx_busy += p->rx_stats.page_busy_count;

		if (i40e_enabled_xdp_vsi(vsi)) {
			/* locate XDP ring */
			p = READ_ONCE(vsi->xdp_rings[q]);
			if (!p)
				continue;

			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				packets = p->stats.packets;
				bytes = p->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
			tx_b += bytes;
			tx_p += packets;
			tx_restart += p->tx_stats.restart_queue;
			tx_busy += p->tx_stats.tx_busy;
			tx_linearize += p->tx_stats.tx_linearize;
			tx_force_wb += p->tx_stats.tx_force_wb;
		}
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_stopped = tx_stopped;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;
	vsi->rx_page_reuse = rx_reuse;
	vsi->rx_page_alloc = rx_alloc;
	vsi->rx_page_waive = rx_waive;
	vsi->rx_page_busy = rx_busy;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;
	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}
	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}
/**
 * i40e_count_filters - counts VSI mac filters
 * @vsi: the VSI to be searched
 *
 * Returns count of mac filters
 **/
int i40e_count_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;
	int cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		++cnt;

	return cnt;
}
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters, which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}
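/* Illustrative timeline for the bookkeeping above: adding a filter with
 * vlan = 100 sets has_vlan_filter in i40e_add_filter(), so this helper
 * returns true; once the last VLAN filter is removed, the re-check done
 * during filter sync clears the flag and the VSI falls back to
 * I40E_VLAN_ANY matching.
 */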
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
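/* Example of rules (a)-(c) above: with pvid = 10, a filter at VLAN=-1 is
 * re-added at VLAN=10; with no PVID but vlan_filters > 0, VLAN=-1 filters
 * become VLAN=0 (untagged only); with no PVID and no VLAN filters, VLAN=0
 * filters become VLAN=-1 so they match tagged and untagged traffic alike.
 */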
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}
/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	eth_hw_addr_set(netdev, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
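	/* Illustrative encoding of the qmap above: with offset = 0 and
	 * num_qps = 6, pow becomes 3 (the next power of two is 8), so TC0's
	 * qmap advertises an 8-queue region starting at queue 0 even though
	 * only the first 6 queue pairs are actually used by the VSI.
	 */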
	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 num_tc_qps = 0;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount = 0;
	u8 offset = 0;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;

	/* zero out queue mapping, it will get updated on the end of the function */
	memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));

	if (vsi->type == I40E_VSI_MAIN) {
		/* This code helps add more queue to the VSI if we have
		 * more cores than RSS can support, the higher cores will
		 * be served by ATR or other filters. Furthermore, the
		 * non-zero req_queue_pairs says that user requested a new
		 * queue count via ethtool's set_channels, so use this
		 * value for queues distribution across traffic classes
		 */
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Number of queues per enabled TC */
	if (vsi->type == I40E_VSI_MAIN ||
	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
		num_tc_qps = vsi->num_queue_pairs;
	else
		num_tc_qps = vsi->alloc_queue_pairs;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
		num_tc_qps = num_tc_qps / numtc;
		num_tc_qps = min_t(int, num_tc_qps,
				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow use more TC queue pairs than MSI-X vectors exist */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
						   I40E_FLAG_FD_ATR_ENABLED)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				fallthrough;
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}
	/* Do not change previously set num_queue_pairs for PFs and VFs*/
	if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
	    (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
		vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
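/* Illustrative mapping from the function above: an SR-IOV VSI with
 * base_queue = 64 and 4 queue pairs publishes each absolute queue id,
 * queue_mapping[] = {64, 65, 66, 67}, under the non-contiguous flag,
 * whereas other VSI types publish only queue_mapping[0] = 64 and rely
 * on the contiguous-map flag.
 */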
1973 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1974 * @netdev: the netdevice
1975 * @addr: address to add
1977 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1978 * __dev_(uc|mc)_sync from .set_rx_mode and are guaranteed to hold the hash lock.
1980 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1982 struct i40e_netdev_priv *np = netdev_priv(netdev);
1983 struct i40e_vsi *vsi = np->vsi;
1985 if (i40e_add_mac_filter(vsi, addr))
1992 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1993 * @netdev: the netdevice
1994 * @addr: address to remove
1996 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1997 * __dev_(uc|mc)_sync from .set_rx_mode and are guaranteed to hold the hash lock.
1999 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
2001 struct i40e_netdev_priv *np = netdev_priv(netdev);
2002 struct i40e_vsi *vsi = np->vsi;
2004 /* Under some circumstances, we might receive a request to delete
2005 * our own device address from our uc list. Because we store the
2006 * device address in the VSI's MAC/VLAN filter list, we need to ignore
2007 * such requests and not delete our device address from this list.
2009 if (ether_addr_equal(addr, netdev->dev_addr))
2012 i40e_del_mac_filter(vsi, addr);
2018 * i40e_set_rx_mode - NDO callback to set the netdev filters
2019 * @netdev: network interface device structure
2021 static void i40e_set_rx_mode(struct net_device *netdev)
2023 struct i40e_netdev_priv *np = netdev_priv(netdev);
2024 struct i40e_vsi *vsi = np->vsi;
2026 spin_lock_bh(&vsi->mac_filter_hash_lock);
2028 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2029 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2031 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2033 /* check for other flag changes */
2034 if (vsi->current_netdev_flags != vsi->netdev->flags) {
2035 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2036 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
2041 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2042 * @vsi: Pointer to VSI struct
2043 * @from: Pointer to list which contains MAC filter entries - changes to
2044 * those entries need to be undone.
2046 * MAC filter entries from this list were slated for deletion.
2048 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2049 struct hlist_head *from)
2051 struct i40e_mac_filter *f;
2052 struct hlist_node *h;
2054 hlist_for_each_entry_safe(f, h, from, hlist) {
2055 u64 key = i40e_addr_to_hkey(f->macaddr);
2057 /* Move the element back into the MAC filter list */
2058 hlist_del(&f->hlist);
2059 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2064 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2065 * @vsi: Pointer to vsi struct
2066 * @from: Pointer to list which contains MAC filter entries - changes to
2067 * those entries need to be undone.
2069 * MAC filter entries from this list were slated for addition.
2071 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2072 struct hlist_head *from)
2074 struct i40e_new_mac_filter *new;
2075 struct hlist_node *h;
2077 hlist_for_each_entry_safe(new, h, from, hlist) {
2078 /* We can simply free the wrapper structure */
2079 hlist_del(&new->hlist);
2080 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2086 * i40e_next_filter - Get the next non-broadcast filter from a list
2087 * @next: pointer to filter in list
2089 * Returns the next non-broadcast filter in the list. Required so that we
2090 * ignore broadcast filters within the list, since these are not handled via
2091 * the normal firmware update path.
2094 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2096 hlist_for_each_entry_continue(next, hlist) {
2097 if (!is_broadcast_ether_addr(next->f->macaddr))
2105 * i40e_update_filter_state - Update filter state based on return data
2107 * @count: Number of filters added
2108 * @add_list: return data from fw
2109 * @add_head: pointer to first filter in current batch
2111 * MAC filter entries from the list were slated to be added to the device.
2112 * Returns the number of successful filters. Note that 0 does NOT mean success!
2115 i40e_update_filter_state(int count,
2116 struct i40e_aqc_add_macvlan_element_data *add_list,
2117 struct i40e_new_mac_filter *add_head)
2122 for (i = 0; i < count; i++) {
2123 /* Always check status of each filter. We don't need to check
2124 * the firmware return status because we pre-set the filter
2125 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2126 * request to the adminq. Thus, if it no longer matches then
2127 * we know the filter is active.
2129 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2130 add_head->state = I40E_FILTER_FAILED;
2132 add_head->state = I40E_FILTER_ACTIVE;
2136 add_head = i40e_next_filter(add_head);
2145 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2146 * @vsi: ptr to the VSI
2147 * @vsi_name: name to display in messages
2148 * @list: the list of filters to send to firmware
2149 * @num_del: the number of filters to delete
2150 * @retval: Set to -EIO on failure to delete
2152 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2153 * *retval instead of a return value so that success does not force *retval to
2154 * be set to 0. This ensures that a sequence of calls to this function
2155 * preserves the previous value of *retval on successful deletes.
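* Illustrative usage sketch (list_a/list_b and their counts are hypothetical
* names, not from this driver): several batched deletes can share one retval
* and check it once at the end, since a successful batch never clears an
* earlier failure:
*
*	int retval = 0;
*
*	i40e_aqc_del_filters(vsi, vsi_name, list_a, n_a, &retval);
*	i40e_aqc_del_filters(vsi, vsi_name, list_b, n_b, &retval);
*	if (retval)
*		; /* at least one batch failed and recorded -EIO */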
2158 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2159 struct i40e_aqc_remove_macvlan_element_data *list,
2160 int num_del, int *retval)
2162 struct i40e_hw *hw = &vsi->back->hw;
2163 enum i40e_admin_queue_err aq_status;
2166 aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
2169 /* Explicitly ignore and do not report when firmware returns ENOENT */
2170 if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
2172 dev_info(&vsi->back->pdev->dev,
2173 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2174 vsi_name, i40e_stat_str(hw, aq_ret),
2175 i40e_aq_str(hw, aq_status));
2180 * i40e_aqc_add_filters - Request firmware to add a set of filters
2181 * @vsi: ptr to the VSI
2182 * @vsi_name: name to display in messages
2183 * @list: the list of filters to send to firmware
2184 * @add_head: Position in the add hlist
2185 * @num_add: the number of filters to add
2187 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2188 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2189 * space for more filters.
2192 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2193 struct i40e_aqc_add_macvlan_element_data *list,
2194 struct i40e_new_mac_filter *add_head,
2197 struct i40e_hw *hw = &vsi->back->hw;
2198 enum i40e_admin_queue_err aq_status;
2201 i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
2202 fcnt = i40e_update_filter_state(num_add, list, add_head);
2204 if (fcnt != num_add) {
2205 if (vsi->type == I40E_VSI_MAIN) {
2206 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2207 dev_warn(&vsi->back->pdev->dev,
2208 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2209 i40e_aq_str(hw, aq_status), vsi_name);
2210 } else if (vsi->type == I40E_VSI_SRIOV ||
2211 vsi->type == I40E_VSI_VMDQ1 ||
2212 vsi->type == I40E_VSI_VMDQ2) {
2213 dev_warn(&vsi->back->pdev->dev,
2214 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2215 i40e_aq_str(hw, aq_status), vsi_name,
2218 dev_warn(&vsi->back->pdev->dev,
2219 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2220 i40e_aq_str(hw, aq_status), vsi_name,
2227 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2228 * @vsi: pointer to the VSI
2229 * @vsi_name: the VSI name
2232 * This function sets or clears the promiscuous broadcast flags for VLAN
2233 * filters in order to properly receive broadcast frames. Assumes that only
2234 * broadcast filters are passed.
2236 * Returns status indicating success or failure.
2239 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2240 struct i40e_mac_filter *f)
2242 bool enable = f->state == I40E_FILTER_NEW;
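/* A filter in the NEW state is being added, so the broadcast promiscuous
 * flag is enabled; a filter slated for removal disables it instead.
 */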
2243 struct i40e_hw *hw = &vsi->back->hw;
2246 if (f->vlan == I40E_VLAN_ANY) {
2247 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2252 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2260 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2261 dev_warn(&vsi->back->pdev->dev,
2262 "Error %s, forcing overflow promiscuous on %s\n",
2263 i40e_aq_str(hw, hw->aq.asq_last_status),
2271 * i40e_set_promiscuous - set promiscuous mode
2272 * @pf: board private structure
2273 * @promisc: promisc on or off
2275 * There are different ways of setting promiscuous mode on a PF depending on
2276 * what state/environment we're in. This identifies and sets it appropriately.
2277 * Returns 0 on success.
2279 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2281 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2282 struct i40e_hw *hw = &pf->hw;
2285 if (vsi->type == I40E_VSI_MAIN &&
2286 pf->lan_veb != I40E_NO_VEB &&
2287 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2288 /* set defport ON for Main VSI instead of true promisc;
2289 * this way we will get all unicast/multicast and VLAN
2290 * promisc behavior but will not get VF or VMDq traffic
2291 * replicated on the Main VSI.
2294 aq_ret = i40e_aq_set_default_vsi(hw,
2298 aq_ret = i40e_aq_clear_default_vsi(hw,
2302 dev_info(&pf->pdev->dev,
2303 "Set default VSI failed, err %s, aq_err %s\n",
2304 i40e_stat_str(hw, aq_ret),
2305 i40e_aq_str(hw, hw->aq.asq_last_status));
2308 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2314 dev_info(&pf->pdev->dev,
2315 "set unicast promisc failed, err %s, aq_err %s\n",
2316 i40e_stat_str(hw, aq_ret),
2317 i40e_aq_str(hw, hw->aq.asq_last_status));
2319 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2324 dev_info(&pf->pdev->dev,
2325 "set multicast promisc failed, err %s, aq_err %s\n",
2326 i40e_stat_str(hw, aq_ret),
2327 i40e_aq_str(hw, hw->aq.asq_last_status));
2332 pf->cur_promisc = promisc;
2338 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2339 * @vsi: ptr to the VSI
2341 * Push any outstanding VSI filter changes through the AdminQ.
2343 * Returns 0 or error value
2345 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2347 struct hlist_head tmp_add_list, tmp_del_list;
2348 struct i40e_mac_filter *f;
2349 struct i40e_new_mac_filter *new, *add_head = NULL;
2350 struct i40e_hw *hw = &vsi->back->hw;
2351 bool old_overflow, new_overflow;
2352 unsigned int failed_filters = 0;
2353 unsigned int vlan_filters = 0;
2354 char vsi_name[16] = "PF";
2355 int filter_list_len = 0;
2356 i40e_status aq_ret = 0;
2357 u32 changed_flags = 0;
2358 struct hlist_node *h;
2367 /* element-array pointers, allocated with kzalloc below */
2368 struct i40e_aqc_add_macvlan_element_data *add_list;
2369 struct i40e_aqc_remove_macvlan_element_data *del_list;
2371 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2372 usleep_range(1000, 2000);
2375 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2378 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2379 vsi->current_netdev_flags = vsi->netdev->flags;
2382 INIT_HLIST_HEAD(&tmp_add_list);
2383 INIT_HLIST_HEAD(&tmp_del_list);
2385 if (vsi->type == I40E_VSI_SRIOV)
2386 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2387 else if (vsi->type != I40E_VSI_MAIN)
2388 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2390 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2391 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2393 spin_lock_bh(&vsi->mac_filter_hash_lock);
2394 /* Create a list of filters to delete. */
2395 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2396 if (f->state == I40E_FILTER_REMOVE) {
2397 /* Move the element into temporary del_list */
2398 hash_del(&f->hlist);
2399 hlist_add_head(&f->hlist, &tmp_del_list);
2401 /* Avoid counting removed filters */
2404 if (f->state == I40E_FILTER_NEW) {
2405 /* Create a temporary i40e_new_mac_filter */
2406 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2408 goto err_no_memory_locked;
2410 /* Store pointer to the real filter */
2412 new->state = f->state;
2414 /* Add it to the hash list */
2415 hlist_add_head(&new->hlist, &tmp_add_list);
2418 /* Count the number of active (current and new) VLAN
2419 * filters we have now. Does not count filters which
2420 * are marked for deletion.
2426 retval = i40e_correct_mac_vlan_filters(vsi,
2431 hlist_for_each_entry(new, &tmp_add_list, hlist)
2432 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2435 goto err_no_memory_locked;
2437 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2440 /* Now process 'del_list' outside the lock */
2441 if (!hlist_empty(&tmp_del_list)) {
2442 filter_list_len = hw->aq.asq_buf_size /
2443 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2444 list_size = filter_list_len *
2445 sizeof(struct i40e_aqc_remove_macvlan_element_data);
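/* Illustrative arithmetic (typical values, assumed here): with a
 * 4096-byte AdminQ buffer and 16-byte remove elements, each batch
 * holds 4096 / 16 = 256 filters before it must be flushed.
 */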
2446 del_list = kzalloc(list_size, GFP_ATOMIC);
2450 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2453 /* handle broadcast filters by updating the broadcast
2454 * promiscuous flag and releasing the filter from the list.
2456 if (is_broadcast_ether_addr(f->macaddr)) {
2457 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2459 hlist_del(&f->hlist);
2464 /* add to delete list */
2465 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2466 if (f->vlan == I40E_VLAN_ANY) {
2467 del_list[num_del].vlan_tag = 0;
2468 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2470 del_list[num_del].vlan_tag =
2471 cpu_to_le16((u16)(f->vlan));
2474 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2475 del_list[num_del].flags = cmd_flags;
2478 /* flush a full buffer */
2479 if (num_del == filter_list_len) {
2480 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2482 memset(del_list, 0, list_size);
2485 /* Release memory for MAC filter entries which were
2486 * synced up with HW.
2488 hlist_del(&f->hlist);
2493 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2501 if (!hlist_empty(&tmp_add_list)) {
2502 /* Do all the adds now. */
2503 filter_list_len = hw->aq.asq_buf_size /
2504 sizeof(struct i40e_aqc_add_macvlan_element_data);
2505 list_size = filter_list_len *
2506 sizeof(struct i40e_aqc_add_macvlan_element_data);
2507 add_list = kzalloc(list_size, GFP_ATOMIC);
2512 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2513 /* handle broadcast filters by updating the broadcast
2514 * promiscuous flag instead of adding a MAC filter.
2516 if (is_broadcast_ether_addr(new->f->macaddr)) {
2517 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2519 new->state = I40E_FILTER_FAILED;
2521 new->state = I40E_FILTER_ACTIVE;
2525 /* add to add array */
2529 ether_addr_copy(add_list[num_add].mac_addr,
2531 if (new->f->vlan == I40E_VLAN_ANY) {
2532 add_list[num_add].vlan_tag = 0;
2533 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2535 add_list[num_add].vlan_tag =
2536 cpu_to_le16((u16)(new->f->vlan));
2538 add_list[num_add].queue_number = 0;
2539 /* set invalid match method for later detection */
2540 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2541 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2542 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2545 /* flush a full buffer */
2546 if (num_add == filter_list_len) {
2547 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2549 memset(add_list, 0, list_size);
2554 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2557 /* Now move all of the filters from the temp add list back to
2560 spin_lock_bh(&vsi->mac_filter_hash_lock);
2561 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2562 /* Only update the state if we're still NEW */
2563 if (new->f->state == I40E_FILTER_NEW)
2564 new->f->state = new->state;
2565 hlist_del(&new->hlist);
2566 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2569 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2574 /* Determine the number of active and failed filters. */
2575 spin_lock_bh(&vsi->mac_filter_hash_lock);
2576 vsi->active_filters = 0;
2577 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2578 if (f->state == I40E_FILTER_ACTIVE)
2579 vsi->active_filters++;
2580 else if (f->state == I40E_FILTER_FAILED)
2583 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2585 /* Check if we are able to exit overflow promiscuous mode. We can
2586 * safely exit if we didn't just enter, we no longer have any failed
2587 * filters, and we have reduced filters below the threshold value.
2589 if (old_overflow && !failed_filters &&
2590 vsi->active_filters < vsi->promisc_threshold) {
2591 dev_info(&pf->pdev->dev,
2592 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2594 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2595 vsi->promisc_threshold = 0;
2598 /* if the VF is not trusted do not do promisc */
2599 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2600 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2604 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2606 /* If we are entering overflow promiscuous, we need to calculate a new
2607 * threshold for when we are safe to exit
2609 if (!old_overflow && new_overflow)
2610 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
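/* Illustrative example: entering overflow promiscuous with 64 active
 * filters sets the threshold to 64 * 3 / 4 = 48; the VSI may leave
 * overflow promiscuous only once active filters drop below 48 with
 * none failed.
 */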
2612 /* check for changes in promiscuous modes */
2613 if (changed_flags & IFF_ALLMULTI) {
2614 bool cur_multipromisc;
2616 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2617 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2622 retval = i40e_aq_rc_to_posix(aq_ret,
2623 hw->aq.asq_last_status);
2624 dev_info(&pf->pdev->dev,
2625 "set multi promisc failed on %s, err %s aq_err %s\n",
2627 i40e_stat_str(hw, aq_ret),
2628 i40e_aq_str(hw, hw->aq.asq_last_status));
2630 dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2631 cur_multipromisc ? "entering" : "leaving");
2635 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2638 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2640 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2642 retval = i40e_aq_rc_to_posix(aq_ret,
2643 hw->aq.asq_last_status);
2644 dev_info(&pf->pdev->dev,
2645 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2646 cur_promisc ? "on" : "off",
2648 i40e_stat_str(hw, aq_ret),
2649 i40e_aq_str(hw, hw->aq.asq_last_status));
2653 /* if something went wrong then set the changed flag so we try again */
2655 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2657 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2661 /* Restore elements on the temporary add and delete lists */
2662 spin_lock_bh(&vsi->mac_filter_hash_lock);
2663 err_no_memory_locked:
2664 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2665 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2666 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2668 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2669 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2674 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2675 * @pf: board private structure
2677 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2683 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2685 if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2686 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2690 for (v = 0; v < pf->num_alloc_vsi; v++) {
2692 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2693 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2694 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2697 /* come back and try again later */
2698 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2707 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2710 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2712 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2713 return I40E_RXBUFFER_2048;
2715 return I40E_RXBUFFER_3072;
2719 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2720 * @netdev: network interface device structure
2721 * @new_mtu: new value for maximum frame size
2723 * Returns 0 on success, negative on failure
2725 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2727 struct i40e_netdev_priv *np = netdev_priv(netdev);
2728 struct i40e_vsi *vsi = np->vsi;
2729 struct i40e_pf *pf = vsi->back;
2731 if (i40e_enabled_xdp_vsi(vsi)) {
2732 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
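/* Illustrative arithmetic: with an I40E_RXBUFFER_3072 limit the
 * largest XDP MTU is 3072 - ETH_HLEN (14) - ETH_FCS_LEN (4) -
 * VLAN_HLEN (4) = 3050 bytes.
 */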
2734 if (frame_size > i40e_max_xdp_frame_size(vsi))
2738 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2739 netdev->mtu, new_mtu);
2740 netdev->mtu = new_mtu;
2741 if (netif_running(netdev))
2742 i40e_vsi_reinit_locked(vsi);
2743 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2744 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2749 * i40e_ioctl - Access the hwtstamp interface
2750 * @netdev: network interface device structure
2751 * @ifr: interface request data
2752 * @cmd: ioctl command
2754 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2756 struct i40e_netdev_priv *np = netdev_priv(netdev);
2757 struct i40e_pf *pf = np->vsi->back;
2761 return i40e_ptp_get_ts_config(pf, ifr);
2763 return i40e_ptp_set_ts_config(pf, ifr);
2770 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2771 * @vsi: the vsi being adjusted
2773 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2775 struct i40e_vsi_context ctxt;
2778 /* Don't modify stripping options if a port VLAN is active */
2782 if ((vsi->info.valid_sections &
2783 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2784 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2785 return; /* already enabled */
2787 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2788 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2789 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2791 ctxt.seid = vsi->seid;
2792 ctxt.info = vsi->info;
2793 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2795 dev_info(&vsi->back->pdev->dev,
2796 "update vlan stripping failed, err %s aq_err %s\n",
2797 i40e_stat_str(&vsi->back->hw, ret),
2798 i40e_aq_str(&vsi->back->hw,
2799 vsi->back->hw.aq.asq_last_status));
2804 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2805 * @vsi: the vsi being adjusted
2807 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2809 struct i40e_vsi_context ctxt;
2812 /* Don't modify stripping options if a port VLAN is active */
2816 if ((vsi->info.valid_sections &
2817 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2818 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2819 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2820 return; /* already disabled */
2822 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2823 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2824 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2826 ctxt.seid = vsi->seid;
2827 ctxt.info = vsi->info;
2828 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2830 dev_info(&vsi->back->pdev->dev,
2831 "update vlan stripping failed, err %s aq_err %s\n",
2832 i40e_stat_str(&vsi->back->hw, ret),
2833 i40e_aq_str(&vsi->back->hw,
2834 vsi->back->hw.aq.asq_last_status));
2839 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2840 * @vsi: the vsi being configured
2841 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2843 * This is a helper function for adding a new MAC/VLAN filter with the
2844 * specified VLAN for each existing MAC address already in the hash table.
2845 * This function does *not* perform any accounting to update filters based on
2848 * NOTE: this function expects to be called while under the
2849 * mac_filter_hash_lock
2851 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2853 struct i40e_mac_filter *f, *add_f;
2854 struct hlist_node *h;
2857 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2858 if (f->state == I40E_FILTER_REMOVE)
2860 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2862 dev_info(&vsi->back->pdev->dev,
2863 "Could not add vlan filter %d for %pM\n",
2873 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2874 * @vsi: the VSI being configured
2875 * @vid: VLAN id to be added
2877 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2884 /* The network stack will attempt to add VID=0, with the intention to
2885 * receive priority tagged packets with a VLAN of 0. Our HW receives
2886 * these packets by default when configured to receive untagged
2887 * packets, so we don't need to add a filter for this case.
2888 * Additionally, HW interprets adding a VID=0 filter as meaning to
2889 * receive *only* tagged traffic and stops receiving untagged traffic.
2890 * Thus, we do not want to actually add a filter for VID=0
2895 /* Locked once because all functions invoked below iterate the list */
2896 spin_lock_bh(&vsi->mac_filter_hash_lock);
2897 err = i40e_add_vlan_all_mac(vsi, vid);
2898 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2902 /* schedule our worker thread which will take care of
2903 * applying the new filter changes
2905 i40e_service_event_schedule(vsi->back);
2910 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2911 * @vsi: the vsi being configured
2912 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2914 * This function should be used to remove all VLAN filters which match the
2915 * given VID. It does not schedule the service event and does not take the
2916 * mac_filter_hash_lock so it may be combined with other operations under
2917 * a single invocation of the mac_filter_hash_lock.
2919 * NOTE: this function expects to be called while under the
2920 * mac_filter_hash_lock
2922 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2924 struct i40e_mac_filter *f;
2925 struct hlist_node *h;
2928 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2930 __i40e_del_filter(vsi, f);
2935 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2936 * @vsi: the VSI being configured
2937 * @vid: VLAN id to be removed
2939 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2941 if (!vid || vsi->info.pvid)
2944 spin_lock_bh(&vsi->mac_filter_hash_lock);
2945 i40e_rm_vlan_all_mac(vsi, vid);
2946 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2948 /* schedule our worker thread which will take care of
2949 * applying the new filter changes
2951 i40e_service_event_schedule(vsi->back);
2955 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2956 * @netdev: network interface to be adjusted
2957 * @proto: unused protocol value
2958 * @vid: vlan id to be added
2960 * net_device_ops implementation for adding vlan ids
2962 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2963 __always_unused __be16 proto, u16 vid)
2965 struct i40e_netdev_priv *np = netdev_priv(netdev);
2966 struct i40e_vsi *vsi = np->vsi;
2969 if (vid >= VLAN_N_VID)
2972 ret = i40e_vsi_add_vlan(vsi, vid);
2974 set_bit(vid, vsi->active_vlans);
2980 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2981 * @netdev: network interface to be adjusted
2982 * @proto: unused protocol value
2983 * @vid: vlan id to be added
2985 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2986 __always_unused __be16 proto, u16 vid)
2988 struct i40e_netdev_priv *np = netdev_priv(netdev);
2989 struct i40e_vsi *vsi = np->vsi;
2991 if (vid >= VLAN_N_VID)
2993 set_bit(vid, vsi->active_vlans);
2997 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2998 * @netdev: network interface to be adjusted
2999 * @proto: unused protocol value
3000 * @vid: vlan id to be removed
3002 * net_device_ops implementation for removing vlan ids
3004 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3005 __always_unused __be16 proto, u16 vid)
3007 struct i40e_netdev_priv *np = netdev_priv(netdev);
3008 struct i40e_vsi *vsi = np->vsi;
3010 /* return code is ignored, as there is nothing a user
3011 * can do about a failure to remove, and a log message
3012 * has already been printed by the underlying function
3014 i40e_vsi_kill_vlan(vsi, vid);
3016 clear_bit(vid, vsi->active_vlans);
3022 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3023 * @vsi: the vsi being brought back up
3025 static void i40e_restore_vlan(struct i40e_vsi *vsi)
3032 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3033 i40e_vlan_stripping_enable(vsi);
3035 i40e_vlan_stripping_disable(vsi);
3037 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3038 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
3043 * i40e_vsi_add_pvid - Add pvid for the VSI
3044 * @vsi: the vsi being adjusted
3045 * @vid: the vlan id to set as a PVID
3047 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3049 struct i40e_vsi_context ctxt;
3052 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3053 vsi->info.pvid = cpu_to_le16(vid);
3054 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
3055 I40E_AQ_VSI_PVLAN_INSERT_PVID |
3056 I40E_AQ_VSI_PVLAN_EMOD_STR;
3058 ctxt.seid = vsi->seid;
3059 ctxt.info = vsi->info;
3060 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3062 dev_info(&vsi->back->pdev->dev,
3063 "add pvid failed, err %s aq_err %s\n",
3064 i40e_stat_str(&vsi->back->hw, ret),
3065 i40e_aq_str(&vsi->back->hw,
3066 vsi->back->hw.aq.asq_last_status));
3074 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3075 * @vsi: the vsi being adjusted
3077 * Just use the vlan_rx_register() service to put it back to normal
3079 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3083 i40e_vlan_stripping_disable(vsi);
3087 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3088 * @vsi: ptr to the VSI
3090 * If this function returns with an error, then it's possible one or
3091 * more of the rings is populated (while the rest are not). It is the
3092 * caller's duty to clean up those orphaned rings.
3094 * Return 0 on success, negative on failure
3096 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3100 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3101 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3103 if (!i40e_enabled_xdp_vsi(vsi))
3106 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3107 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3113 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3114 * @vsi: ptr to the VSI
3116 * Free VSI's transmit software resources
3118 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3122 if (vsi->tx_rings) {
3123 for (i = 0; i < vsi->num_queue_pairs; i++)
3124 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3125 i40e_free_tx_resources(vsi->tx_rings[i]);
3128 if (vsi->xdp_rings) {
3129 for (i = 0; i < vsi->num_queue_pairs; i++)
3130 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3131 i40e_free_tx_resources(vsi->xdp_rings[i]);
3136 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3137 * @vsi: ptr to the VSI
3139 * If this function returns with an error, then it's possible one or
3140 * more of the rings is populated (while the rest are not). It is the
3141 * caller's duty to clean up those orphaned rings.
3143 * Return 0 on success, negative on failure
3145 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3149 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3150 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3155 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3156 * @vsi: ptr to the VSI
3158 * Free all receive software resources
3160 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3167 for (i = 0; i < vsi->num_queue_pairs; i++)
3168 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3169 i40e_free_rx_resources(vsi->rx_rings[i]);
3173 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3174 * @ring: The Tx ring to configure
3176 * This enables/disables XPS for a given Tx descriptor ring
3177 * based on the TCs enabled for the VSI that ring belongs to.
3179 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3183 if (!ring->q_vector || !ring->netdev || ring->ch)
3186 /* We only initialize XPS once, so as not to overwrite user settings */
3187 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3190 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3191 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3196 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
3197 * @ring: The Tx or Rx ring
3199 * Returns the AF_XDP buffer pool or NULL.
3201 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3203 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3204 int qid = ring->queue_index;
3206 if (ring_is_xdp(ring))
3207 qid -= ring->vsi->alloc_queue_pairs;
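/* e.g. (illustrative) with alloc_queue_pairs = 8, the XDP ring with
 * queue_index 10 maps back to qid 2, the queue pair it services.
 */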
3209 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3212 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
3216 * i40e_configure_tx_ring - Configure a transmit ring context
3217 * @ring: The Tx ring to configure
3219 * Configure the Tx descriptor ring in the HMC context.
3221 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3223 struct i40e_vsi *vsi = ring->vsi;
3224 u16 pf_q = vsi->base_queue + ring->queue_index;
3225 struct i40e_hw *hw = &vsi->back->hw;
3226 struct i40e_hmc_obj_txq tx_ctx;
3227 i40e_status err = 0;
3230 if (ring_is_xdp(ring))
3231 ring->xsk_pool = i40e_xsk_pool(ring);
3233 /* some ATR-related Tx ring init */
3234 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3235 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3236 ring->atr_count = 0;
3238 ring->atr_sample_rate = 0;
3242 i40e_config_xps_tx_ring(ring);
3244 /* clear the context structure first */
3245 memset(&tx_ctx, 0, sizeof(tx_ctx));
3247 tx_ctx.new_context = 1;
3248 tx_ctx.base = (ring->dma / 128);
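/* the queue context base address is programmed in 128-byte units,
 * hence the divide
 */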
3249 tx_ctx.qlen = ring->count;
3250 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3251 I40E_FLAG_FD_ATR_ENABLED));
3252 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3253 /* FDIR VSI tx ring can still use RS bit and writebacks */
3254 if (vsi->type != I40E_VSI_FDIR)
3255 tx_ctx.head_wb_ena = 1;
3256 tx_ctx.head_wb_addr = ring->dma +
3257 (ring->count * sizeof(struct i40e_tx_desc));
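/* The head write-back area sits immediately past the descriptor ring:
 * each Tx descriptor is 16 bytes, so it starts at
 * base + count * sizeof(struct i40e_tx_desc).
 */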
3259 /* As part of VSI creation/update, FW allocates certain
3260 * Tx arbitration queue sets for each TC enabled for
3261 * the VSI. The FW returns the handles to these queue
3262 * sets as part of the response buffer to Add VSI,
3263 * Update VSI, etc. AQ commands. It is expected that
3264 * these queue set handles be associated with the Tx
3265 * queues by the driver as part of the TX queue context
3266 * initialization. This has to be done regardless of
3267 * DCB as by default everything is mapped to TC0.
3272 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3275 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3277 tx_ctx.rdylist_act = 0;
3279 /* clear the context in the HMC */
3280 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3282 dev_info(&vsi->back->pdev->dev,
3283 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3284 ring->queue_index, pf_q, err);
3288 /* set the context in the HMC */
3289 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3291 dev_info(&vsi->back->pdev->dev,
3292 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3293 ring->queue_index, pf_q, err);
3297 /* Now associate this queue with this PCI function */
3299 if (ring->ch->type == I40E_VSI_VMDQ2)
3300 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3304 qtx_ctl |= (ring->ch->vsi_number <<
3305 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3306 I40E_QTX_CTL_VFVM_INDX_MASK;
3308 if (vsi->type == I40E_VSI_VMDQ2) {
3309 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3310 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3311 I40E_QTX_CTL_VFVM_INDX_MASK;
3313 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3317 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3318 I40E_QTX_CTL_PF_INDX_MASK);
3319 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3322 /* cache the tail offset for easier writes later */
3323 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3329 * i40e_rx_offset - Return expected offset into page to access data
3330 * @rx_ring: Ring we are requesting offset of
3332 * Returns the offset value for ring into the data buffer.
3334 static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
3336 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3340 * i40e_configure_rx_ring - Configure a receive ring context
3341 * @ring: The Rx ring to configure
3343 * Configure the Rx descriptor ring in the HMC context.
3345 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3347 struct i40e_vsi *vsi = ring->vsi;
3348 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3349 u16 pf_q = vsi->base_queue + ring->queue_index;
3350 struct i40e_hw *hw = &vsi->back->hw;
3351 struct i40e_hmc_obj_rxq rx_ctx;
3352 i40e_status err = 0;
3356 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3358 /* clear the context structure first */
3359 memset(&rx_ctx, 0, sizeof(rx_ctx));
3361 if (ring->vsi->type == I40E_VSI_MAIN)
3362 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3365 ring->xsk_pool = i40e_xsk_pool(ring);
3366 if (ring->xsk_pool) {
3367 ret = i40e_alloc_rx_bi_zc(ring);
3371 xsk_pool_get_rx_frame_size(ring->xsk_pool);
3372 /* For AF_XDP ZC, we disallow packets spanning
3373 * multiple buffers, which lets us skip that
3374 * handling in the fast path.
3377 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3378 MEM_TYPE_XSK_BUFF_POOL,
3382 dev_info(&vsi->back->pdev->dev,
3383 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3387 ret = i40e_alloc_rx_bi(ring);
3390 ring->rx_buf_len = vsi->rx_buf_len;
3391 if (ring->vsi->type == I40E_VSI_MAIN) {
3392 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3393 MEM_TYPE_PAGE_SHARED,
3400 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3401 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
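/* dbuff is expressed in 128-byte units (assuming the usual
 * 1 << I40E_RXQ_CTX_DBUFF_SHIFT = 128 granularity), so e.g. a
 * 2048-byte buffer programs dbuff = 16.
 */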
3403 rx_ctx.base = (ring->dma / 128);
3404 rx_ctx.qlen = ring->count;
3406 /* use 16 byte descriptors */
3409 /* descriptor type is always zero
3412 rx_ctx.hsplit_0 = 0;
3414 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3415 if (hw->revision_id == 0)
3416 rx_ctx.lrxqthresh = 0;
3418 rx_ctx.lrxqthresh = 1;
3419 rx_ctx.crcstrip = 1;
3421 /* this controls whether VLAN is stripped from inner headers */
3423 /* set the prefena field to 1 because the manual says to */
3426 /* clear the context in the HMC */
3427 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3429 dev_info(&vsi->back->pdev->dev,
3430 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3431 ring->queue_index, pf_q, err);
3435 /* set the context in the HMC */
3436 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3438 dev_info(&vsi->back->pdev->dev,
3439 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3440 ring->queue_index, pf_q, err);
3444 /* configure Rx buffer alignment */
3445 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3446 clear_ring_build_skb_enabled(ring);
3448 set_ring_build_skb_enabled(ring);
3450 ring->rx_offset = i40e_rx_offset(ring);
3452 /* cache tail for quicker writes, and clear the reg before use */
3453 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3454 writel(0, ring->tail);
3456 if (ring->xsk_pool) {
3457 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3458 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3460 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3463 /* Log this in case the user has forgotten to give the kernel
3464 * any buffers, even later in the application's lifetime.
3466 dev_info(&vsi->back->pdev->dev,
3467 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3468 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3469 ring->queue_index, pf_q);
3476 * i40e_vsi_configure_tx - Configure the VSI for Tx
3477 * @vsi: VSI structure describing this set of rings and resources
3479 * Configure the Tx VSI for operation.
3481 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3486 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3487 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3489 if (err || !i40e_enabled_xdp_vsi(vsi))
3492 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3493 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3499 * i40e_vsi_configure_rx - Configure the VSI for Rx
3500 * @vsi: the VSI being configured
3502 * Configure the Rx VSI for operation.
3504 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3509 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3510 vsi->max_frame = I40E_MAX_RXBUFFER;
3511 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3512 #if (PAGE_SIZE < 8192)
3513 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3514 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3515 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3516 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3519 vsi->max_frame = I40E_MAX_RXBUFFER;
3520 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3524 /* set up individual rings */
3525 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3526 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3532 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3533 * @vsi: ptr to the VSI
3535 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3537 struct i40e_ring *tx_ring, *rx_ring;
3538 u16 qoffset, qcount;
3541 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3542 /* Reset the TC information */
3543 for (i = 0; i < vsi->num_queue_pairs; i++) {
3544 rx_ring = vsi->rx_rings[i];
3545 tx_ring = vsi->tx_rings[i];
3546 rx_ring->dcb_tc = 0;
3547 tx_ring->dcb_tc = 0;
3552 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3553 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3556 qoffset = vsi->tc_config.tc_info[n].qoffset;
3557 qcount = vsi->tc_config.tc_info[n].qcount;
3558 for (i = qoffset; i < (qoffset + qcount); i++) {
3559 rx_ring = vsi->rx_rings[i];
3560 tx_ring = vsi->tx_rings[i];
3561 rx_ring->dcb_tc = n;
3562 tx_ring->dcb_tc = n;
3568 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3569 * @vsi: ptr to the VSI
3571 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3574 i40e_set_rx_mode(vsi->netdev);
3578 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3579 * @pf: Pointer to the targeted PF
3581 * Set all flow director counters to 0.
3583 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3585 pf->fd_tcp4_filter_cnt = 0;
3586 pf->fd_udp4_filter_cnt = 0;
3587 pf->fd_sctp4_filter_cnt = 0;
3588 pf->fd_ip4_filter_cnt = 0;
3589 pf->fd_tcp6_filter_cnt = 0;
3590 pf->fd_udp6_filter_cnt = 0;
3591 pf->fd_sctp6_filter_cnt = 0;
3592 pf->fd_ip6_filter_cnt = 0;
3596 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3597 * @vsi: Pointer to the targeted VSI
3599 * This function replays onto the hardware the hlist in which all the
3600 * sideband Flow Director filters were saved.
3602 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3604 struct i40e_fdir_filter *filter;
3605 struct i40e_pf *pf = vsi->back;
3606 struct hlist_node *node;
3608 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3611 /* Reset FDir counters as we're replaying all existing filters */
3612 i40e_reset_fdir_filter_cnt(pf);
3614 hlist_for_each_entry_safe(filter, node,
3615 &pf->fdir_filter_list, fdir_node) {
3616 i40e_add_del_fdir(vsi, filter, true);
3621 * i40e_vsi_configure - Set up the VSI for action
3622 * @vsi: the VSI being configured
3624 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3628 i40e_set_vsi_rx_mode(vsi);
3629 i40e_restore_vlan(vsi);
3630 i40e_vsi_config_dcb_rings(vsi);
3631 err = i40e_vsi_configure_tx(vsi);
3633 err = i40e_vsi_configure_rx(vsi);
3639 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3640 * @vsi: the VSI being configured
3642 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3644 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3645 struct i40e_pf *pf = vsi->back;
3646 struct i40e_hw *hw = &pf->hw;
3651 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3652 * and PFINT_LNKLSTn registers, e.g.:
3653 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3655 qp = vsi->base_queue;
3656 vector = vsi->base_vector;
3657 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3658 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3660 q_vector->rx.next_update = jiffies + 1;
3661 q_vector->rx.target_itr =
3662 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
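/* The ITR interval registers are believed to be in 2-usec units,
 * hence the right shift by one when writing the 1-usec driver value.
 */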
3663 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3664 q_vector->rx.target_itr >> 1);
3665 q_vector->rx.current_itr = q_vector->rx.target_itr;
3667 q_vector->tx.next_update = jiffies + 1;
3668 q_vector->tx.target_itr =
3669 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3670 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3671 q_vector->tx.target_itr >> 1);
3672 q_vector->tx.current_itr = q_vector->tx.target_itr;
3674 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3675 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3677 /* Linked list for the queue pairs assigned to this vector */
3678 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
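/* Rough sketch of the cause chain the hardware walks (per this code;
 * the XDP Tx link is present only when has_xdp):
 *   rx(qp) -> tx(nextqp, XDP) -> tx(qp) -> rx(qp + 1) -> ... -> EOL
 * each QINT_[RT]QCTL register names the next queue index and its type.
 */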
3679 for (q = 0; q < q_vector->num_ringpairs; q++) {
3680 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3683 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3684 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3685 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3686 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3687 (I40E_QUEUE_TYPE_TX <<
3688 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3690 wr32(hw, I40E_QINT_RQCTL(qp), val);
3693 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3694 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3695 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3696 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3697 (I40E_QUEUE_TYPE_TX <<
3698 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3700 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3703 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3704 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3705 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3706 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3707 (I40E_QUEUE_TYPE_RX <<
3708 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3710 /* Terminate the linked list */
3711 if (q == (q_vector->num_ringpairs - 1))
3712 val |= (I40E_QUEUE_END_OF_LIST <<
3713 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3715 wr32(hw, I40E_QINT_TQCTL(qp), val);
3724 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3725 * @pf: pointer to private device data structure
3727 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3729 struct i40e_hw *hw = &pf->hw;
3732 /* clear things first */
3733 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3734 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3736 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3737 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3738 I40E_PFINT_ICR0_ENA_GRST_MASK |
3739 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3740 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3741 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3742 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3743 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3745 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3746 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3748 if (pf->flags & I40E_FLAG_PTP)
3749 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3751 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3753 /* SW_ITR_IDX = 0, but don't change INTENA */
3754 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3755 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3757 /* OTHER_ITR_IDX = 0 */
3758 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3762 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3763 * @vsi: the VSI being configured
3765 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3767 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3768 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3769 struct i40e_pf *pf = vsi->back;
3770 struct i40e_hw *hw = &pf->hw;
3773 /* set the ITR configuration */
3774 q_vector->rx.next_update = jiffies + 1;
3775 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3776 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3777 q_vector->rx.current_itr = q_vector->rx.target_itr;
3778 q_vector->tx.next_update = jiffies + 1;
3779 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3780 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3781 q_vector->tx.current_itr = q_vector->tx.target_itr;
3783 i40e_enable_misc_int_causes(pf);
3785 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3786 wr32(hw, I40E_PFINT_LNKLST0, 0);
3788 /* Associate the queue pair to the vector and enable the queue int */
3789 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3790 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3791 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3792 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3794 wr32(hw, I40E_QINT_RQCTL(0), val);
3796 if (i40e_enabled_xdp_vsi(vsi)) {
3797 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3798 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3800 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3802 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3805 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3806 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3807 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3809 wr32(hw, I40E_QINT_TQCTL(0), val);
3814 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3815 * @pf: board private structure
3817 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3819 struct i40e_hw *hw = &pf->hw;
3821 wr32(hw, I40E_PFINT_DYN_CTL0,
3822 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3827 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3828 * @pf: board private structure
3830 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3832 struct i40e_hw *hw = &pf->hw;
3835 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3836 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3837 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3839 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3844 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3845 * @irq: interrupt number
3846 * @data: pointer to a q_vector
3848 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3850 struct i40e_q_vector *q_vector = data;
3852 if (!q_vector->tx.ring && !q_vector->rx.ring)
3855 napi_schedule_irqoff(&q_vector->napi);
3861 * i40e_irq_affinity_notify - Callback for affinity changes
3862 * @notify: context as to what irq was changed
3863 * @mask: the new affinity mask
3865 * This is a callback function used by the irq_set_affinity_notifier function
3866 * so that we may register to receive changes to the irq affinity masks.
3868 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3869 const cpumask_t *mask)
3871 struct i40e_q_vector *q_vector =
3872 container_of(notify, struct i40e_q_vector, affinity_notify);
3874 cpumask_copy(&q_vector->affinity_mask, mask);
3878 * i40e_irq_affinity_release - Callback for affinity notifier release
3879 * @ref: internal core kernel usage
3881 * This is a callback function used by the irq_set_affinity_notifier function
3882 * to inform the current notification subscriber that they will no longer
3883 * receive notifications.
3885 static void i40e_irq_affinity_release(struct kref *ref) {}
3888 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3889 * @vsi: the VSI being configured
3890 * @basename: name for the vector
3892 * Allocates MSI-X vectors and requests interrupts from the kernel.
3894 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3896 int q_vectors = vsi->num_q_vectors;
3897 struct i40e_pf *pf = vsi->back;
3898 int base = vsi->base_vector;
3905 for (vector = 0; vector < q_vectors; vector++) {
3906 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3908 irq_num = pf->msix_entries[base + vector].vector;
3910 if (q_vector->tx.ring && q_vector->rx.ring) {
3911 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3912 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3914 } else if (q_vector->rx.ring) {
3915 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3916 "%s-%s-%d", basename, "rx", rx_int_idx++);
3917 } else if (q_vector->tx.ring) {
3918 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3919 "%s-%s-%d", basename, "tx", tx_int_idx++);
3921 /* skip this unused q_vector */
3924 err = request_irq(irq_num,
3930 dev_info(&pf->pdev->dev,
3931 "MSIX request_irq failed, error: %d\n", err);
3932 goto free_queue_irqs;
3935 /* register for affinity change notifications */
3936 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3937 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3938 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3939 /* Spread affinity hints out across online CPUs.
3941 * get_cpu_mask returns a static constant mask with
3942 * a permanent lifetime so it's ok to pass to
3943 * irq_update_affinity_hint without making a copy.
3945 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3946 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
3949 vsi->irqs_ready = true;
3955 irq_num = pf->msix_entries[base + vector].vector;
3956 irq_set_affinity_notifier(irq_num, NULL);
3957 irq_update_affinity_hint(irq_num, NULL);
3958 free_irq(irq_num, &vsi->q_vectors[vector]);
3964 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3965 * @vsi: the VSI being un-configured
3967 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3969 struct i40e_pf *pf = vsi->back;
3970 struct i40e_hw *hw = &pf->hw;
3971 int base = vsi->base_vector;
3974 /* disable interrupt causation from each queue */
3975 for (i = 0; i < vsi->num_queue_pairs; i++) {
3978 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3979 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3980 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3982 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3983 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3984 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3986 if (!i40e_enabled_xdp_vsi(vsi))
3988 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3991 /* disable each interrupt */
3992 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3993 for (i = vsi->base_vector;
3994 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3995 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3998 for (i = 0; i < vsi->num_q_vectors; i++)
3999 synchronize_irq(pf->msix_entries[i + base].vector);
4001 /* Legacy and MSI mode - this stops all interrupt handling */
4002 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
4003 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
4005 synchronize_irq(pf->pdev->irq);
4010 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4011 * @vsi: the VSI being configured
4013 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
4015 struct i40e_pf *pf = vsi->back;
4018 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4019 for (i = 0; i < vsi->num_q_vectors; i++)
4020 i40e_irq_dynamic_enable(vsi, i);
4022 i40e_irq_dynamic_enable_icr0(pf);
4025 i40e_flush(&pf->hw);
4030 * i40e_free_misc_vector - Free the vector that handles non-queue events
4031 * @pf: board private structure
4033 static void i40e_free_misc_vector(struct i40e_pf *pf)
4036 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
4037 i40e_flush(&pf->hw);
4039 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4040 synchronize_irq(pf->msix_entries[0].vector);
4041 free_irq(pf->msix_entries[0].vector, pf);
4042 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
4047 * i40e_intr - MSI/Legacy and non-queue interrupt handler
4048 * @irq: interrupt number
4049 * @data: pointer to the PF structure
4051 * This is the handler used for all MSI/Legacy interrupts, and deals
4052 * with both queue and non-queue interrupts. This is also used in
4053 * MSIX mode to handle the non-queue interrupts.
4055 static irqreturn_t i40e_intr(int irq, void *data)
4057 struct i40e_pf *pf = (struct i40e_pf *)data;
4058 struct i40e_hw *hw = &pf->hw;
4059 irqreturn_t ret = IRQ_NONE;
4060 u32 icr0, icr0_remaining;
4063 icr0 = rd32(hw, I40E_PFINT_ICR0);
4064 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
4066 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
4067 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4070 /* if interrupt but no bits showing, must be SWINT */
4071 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4072 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4075 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
4076 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4077 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4078 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4079 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4082 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
4083 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4084 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4085 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4087 /* We do not have a way to disarm queue causes while leaving
4088 * the interrupt enabled for all other causes. Ideally, the
4089 * interrupt should be disabled while we are in NAPI, but
4090 * this is not a performance path and napi_schedule()
4091 * can deal with rescheduling.
4093 if (!test_bit(__I40E_DOWN, pf->state))
4094 napi_schedule_irqoff(&q_vector->napi);
4097 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4098 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4099 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4100 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4103 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4104 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4105 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4108 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4109 /* disable any further VFLR event notifications */
4110 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4111 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4113 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4114 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4116 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4117 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4121 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4122 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4123 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4124 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4125 val = rd32(hw, I40E_GLGEN_RSTAT);
4126 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4127 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4128 if (val == I40E_RESET_CORER) {
4129 pf->corer_count++;
4130 } else if (val == I40E_RESET_GLOBR) {
4131 pf->globr_count++;
4132 } else if (val == I40E_RESET_EMPR) {
4133 pf->empr_count++;
4134 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4138 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4139 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4140 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4141 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4142 rd32(hw, I40E_PFHMC_ERRORINFO),
4143 rd32(hw, I40E_PFHMC_ERRORDATA));
4146 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4147 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4149 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
4150 schedule_work(&pf->ptp_extts0_work);
4152 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
4153 i40e_ptp_tx_hwtstamp(pf);
4155 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4158 /* If a critical error is pending we have no choice but to reset the
4159 * device.
4160 * Report and mask out any remaining unexpected interrupts.
4162 icr0_remaining = icr0 & ena_mask;
4163 if (icr0_remaining) {
4164 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4165 icr0_remaining);
4166 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4167 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4168 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4169 dev_info(&pf->pdev->dev, "device will be reset\n");
4170 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4171 i40e_service_event_schedule(pf);
4173 ena_mask &= ~icr0_remaining;
4177 enable_intr:
4178 /* re-enable interrupt causes */
4179 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4180 if (!test_bit(__I40E_DOWN, pf->state) ||
4181 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4182 i40e_service_event_schedule(pf);
4183 i40e_irq_dynamic_enable_icr0(pf);
4190 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4191 * @tx_ring: tx ring to clean
4192 * @budget: how many cleans we're allowed
4194 * Returns true if there's any budget left (i.e. the clean is finished)
4196 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4198 struct i40e_vsi *vsi = tx_ring->vsi;
4199 u16 i = tx_ring->next_to_clean;
4200 struct i40e_tx_buffer *tx_buf;
4201 struct i40e_tx_desc *tx_desc;
4203 tx_buf = &tx_ring->tx_bi[i];
4204 tx_desc = I40E_TX_DESC(tx_ring, i);
4205 i -= tx_ring->count;
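/* Note: i is biased by -count here, running from next_to_clean - count
 * up toward zero, so the loop below can detect a ring wrap with a cheap
 * !i test (and a re-bias) instead of a modulo on every step; the bias
 * is removed again after the loop.
 */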
4208 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4210 /* if next_to_watch is not set then there is no work pending */
4211 if (!eop_desc)
4212 break;
4214 /* prevent any other reads prior to eop_desc */
4215 smp_rmb();
4217 /* if the descriptor isn't done, no work yet to do */
4218 if (!(eop_desc->cmd_type_offset_bsz &
4219 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4220 break;
4222 /* clear next_to_watch to prevent false hangs */
4223 tx_buf->next_to_watch = NULL;
4225 tx_desc->buffer_addr = 0;
4226 tx_desc->cmd_type_offset_bsz = 0;
4227 /* move past filter desc */
4228 tx_buf++;
4229 tx_desc++;
4230 i++;
4231 if (unlikely(!i)) {
4232 i -= tx_ring->count;
4233 tx_buf = tx_ring->tx_bi;
4234 tx_desc = I40E_TX_DESC(tx_ring, 0);
4236 /* unmap skb header data */
4237 dma_unmap_single(tx_ring->dev,
4238 dma_unmap_addr(tx_buf, dma),
4239 dma_unmap_len(tx_buf, len),
4241 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4242 kfree(tx_buf->raw_buf);
4244 tx_buf->raw_buf = NULL;
4245 tx_buf->tx_flags = 0;
4246 tx_buf->next_to_watch = NULL;
4247 dma_unmap_len_set(tx_buf, len, 0);
4248 tx_desc->buffer_addr = 0;
4249 tx_desc->cmd_type_offset_bsz = 0;
4251 /* move us past the eop_desc for start of next FD desc */
4252 tx_buf++;
4253 tx_desc++;
4254 i++;
4255 if (unlikely(!i)) {
4256 i -= tx_ring->count;
4257 tx_buf = tx_ring->tx_bi;
4258 tx_desc = I40E_TX_DESC(tx_ring, 0);
4261 /* update budget accounting */
4262 budget--;
4263 } while (likely(budget));
4265 i += tx_ring->count;
4266 tx_ring->next_to_clean = i;
4268 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4269 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4271 return budget > 0;
4275 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4276 * @irq: interrupt number
4277 * @data: pointer to a q_vector
4279 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4281 struct i40e_q_vector *q_vector = data;
4282 struct i40e_vsi *vsi;
4284 if (!q_vector->tx.ring)
4285 return IRQ_HANDLED;
4287 vsi = q_vector->tx.ring->vsi;
4288 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4290 return IRQ_HANDLED;
4294 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4295 * @vsi: the VSI being configured
4296 * @v_idx: vector index
4297 * @qp_idx: queue pair index
4299 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4301 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4302 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4303 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4305 tx_ring->q_vector = q_vector;
4306 tx_ring->next = q_vector->tx.ring;
4307 q_vector->tx.ring = tx_ring;
4308 q_vector->tx.count++;
4310 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4311 if (i40e_enabled_xdp_vsi(vsi)) {
4312 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4314 xdp_ring->q_vector = q_vector;
4315 xdp_ring->next = q_vector->tx.ring;
4316 q_vector->tx.ring = xdp_ring;
4317 q_vector->tx.count++;
4320 rx_ring->q_vector = q_vector;
4321 rx_ring->next = q_vector->rx.ring;
4322 q_vector->rx.ring = rx_ring;
4323 q_vector->rx.count++;
4327 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4328 * @vsi: the VSI being configured
4330 * This function maps descriptor rings to the queue-specific vectors
4331 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4332 * one vector per queue pair, but on a constrained vector budget, we
4333 * group the queue pairs as "efficiently" as possible.
4335 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4337 int qp_remaining = vsi->num_queue_pairs;
4338 int q_vectors = vsi->num_q_vectors;
4343 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4344 * group them so there are multiple queues per vector.
4345 * It is also important to go through all the available vectors to be
4346 * sure that, if we don't use all of them, the remaining vectors
4347 * are cleared. This is especially important when decreasing the
4348 * number of queues in use.
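* For example: distributing 10 queue pairs over 4 vectors, the
* DIV_ROUND_UP() below hands out 3, 3, 2 and 2 ring pairs as v_start
* advances, so the earlier vectors absorb the remainder.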
4350 for (; v_start < q_vectors; v_start++) {
4351 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4353 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4355 q_vector->num_ringpairs = num_ringpairs;
4356 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
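/* MSI-X vector 0 is reserved for the misc/"other causes" interrupt,
 * so a queue vector's register index (used for PFINT_DYN_CTLN and
 * PFINT_LNKLSTN) is offset by one from its absolute vector number.
 */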
4358 q_vector->rx.count = 0;
4359 q_vector->tx.count = 0;
4360 q_vector->rx.ring = NULL;
4361 q_vector->tx.ring = NULL;
4363 while (num_ringpairs--) {
4364 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4372 * i40e_vsi_request_irq - Request IRQ from the OS
4373 * @vsi: the VSI being configured
4374 * @basename: name for the vector
4376 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4378 struct i40e_pf *pf = vsi->back;
4381 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4382 err = i40e_vsi_request_irq_msix(vsi, basename);
4383 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4384 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4385 pf->int_name, pf);
4386 else
4387 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4388 pf->int_name, pf);
4390 if (err)
4391 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4396 #ifdef CONFIG_NET_POLL_CONTROLLER
4398 * i40e_netpoll - A Polling 'interrupt' handler
4399 * @netdev: network interface device structure
4401 * This is used by netconsole to send skbs without having to re-enable
4402 * interrupts. It's not called while the normal interrupt routine is executing.
4404 static void i40e_netpoll(struct net_device *netdev)
4406 struct i40e_netdev_priv *np = netdev_priv(netdev);
4407 struct i40e_vsi *vsi = np->vsi;
4408 struct i40e_pf *pf = vsi->back;
4411 /* if interface is down do nothing */
4412 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4415 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4416 for (i = 0; i < vsi->num_q_vectors; i++)
4417 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4418 } else {
4419 i40e_intr(pf->pdev->irq, netdev);
4424 #define I40E_QTX_ENA_WAIT_COUNT 50
4427 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4428 * @pf: the PF being configured
4429 * @pf_q: the PF queue
4430 * @enable: enable or disable state of the queue
4432 * This routine will wait for the given Tx queue of the PF to reach the
4433 * enabled or disabled state.
4434 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4435 * multiple retries; else will return 0 in case of success.
4437 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4442 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4443 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4444 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4445 break;
4447 usleep_range(10, 20);
4449 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4450 return -ETIMEDOUT;
4452 return 0;
4456 * i40e_control_tx_q - Start or stop a particular Tx queue
4457 * @pf: the PF structure
4458 * @pf_q: the PF queue to configure
4459 * @enable: start or stop the queue
4461 * This function enables or disables a single queue. Note that any delay
4462 * required after the operation is expected to be handled by the caller
4463 * of this function.
4465 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4467 struct i40e_hw *hw = &pf->hw;
4471 /* warn the TX unit of coming changes */
4472 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4474 usleep_range(10, 20);
4476 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4477 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4478 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4479 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4480 break;
4481 usleep_range(1000, 2000);
4484 /* Skip if the queue is already in the requested state */
4485 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4486 return;
4488 /* turn on/off the queue */
4489 if (enable) {
4490 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4491 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4492 } else {
4493 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4494 }
4496 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
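/* This write only raises the request: the driver toggles QENA_REQ and
 * the hardware acknowledges by updating QENA_STAT, which callers
 * observe via i40e_pf_txq_wait().
 */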
4500 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4501 * @seid: VSI SEID
4502 * @pf: the PF structure
4503 * @pf_q: the PF queue to configure
4504 * @is_xdp: true if the queue is used for XDP
4505 * @enable: start or stop the queue
4507 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4508 bool is_xdp, bool enable)
4512 i40e_control_tx_q(pf, pf_q, enable);
4514 /* wait for the change to finish */
4515 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4517 dev_info(&pf->pdev->dev,
4518 "VSI seid %d %sTx ring %d %sable timeout\n",
4519 seid, (is_xdp ? "XDP " : ""), pf_q,
4520 (enable ? "en" : "dis"));
4527 * i40e_vsi_enable_tx - Start a VSI's rings
4528 * @vsi: the VSI being configured
4530 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4532 struct i40e_pf *pf = vsi->back;
4533 int i, pf_q, ret = 0;
4535 pf_q = vsi->base_queue;
4536 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4537 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4538 pf_q,
4539 false /*is xdp*/, true);
4540 if (ret)
4541 break;
4543 if (!i40e_enabled_xdp_vsi(vsi))
4544 continue;
4546 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4547 pf_q + vsi->alloc_queue_pairs,
4548 true /*is xdp*/, true);
4556 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4557 * @pf: the PF being configured
4558 * @pf_q: the PF queue
4559 * @enable: enable or disable state of the queue
4561 * This routine will wait for the given Rx queue of the PF to reach the
4562 * enabled or disabled state.
4563 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4564 * multiple retries; else will return 0 in case of success.
4566 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4571 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4572 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4573 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4574 break;
4576 usleep_range(10, 20);
4578 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4579 return -ETIMEDOUT;
4581 return 0;
4585 * i40e_control_rx_q - Start or stop a particular Rx queue
4586 * @pf: the PF structure
4587 * @pf_q: the PF queue to configure
4588 * @enable: start or stop the queue
4590 * This function enables or disables a single queue. Note that
4591 * any delay required after the operation is expected to be
4592 * handled by the caller of this function.
4594 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4596 struct i40e_hw *hw = &pf->hw;
4600 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4601 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4602 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4603 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4604 break;
4605 usleep_range(1000, 2000);
4608 /* Skip if the queue is already in the requested state */
4609 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4610 return;
4612 /* turn on/off the queue */
4613 if (enable)
4614 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4615 else
4616 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4618 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4622 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4623 * @pf: the PF structure
4624 * @pf_q: queue being configured
4625 * @enable: start or stop the rings
4627 * This function enables or disables a single queue along with waiting
4628 * for the change to finish. The caller of this function should handle
4629 * the delays needed in the case of disabling queues.
4631 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4635 i40e_control_rx_q(pf, pf_q, enable);
4637 /* wait for the change to finish */
4638 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4646 * i40e_vsi_enable_rx - Start a VSI's rings
4647 * @vsi: the VSI being configured
4649 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4651 struct i40e_pf *pf = vsi->back;
4652 int i, pf_q, ret = 0;
4654 pf_q = vsi->base_queue;
4655 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4656 ret = i40e_control_wait_rx_q(pf, pf_q, true);
4658 dev_info(&pf->pdev->dev,
4659 "VSI seid %d Rx ring %d enable timeout\n",
4669 * i40e_vsi_start_rings - Start a VSI's rings
4670 * @vsi: the VSI being configured
4672 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4676 /* do rx first for enable and last for disable */
4677 ret = i40e_vsi_enable_rx(vsi);
4680 ret = i40e_vsi_enable_tx(vsi);
4685 #define I40E_DISABLE_TX_GAP_MSEC 50
4688 * i40e_vsi_stop_rings - Stop a VSI's rings
4689 * @vsi: the VSI being configured
4691 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4693 struct i40e_pf *pf = vsi->back;
4694 int pf_q, err, q_end;
4696 /* When port TX is suspended, don't wait */
4697 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4698 return i40e_vsi_stop_rings_no_wait(vsi);
4700 q_end = vsi->base_queue + vsi->num_queue_pairs;
4701 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4702 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4704 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4705 err = i40e_control_wait_rx_q(pf, pf_q, false);
4707 dev_info(&pf->pdev->dev,
4708 "VSI seid %d Rx ring %d disable timeout\n",
4712 msleep(I40E_DISABLE_TX_GAP_MSEC);
4714 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4715 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4717 i40e_vsi_wait_queues_disabled(vsi);
4721 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4722 * @vsi: the VSI being shutdown
4724 * This function stops all the rings for a VSI but does not delay to verify
4725 * that rings have been disabled. It is expected that the caller is shutting
4726 * down multiple VSIs at once and will delay together for all the VSIs after
4727 * initiating the shutdown. This is particularly useful for shutting down lots
4728 * of VFs together. Otherwise, a large delay can be incurred while configuring
4729 * each VSI in serial.
4731 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4733 struct i40e_pf *pf = vsi->back;
4736 pf_q = vsi->base_queue;
4737 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4738 i40e_control_tx_q(pf, pf_q, false);
4739 i40e_control_rx_q(pf, pf_q, false);
4744 * i40e_vsi_free_irq - Free the irq association with the OS
4745 * @vsi: the VSI being configured
4747 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4749 struct i40e_pf *pf = vsi->back;
4750 struct i40e_hw *hw = &pf->hw;
4751 int base = vsi->base_vector;
4755 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4756 if (!vsi->q_vectors)
4759 if (!vsi->irqs_ready)
4762 vsi->irqs_ready = false;
4763 for (i = 0; i < vsi->num_q_vectors; i++) {
4768 irq_num = pf->msix_entries[vector].vector;
4770 /* free only the irqs that were actually requested */
4771 if (!vsi->q_vectors[i] ||
4772 !vsi->q_vectors[i]->num_ringpairs)
4773 continue;
4775 /* clear the affinity notifier in the IRQ descriptor */
4776 irq_set_affinity_notifier(irq_num, NULL);
4777 /* remove our suggested affinity mask for this IRQ */
4778 irq_update_affinity_hint(irq_num, NULL);
4779 synchronize_irq(irq_num);
4780 free_irq(irq_num, vsi->q_vectors[i]);
4782 /* Tear down the interrupt queue link list
4784 * We know that they come in pairs and always
4785 * the Rx first, then the Tx. To clear the
4786 * link list, stick the EOL value into the
4787 * next_q field of the registers.
4789 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4790 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4791 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4792 val |= I40E_QUEUE_END_OF_LIST
4793 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4794 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4796 while (qp != I40E_QUEUE_END_OF_LIST) {
4799 val = rd32(hw, I40E_QINT_RQCTL(qp));
4801 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4802 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4803 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4804 I40E_QINT_RQCTL_INTEVENT_MASK);
4806 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4807 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4809 wr32(hw, I40E_QINT_RQCTL(qp), val);
4811 val = rd32(hw, I40E_QINT_TQCTL(qp));
4813 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4814 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4816 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4817 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4818 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4819 I40E_QINT_TQCTL_INTEVENT_MASK);
4821 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4822 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4824 wr32(hw, I40E_QINT_TQCTL(qp), val);
4826 qp = next;
4828 } else {
4829 free_irq(pf->pdev->irq, pf);
4831 val = rd32(hw, I40E_PFINT_LNKLST0);
4832 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4833 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4834 val |= I40E_QUEUE_END_OF_LIST
4835 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4836 wr32(hw, I40E_PFINT_LNKLST0, val);
4838 val = rd32(hw, I40E_QINT_RQCTL(qp));
4839 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4840 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4841 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4842 I40E_QINT_RQCTL_INTEVENT_MASK);
4844 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4845 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4847 wr32(hw, I40E_QINT_RQCTL(qp), val);
4849 val = rd32(hw, I40E_QINT_TQCTL(qp));
4851 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4852 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4853 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4854 I40E_QINT_TQCTL_INTEVENT_MASK);
4856 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4857 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4859 wr32(hw, I40E_QINT_TQCTL(qp), val);
4864 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4865 * @vsi: the VSI being configured
4866 * @v_idx: Index of vector to be freed
4868 * This function frees the memory allocated to the q_vector. In addition if
4869 * NAPI is enabled it will delete any references to the NAPI struct prior
4870 * to freeing the q_vector.
4872 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4874 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4875 struct i40e_ring *ring;
4877 if (!q_vector)
4878 return;
4880 /* disassociate q_vector from rings */
4881 i40e_for_each_ring(ring, q_vector->tx)
4882 ring->q_vector = NULL;
4884 i40e_for_each_ring(ring, q_vector->rx)
4885 ring->q_vector = NULL;
4887 /* only VSI w/ an associated netdev is set up w/ NAPI */
4888 if (vsi->netdev)
4889 netif_napi_del(&q_vector->napi);
4891 vsi->q_vectors[v_idx] = NULL;
4893 kfree_rcu(q_vector, rcu);
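/* The datapath may still hold an RCU-protected reference to this
 * q_vector (via the ring pointers), so the memory is freed only after
 * a grace period has elapsed.
 */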
4897 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4898 * @vsi: the VSI being un-configured
4900 * This frees the memory allocated to the q_vectors and
4901 * deletes references to the NAPI struct.
4903 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4907 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4908 i40e_free_q_vector(vsi, v_idx);
4912 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4913 * @pf: board private structure
4915 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4917 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4918 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4919 pci_disable_msix(pf->pdev);
4920 kfree(pf->msix_entries);
4921 pf->msix_entries = NULL;
4922 kfree(pf->irq_pile);
4923 pf->irq_pile = NULL;
4924 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4925 pci_disable_msi(pf->pdev);
4927 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4931 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4932 * @pf: board private structure
4934 * We go through and clear interrupt specific resources and reset the structure
4935 * to pre-load conditions
4937 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4941 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
4942 i40e_free_misc_vector(pf);
4944 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4945 I40E_IWARP_IRQ_PILE_ID);
4947 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4948 for (i = 0; i < pf->num_alloc_vsi; i++)
4949 if (pf->vsi[i])
4950 i40e_vsi_free_q_vectors(pf->vsi[i]);
4951 i40e_reset_interrupt_capability(pf);
4955 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4956 * @vsi: the VSI being configured
4958 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4965 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4966 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4968 if (q_vector->rx.ring || q_vector->tx.ring)
4969 napi_enable(&q_vector->napi);
4974 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4975 * @vsi: the VSI being configured
4977 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4984 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4985 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4987 if (q_vector->rx.ring || q_vector->tx.ring)
4988 napi_disable(&q_vector->napi);
4993 * i40e_vsi_close - Shut down a VSI
4994 * @vsi: the vsi to be quelled
4996 static void i40e_vsi_close(struct i40e_vsi *vsi)
4998 struct i40e_pf *pf = vsi->back;
4999 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
5000 i40e_down(vsi);
5001 i40e_vsi_free_irq(vsi);
5002 i40e_vsi_free_tx_resources(vsi);
5003 i40e_vsi_free_rx_resources(vsi);
5004 vsi->current_netdev_flags = 0;
5005 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
5006 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5007 set_bit(__I40E_CLIENT_RESET, pf->state);
5011 * i40e_quiesce_vsi - Pause a given VSI
5012 * @vsi: the VSI being paused
5014 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
5016 if (test_bit(__I40E_VSI_DOWN, vsi->state))
5017 return;
5019 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
5020 if (vsi->netdev && netif_running(vsi->netdev))
5021 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5022 else
5023 i40e_vsi_close(vsi);
5027 * i40e_unquiesce_vsi - Resume a given VSI
5028 * @vsi: the VSI being resumed
5030 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
5032 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
5033 return;
5035 if (vsi->netdev && netif_running(vsi->netdev))
5036 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5037 else
5038 i40e_vsi_open(vsi); /* this clears the DOWN bit */
5042 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5045 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
5049 for (v = 0; v < pf->num_alloc_vsi; v++) {
5050 if (pf->vsi[v])
5051 i40e_quiesce_vsi(pf->vsi[v]);
5056 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5059 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
5063 for (v = 0; v < pf->num_alloc_vsi; v++) {
5064 if (pf->vsi[v])
5065 i40e_unquiesce_vsi(pf->vsi[v]);
5070 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5071 * @vsi: the VSI being configured
5073 * Wait until all queues on a given VSI have been disabled.
5075 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
5077 struct i40e_pf *pf = vsi->back;
5080 pf_q = vsi->base_queue;
5081 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5082 /* Check and wait for the Tx queue */
5083 ret = i40e_pf_txq_wait(pf, pf_q, false);
5085 dev_info(&pf->pdev->dev,
5086 "VSI seid %d Tx ring %d disable timeout\n",
5091 if (!i40e_enabled_xdp_vsi(vsi))
5092 continue;
5094 /* Check and wait for the XDP Tx queue */
5095 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5098 dev_info(&pf->pdev->dev,
5099 "VSI seid %d XDP Tx ring %d disable timeout\n",
5104 /* Check and wait for the Rx queue */
5105 ret = i40e_pf_rxq_wait(pf, pf_q, false);
5107 dev_info(&pf->pdev->dev,
5108 "VSI seid %d Rx ring %d disable timeout\n",
5117 #ifdef CONFIG_I40E_DCB
5119 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5122 * This function waits for the queues to be in disabled state for all the
5123 * VSIs that are managed by this PF.
5125 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5129 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5131 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5143 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5144 * @pf: pointer to PF
5146 * Get TC map for iSCSI PF type that will include iSCSI TC
5149 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5151 struct i40e_dcb_app_priority_table app;
5152 struct i40e_hw *hw = &pf->hw;
5153 u8 enabled_tc = 1; /* TC0 is always enabled */
5155 /* Get the iSCSI APP TLV */
5156 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5158 for (i = 0; i < dcbcfg->numapps; i++) {
5159 app = dcbcfg->app[i];
5160 if (app.selector == I40E_APP_SEL_TCPIP &&
5161 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5162 tc = dcbcfg->etscfg.prioritytable[app.priority];
5163 enabled_tc |= BIT(tc);
5164 break;
5172 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5173 * @dcbcfg: the corresponding DCBx configuration structure
5175 * Return the number of TCs from given DCBx configuration
5177 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5179 int i, tc_unused = 0;
5183 /* Scan the ETS Config Priority Table to find
5184 * traffic class enabled for a given priority
5185 * and create a bitmask of enabled TCs
5187 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5188 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
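/* Illustration: a priority table mapping the eight user priorities to
 * TCs {0, 0, 1, 1, 2, 0, 0, 0} sets bits 0-2 in num_tc (0x7); the scan
 * below then finds three contiguous TCs and returns 3.
 */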
5190 /* Now scan the bitmask to check for
5191 * contiguous TCs starting with TC0
5193 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5194 if (num_tc & BIT(i)) {
5198 pr_err("Non-contiguous TC - Disabling DCB\n");
5206 /* There is always at least TC0 */
5214 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5215 * @dcbcfg: the corresponding DCBx configuration structure
5217 * Query the given DCBx configuration and return a bitmap of the
5218 * traffic classes it enables
5220 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5222 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5226 for (i = 0; i < num_tc; i++)
5227 enabled_tc |= BIT(i);
5233 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5234 * @pf: PF being queried
5236 * Query the current MQPRIO configuration and return a bitmap of the
5237 * enabled traffic classes.
5239 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5241 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5242 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5243 u8 enabled_tc = 1, i;
5245 for (i = 1; i < num_tc; i++)
5246 enabled_tc |= BIT(i);
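/* e.g. num_tc == 3 gives enabled_tc == 0x7; TC0 (bit 0) was set at
 * initialization above
 */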
5251 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5252 * @pf: PF being queried
5254 * Return number of traffic classes enabled for the given PF
5256 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5258 struct i40e_hw *hw = &pf->hw;
5259 u8 i, enabled_tc = 1;
5261 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5263 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5264 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5266 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5267 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5270 /* SFP mode will be enabled for all TCs on port */
5271 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5272 return i40e_dcb_get_num_tc(dcbcfg);
5274 /* MFP mode return count of enabled TCs for this PF */
5275 if (pf->hw.func_caps.iscsi)
5276 enabled_tc = i40e_get_iscsi_tc_map(pf);
5277 else
5278 return 1; /* Only TC0 */
5280 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5281 if (enabled_tc & BIT(i))
5282 num_tc++;
5284 return num_tc;
5288 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5289 * @pf: PF being queried
5291 * Return a bitmap for enabled traffic classes for this PF.
5293 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5295 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5296 return i40e_mqprio_get_enabled_tc(pf);
5298 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5299 * default TC map which is TC0 only
5301 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5302 return I40E_DEFAULT_TRAFFIC_CLASS;
5304 /* SFP mode we want PF to be enabled for all TCs */
5305 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5306 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5308 /* MFP enabled and iSCSI PF type */
5309 if (pf->hw.func_caps.iscsi)
5310 return i40e_get_iscsi_tc_map(pf);
5312 return I40E_DEFAULT_TRAFFIC_CLASS;
5316 * i40e_vsi_get_bw_info - Query VSI BW Information
5317 * @vsi: the VSI being queried
5319 * Returns 0 on success, negative value on failure
5321 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5323 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5324 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5325 struct i40e_pf *pf = vsi->back;
5326 struct i40e_hw *hw = &pf->hw;
5331 /* Get the VSI level BW configuration */
5332 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5334 dev_info(&pf->pdev->dev,
5335 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5336 i40e_stat_str(&pf->hw, ret),
5337 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5341 /* Get the VSI level BW configuration per TC */
5342 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5345 dev_info(&pf->pdev->dev,
5346 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5347 i40e_stat_str(&pf->hw, ret),
5348 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5352 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5353 dev_info(&pf->pdev->dev,
5354 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5355 bw_config.tc_valid_bits,
5356 bw_ets_config.tc_valid_bits);
5357 /* Still continuing */
5360 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5361 vsi->bw_max_quanta = bw_config.max_bw;
5362 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5363 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5364 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5365 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5366 vsi->bw_ets_limit_credits[i] =
5367 le16_to_cpu(bw_ets_config.credits[i]);
5368 /* 3 bits out of 4 for each TC */
5369 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5376 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5377 * @vsi: the VSI being configured
5378 * @enabled_tc: TC bitmap
5379 * @bw_share: BW shared credits per TC
5381 * Returns 0 on success, negative value on failure
5383 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5386 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5387 struct i40e_pf *pf = vsi->back;
5391 /* There is no need to reset BW when mqprio mode is on. */
5392 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5393 return 0;
5394 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5395 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5397 dev_info(&pf->pdev->dev,
5398 "Failed to reset tx rate for vsi->seid %u\n",
5402 memset(&bw_data, 0, sizeof(bw_data));
5403 bw_data.tc_valid_bits = enabled_tc;
5404 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5405 bw_data.tc_bw_credits[i] = bw_share[i];
5407 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5409 dev_info(&pf->pdev->dev,
5410 "AQ command Config VSI BW allocation per TC failed = %d\n",
5411 pf->hw.aq.asq_last_status);
5415 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5416 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5422 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5423 * @vsi: the VSI being configured
5424 * @enabled_tc: TC map to be enabled
5427 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5429 struct net_device *netdev = vsi->netdev;
5430 struct i40e_pf *pf = vsi->back;
5431 struct i40e_hw *hw = &pf->hw;
5434 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5440 netdev_reset_tc(netdev);
5444 /* Set up actual enabled TCs on the VSI */
5445 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5448 /* set per TC queues for the VSI */
5449 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5450 /* Only set TC queues for enabled tcs
5452 * e.g. For a VSI that has TC0 and TC3 enabled the
5453 * enabled_tc bitmap would be 0x00001001; the driver
5454 * will set the numtc for netdev as 2 that will be
5455 * referenced by the netdev layer as TC 0 and 1.
5457 if (vsi->tc_config.enabled_tc & BIT(i))
5458 netdev_set_tc_queue(netdev,
5459 vsi->tc_config.tc_info[i].netdev_tc,
5460 vsi->tc_config.tc_info[i].qcount,
5461 vsi->tc_config.tc_info[i].qoffset);
5464 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5467 /* Assign UP2TC map for the VSI */
5468 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5469 /* Get the actual TC# for the UP */
5470 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5471 /* Get the mapped netdev TC# for the UP */
5472 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5473 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5478 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5479 * @vsi: the VSI being configured
5480 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5482 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5483 struct i40e_vsi_context *ctxt)
5485 /* copy just the sections touched not the entire info
5486 * since not all sections are valid as returned by
5489 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5490 memcpy(&vsi->info.queue_mapping,
5491 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5492 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5493 sizeof(vsi->info.tc_mapping));
5497 * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5498 * @vsi: the VSI being reconfigured
5499 * @vsi_offset: offset from main VF VSI
5501 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
5503 struct i40e_vsi_context ctxt = {};
5508 if (!vsi)
5509 return I40E_ERR_PARAM;
5513 ctxt.seid = vsi->seid;
5514 ctxt.pf_num = hw->pf_id;
5515 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
5516 ctxt.uplink_seid = vsi->uplink_seid;
5517 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5518 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5519 ctxt.info = vsi->info;
5521 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
5523 if (vsi->reconfig_rss) {
5524 vsi->rss_size = min_t(int, pf->alloc_rss_size,
5525 vsi->num_queue_pairs);
5526 ret = i40e_vsi_config_rss(vsi);
5528 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
5531 vsi->reconfig_rss = false;
5534 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5536 dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
5537 i40e_stat_str(hw, ret),
5538 i40e_aq_str(hw, hw->aq.asq_last_status));
5541 /* update the local VSI info with updated queue map */
5542 i40e_vsi_update_queue_map(vsi, &ctxt);
5543 vsi->info.valid_sections = 0;
5549 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5550 * @vsi: VSI to be configured
5551 * @enabled_tc: TC bitmap
5553 * This configures a particular VSI for TCs that are mapped to the
5554 * given TC bitmap. It uses default bandwidth share for TCs across
5555 * VSIs to configure TC for a particular VSI.
5558 * It is expected that the VSI queues have been quiesced before calling
5559 * this function.
5561 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5563 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5564 struct i40e_pf *pf = vsi->back;
5565 struct i40e_hw *hw = &pf->hw;
5566 struct i40e_vsi_context ctxt;
5570 /* Check if enabled_tc is same as existing or new TCs */
5571 if (vsi->tc_config.enabled_tc == enabled_tc &&
5572 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5575 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5576 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5577 if (enabled_tc & BIT(i))
5581 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5583 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5585 dev_info(&pf->pdev->dev,
5586 "Failed configuring TC map %d for VSI %d\n",
5587 enabled_tc, vsi->seid);
5588 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5591 dev_info(&pf->pdev->dev,
5592 "Failed querying vsi bw info, err %s aq_err %s\n",
5593 i40e_stat_str(hw, ret),
5594 i40e_aq_str(hw, hw->aq.asq_last_status));
5597 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5598 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5601 valid_tc = bw_config.tc_valid_bits;
5602 /* Always enable TC0, no matter what */
5604 dev_info(&pf->pdev->dev,
5605 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5606 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5607 enabled_tc = valid_tc;
5610 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5612 dev_err(&pf->pdev->dev,
5613 "Unable to configure TC map %d for VSI %d\n",
5614 enabled_tc, vsi->seid);
5619 /* Update Queue Pairs Mapping for currently enabled UPs */
5620 ctxt.seid = vsi->seid;
5621 ctxt.pf_num = vsi->back->hw.pf_id;
5623 ctxt.uplink_seid = vsi->uplink_seid;
5624 ctxt.info = vsi->info;
5625 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5626 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5630 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5633 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5636 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5637 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5638 vsi->num_queue_pairs);
5639 ret = i40e_vsi_config_rss(vsi);
5641 dev_info(&vsi->back->pdev->dev,
5642 "Failed to reconfig rss for num_queues\n");
5645 vsi->reconfig_rss = false;
5647 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5648 ctxt.info.valid_sections |=
5649 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5650 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5653 /* Update the VSI after updating the VSI queue-mapping
5656 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5658 dev_info(&pf->pdev->dev,
5659 "Update vsi tc config failed, err %s aq_err %s\n",
5660 i40e_stat_str(hw, ret),
5661 i40e_aq_str(hw, hw->aq.asq_last_status));
5664 /* update the local VSI info with updated queue map */
5665 i40e_vsi_update_queue_map(vsi, &ctxt);
5666 vsi->info.valid_sections = 0;
5668 /* Update current VSI BW information */
5669 ret = i40e_vsi_get_bw_info(vsi);
5671 dev_info(&pf->pdev->dev,
5672 "Failed updating vsi bw info, err %s aq_err %s\n",
5673 i40e_stat_str(hw, ret),
5674 i40e_aq_str(hw, hw->aq.asq_last_status));
5678 /* Update the netdev TC setup */
5679 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5685 * i40e_get_link_speed - Returns link speed for the interface
5686 * @vsi: VSI to be configured
5689 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5691 struct i40e_pf *pf = vsi->back;
5693 switch (pf->hw.phy.link_info.link_speed) {
5694 case I40E_LINK_SPEED_40GB:
5696 case I40E_LINK_SPEED_25GB:
5698 case I40E_LINK_SPEED_20GB:
5700 case I40E_LINK_SPEED_10GB:
5702 case I40E_LINK_SPEED_1GB:
5710 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5711 * @vsi: VSI to be configured
5712 * @seid: seid of the channel/VSI
5713 * @max_tx_rate: max TX rate to be configured as BW limit
5715 * Helper function to set BW limit for a given VSI
5717 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5719 struct i40e_pf *pf = vsi->back;
5724 speed = i40e_get_link_speed(vsi);
5725 if (max_tx_rate > speed) {
5726 dev_err(&pf->pdev->dev,
5727 "Invalid max tx rate %llu specified for VSI seid %d.",
5731 if (max_tx_rate && max_tx_rate < 50) {
5732 dev_warn(&pf->pdev->dev,
5733 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5737 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5738 credits = max_tx_rate;
5739 do_div(credits, I40E_BW_CREDIT_DIVISOR);
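/* e.g. a 400 Mbps limit becomes 8 scheduler credits of 50 Mbps each
 * after the division above
 */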
5740 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5741 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5743 dev_err(&pf->pdev->dev,
5744 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5745 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5746 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5751 * i40e_remove_queue_channels - Remove queue channels for the TCs
5752 * @vsi: VSI to be configured
5754 * Remove queue channels for the TCs
5756 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5758 enum i40e_admin_queue_err last_aq_status;
5759 struct i40e_cloud_filter *cfilter;
5760 struct i40e_channel *ch, *ch_tmp;
5761 struct i40e_pf *pf = vsi->back;
5762 struct hlist_node *node;
5765 /* Reset rss size that was stored when reconfiguring rss for
5766 * channel VSIs with non-power-of-2 queue count.
5768 vsi->current_rss_size = 0;
5770 /* perform cleanup for channels if they exist */
5771 if (list_empty(&vsi->ch_list))
5774 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5775 struct i40e_vsi *p_vsi;
5777 list_del(&ch->list);
5778 p_vsi = ch->parent_vsi;
5779 if (!p_vsi || !ch->initialized) {
5783 /* Reset queue contexts */
5784 for (i = 0; i < ch->num_queue_pairs; i++) {
5785 struct i40e_ring *tx_ring, *rx_ring;
5788 pf_q = ch->base_queue + i;
5789 tx_ring = vsi->tx_rings[pf_q];
5792 rx_ring = vsi->rx_rings[pf_q];
5796 /* Reset BW configured for this VSI via mqprio */
5797 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5799 dev_info(&vsi->back->pdev->dev,
5800 "Failed to reset tx rate for ch->seid %u\n",
5803 /* delete cloud filters associated with this channel */
5804 hlist_for_each_entry_safe(cfilter, node,
5805 &pf->cloud_filter_list, cloud_node) {
5806 if (cfilter->seid != ch->seid)
5809 hash_del(&cfilter->cloud_node);
5810 if (cfilter->dst_port)
5811 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5815 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5817 last_aq_status = pf->hw.aq.asq_last_status;
5819 dev_info(&pf->pdev->dev,
5820 "Failed to delete cloud filter, err %s aq_err %s\n",
5821 i40e_stat_str(&pf->hw, ret),
5822 i40e_aq_str(&pf->hw, last_aq_status));
5826 /* delete VSI from FW */
5827 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5830 dev_err(&vsi->back->pdev->dev,
5831 "unable to remove channel (%d) for parent VSI(%d)\n",
5832 ch->seid, p_vsi->seid);
5835 INIT_LIST_HEAD(&vsi->ch_list);
5839 * i40e_get_max_queues_for_channel
5840 * @vsi: ptr to VSI to which channels are associated with
5842 * Helper function which returns max value among the queue counts set on the
5843 * channels/TCs created.
5845 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5847 struct i40e_channel *ch, *ch_tmp;
5850 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5851 if (!ch->initialized)
5853 if (ch->num_queue_pairs > max)
5854 max = ch->num_queue_pairs;
5861 * i40e_validate_num_queues - validate num_queues w.r.t channel
5862 * @pf: ptr to PF device
5863 * @num_queues: number of queues
5864 * @vsi: the parent VSI
5865 * @reconfig_rss: indicates should the RSS be reconfigured or not
5867 * This function validates number of queues in the context of new channel
5868 * which is being established and determines if RSS should be reconfigured
5869 * or not for parent VSI.
5871 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5872 struct i40e_vsi *vsi, bool *reconfig_rss)
5879 *reconfig_rss = false;
5880 if (vsi->current_rss_size) {
5881 if (num_queues > vsi->current_rss_size) {
5882 dev_dbg(&pf->pdev->dev,
5883 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5884 num_queues, vsi->current_rss_size);
5886 } else if ((num_queues < vsi->current_rss_size) &&
5887 (!is_power_of_2(num_queues))) {
5888 dev_dbg(&pf->pdev->dev,
5889 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5890 num_queues, vsi->current_rss_size);
5895 if (!is_power_of_2(num_queues)) {
5896 /* Find the max num_queues configured for existing channels, if any
5898 * exist, and enforce 'num_queues' to be no less than the largest
5899 * queue count ever configured for a channel.
5901 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5902 if (num_queues < max_ch_queues) {
5903 dev_dbg(&pf->pdev->dev,
5904 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5905 num_queues, max_ch_queues);
5908 *reconfig_rss = true;
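/* Illustration: with current_rss_size == 8, requests for 4 or 8 queues
 * pass through, while 6 (smaller but not a power of 2) and 10 (larger
 * than the current size) are rejected. When no current_rss_size is
 * recorded, a non-power-of-2 request such as 6 sets *reconfig_rss,
 * provided it is not below the largest queue count already configured
 * on a channel.
 */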
5915 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5916 * @vsi: the VSI being setup
5917 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5919 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5921 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5923 struct i40e_pf *pf = vsi->back;
5924 u8 seed[I40E_HKEY_ARRAY_SIZE];
5925 struct i40e_hw *hw = &pf->hw;
5933 if (rss_size > vsi->rss_size)
5936 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5937 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5941 /* Ignoring user configured lut if there is one */
5942 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5944 /* Use user configured hash key if there is one, otherwise
5947 if (vsi->rss_hkey_user)
5948 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5950 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5952 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5954 dev_info(&pf->pdev->dev,
5955 "Cannot set RSS lut, err %s aq_err %s\n",
5956 i40e_stat_str(hw, ret),
5957 i40e_aq_str(hw, hw->aq.asq_last_status));
5963 /* Do the update w.r.t. storing rss_size */
5964 if (!vsi->orig_rss_size)
5965 vsi->orig_rss_size = vsi->rss_size;
5966 vsi->current_rss_size = local_rss_size;
5972 * i40e_channel_setup_queue_map - Setup a channel queue map
5973 * @pf: ptr to PF device
5974 * @ctxt: VSI context structure
5975 * @ch: ptr to channel structure
5977 * Setup queue map for a specific channel
5979 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5980 struct i40e_vsi_context *ctxt,
5981 struct i40e_channel *ch)
5983 u16 qcount, qmap, sections = 0;
5987 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5988 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5990 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5991 ch->num_queue_pairs = qcount;
5993 /* find the next higher power-of-2 of num queue pairs */
5994 pow = ilog2(qcount);
5995 if (!is_power_of_2(qcount))
5996 pow++;
5998 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5999 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
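/* Illustration: for qcount == 6, ilog2(6) == 2 and 6 is not a power of
 * two, so pow is bumped to 3 and TC0's map reserves 2^3 = 8 queues for
 * the channel.
 */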
6001 /* Setup queue TC[0].qmap for given VSI context */
6002 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
6004 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
6005 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
6006 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
6007 ctxt->info.valid_sections |= cpu_to_le16(sections);
6011 * i40e_add_channel - add a channel by adding VSI
6012 * @pf: ptr to PF device
6013 * @uplink_seid: underlying HW switching element (VEB) ID
6014 * @ch: ptr to channel structure
6016 * Add a channel (VSI) using add_vsi and queue_map
6018 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
6019 struct i40e_channel *ch)
6021 struct i40e_hw *hw = &pf->hw;
6022 struct i40e_vsi_context ctxt;
6023 u8 enabled_tc = 0x1; /* TC0 enabled */
6026 if (ch->type != I40E_VSI_VMDQ2) {
6027 dev_info(&pf->pdev->dev,
6028 "add new vsi failed, ch->type %d\n", ch->type);
6032 memset(&ctxt, 0, sizeof(ctxt));
6033 ctxt.pf_num = hw->pf_id;
6035 ctxt.uplink_seid = uplink_seid;
6036 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
6037 if (ch->type == I40E_VSI_VMDQ2)
6038 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6040 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
6041 ctxt.info.valid_sections |=
6042 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6043 ctxt.info.switch_id =
6044 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6047 /* Set queue map for a given VSI context */
6048 i40e_channel_setup_queue_map(pf, &ctxt, ch);
6050 /* Now time to create VSI */
6051 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6053 dev_info(&pf->pdev->dev,
6054 "add new vsi failed, err %s aq_err %s\n",
6055 i40e_stat_str(&pf->hw, ret),
6056 i40e_aq_str(&pf->hw,
6057 pf->hw.aq.asq_last_status));
6061 /* Success, update channel, set enabled_tc only if the channel
6064 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
6065 ch->seid = ctxt.seid;
6066 ch->vsi_number = ctxt.vsi_number;
6067 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
6069 /* copy just the sections touched not the entire info
6070 * since not all sections are valid as returned by
6073 ch->info.mapping_flags = ctxt.info.mapping_flags;
6074 memcpy(&ch->info.queue_mapping,
6075 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
6076 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
6077 sizeof(ctxt.info.tc_mapping));
6082 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
6085 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
6089 memset(&bw_data, 0, sizeof(bw_data));
6090 bw_data.tc_valid_bits = ch->enabled_tc;
6091 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6092 bw_data.tc_bw_credits[i] = bw_share[i];
6094 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
6097 dev_info(&vsi->back->pdev->dev,
6098 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
6099 vsi->back->hw.aq.asq_last_status, ch->seid);
6103 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6104 ch->info.qs_handle[i] = bw_data.qs_handles[i];
6110 * i40e_channel_config_tx_ring - config TX ring associated with new channel
6111 * @pf: ptr to PF device
6112 * @vsi: the VSI being setup
6113 * @ch: ptr to channel structure
6115 * Configure TX rings associated with channel (VSI) since queues are being
6118 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6119 struct i40e_vsi *vsi,
6120 struct i40e_channel *ch)
6124 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6126 /* Enable ETS TCs with equal BW Share for now across all VSIs */
6127 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6128 if (ch->enabled_tc & BIT(i))
6132 /* configure BW for new VSI */
6133 ret = i40e_channel_config_bw(vsi, ch, bw_share);
6135 dev_info(&vsi->back->pdev->dev,
6136 "Failed configuring TC map %d for channel (seid %u)\n",
6137 ch->enabled_tc, ch->seid);
6141 for (i = 0; i < ch->num_queue_pairs; i++) {
6142 struct i40e_ring *tx_ring, *rx_ring;
6145 pf_q = ch->base_queue + i;
6147 /* Get to TX ring ptr of main VSI, for re-setup TX queue
6150 tx_ring = vsi->tx_rings[pf_q];
6153 /* Get the RX ring ptr */
6154 rx_ring = vsi->rx_rings[pf_q];
6162 * i40e_setup_hw_channel - setup new channel
6163 * @pf: ptr to PF device
6164 * @vsi: the VSI being setup
6165 * @ch: ptr to channel structure
6166 * @uplink_seid: underlying HW switching element (VEB) ID
6167 * @type: type of channel to be created (VMDq2/VF)
6169 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6170 * and configures TX rings accordingly
6172 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6173 struct i40e_vsi *vsi,
6174 struct i40e_channel *ch,
6175 u16 uplink_seid, u8 type)
6179 ch->initialized = false;
6180 ch->base_queue = vsi->next_base_queue;
6183 /* Proceed with creation of channel (VMDq2) VSI */
6184 ret = i40e_add_channel(pf, uplink_seid, ch);
6186 dev_info(&pf->pdev->dev,
6187 "failed to add_channel using uplink_seid %u\n",
6192 /* Mark the successful creation of channel */
6193 ch->initialized = true;
6195 /* Reconfigure TX queues using QTX_CTL register */
6196 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6198 dev_info(&pf->pdev->dev,
6199 "failed to configure TX rings for channel %u\n",
6204 /* update 'next_base_queue' */
6205 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6206 dev_dbg(&pf->pdev->dev,
6207 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6208 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6209 ch->num_queue_pairs,
6210 vsi->next_base_queue);
6215 * i40e_setup_channel - setup new channel using uplink element
6216 * @pf: ptr to PF device
6217 * @vsi: pointer to the VSI to set up the channel within
6218 * @ch: ptr to channel structure
6220 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6221 * and uplink switching element (uplink_seid)
6223 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6224 struct i40e_channel *ch)
6230 if (vsi->type == I40E_VSI_MAIN) {
6231 vsi_type = I40E_VSI_VMDQ2;
6232 } else {
6233 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6234 vsi->type);
6235 return false;
6238 /* underlying switching element */
6239 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6241 /* create channel (VSI), configure TX rings */
6242 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6244 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6248 return ch->initialized;
6252 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6253 * @vsi: ptr to VSI which has PF backing
6255 * Sets up the switch mode correctly if it needs to be changed,
6256 * performing the switch configuration for the allowed modes.
6258 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6261 struct i40e_pf *pf = vsi->back;
6262 struct i40e_hw *hw = &pf->hw;
6265 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6269 if (hw->dev_caps.switch_mode) {
6270 /* if switch mode is set, support mode2 (non-tunneled for
6271 * cloud filter) for now
6273 u32 switch_mode = hw->dev_caps.switch_mode &
6274 I40E_SWITCH_MODE_MASK;
6275 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6276 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6277 return 0;
6278 dev_err(&pf->pdev->dev,
6279 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6280 hw->dev_caps.switch_mode);
6285 /* Set Bit 7 to be valid */
6286 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6288 /* Set L4type for TCP support */
6289 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6291 /* Set cloud filter mode */
6292 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6294 /* Prep mode field for set_switch_config */
6295 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6296 pf->last_sw_conf_valid_flags,
6298 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6299 dev_err(&pf->pdev->dev,
6300 "couldn't set switch config bits, err %s aq_err %s\n",
6301 i40e_stat_str(hw, ret),
6303 hw->aq.asq_last_status));
6309 * i40e_create_queue_channel - function to create channel
6310 * @vsi: VSI to be configured
6311 * @ch: ptr to channel (it contains channel specific params)
6313 * This function creates channel (VSI) using num_queues specified by user,
6314 * reconfigs RSS if needed.
6316 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6317 struct i40e_channel *ch)
6319 struct i40e_pf *pf = vsi->back;
6326 if (!ch->num_queue_pairs) {
6327 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6328 ch->num_queue_pairs);
6332 /* validate user requested num_queues for channel */
6333 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6336 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6337 ch->num_queue_pairs);
6341 /* By default we are in VEPA mode; if this is the first VF/VMDq
6342 * VSI to be added, switch to VEB mode.
6343 */
6345 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6346 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6348 if (vsi->type == I40E_VSI_MAIN) {
6349 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6350 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
6351 else
6352 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
6354 /* now onwards, for the main VSI the number of queues will be
6355 * the value of TC0's queue count
6356 */
6359 /* By this time, vsi->cnt_q_avail should be non-zero and
6360 * at least num_queue_pairs
6361 */
6362 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6363 dev_dbg(&pf->pdev->dev,
6364 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6365 vsi->cnt_q_avail, ch->num_queue_pairs);
6369 /* reconfig_rss only if vsi type is MAIN_VSI */
6370 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6371 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6373 dev_info(&pf->pdev->dev,
6374 "Error: unable to reconfig rss for num_queues (%u)\n",
6375 ch->num_queue_pairs);
6380 if (!i40e_setup_channel(pf, vsi, ch)) {
6381 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6385 dev_info(&pf->pdev->dev,
6386 "Setup channel (id:%u) utilizing num_queues %d\n",
6387 ch->seid, ch->num_queue_pairs);
6389 /* configure VSI for BW limit */
6390 if (ch->max_tx_rate) {
6391 u64 credits = ch->max_tx_rate;
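6392 /* BW credits are granted in 50 Mbps units (I40E_BW_CREDIT_DIVISOR), as logged below */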
6393 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6396 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6397 dev_dbg(&pf->pdev->dev,
6398 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6404 /* in case of VF, this will be main SRIOV VSI */
6405 ch->parent_vsi = vsi;
6407 /* and update the main VSI's count of available queues */
6408 vsi->cnt_q_avail -= ch->num_queue_pairs;
6414 * i40e_configure_queue_channels - Add queue channel for the given TCs
6415 * @vsi: VSI to be configured
6417 * Configures queue channel mapping to the given TCs
6419 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6421 struct i40e_channel *ch;
6425 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6426 vsi->tc_seid_map[0] = vsi->seid;
6427 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6428 if (vsi->tc_config.enabled_tc & BIT(i)) {
6429 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6435 INIT_LIST_HEAD(&ch->list);
6436 ch->num_queue_pairs =
6437 vsi->tc_config.tc_info[i].qcount;
6438 ch->base_queue =
6439 vsi->tc_config.tc_info[i].qoffset;
6441 /* Bandwidth limit through tc interface is in bytes/s,
6442 * changing to Mbps
6443 */
6444 max_rate = vsi->mqprio_qopt.max_rate[i];
6445 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6446 ch->max_tx_rate = max_rate;
6448 list_add_tail(&ch->list, &vsi->ch_list);
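6449 /* add to ch_list before creation so the error path below can clean it up via i40e_remove_queue_channels() */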
6450 ret = i40e_create_queue_channel(vsi, ch);
6452 dev_err(&vsi->back->pdev->dev,
6453 "Failed creating queue channel with TC%d: queues %d\n",
6454 i, ch->num_queue_pairs);
6457 vsi->tc_seid_map[i] = ch->seid;
6463 i40e_remove_queue_channels(vsi);
6468 * i40e_veb_config_tc - Configure TCs for given VEB
6470 * @enabled_tc: TC bitmap
6472 * Configures given TC bitmap for VEB (switching) element
6474 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6476 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6477 struct i40e_pf *pf = veb->pf;
6481 /* If no TCs are requested or they are already enabled, just return */
6482 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6485 bw_data.tc_valid_bits = enabled_tc;
6486 /* bw_data.absolute_credits is not set (relative) */
6488 /* Enable ETS TCs with equal BW Share for now */
6489 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6490 if (enabled_tc & BIT(i))
6491 bw_data.tc_bw_share_credits[i] = 1;
6494 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6497 dev_info(&pf->pdev->dev,
6498 "VEB bw config failed, err %s aq_err %s\n",
6499 i40e_stat_str(&pf->hw, ret),
6500 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6504 /* Update the BW information */
6505 ret = i40e_veb_get_bw_info(veb);
6507 dev_info(&pf->pdev->dev,
6508 "Failed getting veb bw config, err %s aq_err %s\n",
6509 i40e_stat_str(&pf->hw, ret),
6510 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6517 #ifdef CONFIG_I40E_DCB
6519 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6522 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6523 * the caller has quiesced all the VSIs before calling
6524 * this function.
6525 */
6526 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6532 /* Enable the TCs available on PF to all VEBs */
6533 tc_map = i40e_pf_get_tc_map(pf);
6534 if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6537 for (v = 0; v < I40E_MAX_VEB; v++) {
6540 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6542 dev_info(&pf->pdev->dev,
6543 "Failed configuring TC for VEB seid=%d\n",
6545 /* Will try to configure as many components as possible */
6549 /* Update each VSI */
6550 for (v = 0; v < pf->num_alloc_vsi; v++) {
6554 /* - Enable all TCs for the LAN VSI
6555 * - For all others keep them at TC0 for now
6557 if (v == pf->lan_vsi)
6558 tc_map = i40e_pf_get_tc_map(pf);
6560 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6562 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6564 dev_info(&pf->pdev->dev,
6565 "Failed configuring TC for VSI seid=%d\n",
6567 /* Will try to configure as many components as possible */
6569 /* Re-configure VSI vectors based on updated TC map */
6570 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6571 if (pf->vsi[v]->netdev)
6572 i40e_dcbnl_set_all(pf->vsi[v]);
6578 * i40e_resume_port_tx - Resume port Tx
6581 * Resume a port's Tx and issue a PF reset in case of failure to resume.
6584 static int i40e_resume_port_tx(struct i40e_pf *pf)
6586 struct i40e_hw *hw = &pf->hw;
6589 ret = i40e_aq_resume_port_tx(hw, NULL);
6591 dev_info(&pf->pdev->dev,
6592 "Resume Port Tx failed, err %s aq_err %s\n",
6593 i40e_stat_str(&pf->hw, ret),
6594 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6595 /* Schedule PF reset to recover */
6596 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6597 i40e_service_event_schedule(pf);
6604 * i40e_suspend_port_tx - Suspend port Tx
6607 * Suspend a port's Tx and issue a PF reset in case of failure.
6609 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6611 struct i40e_hw *hw = &pf->hw;
6614 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6616 dev_info(&pf->pdev->dev,
6617 "Suspend Port Tx failed, err %s aq_err %s\n",
6618 i40e_stat_str(&pf->hw, ret),
6619 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6620 /* Schedule PF reset to recover */
6621 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6622 i40e_service_event_schedule(pf);
6629 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6630 * @pf: PF being configured
6631 * @new_cfg: New DCBX configuration
6633 * Program DCB settings into HW and reconfigure VEB/VSIs on
6634 * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6636 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6637 struct i40e_dcbx_config *new_cfg)
6639 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6642 /* Check if reconfiguration is needed */
6643 if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6644 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6648 /* Config change requires all VSIs to be disabled */
6649 i40e_pf_quiesce_all_vsi(pf);
6651 /* Copy the new config to the current config */
6652 *old_cfg = *new_cfg;
6653 old_cfg->etsrec = old_cfg->etscfg;
6654 ret = i40e_set_dcb_config(&pf->hw);
6656 dev_info(&pf->pdev->dev,
6657 "Set DCB Config failed, err %s aq_err %s\n",
6658 i40e_stat_str(&pf->hw, ret),
6659 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6663 /* Changes in configuration update VEB/VSI */
6664 i40e_dcb_reconfigure(pf);
6666 /* In case of reset do not try to resume anything */
6667 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6668 /* Re-start the VSIs if disabled */
6669 ret = i40e_resume_port_tx(pf);
6670 /* In case of error no point in resuming VSIs */
6673 i40e_pf_unquiesce_all_vsi(pf);
6680 * i40e_hw_dcb_config - Program new DCBX settings into HW
6681 * @pf: PF being configured
6682 * @new_cfg: New DCBX configuration
6684 * Program DCB settings into HW and reconfigure VEB/VSIs on the given PF.
6687 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6689 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6690 u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6691 u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6692 struct i40e_dcbx_config *old_cfg;
6693 u8 mode[I40E_MAX_TRAFFIC_CLASS];
6694 struct i40e_rx_pb_config pb_cfg;
6695 struct i40e_hw *hw = &pf->hw;
6696 u8 num_ports = hw->num_ports;
6704 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
6705 /* Un-pack information to Program ETS HW via shared API
6708 * ETS/NON-ETS arbiter mode
6709 * max exponent (credit refills)
6710 * Total number of ports
6711 * PFC priority bit-map
6714 * Arbiter mode between UPs sharing same TC
6715 * TSA table (ETS or non-ETS)
6716 * EEE enabled or not
6717 */
6720 new_numtc = i40e_dcb_get_num_tc(new_cfg);
6722 memset(&ets_data, 0, sizeof(ets_data));
6723 for (i = 0; i < new_numtc; i++) {
6725 switch (new_cfg->etscfg.tsatable[i]) {
6726 case I40E_IEEE_TSA_ETS:
6727 prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6728 ets_data.tc_bw_share_credits[i] =
6729 new_cfg->etscfg.tcbwtable[i];
6731 case I40E_IEEE_TSA_STRICT:
6732 prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6734 ets_data.tc_bw_share_credits[i] =
6735 I40E_DCB_STRICT_PRIO_CREDITS;
6738 /* Invalid TSA type */
6739 need_reconfig = false;
6744 old_cfg = &hw->local_dcbx_config;
6745 /* Check if reconfiguration is needed */
6746 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6748 /* If needed, enable/disable frame tagging, disable all VSIs
6749 * and suspend port tx
6750 */
6751 if (need_reconfig) {
6752 /* Enable DCB tagging only when more than one TC */
6754 pf->flags |= I40E_FLAG_DCB_ENABLED;
6756 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6758 set_bit(__I40E_PORT_SUSPENDED, pf->state);
6759 /* Reconfiguration needed; quiesce all VSIs */
6760 i40e_pf_quiesce_all_vsi(pf);
6761 ret = i40e_suspend_port_tx(pf);
6766 /* Configure Port ETS Tx Scheduler */
6767 ets_data.tc_valid_bits = tc_map;
6768 ets_data.tc_strict_priority_flags = lltc_map;
6769 ret = i40e_aq_config_switch_comp_ets
6770 (hw, pf->mac_seid, &ets_data,
6771 i40e_aqc_opc_modify_switching_comp_ets, NULL);
6773 dev_info(&pf->pdev->dev,
6774 "Modify Port ETS failed, err %s aq_err %s\n",
6775 i40e_stat_str(&pf->hw, ret),
6776 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6780 /* Configure Rx ETS HW */
6781 memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
6782 i40e_dcb_hw_set_num_tc(hw, new_numtc);
6783 i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
6784 I40E_DCB_ARB_MODE_STRICT_PRIORITY,
6785 I40E_DCB_DEFAULT_MAX_EXPONENT,
6787 i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
6788 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
6790 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
6791 new_cfg->etscfg.prioritytable);
6792 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
6794 /* Configure Rx Packet Buffers in HW */
6795 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6796 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
6797 mfs_tc[i] += I40E_PACKET_HDR_PAD;
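6798 /* these per-TC max frame sizes feed the Rx packet-buffer pool sizing below */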
6800 i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
6801 false, new_cfg->pfc.pfcenable,
6803 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
6805 /* Update the local Rx Packet buffer config */
6806 pf->pb_cfg = pb_cfg;
6808 /* Inform the FW about changes to DCB configuration */
6809 ret = i40e_aq_dcb_updated(&pf->hw, NULL);
6811 dev_info(&pf->pdev->dev,
6812 "DCB Updated failed, err %s aq_err %s\n",
6813 i40e_stat_str(&pf->hw, ret),
6814 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6818 /* Update the port DCBx configuration */
6819 *old_cfg = *new_cfg;
6821 /* Changes in configuration update VEB/VSI */
6822 i40e_dcb_reconfigure(pf);
6824 /* Re-start the VSIs if disabled */
6825 if (need_reconfig) {
6826 ret = i40e_resume_port_tx(pf);
6828 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
6829 /* In case of error no point in resuming VSIs */
6833 /* Wait for the PF's queues to be disabled */
6834 ret = i40e_pf_wait_queues_disabled(pf);
6836 /* Schedule PF reset to recover */
6837 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6838 i40e_service_event_schedule(pf);
6841 i40e_pf_unquiesce_all_vsi(pf);
6842 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6843 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
6845 /* registers are set, let's apply */
6846 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
6847 ret = i40e_hw_set_dcb_config(pf, new_cfg);
6855 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
6856 * @pf: PF being queried
6858 * Set default DCB configuration in case DCB is to be done in SW.
6860 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
6862 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
6863 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6864 struct i40e_hw *hw = &pf->hw;
6867 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
6868 /* Update the local cached instance with TC0 ETS */
6869 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
6870 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6871 pf->tmp_cfg.etscfg.maxtcs = 0;
6872 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6873 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
6874 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
6875 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
6876 /* FW needs one App to configure HW */
6877 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
6878 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
6879 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
6880 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
6882 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
6885 memset(&ets_data, 0, sizeof(ets_data));
6886 ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
6887 ets_data.tc_strict_priority_flags = 0; /* ETS */
6888 ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
6890 /* Enable ETS on the Physical port */
6891 err = i40e_aq_config_switch_comp_ets
6892 (hw, pf->mac_seid, &ets_data,
6893 i40e_aqc_opc_enable_switching_comp_ets, NULL);
6895 dev_info(&pf->pdev->dev,
6896 "Enable Port ETS failed, err %s aq_err %s\n",
6897 i40e_stat_str(&pf->hw, err),
6898 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6903 /* Update the local cached instance with TC0 ETS */
6904 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6905 dcb_cfg->etscfg.cbs = 0;
6906 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
6907 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6914 * i40e_init_pf_dcb - Initialize DCB configuration
6915 * @pf: PF being configured
6917 * Query the current DCB configuration and cache it
6918 * in the hardware structure
6920 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6922 struct i40e_hw *hw = &pf->hw;
6925 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
6926 * Also do not enable DCBX if the FW LLDP agent is disabled.
6927 */
6928 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
6929 dev_info(&pf->pdev->dev, "DCB is not supported.\n");
6930 err = I40E_NOT_SUPPORTED;
6933 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
6934 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
6935 err = i40e_dcb_sw_default_config(pf);
6937 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
6940 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
6941 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
6942 DCB_CAP_DCBX_VER_IEEE;
6943 /* at init capable but disabled */
6944 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6945 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6948 err = i40e_init_dcb(hw, true);
6950 /* Device/Function is not DCBX capable */
6951 if ((!hw->func_caps.dcb) ||
6952 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6953 dev_info(&pf->pdev->dev,
6954 "DCBX offload is not supported or is disabled for this PF.\n");
6956 /* When status is not DISABLED, DCBX is managed in the FW */
6957 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6958 DCB_CAP_DCBX_VER_IEEE;
6960 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6961 /* Enable DCB tagging only when more than one TC
6962 * or explicitly disable if only one TC
6963 */
6964 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6965 pf->flags |= I40E_FLAG_DCB_ENABLED;
6966 else
6967 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6968 dev_dbg(&pf->pdev->dev,
6969 "DCBX offload is supported for this PF.\n");
6971 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6972 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6973 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6975 dev_info(&pf->pdev->dev,
6976 "Query for DCB configuration failed, err %s aq_err %s\n",
6977 i40e_stat_str(&pf->hw, err),
6978 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6984 #endif /* CONFIG_I40E_DCB */
6987 * i40e_print_link_message - print link up or down
6988 * @vsi: the VSI for which link needs a message
6989 * @isup: true if link is up, false otherwise
6990 */
6991 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6993 enum i40e_aq_link_speed new_speed;
6994 struct i40e_pf *pf = vsi->back;
6995 char *speed = "Unknown";
6996 char *fc = "Unknown";
7002 new_speed = pf->hw.phy.link_info.link_speed;
7003 else
7004 new_speed = I40E_LINK_SPEED_UNKNOWN;
7006 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
7007 return;
7008 vsi->current_isup = isup;
7009 vsi->current_speed = new_speed;
7011 netdev_info(vsi->netdev, "NIC Link is Down\n");
7015 /* Warn user if link speed on NPAR enabled partition is not at
7016 * least 10Gbps
7017 */
7018 if (pf->hw.func_caps.npar_enable &&
7019 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
7020 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
7021 netdev_warn(vsi->netdev,
7022 "The partition detected link speed that is less than 10Gbps\n");
7024 switch (pf->hw.phy.link_info.link_speed) {
7025 case I40E_LINK_SPEED_40GB:
7028 case I40E_LINK_SPEED_20GB:
7031 case I40E_LINK_SPEED_25GB:
7034 case I40E_LINK_SPEED_10GB:
7037 case I40E_LINK_SPEED_5GB:
7040 case I40E_LINK_SPEED_2_5GB:
7043 case I40E_LINK_SPEED_1GB:
7046 case I40E_LINK_SPEED_100MB:
7053 switch (pf->hw.fc.current_mode) {
7057 case I40E_FC_TX_PAUSE:
7060 case I40E_FC_RX_PAUSE:
7068 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
7073 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7076 if (pf->hw.phy.link_info.fec_info &
7077 I40E_AQ_CONFIG_FEC_KR_ENA)
7078 fec = "CL74 FC-FEC/BASE-R";
7079 else if (pf->hw.phy.link_info.fec_info &
7080 I40E_AQ_CONFIG_FEC_RS_ENA)
7081 fec = "CL108 RS-FEC";
7083 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
7084 * both RS and FC are requested
7085 */
7086 if (vsi->back->hw.phy.link_info.req_fec_info &
7087 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7088 if (vsi->back->hw.phy.link_info.req_fec_info &
7089 I40E_AQ_REQUEST_FEC_RS)
7090 req_fec = "CL108 RS-FEC";
7091 else
7092 req_fec = "CL74 FC-FEC/BASE-R";
7094 netdev_info(vsi->netdev,
7095 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7096 speed, req_fec, fec, an, fc);
7097 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7102 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7105 if (pf->hw.phy.link_info.fec_info &
7106 I40E_AQ_CONFIG_FEC_KR_ENA)
7107 fec = "CL74 FC-FEC/BASE-R";
7109 if (pf->hw.phy.link_info.req_fec_info &
7110 I40E_AQ_REQUEST_FEC_KR)
7111 req_fec = "CL74 FC-FEC/BASE-R";
7113 netdev_info(vsi->netdev,
7114 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7115 speed, req_fec, fec, an, fc);
7117 netdev_info(vsi->netdev,
7118 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7125 * i40e_up_complete - Finish the last steps of bringing up a connection
7126 * @vsi: the VSI being configured
7128 static int i40e_up_complete(struct i40e_vsi *vsi)
7130 struct i40e_pf *pf = vsi->back;
7133 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7134 i40e_vsi_configure_msix(vsi);
7135 else
7136 i40e_configure_msi_and_legacy(vsi);
7139 err = i40e_vsi_start_rings(vsi);
7143 clear_bit(__I40E_VSI_DOWN, vsi->state);
7144 i40e_napi_enable_all(vsi);
7145 i40e_vsi_enable_irq(vsi);
7147 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7149 i40e_print_link_message(vsi, true);
7150 netif_tx_start_all_queues(vsi->netdev);
7151 netif_carrier_on(vsi->netdev);
7154 /* replay FDIR SB filters */
7155 if (vsi->type == I40E_VSI_FDIR) {
7156 /* reset fd counters */
7159 i40e_fdir_filter_restore(vsi);
7162 /* On the next run of the service_task, notify any clients of the
7163 * newly opened netdev
7164 */
7165 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7166 i40e_service_event_schedule(pf);
7172 * i40e_vsi_reinit_locked - Reset the VSI
7173 * @vsi: the VSI being configured
7175 * Rebuild the ring structs after some configuration
7176 * has changed, e.g. MTU size.
7178 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7180 struct i40e_pf *pf = vsi->back;
7182 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7183 usleep_range(1000, 2000);
7187 clear_bit(__I40E_CONFIG_BUSY, pf->state);
7191 * i40e_force_link_state - Force the link status
7192 * @pf: board private structure
7193 * @is_up: whether the link state should be forced up or down
7195 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7197 struct i40e_aq_get_phy_abilities_resp abilities;
7198 struct i40e_aq_set_phy_config config = {0};
7199 bool non_zero_phy_type = is_up;
7200 struct i40e_hw *hw = &pf->hw;
7205 /* Card might've been put in an unstable state by other drivers
7206 * and applications, which can cause incorrect speed values to be
7207 * set on startup. In order to clear the speed registers, we call
7208 * get_phy_capabilities twice, once to get the initial state of
7209 * available speeds, and once to get the current PHY config.
7210 */
7211 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7214 dev_err(&pf->pdev->dev,
7215 "failed to get phy cap., ret = %s last_status = %s\n",
7216 i40e_stat_str(hw, err),
7217 i40e_aq_str(hw, hw->aq.asq_last_status));
7220 speed = abilities.link_speed;
7222 /* Get the current phy config */
7223 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7226 dev_err(&pf->pdev->dev,
7227 "failed to get phy cap., ret = %s last_status = %s\n",
7228 i40e_stat_str(hw, err),
7229 i40e_aq_str(hw, hw->aq.asq_last_status));
7233 /* If link needs to go up, but was not forced to go down,
7234 * and its speed values are OK, there is no need for a flap;
7235 * but if non_zero_phy_type was set, we still need to force it up.
7236 */
7237 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
7238 non_zero_phy_type = true;
7239 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7240 return I40E_SUCCESS;
7242 /* To force link we need to set bits for all supported PHY types,
7243 * but there are now more than 32, so we need to split the bitmap
7244 * across two fields.
7246 mask = I40E_PHY_TYPES_BITMASK;
7247 config.phy_type =
7248 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7249 config.phy_type_ext =
7250 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
7251 /* Copy the old settings, except for phy_type */
7252 config.abilities = abilities.abilities;
7253 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
7254 if (is_up)
7255 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7256 else
7257 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7259 if (abilities.link_speed != 0)
7260 config.link_speed = abilities.link_speed;
7261 else
7262 config.link_speed = speed;
7263 config.eee_capability = abilities.eee_capability;
7264 config.eeer = abilities.eeer_val;
7265 config.low_power_ctrl = abilities.d3_lpan;
7266 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7267 I40E_AQ_PHY_FEC_CONFIG_MASK;
7268 err = i40e_aq_set_phy_config(hw, &config, NULL);
7271 dev_err(&pf->pdev->dev,
7272 "set phy config ret = %s last_status = %s\n",
7273 i40e_stat_str(&pf->hw, err),
7274 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7278 /* Update the link info */
7279 err = i40e_update_link_info(hw);
7281 /* Wait a little bit (on 40G cards it sometimes takes a really
7282 * long time for link to come back from the atomic reset)
7283 * before rechecking.
7284 */
7285 msleep(1000);
7286 i40e_update_link_info(hw);
7289 i40e_aq_set_link_restart_an(hw, is_up, NULL);
7291 return I40E_SUCCESS;
7295 * i40e_up - Bring the connection back up after being down
7296 * @vsi: the VSI being configured
7298 int i40e_up(struct i40e_vsi *vsi)
7302 if (vsi->type == I40E_VSI_MAIN &&
7303 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7304 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7305 i40e_force_link_state(vsi->back, true);
7307 err = i40e_vsi_configure(vsi);
7309 err = i40e_up_complete(vsi);
7315 * i40e_down - Shutdown the connection processing
7316 * @vsi: the VSI being stopped
7318 void i40e_down(struct i40e_vsi *vsi)
7322 /* It is assumed that the caller of this function
7323 * sets the vsi->state __I40E_VSI_DOWN bit.
7326 netif_carrier_off(vsi->netdev);
7327 netif_tx_disable(vsi->netdev);
7329 i40e_vsi_disable_irq(vsi);
7330 i40e_vsi_stop_rings(vsi);
7331 if (vsi->type == I40E_VSI_MAIN &&
7332 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7333 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7334 i40e_force_link_state(vsi->back, false);
7335 i40e_napi_disable_all(vsi);
7337 for (i = 0; i < vsi->num_queue_pairs; i++) {
7338 i40e_clean_tx_ring(vsi->tx_rings[i]);
7339 if (i40e_enabled_xdp_vsi(vsi)) {
7340 /* Make sure that in-progress ndo_xdp_xmit and
7341 * ndo_xsk_wakeup calls are completed.
7342 */
7343 synchronize_rcu();
7344 i40e_clean_tx_ring(vsi->xdp_rings[i]);
7346 i40e_clean_rx_ring(vsi->rx_rings[i]);
7352 * i40e_validate_mqprio_qopt - validate queue mapping info
7353 * @vsi: the VSI being configured
7354 * @mqprio_qopt: queue parameters
7355 */
7356 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7357 struct tc_mqprio_qopt_offload *mqprio_qopt)
7359 u64 sum_max_rate = 0;
7363 if (mqprio_qopt->qopt.offset[0] != 0 ||
7364 mqprio_qopt->qopt.num_tc < 1 ||
7365 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7367 for (i = 0; ; i++) {
7368 if (!mqprio_qopt->qopt.count[i])
7369 return -EINVAL;
7370 if (mqprio_qopt->min_rate[i]) {
7371 dev_err(&vsi->back->pdev->dev,
7372 "Invalid min tx rate (greater than 0) specified\n");
7375 max_rate = mqprio_qopt->max_rate[i];
7376 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7377 sum_max_rate += max_rate;
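7378 /* past the last TC we stop; otherwise each TC's offset must continue the previous queue range */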
7379 if (i >= mqprio_qopt->qopt.num_tc - 1)
7381 if (mqprio_qopt->qopt.offset[i + 1] !=
7382 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7385 if (vsi->num_queue_pairs <
7386 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7387 dev_err(&vsi->back->pdev->dev,
7388 "Failed to create traffic channel, insufficient number of queues.\n");
7391 if (sum_max_rate > i40e_get_link_speed(vsi)) {
7392 dev_err(&vsi->back->pdev->dev,
7393 "Invalid max tx rate specified\n");
7400 * i40e_vsi_set_default_tc_config - set default values for tc configuration
7401 * @vsi: the VSI being configured
7403 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7408 /* Only TC0 is enabled */
7409 vsi->tc_config.numtc = 1;
7410 vsi->tc_config.enabled_tc = 1;
7411 qcount = min_t(int, vsi->alloc_queue_pairs,
7412 i40e_pf_get_max_q_per_tc(vsi->back));
7413 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7414 /* For each TC that is not enabled, set the offset to the default
7415 * queue and allocate one queue for that TC.
7416 */
7417 vsi->tc_config.tc_info[i].qoffset = 0;
7418 if (vsi->tc_config.enabled_tc & BIT(i))
7419 vsi->tc_config.tc_info[i].qcount = qcount;
7420 else
7421 vsi->tc_config.tc_info[i].qcount = 1;
7422 vsi->tc_config.tc_info[i].netdev_tc = 0;
7427 * i40e_del_macvlan_filter
7428 * @hw: pointer to the HW structure
7429 * @seid: seid of the channel VSI
7430 * @macaddr: the mac address to apply as a filter
7431 * @aq_err: store the admin Q error
7433 * This function deletes a mac filter on the channel VSI which serves as the
7434 * macvlan. Returns 0 on success.
7436 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7437 const u8 *macaddr, int *aq_err)
7439 struct i40e_aqc_remove_macvlan_element_data element;
7442 memset(&element, 0, sizeof(element));
7443 ether_addr_copy(element.mac_addr, macaddr);
7444 element.vlan_tag = 0;
7445 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7446 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7447 *aq_err = hw->aq.asq_last_status;
7453 * i40e_add_macvlan_filter
7454 * @hw: pointer to the HW structure
7455 * @seid: seid of the channel VSI
7456 * @macaddr: the mac address to apply as a filter
7457 * @aq_err: store the admin Q error
7459 * This function adds a mac filter on the channel VSI which serves as the
7460 * macvlan. Returns 0 on success.
7462 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7463 const u8 *macaddr, int *aq_err)
7465 struct i40e_aqc_add_macvlan_element_data element;
7469 ether_addr_copy(element.mac_addr, macaddr);
7470 element.vlan_tag = 0;
7471 element.queue_number = 0;
7472 element.match_method = I40E_AQC_MM_ERR_NO_RES;
7473 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7474 element.flags = cpu_to_le16(cmd_flags);
7475 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7476 *aq_err = hw->aq.asq_last_status;
7482 * i40e_reset_ch_rings - Reset the queue contexts in a channel
7483 * @vsi: the VSI we want to access
7484 * @ch: the channel we want to access
7486 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7488 struct i40e_ring *tx_ring, *rx_ring;
7492 for (i = 0; i < ch->num_queue_pairs; i++) {
7493 pf_q = ch->base_queue + i;
7494 tx_ring = vsi->tx_rings[pf_q];
7496 rx_ring = vsi->rx_rings[pf_q];
7502 * i40e_free_macvlan_channels
7503 * @vsi: the VSI we want to access
7505 * This function frees the Qs of the channel VSI from
7506 * the stack and also deletes the channel VSIs which
7507 * serve as macvlans.
7509 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7511 struct i40e_channel *ch, *ch_tmp;
7514 if (list_empty(&vsi->macvlan_list))
7517 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7518 struct i40e_vsi *parent_vsi;
7520 if (i40e_is_channel_macvlan(ch)) {
7521 i40e_reset_ch_rings(vsi, ch);
7522 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7523 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7524 netdev_set_sb_channel(ch->fwd->netdev, 0);
7529 list_del(&ch->list);
7530 parent_vsi = ch->parent_vsi;
7531 if (!parent_vsi || !ch->initialized) {
7536 /* remove the VSI */
7537 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7540 dev_err(&vsi->back->pdev->dev,
7541 "unable to remove channel (%d) for parent VSI(%d)\n",
7542 ch->seid, parent_vsi->seid);
7545 vsi->macvlan_cnt = 0;
7549 * i40e_fwd_ring_up - bring the macvlan device up
7550 * @vsi: the VSI we want to access
7551 * @vdev: macvlan netdevice
7552 * @fwd: the private fwd structure
7554 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7555 struct i40e_fwd_adapter *fwd)
7557 struct i40e_channel *ch = NULL, *ch_tmp, *iter;
7558 int ret = 0, num_tc = 1, i, aq_err;
7559 struct i40e_pf *pf = vsi->back;
7560 struct i40e_hw *hw = &pf->hw;
7562 /* Go through the list and find an available channel */
7563 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
7564 if (!i40e_is_channel_macvlan(iter)) {
7566 /* record configuration for macvlan interface in vdev */
7567 for (i = 0; i < num_tc; i++)
7568 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7570 iter->num_queue_pairs,
7572 for (i = 0; i < iter->num_queue_pairs; i++) {
7573 struct i40e_ring *tx_ring, *rx_ring;
7576 pf_q = iter->base_queue + i;
7578 /* Get to TX ring ptr */
7579 tx_ring = vsi->tx_rings[pf_q];
7582 /* Get the RX ring ptr */
7583 rx_ring = vsi->rx_rings[pf_q];
7594 /* Guarantee all rings are updated before we update the
7595 * MAC address filter.
7596 */
7597 wmb();
7599 /* Add a mac filter */
7600 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7602 /* if we cannot add the MAC rule then disable the offload */
7603 macvlan_release_l2fw_offload(vdev);
7604 for (i = 0; i < ch->num_queue_pairs; i++) {
7605 struct i40e_ring *rx_ring;
7608 pf_q = ch->base_queue + i;
7609 rx_ring = vsi->rx_rings[pf_q];
7610 rx_ring->netdev = NULL;
7612 dev_info(&pf->pdev->dev,
7613 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7614 i40e_stat_str(hw, ret),
7615 i40e_aq_str(hw, aq_err));
7616 netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7623 * i40e_setup_macvlans - create the channels which will be macvlans
7624 * @vsi: the VSI we want to access
7625 * @macvlan_cnt: no. of macvlans to be set up
7626 * @qcnt: no. of Qs per macvlan
7627 * @vdev: macvlan netdevice
7629 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7630 struct net_device *vdev)
7632 struct i40e_pf *pf = vsi->back;
7633 struct i40e_hw *hw = &pf->hw;
7634 struct i40e_vsi_context ctxt;
7635 u16 sections, qmap, num_qps;
7636 struct i40e_channel *ch;
7637 int i, pow, ret = 0;
7640 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7643 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7645 /* find the next higher power-of-2 of num queue pairs */
7646 pow = fls(roundup_pow_of_two(num_qps) - 1);
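7647 /* qmap packs the TC0 queue offset and the log2 of the queue count */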
7648 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7649 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7651 /* Setup context bits for the main VSI */
7652 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7653 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7654 memset(&ctxt, 0, sizeof(ctxt));
7655 ctxt.seid = vsi->seid;
7656 ctxt.pf_num = vsi->back->hw.pf_id;
7658 ctxt.uplink_seid = vsi->uplink_seid;
7659 ctxt.info = vsi->info;
7660 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7661 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7662 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7663 ctxt.info.valid_sections |= cpu_to_le16(sections);
7665 /* Reconfigure RSS for main VSI with new max queue count */
7666 vsi->rss_size = max_t(u16, num_qps, qcnt);
7667 ret = i40e_vsi_config_rss(vsi);
7669 dev_info(&pf->pdev->dev,
7670 "Failed to reconfig RSS for num_queues (%u)\n",
7674 vsi->reconfig_rss = true;
7675 dev_dbg(&vsi->back->pdev->dev,
7676 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7677 vsi->next_base_queue = num_qps;
7678 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
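7679 /* queues beyond num_qps remain available for the macvlan channels created below */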
7680 /* Update the VSI after updating the VSI queue-mapping
7681 * information
7682 */
7683 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7685 dev_info(&pf->pdev->dev,
7686 "Update vsi tc config failed, err %s aq_err %s\n",
7687 i40e_stat_str(hw, ret),
7688 i40e_aq_str(hw, hw->aq.asq_last_status));
7691 /* update the local VSI info with updated queue map */
7692 i40e_vsi_update_queue_map(vsi, &ctxt);
7693 vsi->info.valid_sections = 0;
7695 /* Create channels for macvlans */
7696 INIT_LIST_HEAD(&vsi->macvlan_list);
7697 for (i = 0; i < macvlan_cnt; i++) {
7698 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7703 INIT_LIST_HEAD(&ch->list);
7704 ch->num_queue_pairs = qcnt;
7705 if (!i40e_setup_channel(pf, vsi, ch)) {
7710 ch->parent_vsi = vsi;
7711 vsi->cnt_q_avail -= ch->num_queue_pairs;
7713 list_add_tail(&ch->list, &vsi->macvlan_list);
7719 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7720 i40e_free_macvlan_channels(vsi);
7726 * i40e_fwd_add - configure macvlans
7727 * @netdev: net device to configure
7728 * @vdev: macvlan netdevice
7730 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7732 struct i40e_netdev_priv *np = netdev_priv(netdev);
7733 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7734 struct i40e_vsi *vsi = np->vsi;
7735 struct i40e_pf *pf = vsi->back;
7736 struct i40e_fwd_adapter *fwd;
7737 int avail_macvlan, ret;
7739 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7740 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7741 return ERR_PTR(-EINVAL);
7743 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7744 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7745 return ERR_PTR(-EINVAL);
7747 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7748 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7749 return ERR_PTR(-EINVAL);
7752 /* The macvlan device has to be a single Q device so that the
7753 * tc_to_txq field can be reused to pick the tx queue.
7754 */
7755 if (netif_is_multiqueue(vdev))
7756 return ERR_PTR(-ERANGE);
7758 if (!vsi->macvlan_cnt) {
7759 /* reserve bit 0 for the pf device */
7760 set_bit(0, vsi->fwd_bitmask);
7762 /* Try to reserve as many queues as possible for macvlans. First
7763 * reserve 3/4ths of the max vectors, then half, then a quarter,
7764 * calculating the Qs per macvlan as you go.
7765 */
7766 vectors = pf->num_lan_msix;
7767 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7768 /* allocate 4 Qs per macvlan and 32 Qs to the PF*/
7770 macvlan_cnt = (vectors - 32) / 4;
7771 } else if (vectors <= 64 && vectors > 32) {
7772 /* allocate 2 Qs per macvlan and 16 Qs to the PF*/
7774 macvlan_cnt = (vectors - 16) / 2;
7775 } else if (vectors <= 32 && vectors > 16) {
7776 /* allocate 1 Q per macvlan and 16 Qs to the PF*/
7778 macvlan_cnt = vectors - 16;
7779 } else if (vectors <= 16 && vectors > 8) {
7780 /* allocate 1 Q per macvlan and 8 Qs to the PF */
7782 macvlan_cnt = vectors - 8;
7784 /* allocate 1 Q per macvlan and 1 Q to the PF */
7786 macvlan_cnt = vectors - 1;
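7787 /* e.g. 48 vectors fall in the 33..64 tier above: (48 - 16) / 2 = 16
7788 * macvlans with 2 queues each, leaving 16 queues for the PF */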
7789 if (macvlan_cnt == 0)
7790 return ERR_PTR(-EBUSY);
7792 /* Quiesce VSI queues */
7793 i40e_quiesce_vsi(vsi);
7795 /* sets up the macvlans but does not "enable" them */
7796 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7799 return ERR_PTR(ret);
7802 i40e_unquiesce_vsi(vsi);
7804 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7806 if (avail_macvlan >= I40E_MAX_MACVLANS)
7807 return ERR_PTR(-EBUSY);
7809 /* create the fwd struct */
7810 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7812 return ERR_PTR(-ENOMEM);
7814 set_bit(avail_macvlan, vsi->fwd_bitmask);
7815 fwd->bit_no = avail_macvlan;
7816 netdev_set_sb_channel(vdev, avail_macvlan);
7819 if (!netif_running(netdev))
7822 /* Set fwd ring up */
7823 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7825 /* unbind the queues and drop the subordinate channel config */
7826 netdev_unbind_sb_channel(netdev, vdev);
7827 netdev_set_sb_channel(vdev, 0);
7830 return ERR_PTR(-EINVAL);
7837 * i40e_del_all_macvlans - Delete all the mac filters on the channels
7838 * @vsi: the VSI we want to access
7840 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7842 struct i40e_channel *ch, *ch_tmp;
7843 struct i40e_pf *pf = vsi->back;
7844 struct i40e_hw *hw = &pf->hw;
7845 int aq_err, ret = 0;
7847 if (list_empty(&vsi->macvlan_list))
7850 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7851 if (i40e_is_channel_macvlan(ch)) {
7852 ret = i40e_del_macvlan_filter(hw, ch->seid,
7853 i40e_channel_mac(ch),
7856 /* Reset queue contexts */
7857 i40e_reset_ch_rings(vsi, ch);
7858 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7859 netdev_unbind_sb_channel(vsi->netdev,
7861 netdev_set_sb_channel(ch->fwd->netdev, 0);
7870 * i40e_fwd_del - delete macvlan interfaces
7871 * @netdev: net device to configure
7872 * @vdev: macvlan netdevice
7874 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7876 struct i40e_netdev_priv *np = netdev_priv(netdev);
7877 struct i40e_fwd_adapter *fwd = vdev;
7878 struct i40e_channel *ch, *ch_tmp;
7879 struct i40e_vsi *vsi = np->vsi;
7880 struct i40e_pf *pf = vsi->back;
7881 struct i40e_hw *hw = &pf->hw;
7882 int aq_err, ret = 0;
7884 /* Find the channel associated with the macvlan and del mac filter */
7885 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7886 if (i40e_is_channel_macvlan(ch) &&
7887 ether_addr_equal(i40e_channel_mac(ch),
7888 fwd->netdev->dev_addr)) {
7889 ret = i40e_del_macvlan_filter(hw, ch->seid,
7890 i40e_channel_mac(ch),
7893 /* Reset queue contexts */
7894 i40e_reset_ch_rings(vsi, ch);
7895 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7896 netdev_unbind_sb_channel(netdev, fwd->netdev);
7897 netdev_set_sb_channel(fwd->netdev, 0);
7901 dev_info(&pf->pdev->dev,
7902 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7903 i40e_stat_str(hw, ret),
7904 i40e_aq_str(hw, aq_err));
7912 * i40e_setup_tc - configure multiple traffic classes
7913 * @netdev: net device to configure
7914 * @type_data: tc offload data
7916 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7918 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7919 struct i40e_netdev_priv *np = netdev_priv(netdev);
7920 struct i40e_vsi *vsi = np->vsi;
7921 struct i40e_pf *pf = vsi->back;
7922 u8 enabled_tc = 0, num_tc, hw;
7923 bool need_reset = false;
7924 int old_queue_pairs;
7929 old_queue_pairs = vsi->num_queue_pairs;
7930 num_tc = mqprio_qopt->qopt.num_tc;
7931 hw = mqprio_qopt->qopt.hw;
7932 mode = mqprio_qopt->mode;
7934 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7935 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7939 /* Check if MFP enabled */
7940 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7942 "Configuring TC not supported in MFP mode\n");
7946 case TC_MQPRIO_MODE_DCB:
7947 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7949 /* Check if DCB enabled to continue */
7950 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7952 "DCB is not enabled for adapter\n");
7956 /* Check whether tc count is within enabled limit */
7957 if (num_tc > i40e_pf_get_num_tc(pf)) {
7959 "TC count greater than enabled on link for adapter\n");
7963 case TC_MQPRIO_MODE_CHANNEL:
7964 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7966 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7969 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7971 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7974 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7975 sizeof(*mqprio_qopt));
7976 pf->flags |= I40E_FLAG_TC_MQPRIO;
7977 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7984 /* Generate TC map for number of tc requested */
7985 for (i = 0; i < num_tc; i++)
7986 enabled_tc |= BIT(i);
7988 /* Requesting same TC configuration as already enabled */
7989 if (enabled_tc == vsi->tc_config.enabled_tc &&
7990 mode != TC_MQPRIO_MODE_CHANNEL)
7993 /* Quiesce VSI queues */
7994 i40e_quiesce_vsi(vsi);
7996 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7997 i40e_remove_queue_channels(vsi);
7999 /* Configure VSI for enabled TCs */
8000 ret = i40e_vsi_config_tc(vsi, enabled_tc);
8002 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
8006 } else if (enabled_tc &&
8007 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
8009 "Failed to create channel. Override queues (%u) not power of 2\n",
8010 vsi->tc_config.tc_info[0].qcount);
8016 dev_info(&vsi->back->pdev->dev,
8017 "Setup channel (id:%u) utilizing num_queues %d\n",
8018 vsi->seid, vsi->tc_config.tc_info[0].qcount);
8020 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
8021 if (vsi->mqprio_qopt.max_rate[0]) {
8022 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
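8023 /* mqprio rates arrive in bytes/s; convert to Mbps for the firmware */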
8024 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
8025 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
8027 u64 credits = max_tx_rate;
8029 do_div(credits, I40E_BW_CREDIT_DIVISOR);
8030 dev_dbg(&vsi->back->pdev->dev,
8031 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
8040 ret = i40e_configure_queue_channels(vsi);
8042 vsi->num_queue_pairs = old_queue_pairs;
8044 "Failed configuring queue channels\n");
8051 /* Reset the configuration data to defaults, only TC0 is enabled */
8053 i40e_vsi_set_default_tc_config(vsi);
8058 i40e_unquiesce_vsi(vsi);
8063 * i40e_set_cld_element - sets cloud filter element data
8064 * @filter: cloud filter rule
8065 * @cld: ptr to cloud filter element data
8067 * This is a helper function to copy data into the cloud filter element.
8068 */
8069 static void
8070 i40e_set_cld_element(struct i40e_cloud_filter *filter,
8071 struct i40e_aqc_cloud_filters_element_data *cld)
8076 memset(cld, 0, sizeof(*cld));
8077 ether_addr_copy(cld->outer_mac, filter->dst_mac);
8078 ether_addr_copy(cld->inner_mac, filter->src_mac);
8080 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
8083 if (filter->n_proto == ETH_P_IPV6) {
8084 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
8085 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
8086 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
8088 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
8091 ipa = be32_to_cpu(filter->dst_ipv4);
8093 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8096 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
8098 /* tenant_id is not supported by FW now; once the support is enabled,
8099 * fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
8100 */
8101 if (filter->tenant_id)
8106 * i40e_add_del_cloud_filter - Add/del cloud filter
8107 * @vsi: pointer to VSI
8108 * @filter: cloud filter rule
8109 * @add: if true, add, if false, delete
8111 * Add or delete a cloud filter for a specific flow spec.
8112 * Returns 0 if the filter was successfully added.
8113 */
8114 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8115 struct i40e_cloud_filter *filter, bool add)
8117 struct i40e_aqc_cloud_filters_element_data cld_filter;
8118 struct i40e_pf *pf = vsi->back;
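8119 /* maps internal cloud-filter flag combinations (the index) to AQ ADD_CLOUD_FILTER flag values */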
8120 static const u16 flag_table[128] = {
8121 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
8122 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8123 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
8124 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8125 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
8126 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8127 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8128 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8129 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8130 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8131 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8132 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8133 [I40E_CLOUD_FILTER_FLAGS_IIP] =
8134 I40E_AQC_ADD_CLOUD_FILTER_IIP,
8137 if (filter->flags >= ARRAY_SIZE(flag_table))
8138 return I40E_ERR_CONFIG;
8140 memset(&cld_filter, 0, sizeof(cld_filter));
8142 /* copy element needed to add cloud filter from filter */
8143 i40e_set_cld_element(filter, &cld_filter);
8145 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8146 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8147 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8149 if (filter->n_proto == ETH_P_IPV6)
8150 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8151 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8152 else
8153 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8154 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8156 if (add)
8157 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8158 &cld_filter, 1);
8159 else
8160 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8161 &cld_filter, 1);
8162 if (ret)
8163 dev_dbg(&pf->pdev->dev,
8164 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8165 add ? "add" : "delete", filter->dst_port, ret,
8166 pf->hw.aq.asq_last_status);
8168 dev_info(&pf->pdev->dev,
8169 "%s cloud filter for VSI: %d\n",
8170 add ? "Added" : "Deleted", filter->seid);
8175 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8176 * @vsi: pointer to VSI
8177 * @filter: cloud filter rule
8178 * @add: if true, add, if false, delete
8180 * Add or delete a cloud filter for a specific flow spec using big buffer.
8181 * Returns 0 if the filter was successfully added.
8182 */
8183 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8184 struct i40e_cloud_filter *filter,
8187 struct i40e_aqc_cloud_filters_element_bb cld_filter;
8188 struct i40e_pf *pf = vsi->back;
8191 /* Specifying both valid (src and dst) mac_addr is not supported */
8192 if ((is_valid_ether_addr(filter->dst_mac) &&
8193 is_valid_ether_addr(filter->src_mac)) ||
8194 (is_multicast_ether_addr(filter->dst_mac) &&
8195 is_multicast_ether_addr(filter->src_mac)))
8198 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
8199 * ports are not supported via big buffer now.
8200 */
8201 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8204 /* adding filter using src_port/src_ip is not supported at this stage */
8205 if (filter->src_port ||
8206 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8207 !ipv6_addr_any(&filter->ip.v6.src_ip6))
8210 memset(&cld_filter, 0, sizeof(cld_filter));
8212 /* copy element needed to add cloud filter from filter */
8213 i40e_set_cld_element(filter, &cld_filter.element);
8215 if (is_valid_ether_addr(filter->dst_mac) ||
8216 is_valid_ether_addr(filter->src_mac) ||
8217 is_multicast_ether_addr(filter->dst_mac) ||
8218 is_multicast_ether_addr(filter->src_mac)) {
8219 /* MAC + IP : unsupported mode */
8220 if (filter->dst_ipv4)
8223 /* since we validated that the L4 port must be valid before
8224 * we get here, start with the respective "flags" value
8225 * and update it depending on whether a VLAN is present
8226 */
8227 cld_filter.element.flags =
8228 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8230 if (filter->vlan_id) {
8231 cld_filter.element.flags =
8232 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8235 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8236 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8237 cld_filter.element.flags =
8238 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8239 if (filter->n_proto == ETH_P_IPV6)
8240 cld_filter.element.flags |=
8241 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8242 else
8243 cld_filter.element.flags |=
8244 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8246 dev_err(&pf->pdev->dev,
8247 "either mac or ip has to be valid for cloud filter\n");
8251 /* Now copy L4 port in Byte 6..7 in general fields */
8252 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8253 be16_to_cpu(filter->dst_port);
8256 /* Validate current device switch mode, change if necessary */
8257 ret = i40e_validate_and_set_switch_mode(vsi);
8259 dev_err(&pf->pdev->dev,
8260 "failed to set switch mode, ret %d\n",
8265 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8268 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8273 dev_dbg(&pf->pdev->dev,
8274 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8275 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8277 dev_info(&pf->pdev->dev,
8278 "%s cloud filter for VSI: %d, L4 port: %d\n",
8279 add ? "add" : "delete", filter->seid,
8280 ntohs(filter->dst_port));
8285 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8286 * @vsi: Pointer to VSI
8287 * @f: Pointer to struct flow_cls_offload
8288 * @filter: Pointer to cloud filter structure
8291 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8292 struct flow_cls_offload *f,
8293 struct i40e_cloud_filter *filter)
8295 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8296 struct flow_dissector *dissector = rule->match.dissector;
8297 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8298 struct i40e_pf *pf = vsi->back;
8301 if (dissector->used_keys &
8302 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
8303 BIT(FLOW_DISSECTOR_KEY_BASIC) |
8304 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8305 BIT(FLOW_DISSECTOR_KEY_VLAN) |
8306 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8307 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8308 BIT(FLOW_DISSECTOR_KEY_PORTS) |
8309 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8310 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
8311 dissector->used_keys);
8315 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8316 struct flow_match_enc_keyid match;
8318 flow_rule_match_enc_keyid(rule, &match);
8319 if (match.mask->keyid != 0)
8320 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8322 filter->tenant_id = be32_to_cpu(match.key->keyid);
8325 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8326 struct flow_match_basic match;
8328 flow_rule_match_basic(rule, &match);
8329 n_proto_key = ntohs(match.key->n_proto);
8330 n_proto_mask = ntohs(match.mask->n_proto);
8332 if (n_proto_key == ETH_P_ALL) {
8336 filter->n_proto = n_proto_key & n_proto_mask;
8337 filter->ip_proto = match.key->ip_proto;
8340 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8341 struct flow_match_eth_addrs match;
8343 flow_rule_match_eth_addrs(rule, &match);
8345 /* use is_broadcast and is_zero to check for all 0xff or all 0 */
8346 if (!is_zero_ether_addr(match.mask->dst)) {
8347 if (is_broadcast_ether_addr(match.mask->dst)) {
8348 field_flags |= I40E_CLOUD_FIELD_OMAC;
8350 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8352 return I40E_ERR_CONFIG;
8356 if (!is_zero_ether_addr(match.mask->src)) {
8357 if (is_broadcast_ether_addr(match.mask->src)) {
8358 field_flags |= I40E_CLOUD_FIELD_IMAC;
8360 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8362 return I40E_ERR_CONFIG;
8365 ether_addr_copy(filter->dst_mac, match.key->dst);
8366 ether_addr_copy(filter->src_mac, match.key->src);
8369 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8370 struct flow_match_vlan match;
8372 flow_rule_match_vlan(rule, &match);
8373 if (match.mask->vlan_id) {
8374 if (match.mask->vlan_id == VLAN_VID_MASK) {
8375 field_flags |= I40E_CLOUD_FIELD_IVLAN;
8378 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8379 match.mask->vlan_id);
8380 return I40E_ERR_CONFIG;
8384 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8387 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8388 struct flow_match_control match;
8390 flow_rule_match_control(rule, &match);
8391 addr_type = match.key->addr_type;
8394 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8395 struct flow_match_ipv4_addrs match;
8397 flow_rule_match_ipv4_addrs(rule, &match);
8398 if (match.mask->dst) {
8399 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8400 field_flags |= I40E_CLOUD_FIELD_IIP;
8402 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8404 return I40E_ERR_CONFIG;
8408 if (match.mask->src) {
8409 if (match.mask->src == cpu_to_be32(0xffffffff)) {
8410 field_flags |= I40E_CLOUD_FIELD_IIP;
8412 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8414 return I40E_ERR_CONFIG;
8418 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8419 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8420 return I40E_ERR_CONFIG;
8422 filter->dst_ipv4 = match.key->dst;
8423 filter->src_ipv4 = match.key->src;
8426 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8427 struct flow_match_ipv6_addrs match;
8429 flow_rule_match_ipv6_addrs(rule, &match);
8431 /* src and dest IPV6 address should not be LOOPBACK
8432 * (0:0:0:0:0:0:0:1), which can be represented as ::1
8433 */
8434 if (ipv6_addr_loopback(&match.key->dst) ||
8435 ipv6_addr_loopback(&match.key->src)) {
8436 dev_err(&pf->pdev->dev,
8437 "Bad ipv6, addr is LOOPBACK\n");
8438 return I40E_ERR_CONFIG;
8440 if (!ipv6_addr_any(&match.mask->dst) ||
8441 !ipv6_addr_any(&match.mask->src))
8442 field_flags |= I40E_CLOUD_FIELD_IIP;
8444 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8445 sizeof(filter->src_ipv6));
8446 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8447 sizeof(filter->dst_ipv6));
8450 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8451 struct flow_match_ports match;
8453 flow_rule_match_ports(rule, &match);
8454 if (match.mask->src) {
8455 if (match.mask->src == cpu_to_be16(0xffff)) {
8456 field_flags |= I40E_CLOUD_FIELD_IIP;
8458 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8459 be16_to_cpu(match.mask->src));
8460 return I40E_ERR_CONFIG;
8464 if (match.mask->dst) {
8465 if (match.mask->dst == cpu_to_be16(0xffff)) {
8466 field_flags |= I40E_CLOUD_FIELD_IIP;
8468 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8469 be16_to_cpu(match.mask->dst));
8470 return I40E_ERR_CONFIG;
8474 filter->dst_port = match.key->dst;
8475 filter->src_port = match.key->src;
8477 switch (filter->ip_proto) {
8482 dev_err(&pf->pdev->dev,
8483 "Only UDP and TCP transport are supported\n");
8487 filter->flags = field_flags;
8492 * i40e_handle_tclass - Forward to a traffic class on the device
8493 * @vsi: Pointer to VSI
8494 * @tc: traffic class index on the device
8495 * @filter: Pointer to cloud filter structure
8498 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8499 struct i40e_cloud_filter *filter)
8501 struct i40e_channel *ch, *ch_tmp;
8503 /* direct to a traffic class on the same device */
8505 filter->seid = vsi->seid;
8507 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8508 if (!filter->dst_port) {
8509 dev_err(&vsi->back->pdev->dev,
8510 "Specify destination port to direct to traffic class that is not default\n");
8513 if (list_empty(&vsi->ch_list))
8515 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8516 list) {
8517 if (ch->seid == vsi->tc_seid_map[tc])
8518 filter->seid = ch->seid;
8522 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8527 * i40e_configure_clsflower - Configure tc flower filters
8528 * @vsi: Pointer to VSI
8529 * @cls_flower: Pointer to struct flow_cls_offload
8532 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8533 struct flow_cls_offload *cls_flower)
8535 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8536 struct i40e_cloud_filter *filter = NULL;
8537 struct i40e_pf *pf = vsi->back;
8541 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8546 dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
8550 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8551 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8554 if (pf->fdir_pf_active_filters ||
8555 (!hlist_empty(&pf->fdir_filter_list))) {
8556 dev_err(&vsi->back->pdev->dev,
8557 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8561 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8562 dev_err(&vsi->back->pdev->dev,
8563 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8564 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8565 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8568 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8572 filter->cookie = cls_flower->cookie;
8574 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8578 err = i40e_handle_tclass(vsi, tc, filter);
8582 /* Add cloud filter */
8583 if (filter->dst_port)
8584 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8586 err = i40e_add_del_cloud_filter(vsi, filter, true);
8589 dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
8594 /* add filter to the ordered list */
8595 INIT_HLIST_NODE(&filter->cloud_node);
8597 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8599 pf->num_cloud_filters++;
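/* Illustrative usage, not part of the driver: cloud filters reach this path
 * from tc-flower; e.g. (interface name and addresses are hypothetical):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 protocol ip ingress flower \
 *       dst_ip 192.168.1.10 ip_proto tcp dst_port 80 hw_tc 1
 *
 * hw_tc selects the traffic class that i40e_handle_tclass() maps to a
 * channel VSI SEID, and a set dst_port steers the request to the
 * big-buffer filter variant above.
 */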
8608 * i40e_find_cloud_filter - Find the cloud filter in the list
8609 * @vsi: Pointer to VSI
8610 * @cookie: filter specific cookie
8613 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8614 unsigned long *cookie)
8616 struct i40e_cloud_filter *filter = NULL;
8617 struct hlist_node *node2;
8619 hlist_for_each_entry_safe(filter, node2,
8620 &vsi->back->cloud_filter_list, cloud_node)
8621 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8627 * i40e_delete_clsflower - Remove tc flower filters
8628 * @vsi: Pointer to VSI
8629 * @cls_flower: Pointer to struct flow_cls_offload
8632 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8633 struct flow_cls_offload *cls_flower)
8635 struct i40e_cloud_filter *filter = NULL;
8636 struct i40e_pf *pf = vsi->back;
8639 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8644 hash_del(&filter->cloud_node);
8646 if (filter->dst_port)
8647 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8649 err = i40e_add_del_cloud_filter(vsi, filter, false);
8653 dev_err(&pf->pdev->dev,
8654 "Failed to delete cloud filter, err %s\n",
8655 i40e_stat_str(&pf->hw, err));
8656 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8659 pf->num_cloud_filters--;
8660 if (!pf->num_cloud_filters)
8661 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8662 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8663 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8664 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8665 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8671 * i40e_setup_tc_cls_flower - flower classifier offloads
8672 * @np: net device to configure
8673 * @cls_flower: offload data
8675 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8676 struct flow_cls_offload *cls_flower)
8678 struct i40e_vsi *vsi = np->vsi;
8680 switch (cls_flower->command) {
8681 case FLOW_CLS_REPLACE:
8682 return i40e_configure_clsflower(vsi, cls_flower);
8683 case FLOW_CLS_DESTROY:
8684 return i40e_delete_clsflower(vsi, cls_flower);
8685 case FLOW_CLS_STATS:
8692 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8695 struct i40e_netdev_priv *np = cb_priv;
8697 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8701 case TC_SETUP_CLSFLOWER:
8702 return i40e_setup_tc_cls_flower(np, type_data);
8709 static LIST_HEAD(i40e_block_cb_list);
8711 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8714 struct i40e_netdev_priv *np = netdev_priv(netdev);
8717 case TC_SETUP_QDISC_MQPRIO:
8718 return i40e_setup_tc(netdev, type_data);
8719 case TC_SETUP_BLOCK:
8720 return flow_block_cb_setup_simple(type_data,
8721 &i40e_block_cb_list,
8722 i40e_setup_tc_block_cb,
8730 * i40e_open - Called when a network interface is made active
8731 * @netdev: network interface device structure
8733 * The open entry point is called when a network interface is made
8734 * active by the system (IFF_UP). At this point all resources needed
8735 * for transmit and receive operations are allocated, the interrupt
8736 * handler is registered with the OS, the netdev watchdog subtask is
8737 * enabled, and the stack is notified that the interface is ready.
8739 * Returns 0 on success, negative value on failure
8741 int i40e_open(struct net_device *netdev)
8743 struct i40e_netdev_priv *np = netdev_priv(netdev);
8744 struct i40e_vsi *vsi = np->vsi;
8745 struct i40e_pf *pf = vsi->back;
8748 /* disallow open during test or if eeprom is broken */
8749 if (test_bit(__I40E_TESTING, pf->state) ||
8750 test_bit(__I40E_BAD_EEPROM, pf->state))
8753 netif_carrier_off(netdev);
8755 if (i40e_force_link_state(pf, true))
8758 err = i40e_vsi_open(vsi);
8762 /* configure global TSO hardware offload settings */
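/* The TCP_FLAG_* values are big-endian constants spanning the third 32-bit
 * word of the TCP header, so be32_to_cpu() plus the 16-bit right shift
 * reduces each one to the 16-bit data-offset/flags field these mask
 * registers expect; e.g. TCP_FLAG_PSH | TCP_FLAG_FIN
 * (0x00080000 | 0x00010000) becomes 0x0009 and TCP_FLAG_CWR (0x00800000)
 * becomes 0x0080.
 */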
8763 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8764 TCP_FLAG_FIN) >> 16);
8765 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8767 TCP_FLAG_CWR) >> 16);
8768 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8769 udp_tunnel_get_rx_info(netdev);
8775 * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
8776 * @vsi: vsi structure
8778 * This updates netdev's number of tx/rx queues
8780 * Returns status of setting tx/rx queues
8782 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
8786 ret = netif_set_real_num_rx_queues(vsi->netdev,
8787 vsi->num_queue_pairs);
8791 return netif_set_real_num_tx_queues(vsi->netdev,
8792 vsi->num_queue_pairs);
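/* Note: the same vsi->num_queue_pairs count is applied in both directions;
 * the driver allocates its queues in symmetric Tx/Rx pairs, so the stack's
 * real Tx and Rx queue counts always match.
 */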
8797 * @vsi: the VSI to open
8799 * Finish initialization of the VSI.
8801 * Returns 0 on success, negative value on failure
8803 * Note: expects to be called while under rtnl_lock()
8805 int i40e_vsi_open(struct i40e_vsi *vsi)
8807 struct i40e_pf *pf = vsi->back;
8808 char int_name[I40E_INT_NAME_STR_LEN];
8811 /* allocate descriptors */
8812 err = i40e_vsi_setup_tx_resources(vsi);
8815 err = i40e_vsi_setup_rx_resources(vsi);
8819 err = i40e_vsi_configure(vsi);
8824 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8825 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8826 err = i40e_vsi_request_irq(vsi, int_name);
8830 /* Notify the stack of the actual queue counts. */
8831 err = i40e_netif_set_realnum_tx_rx_queues(vsi);
8833 goto err_set_queues;
8835 } else if (vsi->type == I40E_VSI_FDIR) {
8836 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8837 dev_driver_string(&pf->pdev->dev),
8838 dev_name(&pf->pdev->dev));
8839 err = i40e_vsi_request_irq(vsi, int_name);
8848 err = i40e_up_complete(vsi);
8850 goto err_up_complete;
8857 i40e_vsi_free_irq(vsi);
8859 i40e_vsi_free_rx_resources(vsi);
8861 i40e_vsi_free_tx_resources(vsi);
8862 if (vsi == pf->vsi[pf->lan_vsi])
8863 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8869 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8870 * @pf: Pointer to PF
8872 * This function destroys the hlist where all the Flow Director
8873 * filters were saved.
8875 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8877 struct i40e_fdir_filter *filter;
8878 struct i40e_flex_pit *pit_entry, *tmp;
8879 struct hlist_node *node2;
8881 hlist_for_each_entry_safe(filter, node2,
8882 &pf->fdir_filter_list, fdir_node) {
8883 hlist_del(&filter->fdir_node);
8887 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8888 list_del(&pit_entry->list);
8891 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8893 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8894 list_del(&pit_entry->list);
8897 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8899 pf->fdir_pf_active_filters = 0;
8900 i40e_reset_fdir_filter_cnt(pf);
8902 /* Reprogram the default input set for TCP/IPv4 */
8903 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8904 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8905 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8907 /* Reprogram the default input set for TCP/IPv6 */
8908 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
8909 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8910 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8912 /* Reprogram the default input set for UDP/IPv4 */
8913 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8914 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8915 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8917 /* Reprogram the default input set for UDP/IPv6 */
8918 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
8919 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8920 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8922 /* Reprogram the default input set for SCTP/IPv4 */
8923 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8924 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8925 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8927 /* Reprogram the default input set for SCTP/IPv6 */
8928 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
8929 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8930 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8932 /* Reprogram the default input set for Other/IPv4 */
8933 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8934 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8936 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8937 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8939 /* Reprogram the default input set for Other/IPv6 */
8940 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
8941 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8943 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
8944 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
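/* The OTHER and FRAG pctypes above carry no parseable L4 header, which is
 * why their defaults are restored with the L3 source/destination masks
 * only, while the TCP/UDP/SCTP pctypes also restore the L4 port masks.
 */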
8948 * i40e_cloud_filter_exit - Cleans up the cloud filters
8949 * @pf: Pointer to PF
8951 * This function destroys the hlist where all the cloud filters were saved.
8954 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8956 struct i40e_cloud_filter *cfilter;
8957 struct hlist_node *node;
8959 hlist_for_each_entry_safe(cfilter, node,
8960 &pf->cloud_filter_list, cloud_node) {
8961 hlist_del(&cfilter->cloud_node);
8964 pf->num_cloud_filters = 0;
8966 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8967 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8968 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8969 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8970 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8975 * i40e_close - Disables a network interface
8976 * @netdev: network interface device structure
8978 * The close entry point is called when an interface is de-activated
8979 * by the OS. The hardware is still under the driver's control, but
8980 * this netdev interface is disabled.
8982 * Returns 0, this is not allowed to fail
8984 int i40e_close(struct net_device *netdev)
8986 struct i40e_netdev_priv *np = netdev_priv(netdev);
8987 struct i40e_vsi *vsi = np->vsi;
8989 i40e_vsi_close(vsi);
8995 * i40e_do_reset - Start a PF or Core Reset sequence
8996 * @pf: board private structure
8997 * @reset_flags: which reset is requested
8998 * @lock_acquired: indicates whether or not the lock has been acquired
8999 * before this function was called.
9001 * The essential difference in resets is that the PF Reset
9002 * doesn't clear the packet buffers, doesn't reset the PE
9003 * firmware, and doesn't bother the other PFs on the chip.
9005 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
9009 /* do the biggest reset indicated */
9010 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9012 /* Request a Global Reset
9014 * This will start the chip's countdown to the actual full
9015 * chip reset event, and a warning interrupt to be sent
9016 * to all PFs, including the requestor. Our handler
9017 * for the warning interrupt will deal with the shutdown
9018 * and recovery of the switch setup.
9020 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9021 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9022 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9023 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9025 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9027 /* Request a Core Reset
9029 * Same as Global Reset, except does *not* include the MAC/PHY
9031 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9032 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9033 val |= I40E_GLGEN_RTRIG_CORER_MASK;
9034 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9035 i40e_flush(&pf->hw);
9037 } else if (reset_flags & I40E_PF_RESET_FLAG) {
9039 /* Request a PF Reset
9041 * Resets only the PF-specific registers
9043 * This goes directly to the tear-down and rebuild of
9044 * the switch, since we need to do all the recovery as
9045 * for the Core Reset.
9047 dev_dbg(&pf->pdev->dev, "PFR requested\n");
9048 i40e_handle_reset_warning(pf, lock_acquired);
9050 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9051 /* Request a PF Reset
9053 * Resets the PF and reinitializes the PF's VSIs.
9055 i40e_prep_for_reset(pf);
9056 i40e_reset_and_rebuild(pf, true, lock_acquired);
9057 dev_info(&pf->pdev->dev,
9058 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
9059 "FW LLDP is disabled\n" :
9060 "FW LLDP is enabled\n");
9062 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9065 /* Find the VSI(s) that requested a re-init */
9066 dev_info(&pf->pdev->dev,
9067 "VSI reinit requested\n");
9068 for (v = 0; v < pf->num_alloc_vsi; v++) {
9069 struct i40e_vsi *vsi = pf->vsi[v];
9072 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
9074 i40e_vsi_reinit_locked(pf->vsi[v]);
9076 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9079 /* Find the VSI(s) that needs to be brought down */
9080 dev_info(&pf->pdev->dev, "VSI down requested\n");
9081 for (v = 0; v < pf->num_alloc_vsi; v++) {
9082 struct i40e_vsi *vsi = pf->vsi[v];
9085 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
9087 set_bit(__I40E_VSI_DOWN, vsi->state);
9092 dev_info(&pf->pdev->dev,
9093 "bad reset request 0x%08x\n", reset_flags);
9097 #ifdef CONFIG_I40E_DCB
9099 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9100 * @pf: board private structure
9101 * @old_cfg: current DCB config
9102 * @new_cfg: new DCB config
9104 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9105 struct i40e_dcbx_config *old_cfg,
9106 struct i40e_dcbx_config *new_cfg)
9108 bool need_reconfig = false;
9110 /* Check if ETS configuration has changed */
9111 if (memcmp(&new_cfg->etscfg,
9113 sizeof(new_cfg->etscfg))) {
9114 /* If Priority Table has changed reconfig is needed */
9115 if (memcmp(&new_cfg->etscfg.prioritytable,
9116 &old_cfg->etscfg.prioritytable,
9117 sizeof(new_cfg->etscfg.prioritytable))) {
9118 need_reconfig = true;
9119 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9122 if (memcmp(&new_cfg->etscfg.tcbwtable,
9123 &old_cfg->etscfg.tcbwtable,
9124 sizeof(new_cfg->etscfg.tcbwtable)))
9125 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9127 if (memcmp(&new_cfg->etscfg.tsatable,
9128 &old_cfg->etscfg.tsatable,
9129 sizeof(new_cfg->etscfg.tsatable)))
9130 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9133 /* Check if PFC configuration has changed */
9134 if (memcmp(&new_cfg->pfc,
9136 sizeof(new_cfg->pfc))) {
9137 need_reconfig = true;
9138 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9141 /* Check if APP Table has changed */
9142 if (memcmp(&new_cfg->app,
9144 sizeof(new_cfg->app))) {
9145 need_reconfig = true;
9146 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9149 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9150 return need_reconfig;
9154 * i40e_handle_lldp_event - Handle LLDP Change MIB event
9155 * @pf: board private structure
9156 * @e: event info posted on ARQ
9158 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9159 struct i40e_arq_event_info *e)
9161 struct i40e_aqc_lldp_get_mib *mib =
9162 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9163 struct i40e_hw *hw = &pf->hw;
9164 struct i40e_dcbx_config tmp_dcbx_cfg;
9165 bool need_reconfig = false;
9169 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9170 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9171 (hw->phy.link_info.link_speed &
9172 ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9173 !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9174 /* let firmware decide if the DCB should be disabled */
9175 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9177 /* Not DCB capable or capability disabled */
9178 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
9181 /* Ignore if event is not for Nearest Bridge */
9182 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9183 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9184 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9185 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9188 /* Check MIB Type and return if event for Remote MIB update */
9189 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9190 dev_dbg(&pf->pdev->dev,
9191 "LLDP event mib type %s\n", type ? "remote" : "local");
9192 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9193 /* Update the remote cached instance and return */
9194 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9195 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9196 &hw->remote_dcbx_config);
9200 /* Store the old configuration */
9201 tmp_dcbx_cfg = hw->local_dcbx_config;
9203 /* Reset the old DCBx configuration data */
9204 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9205 /* Get updated DCBX data from firmware */
9206 ret = i40e_get_dcb_config(&pf->hw);
9208 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9209 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9210 (hw->phy.link_info.link_speed &
9211 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9212 dev_warn(&pf->pdev->dev,
9213 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9214 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9216 dev_info(&pf->pdev->dev,
9217 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
9218 i40e_stat_str(&pf->hw, ret),
9219 i40e_aq_str(&pf->hw,
9220 pf->hw.aq.asq_last_status));
9225 /* No change detected in DCBX configs */
9226 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9227 sizeof(tmp_dcbx_cfg))) {
9228 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9232 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9233 &hw->local_dcbx_config);
9235 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9240 /* Enable DCB tagging only when more than one TC */
9241 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9242 pf->flags |= I40E_FLAG_DCB_ENABLED;
9244 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9246 set_bit(__I40E_PORT_SUSPENDED, pf->state);
9247 /* Reconfiguration needed, so quiesce all VSIs */
9248 i40e_pf_quiesce_all_vsi(pf);
9250 /* Changes in configuration update VEB/VSI */
9251 i40e_dcb_reconfigure(pf);
9253 ret = i40e_resume_port_tx(pf);
9255 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9256 /* In case of error no point in resuming VSIs */
9260 /* Wait for the PF's queues to be disabled */
9261 ret = i40e_pf_wait_queues_disabled(pf);
9263 /* Schedule PF reset to recover */
9264 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9265 i40e_service_event_schedule(pf);
9267 i40e_pf_unquiesce_all_vsi(pf);
9268 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9269 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9275 #endif /* CONFIG_I40E_DCB */
9278 * i40e_do_reset_safe - Protected reset path for userland calls.
9279 * @pf: board private structure
9280 * @reset_flags: which reset is requested
9283 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9286 i40e_do_reset(pf, reset_flags, true);
9291 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9292 * @pf: board private structure
9293 * @e: event info posted on ARQ
9295 * Handler for LAN Queue Overflow Event generated by the firmware for PF
9298 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9299 struct i40e_arq_event_info *e)
9301 struct i40e_aqc_lan_overflow *data =
9302 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9303 u32 queue = le32_to_cpu(data->prtdcb_rupto);
9304 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9305 struct i40e_hw *hw = &pf->hw;
9309 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9312 /* Queue belongs to VF, find the VF and issue VF reset */
9313 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9314 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
9315 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9316 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9317 vf_id -= hw->func_caps.vf_base_id;
9318 vf = &pf->vf[vf_id];
9319 i40e_vc_notify_vf_reset(vf);
9320 /* Allow VF to process pending reset notification */
9322 i40e_reset_vf(vf, false);
9327 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9328 * @pf: board private structure
9330 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9334 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9335 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9340 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9341 * @pf: board private structure
9343 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9347 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9348 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9349 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
9350 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
9355 * i40e_get_global_fd_count - Get total FD filters programmed on device
9356 * @pf: board private structure
9358 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9362 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9363 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9364 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
9365 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
9370 * i40e_reenable_fdir_sb - Restore FDir SB capability
9371 * @pf: board private structure
9373 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9375 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9376 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9377 (I40E_DEBUG_FD & pf->hw.debug_mask))
9378 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9382 * i40e_reenable_fdir_atr - Restore FDir ATR capability
9383 * @pf: board private structure
9385 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9387 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9388 /* ATR uses the same filtering logic as SB rules. It only
9389 * functions properly if the input set mask is at the default
9390 * settings. It is safe to restore the default input set
9391 * because there are no active TCPv4 filter rules.
9393 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9394 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9395 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9397 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9398 (I40E_DEBUG_FD & pf->hw.debug_mask))
9399 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9404 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9405 * @pf: board private structure
9406 * @filter: FDir filter to remove
9408 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9409 struct i40e_fdir_filter *filter)
9411 /* Update counters */
9412 pf->fdir_pf_active_filters--;
9415 switch (filter->flow_type) {
9417 pf->fd_tcp4_filter_cnt--;
9420 pf->fd_udp4_filter_cnt--;
9423 pf->fd_sctp4_filter_cnt--;
9426 pf->fd_tcp6_filter_cnt--;
9429 pf->fd_udp6_filter_cnt--;
9432 pf->fd_sctp6_filter_cnt--;
9435 switch (filter->ipl4_proto) {
9437 pf->fd_tcp4_filter_cnt--;
9440 pf->fd_udp4_filter_cnt--;
9443 pf->fd_sctp4_filter_cnt--;
9446 pf->fd_ip4_filter_cnt--;
9450 case IPV6_USER_FLOW:
9451 switch (filter->ipl4_proto) {
9453 pf->fd_tcp6_filter_cnt--;
9456 pf->fd_udp6_filter_cnt--;
9459 pf->fd_sctp6_filter_cnt--;
9462 pf->fd_ip6_filter_cnt--;
9468 /* Remove the filter from the list and free memory */
9469 hlist_del(&filter->fdir_node);
9474 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9475 * @pf: board private structure
9477 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9479 struct i40e_fdir_filter *filter;
9480 u32 fcnt_prog, fcnt_avail;
9481 struct hlist_node *node;
9483 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9486 /* Check if we have enough room to re-enable FDir SB capability. */
9487 fcnt_prog = i40e_get_global_fd_count(pf);
9488 fcnt_avail = pf->fdir_pf_filter_count;
9489 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9490 (pf->fd_add_err == 0) ||
9491 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9492 i40e_reenable_fdir_sb(pf);
9494 /* We should wait for even more space before re-enabling ATR.
9495 * Additionally, we cannot enable ATR as long as we still have TCP SB rules.
9498 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9499 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9500 i40e_reenable_fdir_atr(pf);
9502 /* if hw had a problem adding a filter, delete it */
9503 if (pf->fd_inv > 0) {
9504 hlist_for_each_entry_safe(filter, node,
9505 &pf->fdir_filter_list, fdir_node)
9506 if (filter->fd_id == pf->fd_inv)
9507 i40e_delete_invalid_filter(pf, filter);
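/* The two re-enable thresholds above are deliberately asymmetric: sideband
 * rules come back as soon as modest head room exists, while ATR waits for
 * the larger I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR margin and for every TCP
 * sideband rule to be gone, since ATR samples TCP flows and would conflict
 * with explicit TCP rules.
 */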
9511 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9512 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9514 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9515 * @pf: board private structure
9517 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9519 unsigned long min_flush_time;
9520 int flush_wait_retry = 50;
9521 bool disable_atr = false;
9525 if (!time_after(jiffies, pf->fd_flush_timestamp +
9526 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9529 /* If the flush is happening too quickly and we have mostly SB rules we
9530 * should not re-enable ATR for some time.
9532 min_flush_time = pf->fd_flush_timestamp +
9533 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9534 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9536 if (!(time_after(jiffies, min_flush_time)) &&
9537 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9538 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9539 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9543 pf->fd_flush_timestamp = jiffies;
9544 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9545 /* flush all filters */
9546 wr32(&pf->hw, I40E_PFQF_CTL_1,
9547 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9548 i40e_flush(&pf->hw);
9552 /* Check FD flush status every 5-6msec */
9553 usleep_range(5000, 6000);
9554 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9555 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9557 } while (flush_wait_retry--);
9558 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9559 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9561 /* replay sideband filters */
9562 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9563 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9564 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9565 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9566 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9567 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9572 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9573 * @pf: board private structure
9575 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9577 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
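/* Worked example of the accounting (illustrative values): if FDSTAT reports
 * GUARANT_CNT = 5 and BEST_CNT = 4, i40e_get_current_fd_count() returns 9;
 * with 5 of those programmed as sideband rules (fdir_pf_active_filters),
 * the remaining 4 are ATR filters the hardware added on its own.
 */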
9581 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9582 * @pf: board private structure
9584 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9587 /* if interface is down do nothing */
9588 if (test_bit(__I40E_DOWN, pf->state))
9591 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9592 i40e_fdir_flush_and_replay(pf);
9594 i40e_fdir_check_and_reenable(pf);
9599 * i40e_vsi_link_event - notify VSI of a link event
9600 * @vsi: vsi to be notified
9601 * @link_up: link up or down
9603 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9605 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9608 switch (vsi->type) {
9610 if (!vsi->netdev || !vsi->netdev_registered)
9614 netif_carrier_on(vsi->netdev);
9615 netif_tx_wake_all_queues(vsi->netdev);
9617 netif_carrier_off(vsi->netdev);
9618 netif_tx_stop_all_queues(vsi->netdev);
9622 case I40E_VSI_SRIOV:
9623 case I40E_VSI_VMDQ2:
9625 case I40E_VSI_IWARP:
9626 case I40E_VSI_MIRROR:
9628 /* there is no notification for other VSIs */
9634 * i40e_veb_link_event - notify elements on the veb of a link event
9635 * @veb: veb to be notified
9636 * @link_up: link up or down
9638 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9643 if (!veb || !veb->pf)
9647 /* depth first... */
9648 for (i = 0; i < I40E_MAX_VEB; i++)
9649 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9650 i40e_veb_link_event(pf->veb[i], link_up);
9652 /* ... now the local VSIs */
9653 for (i = 0; i < pf->num_alloc_vsi; i++)
9654 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9655 i40e_vsi_link_event(pf->vsi[i], link_up);
9659 * i40e_link_event - Update netif_carrier status
9660 * @pf: board private structure
9662 static void i40e_link_event(struct i40e_pf *pf)
9664 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9665 u8 new_link_speed, old_link_speed;
9667 bool new_link, old_link;
9668 #ifdef CONFIG_I40E_DCB
9670 #endif /* CONFIG_I40E_DCB */
9672 /* set this to force the get_link_status call to refresh state */
9673 pf->hw.phy.get_link_info = true;
9674 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9675 status = i40e_get_link_status(&pf->hw, &new_link);
9677 /* On success, disable temp link polling */
9678 if (status == I40E_SUCCESS) {
9679 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9681 /* Enable link polling temporarily until i40e_get_link_status
9682 * returns I40E_SUCCESS
9684 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9685 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9690 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9691 new_link_speed = pf->hw.phy.link_info.link_speed;
9693 if (new_link == old_link &&
9694 new_link_speed == old_link_speed &&
9695 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9696 new_link == netif_carrier_ok(vsi->netdev)))
9699 i40e_print_link_message(vsi, new_link);
9701 /* Notify the base of the switch tree connected to
9702 * the link. Floating VEBs are not notified.
9704 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9705 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9707 i40e_vsi_link_event(vsi, new_link);
9710 i40e_vc_notify_link_state(pf);
9712 if (pf->flags & I40E_FLAG_PTP)
9713 i40e_ptp_set_increment(pf);
9714 #ifdef CONFIG_I40E_DCB
9715 if (new_link == old_link)
9717 /* Not SW DCB so firmware will take care of default settings */
9718 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9721 /* We only cover link down here; after link up, in the SW DCB case,
9722 * the SW LLDP agent will take care of setting it up
9725 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9726 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9727 err = i40e_dcb_sw_default_config(pf);
9729 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
9730 I40E_FLAG_DCB_ENABLED);
9732 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9733 DCB_CAP_DCBX_VER_IEEE;
9734 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9735 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9738 #endif /* CONFIG_I40E_DCB */
9742 * i40e_watchdog_subtask - periodic checks not using event-driven response
9743 * @pf: board private structure
9745 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9749 /* if interface is down do nothing */
9750 if (test_bit(__I40E_DOWN, pf->state) ||
9751 test_bit(__I40E_CONFIG_BUSY, pf->state))
9754 /* make sure we don't do these things too often */
9755 if (time_before(jiffies, (pf->service_timer_previous +
9756 pf->service_timer_period)))
9758 pf->service_timer_previous = jiffies;
9760 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9761 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9762 i40e_link_event(pf);
9764 /* Update the stats for active netdevs so the network stack
9765 * can look at updated numbers whenever it cares to
9767 for (i = 0; i < pf->num_alloc_vsi; i++)
9768 if (pf->vsi[i] && pf->vsi[i]->netdev)
9769 i40e_update_stats(pf->vsi[i]);
9771 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9772 /* Update the stats for the active switching components */
9773 for (i = 0; i < I40E_MAX_VEB; i++)
9775 i40e_update_veb_stats(pf->veb[i]);
9778 i40e_ptp_rx_hang(pf);
9779 i40e_ptp_tx_hang(pf);
9783 * i40e_reset_subtask - Set up for resetting the device and driver
9784 * @pf: board private structure
9786 static void i40e_reset_subtask(struct i40e_pf *pf)
9788 u32 reset_flags = 0;
9790 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9791 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9792 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9794 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9795 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9796 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9798 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9799 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9800 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9802 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9803 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9804 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9806 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9807 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9808 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
9811 /* If there's a recovery already waiting, it takes
9812 * precedence over starting a new reset sequence.
9814 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9815 i40e_prep_for_reset(pf);
9817 i40e_rebuild(pf, false, false);
9820 /* If we're already down or resetting, just bail */
9822 !test_bit(__I40E_DOWN, pf->state) &&
9823 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9824 i40e_do_reset(pf, reset_flags, false);
9829 * i40e_handle_link_event - Handle link event
9830 * @pf: board private structure
9831 * @e: event info posted on ARQ
9833 static void i40e_handle_link_event(struct i40e_pf *pf,
9834 struct i40e_arq_event_info *e)
9836 struct i40e_aqc_get_link_status *status =
9837 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9839 /* Do a new status request to re-enable LSE reporting
9840 * and load new status information into the hw struct
9841 * This completely ignores any state information
9842 * in the ARQ event info, instead choosing to always
9843 * issue the AQ update link status command.
9845 i40e_link_event(pf);
9847 /* Check if module meets thermal requirements */
9848 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9849 dev_err(&pf->pdev->dev,
9850 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9851 dev_err(&pf->pdev->dev,
9852 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9854 /* check for an unqualified module; if link is down, suppress
9855 * the message when the link was forced down.
9857 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9858 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9859 (!(status->link_info & I40E_AQ_LINK_UP)) &&
9860 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9861 dev_err(&pf->pdev->dev,
9862 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9863 dev_err(&pf->pdev->dev,
9864 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9870 * i40e_clean_adminq_subtask - Clean the AdminQ rings
9871 * @pf: board private structure
9873 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9875 struct i40e_arq_event_info event;
9876 struct i40e_hw *hw = &pf->hw;
9883 /* Do not run clean AQ when PF reset fails */
9884 if (test_bit(__I40E_RESET_FAILED, pf->state))
9887 /* check for error indications */
9888 val = rd32(&pf->hw, pf->hw.aq.arq.len);
9890 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9891 if (hw->debug_mask & I40E_DEBUG_AQ)
9892 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9893 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9895 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9896 if (hw->debug_mask & I40E_DEBUG_AQ)
9897 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9898 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9899 pf->arq_overflows++;
9901 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9902 if (hw->debug_mask & I40E_DEBUG_AQ)
9903 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9904 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9907 wr32(&pf->hw, pf->hw.aq.arq.len, val);
9909 val = rd32(&pf->hw, pf->hw.aq.asq.len);
9911 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9912 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9913 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9914 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9916 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9917 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9918 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9919 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9921 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9922 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9923 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9924 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9927 wr32(&pf->hw, pf->hw.aq.asq.len, val);
9929 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9930 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9935 ret = i40e_clean_arq_element(hw, &event, &pending);
9936 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9939 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9943 opcode = le16_to_cpu(event.desc.opcode);
9946 case i40e_aqc_opc_get_link_status:
9948 i40e_handle_link_event(pf, &event);
9951 case i40e_aqc_opc_send_msg_to_pf:
9952 ret = i40e_vc_process_vf_msg(pf,
9953 le16_to_cpu(event.desc.retval),
9954 le32_to_cpu(event.desc.cookie_high),
9955 le32_to_cpu(event.desc.cookie_low),
9959 case i40e_aqc_opc_lldp_update_mib:
9960 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9961 #ifdef CONFIG_I40E_DCB
9963 i40e_handle_lldp_event(pf, &event);
9965 #endif /* CONFIG_I40E_DCB */
9967 case i40e_aqc_opc_event_lan_overflow:
9968 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9969 i40e_handle_lan_overflow_event(pf, &event);
9971 case i40e_aqc_opc_send_msg_to_peer:
9972 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9974 case i40e_aqc_opc_nvm_erase:
9975 case i40e_aqc_opc_nvm_update:
9976 case i40e_aqc_opc_oem_post_update:
9977 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9978 "ARQ NVM operation 0x%04x completed\n",
9982 dev_info(&pf->pdev->dev,
9983 "ARQ: Unknown event 0x%04x ignored\n",
9987 } while (i++ < pf->adminq_work_limit);
9989 if (i < pf->adminq_work_limit)
9990 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9992 /* re-enable Admin queue interrupt cause */
9993 val = rd32(hw, I40E_PFINT_ICR0_ENA);
9994 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9995 wr32(hw, I40E_PFINT_ICR0_ENA, val);
9998 kfree(event.msg_buf);
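/* Note the asymmetry above: __I40E_ADMINQ_EVENT_PENDING is cleared only
 * when the ring drained before the work limit was hit; otherwise the bit
 * stays set so a later service-task pass knows events may still be
 * outstanding.
 */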
10002 * i40e_verify_eeprom - make sure eeprom is good to use
10003 * @pf: board private structure
10005 static void i40e_verify_eeprom(struct i40e_pf *pf)
10009 err = i40e_diag_eeprom_test(&pf->hw);
10011 /* retry in case of garbage read */
10012 err = i40e_diag_eeprom_test(&pf->hw);
10014 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
10016 set_bit(__I40E_BAD_EEPROM, pf->state);
10020 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
10021 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
10022 clear_bit(__I40E_BAD_EEPROM, pf->state);
10027 * i40e_enable_pf_switch_lb
10028 * @pf: pointer to the PF structure
10030 * enable switch loop back or die - no point in a return value
10032 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10034 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10035 struct i40e_vsi_context ctxt;
10038 ctxt.seid = pf->main_vsi_seid;
10039 ctxt.pf_num = pf->hw.pf_id;
10041 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10043 dev_info(&pf->pdev->dev,
10044 "couldn't get PF vsi config, err %s aq_err %s\n",
10045 i40e_stat_str(&pf->hw, ret),
10046 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10049 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10050 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10051 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10053 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10055 dev_info(&pf->pdev->dev,
10056 "update vsi switch failed, err %s aq_err %s\n",
10057 i40e_stat_str(&pf->hw, ret),
10058 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10063 * i40e_disable_pf_switch_lb
10064 * @pf: pointer to the PF structure
10066 * disable switch loop back or die - no point in a return value
10068 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10070 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10071 struct i40e_vsi_context ctxt;
10074 ctxt.seid = pf->main_vsi_seid;
10075 ctxt.pf_num = pf->hw.pf_id;
10077 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10079 dev_info(&pf->pdev->dev,
10080 "couldn't get PF vsi config, err %s aq_err %s\n",
10081 i40e_stat_str(&pf->hw, ret),
10082 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10085 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10086 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10087 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10089 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10091 dev_info(&pf->pdev->dev,
10092 "update vsi switch failed, err %s aq_err %s\n",
10093 i40e_stat_str(&pf->hw, ret),
10094 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10099 * i40e_config_bridge_mode - Configure the HW bridge mode
10100 * @veb: pointer to the bridge instance
10102 * Configure the loop back mode for the LAN VSI that is downlink to the
10103 * specified HW bridge instance. It is expected this function is called
10104 * when a new HW bridge is instantiated.
10106 static void i40e_config_bridge_mode(struct i40e_veb *veb)
10108 struct i40e_pf *pf = veb->pf;
10110 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10111 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10112 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10113 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
10114 i40e_disable_pf_switch_lb(pf);
10116 i40e_enable_pf_switch_lb(pf);
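/* Background for the choice above: in VEB mode the internal switch may
 * forward traffic between local VSIs directly, so PF switch loopback is
 * enabled; in VEPA mode all traffic must hair-pin through the adjacent
 * external switch, so loopback is disabled.
 */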
10120 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10121 * @veb: pointer to the VEB instance
10123 * This is a recursive function that first builds the attached VSIs then
10124 * recurses to build the next layer of VEBs. We track the connections
10125 * through our own index numbers because the SEIDs from the HW could
10126 * change across the reset.
10128 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10130 struct i40e_vsi *ctl_vsi = NULL;
10131 struct i40e_pf *pf = veb->pf;
10135 /* build VSI that owns this VEB, temporarily attached to base VEB */
10136 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
10138 pf->vsi[v]->veb_idx == veb->idx &&
10139 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
10140 ctl_vsi = pf->vsi[v];
10145 dev_info(&pf->pdev->dev,
10146 "missing owner VSI for veb_idx %d\n", veb->idx);
10148 goto end_reconstitute;
10150 if (ctl_vsi != pf->vsi[pf->lan_vsi])
10151 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10152 ret = i40e_add_vsi(ctl_vsi);
10154 dev_info(&pf->pdev->dev,
10155 "rebuild of veb_idx %d owner VSI failed: %d\n",
10157 goto end_reconstitute;
10159 i40e_vsi_reset_stats(ctl_vsi);
10161 /* create the VEB in the switch and move the VSI onto the VEB */
10162 ret = i40e_add_veb(veb, ctl_vsi);
10164 goto end_reconstitute;
10166 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10167 veb->bridge_mode = BRIDGE_MODE_VEB;
10169 veb->bridge_mode = BRIDGE_MODE_VEPA;
10170 i40e_config_bridge_mode(veb);
10172 /* create the remaining VSIs attached to this VEB */
10173 for (v = 0; v < pf->num_alloc_vsi; v++) {
10174 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10177 if (pf->vsi[v]->veb_idx == veb->idx) {
10178 struct i40e_vsi *vsi = pf->vsi[v];
10180 vsi->uplink_seid = veb->seid;
10181 ret = i40e_add_vsi(vsi);
10183 dev_info(&pf->pdev->dev,
10184 "rebuild of vsi_idx %d failed: %d\n",
10186 goto end_reconstitute;
10188 i40e_vsi_reset_stats(vsi);
10192 /* create any VEBs attached to this VEB - RECURSION */
10193 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10194 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10195 pf->veb[veb_idx]->uplink_seid = veb->seid;
10196 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10207 * i40e_get_capabilities - get info about the HW
10208 * @pf: the PF struct
10209 * @list_type: AQ capability to be queried
10211 static int i40e_get_capabilities(struct i40e_pf *pf,
10212 enum i40e_admin_queue_opc list_type)
10214 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10219 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
10221 cap_buf = kzalloc(buf_len, GFP_KERNEL);
10225 /* this loads the data into the hw struct for us */
10226 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10227 &data_size, list_type,
10229 /* data loaded, buffer no longer needed */
10232 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10233 /* retry with a larger buffer */
10234 buf_len = data_size;
10235 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10236 dev_info(&pf->pdev->dev,
10237 "capability discovery failed, err %s aq_err %s\n",
10238 i40e_stat_str(&pf->hw, err),
10239 i40e_aq_str(&pf->hw,
10240 pf->hw.aq.asq_last_status));
10245 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10246 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10247 dev_info(&pf->pdev->dev,
10248 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10249 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10250 pf->hw.func_caps.num_msix_vectors,
10251 pf->hw.func_caps.num_msix_vectors_vf,
10252 pf->hw.func_caps.fd_filters_guaranteed,
10253 pf->hw.func_caps.fd_filters_best_effort,
10254 pf->hw.func_caps.num_tx_qp,
10255 pf->hw.func_caps.num_vsis);
10256 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10257 dev_info(&pf->pdev->dev,
10258 "switch_mode=0x%04x, function_valid=0x%08x\n",
10259 pf->hw.dev_caps.switch_mode,
10260 pf->hw.dev_caps.valid_functions);
10261 dev_info(&pf->pdev->dev,
10262 "SR-IOV=%d, num_vfs for all function=%u\n",
10263 pf->hw.dev_caps.sr_iov_1_1,
10264 pf->hw.dev_caps.num_vfs);
10265 dev_info(&pf->pdev->dev,
10266 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10267 pf->hw.dev_caps.num_vsis,
10268 pf->hw.dev_caps.num_rx_qp,
10269 pf->hw.dev_caps.num_tx_qp);
10272 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10273 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10274 + pf->hw.func_caps.num_vfs)
10275 if (pf->hw.revision_id == 0 &&
10276 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10277 dev_info(&pf->pdev->dev,
10278 "got num_vsis %d, setting num_vsis to %d\n",
10279 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10280 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10286 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10289 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10290 * @pf: board private structure
10292 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10294 struct i40e_vsi *vsi;
10296 /* quick workaround for an NVM issue that leaves a critical register uninitialized */
10299 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10300 static const u32 hkey[] = {
10301 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10302 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10303 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10307 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10308 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10311 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
10314 /* find existing VSI and see if it needs configuring */
10315 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10317 /* create a new VSI if none exists */
10319 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10320 pf->vsi[pf->lan_vsi]->seid, 0);
10322 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10323 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10324 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10329 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10333 * i40e_fdir_teardown - release the Flow Director resources
10334 * @pf: board private structure
10336 static void i40e_fdir_teardown(struct i40e_pf *pf)
10338 struct i40e_vsi *vsi;
10340 i40e_fdir_filter_exit(pf);
10341 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10343 i40e_vsi_release(vsi);
10347 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10348 * @vsi: PF main vsi
10349 * @seid: seid of main or channel VSIs
10351 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10352 * existed before reset
10354 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10356 struct i40e_cloud_filter *cfilter;
10357 struct i40e_pf *pf = vsi->back;
10358 struct hlist_node *node;
10361 /* Add cloud filters back if they exist */
10362 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10364 if (cfilter->seid != seid)
10367 if (cfilter->dst_port)
10368 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10371 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10374 dev_dbg(&pf->pdev->dev,
10375 "Failed to rebuild cloud filter, err %s aq_err %s\n",
10376 i40e_stat_str(&pf->hw, ret),
10377 i40e_aq_str(&pf->hw,
10378 pf->hw.aq.asq_last_status));
10386 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10387 * @vsi: PF main vsi
10389 * Rebuilds channel VSIs if they existed before reset
10391 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10393 struct i40e_channel *ch, *ch_tmp;
10396 if (list_empty(&vsi->ch_list))
10399 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
10400 if (!ch->initialized)
10402 /* Proceed with creation of channel (VMDq2) VSI */
10403 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10405 dev_info(&vsi->back->pdev->dev,
10406 "failed to rebuild channels using uplink_seid %u\n",
10410 /* Reconfigure TX queues using QTX_CTL register */
10411 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10413 dev_info(&vsi->back->pdev->dev,
10414 "failed to configure TX rings for channel %u\n",
10418 /* update 'next_base_queue' */
10419 vsi->next_base_queue = vsi->next_base_queue +
10420 ch->num_queue_pairs;
10421 if (ch->max_tx_rate) {
10422 u64 credits = ch->max_tx_rate;
10424 if (i40e_set_bw_limit(vsi, ch->seid,
10428 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10429 dev_dbg(&vsi->back->pdev->dev,
10430 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10435 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
10437 dev_dbg(&vsi->back->pdev->dev,
10438 "Failed to rebuild cloud filters for channel VSI %u\n",
10447 * i40e_prep_for_reset - prep for the core to reset
10448 * @pf: board private structure
10450 * Close up the VFs and other things in prep for PF Reset.
10452 static void i40e_prep_for_reset(struct i40e_pf *pf)
10454 struct i40e_hw *hw = &pf->hw;
10455 i40e_status ret = 0;
10458 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
10459 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
10461 if (i40e_check_asq_alive(&pf->hw))
10462 i40e_vc_notify_reset(pf);
10464 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
10466 /* quiesce the VSIs and their queues that are not already DOWN */
10467 i40e_pf_quiesce_all_vsi(pf);
10469 for (v = 0; v < pf->num_alloc_vsi; v++) {
10471 pf->vsi[v]->seid = 0;
10474 i40e_shutdown_adminq(&pf->hw);
10476 /* call shutdown HMC */
10477 if (hw->hmc.hmc_obj) {
10478 ret = i40e_shutdown_lan_hmc(hw);
10480 dev_warn(&pf->pdev->dev,
10481 "shutdown_lan_hmc failed: %d\n", ret);
10484 /* Save the current PTP time so that we can restore it after the reset completes. */
10487 i40e_ptp_save_hw_time(pf);
10491 * i40e_send_version - update firmware with driver version
10494 static void i40e_send_version(struct i40e_pf *pf)
10496 struct i40e_driver_version dv;
10498 dv.major_version = 0xff;
10499 dv.minor_version = 0xff;
10500 dv.build_version = 0xff;
10501 dv.subbuild_version = 0;
10502 strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
10503 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
10507 * i40e_get_oem_version - get OEM specific version information
10508 * @hw: pointer to the hardware structure
10510 static void i40e_get_oem_version(struct i40e_hw *hw)
10512 u16 block_offset = 0xffff;
10513 u16 block_length = 0;
10514 u16 capabilities = 0;
10518 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
10519 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
10520 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
10521 #define I40E_NVM_OEM_GEN_OFFSET 0x02
10522 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
10523 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
10524 #define I40E_NVM_OEM_LENGTH 3
10526 /* Check if pointer to OEM version block is valid. */
10527 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
10528 if (block_offset == 0xffff)
10531 /* Check if OEM version block has correct length. */
10532 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
10534 if (block_length < I40E_NVM_OEM_LENGTH)
10537 /* Check if OEM version format is as expected. */
10538 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
10540 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
10543 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
10545 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
10547 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
10548 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
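/* Resulting layout, assuming the usual 16-bit I40E_OEM_SNAP_SHIFT: the
 * gen/snapshot word read from NVM occupies the upper half of nvm.oem_ver
 * and the release word the lower half, while eetrack is pinned to the OEM
 * sentinel so version reporting takes the OEM formatting path.
 */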
10552 * i40e_reset - wait for core reset to finish; reset the PF if a CoreR was not seen
10553 * @pf: board private structure
10555 static int i40e_reset(struct i40e_pf *pf)
10557 struct i40e_hw *hw = &pf->hw;
10560 ret = i40e_pf_reset(hw);
10562 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
10563 set_bit(__I40E_RESET_FAILED, pf->state);
10564 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10572 * i40e_rebuild - rebuild using a saved config
10573 * @pf: board private structure
10574 * @reinit: if the Main VSI needs to be re-initialized.
10575 * @lock_acquired: indicates whether or not the lock has been acquired
10576 * before this function was called.
10578 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10580 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
10581 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10582 struct i40e_hw *hw = &pf->hw;
10587 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10588 i40e_check_recovery_mode(pf)) {
10589 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10592 if (test_bit(__I40E_DOWN, pf->state) &&
10593 !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
10594 !old_recovery_mode_bit)
10595 goto clear_recovery;
10596 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
10598 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
10599 ret = i40e_init_adminq(&pf->hw);
10601 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
10602 i40e_stat_str(&pf->hw, ret),
10603 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10604 goto clear_recovery;
10606 i40e_get_oem_version(&pf->hw);
10608 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
10609 /* The following delay is necessary for firmware update. */
10613 /* re-verify the eeprom if we just had an EMP reset */
10614 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
10615 i40e_verify_eeprom(pf);
10617 /* if we are going out of or into recovery mode we have to act
10618 * accordingly with regard to resource initialization
10619 * and deinitialization
10621 if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
10622 old_recovery_mode_bit) {
10623 if (i40e_get_capabilities(pf,
10624 i40e_aqc_opc_list_func_capabilities))
10627 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10628 /* we're staying in recovery mode so we'll reinitialize misc vector here */
10631 if (i40e_setup_misc_vector_for_recovery_mode(pf))
10634 if (!lock_acquired)
10636 /* we're going out of recovery mode so we'll free
10637 * the IRQ allocated specifically for recovery mode
10638 * and restore the interrupt scheme
10640 free_irq(pf->pdev->irq, pf);
10641 i40e_clear_interrupt_scheme(pf);
10642 if (i40e_restore_interrupt_scheme(pf))
10646 /* tell the firmware that we're starting */
10647 i40e_send_version(pf);
10649 /* bail out in case recovery mode was detected, as there is
10650 * no need for further configuration.
10655 i40e_clear_pxe_mode(hw);
10656 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10658 goto end_core_reset;
10660 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10661 hw->func_caps.num_rx_qp, 0, 0);
10663 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10664 goto end_core_reset;
10666 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10668 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10669 goto end_core_reset;
10672 #ifdef CONFIG_I40E_DCB
10673 /* Enable FW to write a default DCB config on link-up
10674 * unless I40E_FLAG_TC_MQPRIO is set or DCB
10675 * is not supported at the new link speed
10677 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
10678 i40e_aq_set_dcb_parameters(hw, false, NULL);
10680 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
10681 (hw->phy.link_info.link_speed &
10682 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
10683 i40e_aq_set_dcb_parameters(hw, false, NULL);
10684 dev_warn(&pf->pdev->dev,
10685 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
10686 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10688 i40e_aq_set_dcb_parameters(hw, true, NULL);
10689 ret = i40e_init_pf_dcb(pf);
10691 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
10693 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10694 /* Continue without DCB enabled */
10699 #endif /* CONFIG_I40E_DCB */
10700 if (!lock_acquired)
10702 ret = i40e_setup_pf_switch(pf, reinit, true);
10706 /* The driver only wants link up/down and module qualification
10707 * reports from firmware. Note the negative logic.
10709 ret = i40e_aq_set_phy_int_mask(&pf->hw,
10710 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10711 I40E_AQ_EVENT_MEDIA_NA |
10712 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10714 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10715 i40e_stat_str(&pf->hw, ret),
10716 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10718 /* Rebuild the VSIs and VEBs that existed before reset.
10719 * They are still in our local switch element arrays, so only
10720 * need to rebuild the switch model in the HW.
10722 * If there were VEBs but the reconstitution failed, we'll try
10723 * to recover minimal use by getting the basic PF VSI working.
10725 if (vsi->uplink_seid != pf->mac_seid) {
10726 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10727 /* find the one VEB connected to the MAC, and find orphans */
10728 for (v = 0; v < I40E_MAX_VEB; v++) {
10732 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10733 pf->veb[v]->uplink_seid == 0) {
10734 ret = i40e_reconstitute_veb(pf->veb[v]);
10739 /* If Main VEB failed, we're in deep doodoo,
10740 * so give up rebuilding the switch and set up
10741 * for minimal rebuild of PF VSI.
10742 * If orphan failed, we'll report the error
10743 * but try to keep going.
10745 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10746 dev_info(&pf->pdev->dev,
10747 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10749 vsi->uplink_seid = pf->mac_seid;
10751 } else if (pf->veb[v]->uplink_seid == 0) {
10752 dev_info(&pf->pdev->dev,
10753 "rebuild of orphan VEB failed: %d\n",
10760 if (vsi->uplink_seid == pf->mac_seid) {
10761 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10762 /* no VEB, so rebuild only the Main VSI */
10763 ret = i40e_add_vsi(vsi);
10765 dev_info(&pf->pdev->dev,
10766 "rebuild of Main VSI failed: %d\n", ret);
10771 if (vsi->mqprio_qopt.max_rate[0]) {
10772 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10775 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10776 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10780 credits = max_tx_rate;
10781 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10782 dev_dbg(&vsi->back->pdev->dev,
10783 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10789 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10793 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
10794 * for this main VSI if they exist
10796 ret = i40e_rebuild_channels(vsi);
10800 /* Reconfigure hardware to allow a smaller MSS in the case
10801 * of TSO, so that we avoid the MDD being fired and causing
10802 * a reset in the case of small MSS+TSO.
10804 #define I40E_REG_MSS 0x000E64DC
10805 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
10806 #define I40E_64BYTE_MSS 0x400000
10807 val = rd32(hw, I40E_REG_MSS);
10808 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10809 val &= ~I40E_REG_MSS_MIN_MASK;
10810 val |= I40E_64BYTE_MSS;
10811 wr32(hw, I40E_REG_MSS, val);
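/* Illustrative sketch, not driver code: the read-modify-write pattern the
 * block above uses to clamp the minimum-MSS field. The register layout is
 * a hypothetical stand-in for rd32()/wr32() and the I40E_REG_MSS_* masks.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK 0x3FF0000u	/* bits 16..25, like I40E_REG_MSS_MIN_MASK */
#define FIELD_64B  0x400000u	/* 64 << 16, like I40E_64BYTE_MSS */

static uint32_t clamp_min_mss(uint32_t reg)
{
	if ((reg & FIELD_MASK) > FIELD_64B) {	/* only rewrite when too big */
		reg &= ~FIELD_MASK;		/* clear the old field */
		reg |= FIELD_64B;		/* set the 64-byte floor */
	}
	return reg;
}

int main(void)
{
	printf("0x%08x\n", clamp_min_mss(0x0A500000u));	/* prints 0x08400000 */
	return 0;
}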
10814 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10816 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10818 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10819 i40e_stat_str(&pf->hw, ret),
10820 i40e_aq_str(&pf->hw,
10821 pf->hw.aq.asq_last_status));
10823 /* reinit the misc interrupt */
10824 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10825 ret = i40e_setup_misc_vector(pf);
10827 /* Add a filter to drop all Flow control frames from any VSI before they
10828 * are transmitted. By doing so we stop a malicious VF from sending out
10829 * PAUSE or PFC frames and potentially controlling traffic for other VSIs.
10831 * The FW can still send Flow control frames if enabled.
10833 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10834 pf->main_vsi_seid);
10836 /* restart the VSIs that were rebuilt and running before the reset */
10837 i40e_pf_unquiesce_all_vsi(pf);
10839 /* Release the RTNL lock before we start resetting VFs */
10840 if (!lock_acquired)
10843 /* Restore promiscuous settings */
10844 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10846 dev_warn(&pf->pdev->dev,
10847 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10848 pf->cur_promisc ? "on" : "off",
10849 i40e_stat_str(&pf->hw, ret),
10850 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10852 i40e_reset_all_vfs(pf, true);
10854 /* tell the firmware that we're starting */
10855 i40e_send_version(pf);
10857 /* We've already released the lock, so don't do it again */
10858 goto end_core_reset;
10861 if (!lock_acquired)
10864 clear_bit(__I40E_RESET_FAILED, pf->state);
10866 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10867 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
10871 * i40e_reset_and_rebuild - reset and rebuild using a saved config
10872 * @pf: board private structure
10873 * @reinit: if the Main VSI needs to be re-initialized.
10874 * @lock_acquired: indicates whether or not the lock has been acquired
10875 * before this function was called.
10877 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
10878 bool lock_acquired)
10882 if (test_bit(__I40E_IN_REMOVE, pf->state))
10884 /* Now we wait for GRST to settle out.
10885 * We don't have to delete the VEBs or VSIs from the hw switch
10886 * because the reset will make them disappear.
10888 ret = i40e_reset(pf);
10890 i40e_rebuild(pf, reinit, lock_acquired);
10894 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
10895 * @pf: board private structure
10897 * @lock_acquired: indicates whether or not the lock has been acquired
10898 * before this function was called.
10899 * Close up the VFs and other things in prep for a Core Reset,
10900 * then get ready to rebuild the world.
10902 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
10904 i40e_prep_for_reset(pf);
10905 i40e_reset_and_rebuild(pf, false, lock_acquired);
10909 * i40e_handle_mdd_event - handle a Malicious Driver Detection event
10910 * @pf: pointer to the PF structure
10912 * Called from the MDD irq handler to identify possibly malicious VFs
10914 static void i40e_handle_mdd_event(struct i40e_pf *pf)
10916 struct i40e_hw *hw = &pf->hw;
10917 bool mdd_detected = false;
10918 struct i40e_vf *vf;
10922 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10925 /* find what triggered the MDD event */
10926 reg = rd32(hw, I40E_GL_MDET_TX);
10927 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10928 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10929 I40E_GL_MDET_TX_PF_NUM_SHIFT;
10930 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10931 I40E_GL_MDET_TX_VF_NUM_SHIFT;
10932 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10933 I40E_GL_MDET_TX_EVENT_SHIFT;
10934 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10935 I40E_GL_MDET_TX_QUEUE_SHIFT) -
10936 pf->hw.func_caps.base_queue;
10937 if (netif_msg_tx_err(pf))
10938 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10939 event, queue, pf_num, vf_num);
10940 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10941 mdd_detected = true;
10943 reg = rd32(hw, I40E_GL_MDET_RX);
10944 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10945 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10946 I40E_GL_MDET_RX_FUNCTION_SHIFT;
10947 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10948 I40E_GL_MDET_RX_EVENT_SHIFT;
10949 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10950 I40E_GL_MDET_RX_QUEUE_SHIFT) -
10951 pf->hw.func_caps.base_queue;
10952 if (netif_msg_rx_err(pf))
10953 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10954 event, queue, func);
10955 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10956 mdd_detected = true;
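/* Illustrative sketch, not driver code: the mask-then-shift decode used on
 * both MDD cause registers above. The masks and shifts are made-up
 * stand-ins for the real I40E_GL_MDET register definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define EVT_MASK  0xFF000000u
#define EVT_SHIFT 24
#define Q_MASK    0x00FFF000u
#define Q_SHIFT   12

int main(void)
{
	uint32_t reg = 0x2A00B000u;	/* pretend cause-register value */
	uint8_t event = (uint8_t)((reg & EVT_MASK) >> EVT_SHIFT);
	uint16_t queue = (uint16_t)((reg & Q_MASK) >> Q_SHIFT);

	printf("event 0x%02x queue %u\n", event, queue);	/* 0x2a, 11 */
	return 0;
}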
10959 if (mdd_detected) {
10960 reg = rd32(hw, I40E_PF_MDET_TX);
10961 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10962 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10963 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10965 reg = rd32(hw, I40E_PF_MDET_RX);
10966 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10967 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10968 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10972 /* see if one of the VFs needs its hand slapped */
10973 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10975 reg = rd32(hw, I40E_VP_MDET_TX(i));
10976 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10977 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10978 vf->num_mdd_events++;
10979 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10981 dev_info(&pf->pdev->dev,
10982 "Use PF Control I/F to re-enable the VF\n");
10983 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10986 reg = rd32(hw, I40E_VP_MDET_RX(i));
10987 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10988 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10989 vf->num_mdd_events++;
10990 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10992 dev_info(&pf->pdev->dev,
10993 "Use PF Control I/F to re-enable the VF\n");
10994 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10998 /* re-enable mdd interrupt cause */
10999 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
11000 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
11001 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
11002 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
11007 * i40e_service_task - Run the driver's async subtasks
11008 * @work: pointer to work_struct containing our data
11010 static void i40e_service_task(struct work_struct *work)
11012 struct i40e_pf *pf = container_of(work,
11015 unsigned long start_time = jiffies;
11017 /* don't bother with service tasks if a reset is in progress */
11018 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
11019 test_bit(__I40E_SUSPENDED, pf->state))
11022 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
11025 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
11026 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
11027 i40e_sync_filters_subtask(pf);
11028 i40e_reset_subtask(pf);
11029 i40e_handle_mdd_event(pf);
11030 i40e_vc_process_vflr_event(pf);
11031 i40e_watchdog_subtask(pf);
11032 i40e_fdir_reinit_subtask(pf);
11033 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
11034 /* Client subtask will reopen next time through. */
11035 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
11038 i40e_client_subtask(pf);
11039 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
11041 i40e_notify_client_of_l2_param_changes(
11042 pf->vsi[pf->lan_vsi]);
11044 i40e_sync_filters_subtask(pf);
11046 i40e_reset_subtask(pf);
11049 i40e_clean_adminq_subtask(pf);
11051 /* flush memory to make sure state is correct before next watchdog */
11052 smp_mb__before_atomic();
11053 clear_bit(__I40E_SERVICE_SCHED, pf->state);
11055 /* If the tasks have taken longer than one timer cycle or there
11056 * is more work to be done, reschedule the service task now
11057 * rather than wait for the timer to tick again.
11059 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
11060 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
11061 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
11062 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
11063 i40e_service_event_schedule(pf);
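/* Illustrative sketch, not driver code: the wrap-safe deadline test that
 * time_after() performs on jiffies in the check above, approximated in
 * user space with an unsigned counter and a signed-difference compare.
 */
#include <stdio.h>

static int after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;	/* true if a is later, even on wrap */
}

int main(void)
{
	unsigned long start = (unsigned long)-5;	/* about to wrap */
	unsigned long now = 10;				/* wrapped past it */

	printf("%d\n", after(now, start + 8));		/* prints 1 */
	return 0;
}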
11067 * i40e_service_timer - timer callback
11068 * @t: timer list pointer
11070 static void i40e_service_timer(struct timer_list *t)
11072 struct i40e_pf *pf = from_timer(pf, t, service_timer);
11074 mod_timer(&pf->service_timer,
11075 round_jiffies(jiffies + pf->service_timer_period));
11076 i40e_service_event_schedule(pf);
11080 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
11081 * @vsi: the VSI being configured
11083 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
11085 struct i40e_pf *pf = vsi->back;
11087 switch (vsi->type) {
11088 case I40E_VSI_MAIN:
11089 vsi->alloc_queue_pairs = pf->num_lan_qps;
11090 if (!vsi->num_tx_desc)
11091 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11092 I40E_REQ_DESCRIPTOR_MULTIPLE);
11093 if (!vsi->num_rx_desc)
11094 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11095 I40E_REQ_DESCRIPTOR_MULTIPLE);
11096 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11097 vsi->num_q_vectors = pf->num_lan_msix;
11099 vsi->num_q_vectors = 1;
11103 case I40E_VSI_FDIR:
11104 vsi->alloc_queue_pairs = 1;
11105 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11106 I40E_REQ_DESCRIPTOR_MULTIPLE);
11107 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11108 I40E_REQ_DESCRIPTOR_MULTIPLE);
11109 vsi->num_q_vectors = pf->num_fdsb_msix;
11112 case I40E_VSI_VMDQ2:
11113 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
11114 if (!vsi->num_tx_desc)
11115 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11116 I40E_REQ_DESCRIPTOR_MULTIPLE);
11117 if (!vsi->num_rx_desc)
11118 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11119 I40E_REQ_DESCRIPTOR_MULTIPLE);
11120 vsi->num_q_vectors = pf->num_vmdq_msix;
11123 case I40E_VSI_SRIOV:
11124 vsi->alloc_queue_pairs = pf->num_vf_qps;
11125 if (!vsi->num_tx_desc)
11126 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11127 I40E_REQ_DESCRIPTOR_MULTIPLE);
11128 if (!vsi->num_rx_desc)
11129 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11130 I40E_REQ_DESCRIPTOR_MULTIPLE);
11138 if (is_kdump_kernel()) {
11139 vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
11140 vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
11147 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
11148 * @vsi: VSI pointer
11149 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
11151 * On error: returns error code (negative)
11152 * On success: returns 0
11154 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
11156 struct i40e_ring **next_rings;
11160 /* allocate memory for the Tx, XDP Tx and Rx ring pointers */
11161 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
11162 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
11163 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
11164 if (!vsi->tx_rings)
11166 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
11167 if (i40e_enabled_xdp_vsi(vsi)) {
11168 vsi->xdp_rings = next_rings;
11169 next_rings += vsi->alloc_queue_pairs;
11171 vsi->rx_rings = next_rings;
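/* Illustrative sketch, not driver code: carving a single allocation into
 * the Tx, XDP-Tx and Rx ring-pointer arrays as the code above does with
 * one kzalloc(). calloc() stands in for kzalloc(); 'struct ring' is an
 * opaque placeholder.
 */
#include <stdlib.h>

struct ring;

struct vsi_arrays {
	struct ring **tx, **xdp, **rx;
};

static int alloc_ring_arrays(struct vsi_arrays *v, int pairs, int has_xdp)
{
	int sets = has_xdp ? 3 : 2;
	struct ring **p = calloc((size_t)pairs * sets, sizeof(*p));

	if (!p)
		return -1;
	v->tx = p;			/* first chunk: Tx */
	p += pairs;
	v->xdp = has_xdp ? p : NULL;	/* optional middle chunk: XDP Tx */
	if (has_xdp)
		p += pairs;
	v->rx = p;			/* last chunk: Rx */
	return 0;
}

int main(void)
{
	struct vsi_arrays v = { 0 };
	int err = alloc_ring_arrays(&v, 4, 1);

	free(v.tx);	/* one free releases all three arrays */
	return err;
}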
11173 if (alloc_qvectors) {
11174 /* allocate memory for q_vector pointers */
11175 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
11176 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
11177 if (!vsi->q_vectors) {
11185 kfree(vsi->tx_rings);
11190 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11191 * @pf: board private structure
11192 * @type: type of VSI
11194 * On error: returns error code (negative)
11195 * On success: returns vsi index in PF (positive)
11197 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
11200 struct i40e_vsi *vsi;
11204 /* Need to protect the allocation of the VSIs at the PF level */
11205 mutex_lock(&pf->switch_mutex);
11207 /* VSI list may be fragmented if VSI creation/destruction has
11208 * been happening. We can afford to do a quick scan to look
11209 * for any free VSIs in the list.
11211 * find next empty vsi slot, looping back around if necessary
11214 while (i < pf->num_alloc_vsi && pf->vsi[i])
11216 if (i >= pf->num_alloc_vsi) {
11218 while (i < pf->next_vsi && pf->vsi[i])
11222 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
11223 vsi_idx = i; /* Found one! */
11226 goto unlock_pf; /* out of VSI slots! */
11228 pf->next_vsi = ++i;
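/* Illustrative sketch, not driver code: the two-pass free-slot scan above,
 * which searches from next_vsi to the end of the table and then wraps to
 * the front, so allocations tend to walk forward through the array.
 */
#include <stdio.h>

static int find_free_slot(void *slots[], int n, int start)
{
	int i = start;

	while (i < n && slots[i])		/* pass 1: start..end */
		i++;
	if (i >= n) {
		i = 0;				/* pass 2: wrap to front */
		while (i < start && slots[i])
			i++;
	}
	return (i < n && !slots[i]) ? i : -1;
}

int main(void)
{
	void *slots[4] = { (void *)1, NULL, (void *)1, (void *)1 };

	printf("%d\n", find_free_slot(slots, 4, 2));	/* wraps, prints 1 */
	return 0;
}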
11230 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
11237 set_bit(__I40E_VSI_DOWN, vsi->state);
11239 vsi->idx = vsi_idx;
11240 vsi->int_rate_limit = 0;
11241 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
11242 pf->rss_table_size : 64;
11243 vsi->netdev_registered = false;
11244 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
11245 hash_init(vsi->mac_filter_hash);
11246 vsi->irqs_ready = false;
11248 if (type == I40E_VSI_MAIN) {
11249 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
11250 if (!vsi->af_xdp_zc_qps)
11254 ret = i40e_set_num_rings_in_vsi(vsi);
11258 ret = i40e_vsi_alloc_arrays(vsi, true);
11262 /* Setup default MSIX irq handler for VSI */
11263 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
11265 /* Initialize VSI lock */
11266 spin_lock_init(&vsi->mac_filter_hash_lock);
11267 pf->vsi[vsi_idx] = vsi;
11272 bitmap_free(vsi->af_xdp_zc_qps);
11273 pf->next_vsi = i - 1;
11276 mutex_unlock(&pf->switch_mutex);
11281 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
11282 * @vsi: VSI pointer
11283 * @free_qvectors: a bool to specify if q_vectors need to be freed.
11285 * There is no return value.
11288 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
11290 /* free the ring and vector containers */
11291 if (free_qvectors) {
11292 kfree(vsi->q_vectors);
11293 vsi->q_vectors = NULL;
11295 kfree(vsi->tx_rings);
11296 vsi->tx_rings = NULL;
11297 vsi->rx_rings = NULL;
11298 vsi->xdp_rings = NULL;
11302 * i40e_clear_rss_config_user - clear the user-configured RSS hash key and lookup table
11304 * @vsi: Pointer to VSI structure
11306 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
11311 kfree(vsi->rss_hkey_user);
11312 vsi->rss_hkey_user = NULL;
11314 kfree(vsi->rss_lut_user);
11315 vsi->rss_lut_user = NULL;
11319 * i40e_vsi_clear - Deallocate the VSI provided
11320 * @vsi: the VSI being un-configured
11322 static int i40e_vsi_clear(struct i40e_vsi *vsi)
11324 struct i40e_pf *pf;
11333 mutex_lock(&pf->switch_mutex);
11334 if (!pf->vsi[vsi->idx]) {
11335 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
11336 vsi->idx, vsi->idx, vsi->type);
11340 if (pf->vsi[vsi->idx] != vsi) {
11341 dev_err(&pf->pdev->dev,
11342 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
11343 pf->vsi[vsi->idx]->idx,
11344 pf->vsi[vsi->idx]->type,
11345 vsi->idx, vsi->type);
11349 /* updates the PF for this cleared vsi */
11350 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
11351 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
11353 bitmap_free(vsi->af_xdp_zc_qps);
11354 i40e_vsi_free_arrays(vsi, true);
11355 i40e_clear_rss_config_user(vsi);
11357 pf->vsi[vsi->idx] = NULL;
11358 if (vsi->idx < pf->next_vsi)
11359 pf->next_vsi = vsi->idx;
11362 mutex_unlock(&pf->switch_mutex);
11370 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
11371 * @vsi: the VSI being cleaned
11373 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
11377 if (vsi->tx_rings && vsi->tx_rings[0]) {
11378 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11379 kfree_rcu(vsi->tx_rings[i], rcu);
11380 WRITE_ONCE(vsi->tx_rings[i], NULL);
11381 WRITE_ONCE(vsi->rx_rings[i], NULL);
11382 if (vsi->xdp_rings)
11383 WRITE_ONCE(vsi->xdp_rings[i], NULL);
11389 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11390 * @vsi: the VSI being configured
11392 static int i40e_alloc_rings(struct i40e_vsi *vsi)
11394 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
11395 struct i40e_pf *pf = vsi->back;
11396 struct i40e_ring *ring;
11398 /* Set basic values in the rings to be used later during open() */
11399 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11400 /* allocate space for both Tx and Rx in one shot */
11401 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
11405 ring->queue_index = i;
11406 ring->reg_idx = vsi->base_queue + i;
11407 ring->ring_active = false;
11409 ring->netdev = vsi->netdev;
11410 ring->dev = &pf->pdev->dev;
11411 ring->count = vsi->num_tx_desc;
11414 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11415 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11416 ring->itr_setting = pf->tx_itr_default;
11417 WRITE_ONCE(vsi->tx_rings[i], ring++);
11419 if (!i40e_enabled_xdp_vsi(vsi))
11422 ring->queue_index = vsi->alloc_queue_pairs + i;
11423 ring->reg_idx = vsi->base_queue + ring->queue_index;
11424 ring->ring_active = false;
11426 ring->netdev = NULL;
11427 ring->dev = &pf->pdev->dev;
11428 ring->count = vsi->num_tx_desc;
11431 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11432 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11433 set_ring_xdp(ring);
11434 ring->itr_setting = pf->tx_itr_default;
11435 WRITE_ONCE(vsi->xdp_rings[i], ring++);
11438 ring->queue_index = i;
11439 ring->reg_idx = vsi->base_queue + i;
11440 ring->ring_active = false;
11442 ring->netdev = vsi->netdev;
11443 ring->dev = &pf->pdev->dev;
11444 ring->count = vsi->num_rx_desc;
11447 ring->itr_setting = pf->rx_itr_default;
11448 WRITE_ONCE(vsi->rx_rings[i], ring);
11454 i40e_vsi_clear_rings(vsi);
11459 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
11460 * @pf: board private structure
11461 * @vectors: the number of MSI-X vectors to request
11463 * Returns the number of vectors reserved, or error
11465 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
11467 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
11468 I40E_MIN_MSIX, vectors);
11470 dev_info(&pf->pdev->dev,
11471 "MSI-X vector reservation failed: %d\n", vectors);
11479 * i40e_init_msix - Setup the MSIX capability
11480 * @pf: board private structure
11482 * Work with the OS to set up the MSIX vectors needed.
11484 * Returns the number of vectors reserved or negative on failure
11486 static int i40e_init_msix(struct i40e_pf *pf)
11488 struct i40e_hw *hw = &pf->hw;
11489 int cpus, extra_vectors;
11493 int iwarp_requested = 0;
11495 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
11498 /* The number of vectors we'll request is made up of:
11499 * - 1 for the "other" cause (Admin Queue events, etc.)
11500 * - the number of LAN queue pairs
11501 * - queues being used for RSS.
11502 * We don't need as many as max_rss_size vectors;
11503 * we use rss_size instead in the calculation since that
11504 * is governed by the number of CPUs in the system.
11505 * - this assumes symmetric Tx/Rx pairing
11506 * - the number of VMDq pairs
11507 * - the CPU count within the NUMA node if iWARP is enabled
11508 * Once we count this up, try the request.
11510 * If we can't get what we want, we'll simplify to nearly nothing
11511 * and try again. If that still fails, we punt.
11513 vectors_left = hw->func_caps.num_msix_vectors;
11516 /* reserve one vector for miscellaneous handler */
11517 if (vectors_left) {
11522 /* reserve some vectors for the main PF traffic queues. Initially we
11523 * only reserve at most 50% of the available vectors, in the case that
11524 * the number of online CPUs is large. This ensures that we can enable
11525 * extra features as well. Once we've enabled the other features, we
11526 * will use any remaining vectors to reach as close as we can to the
11527 * number of online CPUs.
11529 cpus = num_online_cpus();
11530 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
11531 vectors_left -= pf->num_lan_msix;
11533 /* reserve one vector for sideband flow director */
11534 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11535 if (vectors_left) {
11536 pf->num_fdsb_msix = 1;
11540 pf->num_fdsb_msix = 0;
11544 /* can we reserve enough for iWARP? */
11545 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11546 iwarp_requested = pf->num_iwarp_msix;
11549 pf->num_iwarp_msix = 0;
11550 else if (vectors_left < pf->num_iwarp_msix)
11551 pf->num_iwarp_msix = 1;
11552 v_budget += pf->num_iwarp_msix;
11553 vectors_left -= pf->num_iwarp_msix;
11556 /* any vectors left over go for VMDq support */
11557 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
11558 if (!vectors_left) {
11559 pf->num_vmdq_msix = 0;
11560 pf->num_vmdq_qps = 0;
11562 int vmdq_vecs_wanted =
11563 pf->num_vmdq_vsis * pf->num_vmdq_qps;
11565 min_t(int, vectors_left, vmdq_vecs_wanted);
11567 /* if we're short on vectors for what's desired, we limit
11568 * the queues per vmdq. If this is still more than are
11569 * available, the user will need to change the number of
11570 * queues/vectors used by the PF later with the ethtool channels command.
11573 if (vectors_left < vmdq_vecs_wanted) {
11574 pf->num_vmdq_qps = 1;
11575 vmdq_vecs_wanted = pf->num_vmdq_vsis;
11576 vmdq_vecs = min_t(int,
11580 pf->num_vmdq_msix = pf->num_vmdq_qps;
11582 v_budget += vmdq_vecs;
11583 vectors_left -= vmdq_vecs;
11587 /* On systems with a large number of SMP cores, we previously limited
11588 * the number of vectors for num_lan_msix to be at most 50% of the
11589 * available vectors, to allow for other features. Now, we add back
11590 * the remaining vectors. However, we ensure that the total
11591 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11592 * calculate the number of vectors we can add without going over the
11593 * cap of CPUs. For systems with a small number of CPUs this will be zero.
11596 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11597 pf->num_lan_msix += extra_vectors;
11598 vectors_left -= extra_vectors;
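/* Illustrative sketch, not driver code: the two-phase LAN vector budget
 * above with made-up numbers. Phase 1 caps LAN at half the remaining
 * vectors so other features can claim some; phase 2 tops LAN back up
 * toward the CPU count with whatever is left.
 */
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int cpus = 16, vectors_left = 12;
	int lan, extra;

	lan = min_int(cpus, vectors_left / 2);	/* phase 1: 6 */
	vectors_left -= lan;
	vectors_left -= 3;	/* pretend FDSB/VMDq/iWARP took three */

	extra = min_int(cpus - lan, vectors_left);
	lan += extra;				/* phase 2: 6 + 3 = 9 */
	vectors_left -= extra;

	printf("lan=%d left=%d\n", lan, vectors_left);	/* lan=9 left=0 */
	return 0;
}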
11600 WARN(vectors_left < 0,
11601 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11603 v_budget += pf->num_lan_msix;
11604 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11606 if (!pf->msix_entries)
11609 for (i = 0; i < v_budget; i++)
11610 pf->msix_entries[i].entry = i;
11611 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11613 if (v_actual < I40E_MIN_MSIX) {
11614 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11615 kfree(pf->msix_entries);
11616 pf->msix_entries = NULL;
11617 pci_disable_msix(pf->pdev);
11620 } else if (v_actual == I40E_MIN_MSIX) {
11621 /* Adjust for minimal MSIX use */
11622 pf->num_vmdq_vsis = 0;
11623 pf->num_vmdq_qps = 0;
11624 pf->num_lan_qps = 1;
11625 pf->num_lan_msix = 1;
11627 } else if (v_actual != v_budget) {
11628 /* If we have limited resources, we will start with no vectors
11629 * for the special features and then allocate vectors to some
11630 * of these features based on the policy and at the end disable
11631 * the features that did not get any vectors.
11635 dev_info(&pf->pdev->dev,
11636 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11637 v_actual, v_budget);
11638 /* reserve the misc vector */
11639 vec = v_actual - 1;
11641 /* Scale vector usage down */
11642 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
11643 pf->num_vmdq_vsis = 1;
11644 pf->num_vmdq_qps = 1;
11646 /* partition out the remaining vectors */
11649 pf->num_lan_msix = 1;
11652 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11653 pf->num_lan_msix = 1;
11654 pf->num_iwarp_msix = 1;
11656 pf->num_lan_msix = 2;
11660 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11661 pf->num_iwarp_msix = min_t(int, (vec / 3),
11663 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11664 I40E_DEFAULT_NUM_VMDQ_VSI);
11666 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11667 I40E_DEFAULT_NUM_VMDQ_VSI);
11669 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11670 pf->num_fdsb_msix = 1;
11673 pf->num_lan_msix = min_t(int,
11674 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11676 pf->num_lan_qps = pf->num_lan_msix;
11681 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11682 (pf->num_fdsb_msix == 0)) {
11683 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11684 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11685 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11687 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11688 (pf->num_vmdq_msix == 0)) {
11689 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11690 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11693 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11694 (pf->num_iwarp_msix == 0)) {
11695 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11696 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11698 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11699 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11701 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11703 pf->num_iwarp_msix);
11709 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11710 * @vsi: the VSI being configured
11711 * @v_idx: index of the vector in the vsi struct
11713 * We allocate one q_vector. If allocation fails we return -ENOMEM.
11715 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
11717 struct i40e_q_vector *q_vector;
11719 /* allocate q_vector */
11720 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11724 q_vector->vsi = vsi;
11725 q_vector->v_idx = v_idx;
11726 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11729 netif_napi_add(vsi->netdev, &q_vector->napi,
11730 i40e_napi_poll, NAPI_POLL_WEIGHT);
11732 /* tie q_vector and vsi together */
11733 vsi->q_vectors[v_idx] = q_vector;
11739 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11740 * @vsi: the VSI being configured
11742 * We allocate one q_vector per queue interrupt; if allocation fails we return -ENOMEM.
11745 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11747 struct i40e_pf *pf = vsi->back;
11748 int err, v_idx, num_q_vectors;
11750 /* if not MSIX, give the one vector only to the LAN VSI */
11751 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11752 num_q_vectors = vsi->num_q_vectors;
11753 else if (vsi == pf->vsi[pf->lan_vsi])
11758 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11759 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
11768 i40e_free_q_vector(vsi, v_idx);
11774 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11775 * @pf: board private structure to initialize
11777 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11782 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11783 vectors = i40e_init_msix(pf);
11785 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
11786 I40E_FLAG_IWARP_ENABLED |
11787 I40E_FLAG_RSS_ENABLED |
11788 I40E_FLAG_DCB_CAPABLE |
11789 I40E_FLAG_DCB_ENABLED |
11790 I40E_FLAG_SRIOV_ENABLED |
11791 I40E_FLAG_FD_SB_ENABLED |
11792 I40E_FLAG_FD_ATR_ENABLED |
11793 I40E_FLAG_VMDQ_ENABLED);
11794 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11796 /* rework the queue expectations without MSIX */
11797 i40e_determine_queue_usage(pf);
11801 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11802 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11803 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11804 vectors = pci_enable_msi(pf->pdev);
11806 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11808 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11810 vectors = 1; /* one MSI or Legacy vector */
11813 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11814 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11816 /* set up vector assignment tracking */
11817 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11818 pf->irq_pile = kzalloc(size, GFP_KERNEL);
11822 pf->irq_pile->num_entries = vectors;
11824 /* track first vector for misc interrupts, ignore return */
11825 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
11831 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11832 * @pf: private board data structure
11834 * Restore the interrupt scheme that was cleared when we suspended the
11835 * device. This should be called during resume to re-allocate the q_vectors
11836 * and reacquire IRQs.
11838 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11842 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
11843 * scheme. We need to re-enable them here in order to attempt to
11844 * re-acquire the MSI or MSI-X vectors
11846 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11848 err = i40e_init_interrupt_scheme(pf);
11852 /* Now that we've re-acquired IRQs, we need to remap the vectors and
11853 * rings together again.
11855 for (i = 0; i < pf->num_alloc_vsi; i++) {
11857 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11860 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11864 err = i40e_setup_misc_vector(pf);
11868 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11869 i40e_client_update_msix_info(pf);
11876 i40e_vsi_free_q_vectors(pf->vsi[i]);
11883 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11884 * non queue events in recovery mode
11885 * @pf: board private structure
11887 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11888 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
11889 * This is handled differently than in non-recovery mode since no Tx/Rx resources
11890 * are being allocated.
11892 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11896 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11897 err = i40e_setup_misc_vector(pf);
11900 dev_info(&pf->pdev->dev,
11901 "MSI-X misc vector request failed, error %d\n",
11906 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11908 err = request_irq(pf->pdev->irq, i40e_intr, flags,
11912 dev_info(&pf->pdev->dev,
11913 "MSI/legacy misc vector request failed, error %d\n",
11917 i40e_enable_misc_int_causes(pf);
11918 i40e_irq_dynamic_enable_icr0(pf);
11925 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
11926 * @pf: board private structure
11928 * This sets up the handler for MSIX 0, which is used to manage the
11929 * non-queue interrupts, e.g. AdminQ and errors. This is not used
11930 * when in MSI or Legacy interrupt mode.
11932 static int i40e_setup_misc_vector(struct i40e_pf *pf)
11934 struct i40e_hw *hw = &pf->hw;
11937 /* Only request the IRQ once, the first time through. */
11938 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11939 err = request_irq(pf->msix_entries[0].vector,
11940 i40e_intr, 0, pf->int_name, pf);
11942 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11943 dev_info(&pf->pdev->dev,
11944 "request_irq for %s failed: %d\n",
11945 pf->int_name, err);
11950 i40e_enable_misc_int_causes(pf);
11952 /* associate no queues to the misc vector */
11953 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11954 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
11958 i40e_irq_dynamic_enable_icr0(pf);
11964 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11965 * @vsi: Pointer to vsi structure
11966 * @seed: Buffer to store the hash keys
11967 * @lut: Buffer to store the lookup table entries
11968 * @lut_size: Size of buffer to store the lookup table entries
11970 * Return 0 on success, negative on failure
11972 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11973 u8 *lut, u16 lut_size)
11975 struct i40e_pf *pf = vsi->back;
11976 struct i40e_hw *hw = &pf->hw;
11980 ret = i40e_aq_get_rss_key(hw, vsi->id,
11981 (struct i40e_aqc_get_set_rss_key_data *)seed);
11983 dev_info(&pf->pdev->dev,
11984 "Cannot get RSS key, err %s aq_err %s\n",
11985 i40e_stat_str(&pf->hw, ret),
11986 i40e_aq_str(&pf->hw,
11987 pf->hw.aq.asq_last_status));
11993 bool pf_lut = vsi->type == I40E_VSI_MAIN;
11995 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11997 dev_info(&pf->pdev->dev,
11998 "Cannot get RSS lut, err %s aq_err %s\n",
11999 i40e_stat_str(&pf->hw, ret),
12000 i40e_aq_str(&pf->hw,
12001 pf->hw.aq.asq_last_status));
12010 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
12011 * @vsi: Pointer to vsi structure
12012 * @seed: RSS hash seed
12013 * @lut: Lookup table
12014 * @lut_size: Lookup table size
12016 * Returns 0 on success, negative on failure
12018 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
12019 const u8 *lut, u16 lut_size)
12021 struct i40e_pf *pf = vsi->back;
12022 struct i40e_hw *hw = &pf->hw;
12023 u16 vf_id = vsi->vf_id;
12026 /* Fill out hash function seed */
12028 u32 *seed_dw = (u32 *)seed;
12030 if (vsi->type == I40E_VSI_MAIN) {
12031 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12032 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
12033 } else if (vsi->type == I40E_VSI_SRIOV) {
12034 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
12035 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
12037 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
12042 u32 *lut_dw = (u32 *)lut;
12044 if (vsi->type == I40E_VSI_MAIN) {
12045 if (lut_size != I40E_HLUT_ARRAY_SIZE)
12047 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12048 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
12049 } else if (vsi->type == I40E_VSI_SRIOV) {
12050 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
12052 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12053 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
12055 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12064 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
12065 * @vsi: Pointer to VSI structure
12066 * @seed: Buffer to store the keys
12067 * @lut: Buffer to store the lookup table entries
12068 * @lut_size: Size of buffer to store the lookup table entries
12070 * Returns 0 on success, negative on failure
12072 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
12073 u8 *lut, u16 lut_size)
12075 struct i40e_pf *pf = vsi->back;
12076 struct i40e_hw *hw = &pf->hw;
12080 u32 *seed_dw = (u32 *)seed;
12082 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12083 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
12086 u32 *lut_dw = (u32 *)lut;
12088 if (lut_size != I40E_HLUT_ARRAY_SIZE)
12090 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12091 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
12098 * i40e_config_rss - Configure RSS keys and lut
12099 * @vsi: Pointer to VSI structure
12100 * @seed: RSS hash seed
12101 * @lut: Lookup table
12102 * @lut_size: Lookup table size
12104 * Returns 0 on success, negative on failure
12106 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12108 struct i40e_pf *pf = vsi->back;
12110 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12111 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
12113 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
12117 * i40e_get_rss - Get RSS keys and lut
12118 * @vsi: Pointer to VSI structure
12119 * @seed: Buffer to store the keys
12120 * @lut: Buffer to store the lookup table entries
12121 * @lut_size: Size of buffer to store the lookup table entries
12123 * Returns 0 on success, negative on failure
12125 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12127 struct i40e_pf *pf = vsi->back;
12129 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12130 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
12132 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
12136 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
12137 * @pf: Pointer to board private structure
12138 * @lut: Lookup table
12139 * @rss_table_size: Lookup table size
12140 * @rss_size: Range of queue numbers used for hashing
12142 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
12143 u16 rss_table_size, u16 rss_size)
12147 for (i = 0; i < rss_table_size; i++)
12148 lut[i] = i % rss_size;
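/* Illustrative sketch, not driver code: the round-robin fill above. With
 * a 16-entry table and rss_size of 4 the LUT repeats 0,1,2,3 so hash
 * buckets spread evenly across the active queues.
 */
#include <stdio.h>

int main(void)
{
	unsigned char lut[16];
	int i, rss_size = 4;

	for (i = 0; i < 16; i++)
		lut[i] = (unsigned char)(i % rss_size);
	for (i = 0; i < 16; i++)
		printf("%u ", lut[i]);	/* 0 1 2 3 0 1 2 3 ... */
	printf("\n");
	return 0;
}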
12152 * i40e_pf_config_rss - Prepare for RSS if used
12153 * @pf: board private structure
12155 static int i40e_pf_config_rss(struct i40e_pf *pf)
12157 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12158 u8 seed[I40E_HKEY_ARRAY_SIZE];
12160 struct i40e_hw *hw = &pf->hw;
12165 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
12166 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
12167 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
12168 hena |= i40e_pf_get_default_rss_hena(pf);
12170 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
12171 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
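/* Illustrative sketch, not driver code: splitting the 64-bit HENA value
 * across the two 32-bit registers as the writes above do, plus the
 * reverse recombination done on the read path.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hena = 0x1234567890ABCDEFull;
	uint32_t lo = (uint32_t)hena;		/* written to HENA(0) */
	uint32_t hi = (uint32_t)(hena >> 32);	/* written to HENA(1) */
	uint64_t back = (uint64_t)lo | ((uint64_t)hi << 32);

	printf("round-trip ok: %d\n", hena == back);	/* prints 1 */
	return 0;
}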
12173 /* Determine the RSS table size based on the hardware capabilities */
12174 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
12175 reg_val = (pf->rss_table_size == 512) ?
12176 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
12177 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
12178 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
12180 /* Determine the RSS size of the VSI */
12181 if (!vsi->rss_size) {
12183 /* If the firmware does something weird during VSI init, we
12184 * could end up with zero TCs. Check for that to avoid
12185 * divide-by-zero. It probably won't pass traffic, but it also won't crash.
12188 qcount = vsi->num_queue_pairs /
12189 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
12190 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12192 if (!vsi->rss_size)
12195 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
12199 /* Use user configured lut if there is one, otherwise use default */
12200 if (vsi->rss_lut_user)
12201 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
12203 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
12205 /* Use user configured hash key if there is one, otherwise
12208 if (vsi->rss_hkey_user)
12209 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
12211 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
12212 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
12219 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12220 * @pf: board private structure
12221 * @queue_count: the requested queue count for rss.
12223 * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
12224 * count, which may differ from the requested queue count.
12225 * Note: expects to be called while under rtnl_lock()
12227 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
12229 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12232 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
12235 queue_count = min_t(int, queue_count, num_online_cpus());
12236 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
12238 if (queue_count != vsi->num_queue_pairs) {
12241 vsi->req_queue_pairs = queue_count;
12242 i40e_prep_for_reset(pf);
12243 if (test_bit(__I40E_IN_REMOVE, pf->state))
12244 return pf->alloc_rss_size;
12246 pf->alloc_rss_size = new_rss_size;
12248 i40e_reset_and_rebuild(pf, true, true);
12250 /* Discard the user-configured hash keys and lut if fewer
12251 * queues are enabled.
12253 if (queue_count < vsi->rss_size) {
12254 i40e_clear_rss_config_user(vsi);
12255 dev_dbg(&pf->pdev->dev,
12256 "discard user configured hash keys and lut\n");
12259 /* Reset vsi->rss_size, as number of enabled queues changed */
12260 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
12261 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12263 i40e_pf_config_rss(pf);
12265 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
12266 vsi->req_queue_pairs, pf->rss_size_max);
12267 return pf->alloc_rss_size;
12271 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12272 * @pf: board private structure
12274 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
12276 i40e_status status;
12277 bool min_valid, max_valid;
12278 u32 max_bw, min_bw;
12280 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
12281 &min_valid, &max_valid);
12285 pf->min_bw = min_bw;
12287 pf->max_bw = max_bw;
12294 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12295 * @pf: board private structure
12297 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
12299 struct i40e_aqc_configure_partition_bw_data bw_data;
12300 i40e_status status;
12302 memset(&bw_data, 0, sizeof(bw_data));
12304 /* Set the valid bit for this PF */
12305 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
12306 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
12307 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
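/* Illustrative sketch, not driver code: the valid-bits convention used in
 * the message above, where one request carries per-PF arrays and a bitmap
 * telling the firmware which entries to apply. Types are simplified.
 */
#include <stdint.h>
#include <stdio.h>

struct bw_msg {
	uint16_t pf_valid_bits;
	uint8_t max_bw[16];
	uint8_t min_bw[16];
};

int main(void)
{
	struct bw_msg m = { 0 };
	int pf_id = 3;

	m.pf_valid_bits = (uint16_t)(1u << pf_id);	/* only entry 3 counts */
	m.max_bw[pf_id] = 100;
	m.min_bw[pf_id] = 10;

	printf("valid=0x%04x\n", m.pf_valid_bits);	/* valid=0x0008 */
	return 0;
}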
12309 /* Set the new bandwidths */
12310 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
12316 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12317 * @pf: board private structure
12319 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
12321 /* Commit temporary BW setting to permanent NVM image */
12322 enum i40e_admin_queue_err last_aq_status;
12326 if (pf->hw.partition_id != 1) {
12327 dev_info(&pf->pdev->dev,
12328 "Commit BW only works on partition 1! This is partition %d",
12329 pf->hw.partition_id);
12330 ret = I40E_NOT_SUPPORTED;
12331 goto bw_commit_out;
12334 /* Acquire NVM for read access */
12335 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
12336 last_aq_status = pf->hw.aq.asq_last_status;
12338 dev_info(&pf->pdev->dev,
12339 "Cannot acquire NVM for read access, err %s aq_err %s\n",
12340 i40e_stat_str(&pf->hw, ret),
12341 i40e_aq_str(&pf->hw, last_aq_status));
12342 goto bw_commit_out;
12345 /* Read word 0x10 of NVM - SW compatibility word 1 */
12346 ret = i40e_aq_read_nvm(&pf->hw,
12347 I40E_SR_NVM_CONTROL_WORD,
12348 0x10, sizeof(nvm_word), &nvm_word,
12350 /* Save off last admin queue command status before releasing the NVM */
12353 last_aq_status = pf->hw.aq.asq_last_status;
12354 i40e_release_nvm(&pf->hw);
12356 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
12357 i40e_stat_str(&pf->hw, ret),
12358 i40e_aq_str(&pf->hw, last_aq_status));
12359 goto bw_commit_out;
12362 /* Wait a bit for NVM release to complete */
12365 /* Acquire NVM for write access */
12366 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
12367 last_aq_status = pf->hw.aq.asq_last_status;
12369 dev_info(&pf->pdev->dev,
12370 "Cannot acquire NVM for write access, err %s aq_err %s\n",
12371 i40e_stat_str(&pf->hw, ret),
12372 i40e_aq_str(&pf->hw, last_aq_status));
12373 goto bw_commit_out;
12375 /* Write it back out unchanged to initiate update NVM,
12376 * which will force a write of the shadow (alt) RAM to
12377 * the NVM - thus storing the bandwidth values permanently.
12379 ret = i40e_aq_update_nvm(&pf->hw,
12380 I40E_SR_NVM_CONTROL_WORD,
12381 0x10, sizeof(nvm_word),
12382 &nvm_word, true, 0, NULL);
12383 /* Save off last admin queue command status before releasing the NVM */
12386 last_aq_status = pf->hw.aq.asq_last_status;
12387 i40e_release_nvm(&pf->hw);
12389 dev_info(&pf->pdev->dev,
12390 "BW settings NOT SAVED, err %s aq_err %s\n",
12391 i40e_stat_str(&pf->hw, ret),
12392 i40e_aq_str(&pf->hw, last_aq_status));
12399 * i40e_is_total_port_shutdown_enabled - read NVM and return whether
12400 * the total port shutdown feature is enabled for this PF
12401 * @pf: board private structure
12403 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12405 #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
12406 #define I40E_FEATURES_ENABLE_PTR 0x2A
12407 #define I40E_CURRENT_SETTING_PTR 0x2B
12408 #define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
12409 #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
12410 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
12411 #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
12412 i40e_status read_status = I40E_SUCCESS;
12413 u16 sr_emp_sr_settings_ptr = 0;
12414 u16 features_enable = 0;
12415 u16 link_behavior = 0;
12418 read_status = i40e_read_nvm_word(&pf->hw,
12419 I40E_SR_EMP_SR_SETTINGS_PTR,
12420 &sr_emp_sr_settings_ptr);
12423 read_status = i40e_read_nvm_word(&pf->hw,
12424 sr_emp_sr_settings_ptr +
12425 I40E_FEATURES_ENABLE_PTR,
12429 if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12430 read_status = i40e_read_nvm_module_data(&pf->hw,
12431 I40E_SR_EMP_SR_SETTINGS_PTR,
12432 I40E_CURRENT_SETTING_PTR,
12433 I40E_LINK_BEHAVIOR_WORD_OFFSET,
12434 I40E_LINK_BEHAVIOR_WORD_LENGTH,
12438 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
12439 ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
12444 dev_warn(&pf->pdev->dev,
12445 "total-port-shutdown feature is off due to read nvm error: %s\n",
12446 i40e_stat_str(&pf->hw, read_status));
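/* Illustrative sketch, not driver code: extracting one port's 4-bit
 * link-behavior field from the packed NVM word parsed above, where the
 * word holds fields for ports 3..0 from high nibble to low.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t word = 0x4321;		/* port0=1 port1=2 port2=3 port3=4 */
	int port = 2;
	uint16_t field = (uint16_t)(word >> (port * 4)) & 0xFu;

	printf("port %d field 0x%x forced=%u\n",
	       port, field, field & 1u);	/* field 0x3 forced=1 */
	return 0;
}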
12451 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12452 * @pf: board private structure to initialize
12454 * i40e_sw_init initializes the Adapter private data structure.
12455 * Fields are initialized based on PCI device information and
12456 * OS network device settings (MTU size).
12458 static int i40e_sw_init(struct i40e_pf *pf)
12464 /* Set default capability flags */
12465 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
12466 I40E_FLAG_MSI_ENABLED |
12467 I40E_FLAG_MSIX_ENABLED;
12469 /* Set default ITR */
12470 pf->rx_itr_default = I40E_ITR_RX_DEF;
12471 pf->tx_itr_default = I40E_ITR_TX_DEF;
12473 /* Depending on PF configurations, it is possible that the RSS
12474 * maximum might end up larger than the available queues
12476 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12477 pf->alloc_rss_size = 1;
12478 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12479 pf->rss_size_max = min_t(int, pf->rss_size_max,
12480 pf->hw.func_caps.num_tx_qp);
12482 /* find the next higher power-of-2 of num cpus */
12483 pow = roundup_pow_of_two(num_online_cpus());
12484 pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
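/* Illustrative sketch, not driver code: clamping rss_size_max to the next
 * power of two at or above the CPU count, as above. A portable stand-in
 * for roundup_pow_of_two(); the kernel computes it from fls()/ilog2().
 */
#include <stdio.h>

static unsigned long roundup_p2(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long rss_max = 512, cpus = 6;
	unsigned long pow2 = roundup_p2(cpus);		/* 8 */

	if (rss_max > pow2)
		rss_max = pow2;
	printf("%lu\n", rss_max);			/* prints 8 */
	return 0;
}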
12486 if (pf->hw.func_caps.rss) {
12487 pf->flags |= I40E_FLAG_RSS_ENABLED;
12488 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12489 num_online_cpus());
12492 /* MFP mode enabled */
12493 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12494 pf->flags |= I40E_FLAG_MFP_ENABLED;
12495 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12496 if (i40e_get_partition_bw_setting(pf)) {
12497 dev_warn(&pf->pdev->dev,
12498 "Could not get partition bw settings\n");
12500 dev_info(&pf->pdev->dev,
12501 "Partition BW Min = %8.8x, Max = %8.8x\n",
12502 pf->min_bw, pf->max_bw);
12504 /* nudge the Tx scheduler */
12505 i40e_set_partition_bw_setting(pf);
12509 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12510 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12511 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
12512 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
12513 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
12514 pf->hw.num_partitions > 1)
12515 dev_info(&pf->pdev->dev,
12516 "Flow Director Sideband mode Disabled in MFP mode\n");
12518 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12519 pf->fdir_pf_filter_count =
12520 pf->hw.func_caps.fd_filters_guaranteed;
12521 pf->hw.fdir_shared_filter_count =
12522 pf->hw.func_caps.fd_filters_best_effort;
12525 if (pf->hw.mac.type == I40E_MAC_X722) {
12526 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12527 I40E_HW_128_QP_RSS_CAPABLE |
12528 I40E_HW_ATR_EVICT_CAPABLE |
12529 I40E_HW_WB_ON_ITR_CAPABLE |
12530 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12531 I40E_HW_NO_PCI_LINK_CHECK |
12532 I40E_HW_USE_SET_LLDP_MIB |
12533 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12534 I40E_HW_PTP_L4_CAPABLE |
12535 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12536 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12538 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12539 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12540 I40E_FDEVICT_PCTYPE_DEFAULT) {
12541 dev_warn(&pf->pdev->dev,
12542 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12543 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12545 } else if ((pf->hw.aq.api_maj_ver > 1) ||
12546 ((pf->hw.aq.api_maj_ver == 1) &&
12547 (pf->hw.aq.api_min_ver > 4))) {
12548 /* Supported in FW API version higher than 1.4 */
12549 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12552 /* Enable HW ATR eviction if possible */
12553 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12554 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12556 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12557 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12558 (pf->hw.aq.fw_maj_ver < 4))) {
12559 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12560 /* No DCB support for FW < v4.33 */
12561 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12564 /* Disable FW LLDP if FW < v4.3 */
12565 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12566 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12567 (pf->hw.aq.fw_maj_ver < 4)))
12568 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12570 /* Use the FW Set LLDP MIB API if FW > v4.40 */
12571 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12572 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12573 (pf->hw.aq.fw_maj_ver >= 5)))
12574 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12576 /* Enable PTP L4 if FW > v6.0 */
12577 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12578 pf->hw.aq.fw_maj_ver >= 6)
12579 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12581 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12582 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12583 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12584 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12587 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12588 pf->flags |= I40E_FLAG_IWARP_ENABLED;
12589 /* iWARP needs one extra vector for CQP, just like MISC. */
12590 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12592 /* Stopping FW LLDP engine is supported on XL710 and X722
12593 * starting from FW versions determined in i40e_init_adminq.
12594 * Stopping the FW LLDP engine is not supported on XL710
12595 * if NPAR is functioning so unset this hw flag in this case.
12597 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12598 pf->hw.func_caps.npar_enable &&
12599 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12600 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12602 #ifdef CONFIG_PCI_IOV
12603 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12604 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12605 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12606 pf->num_req_vfs = min_t(int,
12607 pf->hw.func_caps.num_vfs,
12608 I40E_MAX_VF_COUNT);
12610 #endif /* CONFIG_PCI_IOV */
12611 pf->eeprom_version = 0xDEAD;
12612 pf->lan_veb = I40E_NO_VEB;
12613 pf->lan_vsi = I40E_NO_VSI;
12615 /* By default FW has this off for performance reasons */
12616 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12618 /* set up queue assignment tracking */
12619 size = sizeof(struct i40e_lump_tracking)
12620 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12621 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12622 if (!pf->qp_pile) {
12626 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12628 pf->tx_timeout_recovery_level = 1;
12630 if (pf->hw.mac.type != I40E_MAC_X722 &&
12631 i40e_is_total_port_shutdown_enabled(pf)) {
12632 /* Link down on close must be on when total port shutdown
12633 * is enabled for a given port
12635 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12636 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12637 dev_info(&pf->pdev->dev,
12638 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12640 mutex_init(&pf->switch_mutex);
12647 * i40e_set_ntuple - set the ntuple feature flag and take action
12648 * @pf: board private structure to initialize
12649 * @features: the feature set that the stack is suggesting
12651 * returns a bool to indicate if reset needs to happen
12653 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12655 bool need_reset = false;
12657 /* Check if Flow Director n-tuple support was enabled or disabled. If
12658 * the state changed, we need to reset.
12660 if (features & NETIF_F_NTUPLE) {
12661 /* Enable filters and mark for reset */
12662 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12664 /* enable FD_SB only if there is an MSI-X vector and no cloud filters exist */
12667 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12668 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12669 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12672 /* turn off filters, mark for reset and clear SW filter list */
12673 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12675 i40e_fdir_filter_exit(pf);
12677 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12678 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12679 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12681 /* reset fd counters */
12682 pf->fd_add_err = 0;
12683 pf->fd_atr_cnt = 0;
12684 /* if ATR was auto disabled it can be re-enabled. */
12685 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12686 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12687 (I40E_DEBUG_FD & pf->hw.debug_mask))
12688 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12694 * i40e_clear_rss_lut - clear the rx hash lookup table
12695 * @vsi: the VSI being configured
12697 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12699 struct i40e_pf *pf = vsi->back;
12700 struct i40e_hw *hw = &pf->hw;
12701 u16 vf_id = vsi->vf_id;
12704 if (vsi->type == I40E_VSI_MAIN) {
12705 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12706 wr32(hw, I40E_PFQF_HLUT(i), 0);
12707 } else if (vsi->type == I40E_VSI_SRIOV) {
12708 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12709 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12710 } else {
12711 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12712 }
12713 }
12715 /**
12716 * i40e_set_features - set the netdev feature flags
12717 * @netdev: ptr to the netdev being adjusted
12718 * @features: the feature set that the stack is suggesting
12719 * Note: expects to be called while under rtnl_lock()
12720 **/
12721 static int i40e_set_features(struct net_device *netdev,
12722 netdev_features_t features)
12723 {
12724 struct i40e_netdev_priv *np = netdev_priv(netdev);
12725 struct i40e_vsi *vsi = np->vsi;
12726 struct i40e_pf *pf = vsi->back;
12727 bool need_reset;
12729 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12730 i40e_pf_config_rss(pf);
12731 else if (!(features & NETIF_F_RXHASH) &&
12732 netdev->features & NETIF_F_RXHASH)
12733 i40e_clear_rss_lut(vsi);
12735 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12736 i40e_vlan_stripping_enable(vsi);
12737 else
12738 i40e_vlan_stripping_disable(vsi);
12740 if (!(features & NETIF_F_HW_TC) &&
12741 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12742 dev_err(&pf->pdev->dev,
12743 "Offloaded tc filters active, can't turn hw_tc_offload off");
12747 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12748 i40e_del_all_macvlans(vsi);
12750 need_reset = i40e_set_ntuple(pf, features);
12752 if (need_reset)
12753 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12755 return 0;
12756 }
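/* udp_tunnel_nic callbacks: the core tracks VXLAN/GENEVE ports in the
 * tables declared in pf->udp_tunnel_nic and invokes these handlers to
 * program or remove the matching firmware filter. The firmware filter
 * index returned on add is stashed with udp_tunnel_nic_set_port_priv()
 * and comes back as ti->hw_priv on removal.
 */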
12758 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12759 unsigned int table, unsigned int idx,
12760 struct udp_tunnel_info *ti)
12761 {
12762 struct i40e_netdev_priv *np = netdev_priv(netdev);
12763 struct i40e_hw *hw = &np->vsi->back->hw;
12764 u8 type, filter_index;
12765 i40e_status ret;
12767 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12768 I40E_AQC_TUNNEL_TYPE_NGE;
12770 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12771 NULL);
12772 if (ret) {
12773 netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12774 i40e_stat_str(hw, ret),
12775 i40e_aq_str(hw, hw->aq.asq_last_status));
12776 return -EIO;
12777 }
12779 udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
12780 return 0;
12781 }
12783 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12784 unsigned int table, unsigned int idx,
12785 struct udp_tunnel_info *ti)
12786 {
12787 struct i40e_netdev_priv *np = netdev_priv(netdev);
12788 struct i40e_hw *hw = &np->vsi->back->hw;
12789 i40e_status ret;
12791 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12792 if (ret) {
12793 netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
12794 i40e_stat_str(hw, ret),
12795 i40e_aq_str(hw, hw->aq.asq_last_status));
12796 return -EIO;
12797 }
12799 return 0;
12800 }
12802 static int i40e_get_phys_port_id(struct net_device *netdev,
12803 struct netdev_phys_item_id *ppid)
12804 {
12805 struct i40e_netdev_priv *np = netdev_priv(netdev);
12806 struct i40e_pf *pf = np->vsi->back;
12807 struct i40e_hw *hw = &pf->hw;
12809 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12810 return -EOPNOTSUPP;
12812 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12813 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12815 return 0;
12816 }
12818 /**
12819 * i40e_ndo_fdb_add - add an entry to the hardware database
12820 * @ndm: the input from the stack
12821 * @tb: pointer to array of nladdr (unused)
12822 * @dev: the net device pointer
12823 * @addr: the MAC address entry being added
12824 * @vid: VLAN ID
12825 * @flags: instructions from stack about fdb operation
12826 * @extack: netlink extended ack, unused currently
12827 **/
12828 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12829 struct net_device *dev,
12830 const unsigned char *addr, u16 vid,
12831 u16 flags,
12832 struct netlink_ext_ack *extack)
12833 {
12834 struct i40e_netdev_priv *np = netdev_priv(dev);
12835 struct i40e_pf *pf = np->vsi->back;
12836 int err = 0;
12838 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12839 return -EOPNOTSUPP;
12841 if (vid) {
12842 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12843 return -EINVAL;
12844 }
12846 /* Hardware does not support aging addresses so if a
12847 * ndm_state is given only allow permanent addresses
12848 */
12849 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12850 netdev_info(dev, "FDB only supports static addresses\n");
12854 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12855 err = dev_uc_add_excl(dev, addr);
12856 else if (is_multicast_ether_addr(addr))
12857 err = dev_mc_add_excl(dev, addr);
12858 else
12859 err = -EINVAL;
12861 /* Only return duplicate errors if NLM_F_EXCL is set */
12862 if (err == -EEXIST && !(flags & NLM_F_EXCL))
12863 err = 0;
12865 return err;
12866 }
12868 /**
12869 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12870 * @dev: the netdev being configured
12871 * @nlh: RTNL message
12872 * @flags: bridge flags
12873 * @extack: netlink extended ack
12875 * Inserts a new hardware bridge if not already created and
12876 * enables the bridging mode requested (VEB or VEPA). If the
12877 * hardware bridge has already been inserted and the request
12878 * is to change the mode then that requires a PF reset to
12879 * allow rebuild of the components with required hardware
12880 * bridge mode enabled.
12882 * Note: expects to be called while under rtnl_lock()
12883 **/
12884 static int i40e_ndo_bridge_setlink(struct net_device *dev,
12885 struct nlmsghdr *nlh,
12886 u16 flags,
12887 struct netlink_ext_ack *extack)
12888 {
12889 struct i40e_netdev_priv *np = netdev_priv(dev);
12890 struct i40e_vsi *vsi = np->vsi;
12891 struct i40e_pf *pf = vsi->back;
12892 struct i40e_veb *veb = NULL;
12893 struct nlattr *attr, *br_spec;
12894 int i, rem;
12896 /* Only for PF VSI for now */
12897 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12898 return -EOPNOTSUPP;
12900 /* Find the HW bridge for PF VSI */
12901 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12902 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12903 veb = pf->veb[i];
12904 }
12906 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12907 if (!br_spec) return -EINVAL;
12908 nla_for_each_nested(attr, br_spec, rem) {
12909 __u16 mode;
12911 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12912 continue;
12914 mode = nla_get_u16(attr);
12915 if ((mode != BRIDGE_MODE_VEPA) &&
12916 (mode != BRIDGE_MODE_VEB))
12917 return -EINVAL;
12919 /* Insert a new HW bridge */
12920 if (!veb) {
12921 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12922 vsi->tc_config.enabled_tc);
12923 if (veb) {
12924 veb->bridge_mode = mode;
12925 i40e_config_bridge_mode(veb);
12926 } else {
12927 /* No Bridge HW offload available */
12928 return -ENOENT;
12929 }
12930 break;
12931 } else if (mode != veb->bridge_mode) {
12932 /* Existing HW bridge but different mode needs reset */
12933 veb->bridge_mode = mode;
12934 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
12935 if (mode == BRIDGE_MODE_VEB)
12936 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12937 else
12938 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12939 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12940 break;
12941 }
12942 }
12944 return 0;
12945 }
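/* In VEB mode the hardware bridge switches traffic between VSIs on the
 * same uplink locally; in VEPA mode every frame is hairpinned through
 * the adjacent switch. Because the bridge mode is baked into the switch
 * configuration, changing it on a live bridge costs a full PF reset, as
 * issued above.
 */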
12947 /**
12948 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12949 * @skb: skb buff
12950 * @pid: process ID
12951 * @seq: RTNL message seq #
12952 * @dev: the netdev being configured
12953 * @filter_mask: unused
12954 * @nlflags: netlink flags passed in
12956 * Return the mode in which the hardware bridge is operating in
12957 * VEB or VEPA.
12958 **/
12959 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12960 struct net_device *dev,
12961 u32 __always_unused filter_mask,
12962 int nlflags)
12963 {
12964 struct i40e_netdev_priv *np = netdev_priv(dev);
12965 struct i40e_vsi *vsi = np->vsi;
12966 struct i40e_pf *pf = vsi->back;
12967 struct i40e_veb *veb = NULL;
12968 int i;
12970 /* Only for PF VSI for now */
12971 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12972 return -EOPNOTSUPP;
12974 /* Find the HW bridge for the PF VSI */
12975 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12976 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12977 veb = pf->veb[i];
12978 }
12980 if (!veb)
12981 return 0;
12983 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12984 0, 0, nlflags, filter_mask, NULL);
12985 }
12987 /**
12988 * i40e_features_check - Validate encapsulated packet conforms to limits
12989 * @skb: skb buff
12990 * @dev: This physical port's netdev
12991 * @features: Offload features that the stack believes apply
12992 **/
12993 static netdev_features_t i40e_features_check(struct sk_buff *skb,
12994 struct net_device *dev,
12995 netdev_features_t features)
12996 {
12997 size_t len;
12999 /* No point in doing any of this if neither checksum nor GSO are
13000 * being requested for this frame. We can rule out both by just
13001 * checking for CHECKSUM_PARTIAL
13002 */
13003 if (skb->ip_summed != CHECKSUM_PARTIAL)
13004 return features;
13006 /* We cannot support GSO if the MSS is going to be less than
13007 * 64 bytes. If it is then we need to drop support for GSO.
13008 */
13009 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
13010 features &= ~NETIF_F_GSO_MASK;
13012 /* MACLEN can support at most 63 words */
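/* The mask test below fails for any offset that is odd or larger than
 * 126 bytes: MACLEN is expressed in 2-byte words with a maximum value
 * of 63, so "len & ~(63 * 2)" is nonzero exactly when len cannot be
 * represented in the descriptor field.
 */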
13013 len = skb_network_header(skb) - skb->data;
13014 if (len & ~(63 * 2))
13015 goto out_err;
13017 /* IPLEN and EIPLEN can support at most 127 dwords */
13018 len = skb_transport_header(skb) - skb_network_header(skb);
13019 if (len & ~(127 * 4))
13020 goto out_err;
13022 if (skb->encapsulation) {
13023 /* L4TUNLEN can support 127 words */
13024 len = skb_inner_network_header(skb) - skb_transport_header(skb);
13025 if (len & ~(127 * 2))
13026 goto out_err;
13028 /* IPLEN can support at most 127 dwords */
13029 len = skb_inner_transport_header(skb) -
13030 skb_inner_network_header(skb);
13031 if (len & ~(127 * 4))
13032 goto out_err;
13033 }
13035 /* No need to validate L4LEN as TCP is the only protocol with a
13036 * flexible value and we support all possible values supported
13037 * by TCP, which is at most 15 dwords
13038 */
13040 return features;
13041 out_err:
13042 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13043 }
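/* Note that clearing NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK above does
 * not drop the frame; it only makes the stack fall back to software
 * checksumming and GSO for packets whose header layout exceeds the
 * descriptor limits.
 */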
13045 /**
13046 * i40e_xdp_setup - add/remove an XDP program
13047 * @vsi: VSI to be changed
13048 * @prog: XDP program
13049 * @extack: netlink extended ack
13050 **/
13051 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
13052 struct netlink_ext_ack *extack)
13053 {
13054 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
13055 struct i40e_pf *pf = vsi->back;
13056 struct bpf_prog *old_prog;
13057 bool need_reset;
13058 int i;
13060 /* Don't allow frames that span over multiple buffers */
13061 if (frame_size > vsi->rx_buf_len) {
13062 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
13063 return -EINVAL;
13064 }
13066 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
13067 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
13069 if (need_reset)
13070 i40e_prep_for_reset(pf);
13072 /* VSI shall be deleted in a moment, just return EINVAL */
13073 if (test_bit(__I40E_IN_REMOVE, pf->state))
13074 return -EINVAL;
13076 old_prog = xchg(&vsi->xdp_prog, prog);
13078 if (need_reset) {
13079 if (!prog)
13080 /* Wait until ndo_xsk_wakeup completes. */
13081 synchronize_rcu();
13082 i40e_reset_and_rebuild(pf, true, true);
13083 }
13085 for (i = 0; i < vsi->num_queue_pairs; i++)
13086 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
13088 if (old_prog)
13089 bpf_prog_put(old_prog);
13091 /* Kick start the NAPI context if there is an AF_XDP socket open
13092 * on that queue id, so that receiving will start.
13093 */
13094 if (need_reset && prog)
13095 for (i = 0; i < vsi->num_queue_pairs; i++)
13096 if (vsi->xdp_rings[i]->xsk_pool)
13097 (void)i40e_xsk_wakeup(vsi->netdev, i,
13098 XDP_WAKEUP_RX);
13100 return 0;
13101 }
13103 /**
13104 * i40e_enter_busy_conf - Enters busy config state
13105 * @vsi: vsi
13107 * Returns 0 on success, <0 for failure.
13108 **/
13109 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
13110 {
13111 struct i40e_pf *pf = vsi->back;
13112 int timeout = 50;
13114 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13115 timeout--;
13116 if (!timeout)
13117 return -EBUSY;
13118 usleep_range(1000, 2000);
13119 }
13121 return 0;
13122 }
13124 /**
13125 * i40e_exit_busy_conf - Exits busy config state
13126 * @vsi: vsi
13127 **/
13128 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13129 {
13130 struct i40e_pf *pf = vsi->back;
13132 clear_bit(__I40E_CONFIG_BUSY, pf->state);
13133 }
13135 /**
13136 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13137 * @vsi: vsi
13138 * @queue_pair: queue pair
13139 **/
13140 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13141 {
13142 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13143 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13144 memset(&vsi->tx_rings[queue_pair]->stats, 0,
13145 sizeof(vsi->tx_rings[queue_pair]->stats));
13146 if (i40e_enabled_xdp_vsi(vsi)) {
13147 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13148 sizeof(vsi->xdp_rings[queue_pair]->stats));
13149 }
13150 }
13152 /**
13153 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13154 * @vsi: vsi
13155 * @queue_pair: queue pair
13156 **/
13157 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13158 {
13159 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13160 if (i40e_enabled_xdp_vsi(vsi)) {
13161 /* Make sure that in-progress ndo_xdp_xmit calls are
13162 * completed.
13163 */
13164 synchronize_rcu();
13165 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13166 }
13167 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13168 }
13170 /**
13171 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13172 * @vsi: vsi
13173 * @queue_pair: queue pair
13174 * @enable: true for enable, false for disable
13175 **/
13176 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13177 bool enable)
13178 {
13179 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13180 struct i40e_q_vector *q_vector = rxr->q_vector;
13182 if (!vsi->netdev)
13183 return;
13185 /* All rings in a qp belong to the same qvector. */
13186 if (q_vector->rx.ring || q_vector->tx.ring) {
13187 if (enable)
13188 napi_enable(&q_vector->napi);
13189 else
13190 napi_disable(&q_vector->napi);
13191 }
13192 }
13194 /**
13195 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13197 * @queue_pair: queue pair
13198 * @enable: true for enable, false for disable
13200 * Returns 0 on success, <0 on failure.
13201 **/
13202 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13203 bool enable)
13204 {
13205 struct i40e_pf *pf = vsi->back;
13206 int pf_q, ret = 0;
13208 pf_q = vsi->base_queue + queue_pair;
13209 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13210 false /*is xdp*/, enable);
13211 if (ret) {
13212 dev_info(&pf->pdev->dev,
13213 "VSI seid %d Tx ring %d %sable timeout\n",
13214 vsi->seid, pf_q, (enable ? "en" : "dis"));
13218 i40e_control_rx_q(pf, pf_q, enable);
13219 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13220 if (ret) {
13221 dev_info(&pf->pdev->dev,
13222 "VSI seid %d Rx ring %d %sable timeout\n",
13223 vsi->seid, pf_q, (enable ? "en" : "dis"));
13227 /* Due to HW errata, on Rx disable only, the register can
13228 * indicate done before it really is. Needs 50ms to be sure
13229 */
13230 if (!enable)
13231 mdelay(50);
13233 if (!i40e_enabled_xdp_vsi(vsi))
13234 return ret;
13236 ret = i40e_control_wait_tx_q(vsi->seid, pf,
13237 pf_q + vsi->alloc_queue_pairs,
13238 true /*is xdp*/, enable);
13239 if (ret) {
13240 dev_info(&pf->pdev->dev,
13241 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13242 vsi->seid, pf_q, (enable ? "en" : "dis"));
13249 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13250 * @vsi: vsi
13251 * @queue_pair: queue_pair
13252 **/
13253 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13254 {
13255 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13256 struct i40e_pf *pf = vsi->back;
13257 struct i40e_hw *hw = &pf->hw;
13259 /* All rings in a qp belong to the same qvector. */
13260 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
13261 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13262 else
13263 i40e_irq_dynamic_enable_icr0(pf);
13265 i40e_flush(hw);
13266 }
13268 /**
13269 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13270 * @vsi: vsi
13271 * @queue_pair: queue_pair
13272 **/
13273 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13274 {
13275 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13276 struct i40e_pf *pf = vsi->back;
13277 struct i40e_hw *hw = &pf->hw;
13279 /* For simplicity, instead of removing the qp interrupt causes
13280 * from the interrupt linked list, we simply disable the interrupt, and
13281 * leave the list intact.
13283 * All rings in a qp belong to the same qvector.
13284 */
13285 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13286 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13288 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13289 i40e_flush(hw);
13290 synchronize_irq(pf->msix_entries[intpf].vector);
13291 } else {
13292 /* Legacy and MSI mode - this stops all interrupt handling */
13293 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13294 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13295 i40e_flush(hw);
13296 synchronize_irq(pf->pdev->irq);
13297 }
13298 }
13300 /**
13301 * i40e_queue_pair_disable - Disables a queue pair
13303 * @queue_pair: queue pair
13305 * Returns 0 on success, <0 on failure.
13306 **/
13307 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13308 {
13309 int err;
13311 err = i40e_enter_busy_conf(vsi);
13312 if (err)
13313 return err;
13315 i40e_queue_pair_disable_irq(vsi, queue_pair);
13316 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
13317 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13318 i40e_queue_pair_clean_rings(vsi, queue_pair);
13319 i40e_queue_pair_reset_stats(vsi, queue_pair);
13321 return err;
13322 }
13324 /**
13325 * i40e_queue_pair_enable - Enables a queue pair
13327 * @queue_pair: queue pair
13329 * Returns 0 on success, <0 on failure.
13330 **/
13331 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13332 {
13333 int err;
13335 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13336 if (err)
13337 return err;
13339 if (i40e_enabled_xdp_vsi(vsi)) {
13340 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13341 if (err)
13342 return err;
13343 }
13345 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13346 if (err)
13347 return err;
13349 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
13350 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13351 i40e_queue_pair_enable_irq(vsi, queue_pair);
13353 i40e_exit_busy_conf(vsi);
13355 return err;
13356 }
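/* Sketch of how the two helpers above are typically used by the AF_XDP
 * zero-copy path (the real callers live in i40e_xsk.c); "if_running"
 * and "qid" are illustrative names:
 *
 *	if (if_running) {
 *		err = i40e_queue_pair_disable(vsi, qid);
 *		if (err)
 *			return err;
 *	}
 *	... attach or detach the xsk_pool for qid ...
 *	if (if_running) {
 *		err = i40e_queue_pair_enable(vsi, qid);
 *		if (err)
 *			return err;
 *	}
 */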
13358 /**
13359 * i40e_xdp - implements ndo_bpf for i40e
13360 * @dev: netdevice
13361 * @xdp: XDP command
13362 **/
13363 static int i40e_xdp(struct net_device *dev,
13364 struct netdev_bpf *xdp)
13365 {
13366 struct i40e_netdev_priv *np = netdev_priv(dev);
13367 struct i40e_vsi *vsi = np->vsi;
13369 if (vsi->type != I40E_VSI_MAIN)
13370 return -EINVAL;
13372 switch (xdp->command) {
13373 case XDP_SETUP_PROG:
13374 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13375 case XDP_SETUP_XSK_POOL:
13376 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13377 xdp->xsk.queue_id);
13378 default:
13379 return -EINVAL;
13380 }
13381 }
13383 static const struct net_device_ops i40e_netdev_ops = {
13384 .ndo_open = i40e_open,
13385 .ndo_stop = i40e_close,
13386 .ndo_start_xmit = i40e_lan_xmit_frame,
13387 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
13388 .ndo_set_rx_mode = i40e_set_rx_mode,
13389 .ndo_validate_addr = eth_validate_addr,
13390 .ndo_set_mac_address = i40e_set_mac,
13391 .ndo_change_mtu = i40e_change_mtu,
13392 .ndo_eth_ioctl = i40e_ioctl,
13393 .ndo_tx_timeout = i40e_tx_timeout,
13394 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
13395 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
13396 #ifdef CONFIG_NET_POLL_CONTROLLER
13397 .ndo_poll_controller = i40e_netpoll,
13398 #endif
13399 .ndo_setup_tc = __i40e_setup_tc,
13400 .ndo_select_queue = i40e_lan_select_queue,
13401 .ndo_set_features = i40e_set_features,
13402 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
13403 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
13404 .ndo_get_vf_stats = i40e_get_vf_stats,
13405 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
13406 .ndo_get_vf_config = i40e_ndo_get_vf_config,
13407 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
13408 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
13409 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
13410 .ndo_get_phys_port_id = i40e_get_phys_port_id,
13411 .ndo_fdb_add = i40e_ndo_fdb_add,
13412 .ndo_features_check = i40e_features_check,
13413 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
13414 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
13415 .ndo_bpf = i40e_xdp,
13416 .ndo_xdp_xmit = i40e_xdp_xmit,
13417 .ndo_xsk_wakeup = i40e_xsk_wakeup,
13418 .ndo_dfwd_add_station = i40e_fwd_add,
13419 .ndo_dfwd_del_station = i40e_fwd_del,
13420 };
13422 /**
13423 * i40e_config_netdev - Setup the netdev flags
13424 * @vsi: the VSI being configured
13426 * Returns 0 on success, negative value on failure
13427 **/
13428 static int i40e_config_netdev(struct i40e_vsi *vsi)
13429 {
13430 struct i40e_pf *pf = vsi->back;
13431 struct i40e_hw *hw = &pf->hw;
13432 struct i40e_netdev_priv *np;
13433 struct net_device *netdev;
13434 u8 broadcast[ETH_ALEN];
13435 u8 mac_addr[ETH_ALEN];
13436 int etherdev_size;
13437 netdev_features_t hw_enc_features;
13438 netdev_features_t hw_features;
13440 etherdev_size = sizeof(struct i40e_netdev_priv);
13441 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13442 if (!netdev)
13443 return -ENOMEM;
13445 vsi->netdev = netdev;
13446 np = netdev_priv(netdev);
13447 np->vsi = vsi;
13449 hw_enc_features = NETIF_F_SG |
13450 NETIF_F_HW_CSUM |
13451 NETIF_F_HIGHDMA |
13452 NETIF_F_SOFT_FEATURES |
13453 NETIF_F_TSO |
13454 NETIF_F_TSO_ECN |
13455 NETIF_F_TSO6 |
13456 NETIF_F_GSO_GRE |
13457 NETIF_F_GSO_GRE_CSUM |
13458 NETIF_F_GSO_PARTIAL |
13459 NETIF_F_GSO_IPXIP4 |
13460 NETIF_F_GSO_IPXIP6 |
13461 NETIF_F_GSO_UDP_TUNNEL |
13462 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13463 NETIF_F_GSO_UDP_L4 |
13464 NETIF_F_SCTP_CRC |
13465 NETIF_F_RXHASH |
13466 NETIF_F_RXCSUM |
13467 0;
13469 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13470 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13472 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13474 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13476 netdev->hw_enc_features |= hw_enc_features;
13478 /* record features VLANs can make use of */
13479 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13481 #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
13482 NETIF_F_GSO_GRE_CSUM | \
13483 NETIF_F_GSO_IPXIP4 | \
13484 NETIF_F_GSO_IPXIP6 | \
13485 NETIF_F_GSO_UDP_TUNNEL | \
13486 NETIF_F_GSO_UDP_TUNNEL_CSUM)
13488 netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
13489 netdev->features |= NETIF_F_GSO_PARTIAL |
13490 I40E_GSO_PARTIAL_FEATURES;
13492 netdev->mpls_features |= NETIF_F_SG;
13493 netdev->mpls_features |= NETIF_F_HW_CSUM;
13494 netdev->mpls_features |= NETIF_F_TSO;
13495 netdev->mpls_features |= NETIF_F_TSO6;
13496 netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
13498 /* enable macvlan offloads */
13499 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13501 hw_features = hw_enc_features |
13502 NETIF_F_HW_VLAN_CTAG_TX |
13503 NETIF_F_HW_VLAN_CTAG_RX;
13505 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13506 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13508 netdev->hw_features |= hw_features;
13510 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13511 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13513 netdev->features &= ~NETIF_F_HW_TC;
13515 if (vsi->type == I40E_VSI_MAIN) {
13516 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13517 ether_addr_copy(mac_addr, hw->mac.perm_addr);
13518 /* The following steps are necessary for two reasons. First,
13519 * some older NVM configurations load a default MAC-VLAN
13520 * filter that will accept any tagged packet, and we want to
13521 * replace this with a normal filter. Additionally, it is
13522 * possible our MAC address was provided by the platform using
13523 * Open Firmware or similar.
13525 * Thus, we need to remove the default filter and install one
13526 * specific to the MAC address.
13528 i40e_rm_default_mac_filter(vsi, mac_addr);
13529 spin_lock_bh(&vsi->mac_filter_hash_lock);
13530 i40e_add_mac_filter(vsi, mac_addr);
13531 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13532 } else {
13533 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13534 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13535 * the end, which is 4 bytes long, so force truncation of the
13536 * original name by IFNAMSIZ - 4
13537 */
13538 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13539 IFNAMSIZ - 4,
13540 pf->vsi[pf->lan_vsi]->netdev->name);
13541 eth_random_addr(mac_addr);
13543 spin_lock_bh(&vsi->mac_filter_hash_lock);
13544 i40e_add_mac_filter(vsi, mac_addr);
13545 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13546 }
13548 /* Add the broadcast filter so that we initially will receive
13549 * broadcast packets. Note that when a new VLAN is first added the
13550 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13551 * specific filters as part of transitioning into "vlan" operation.
13552 * When more VLANs are added, the driver will copy each existing MAC
13553 * filter and add it for the new VLAN.
13555 * Broadcast filters are handled specially by
13556 * i40e_sync_filters_subtask, as the driver must set the broadcast
13557 * promiscuous bit instead of adding this directly as a MAC/VLAN
13558 * filter. The subtask will update the correct broadcast promiscuous
13559 * bits as VLANs become active or inactive.
13560 */
13561 eth_broadcast_addr(broadcast);
13562 spin_lock_bh(&vsi->mac_filter_hash_lock);
13563 i40e_add_mac_filter(vsi, broadcast);
13564 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13566 eth_hw_addr_set(netdev, mac_addr);
13567 ether_addr_copy(netdev->perm_addr, mac_addr);
13569 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13570 netdev->neigh_priv_len = sizeof(u32) * 4;
13572 netdev->priv_flags |= IFF_UNICAST_FLT;
13573 netdev->priv_flags |= IFF_SUPP_NOFCS;
13574 /* Setup netdev TC information */
13575 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13577 netdev->netdev_ops = &i40e_netdev_ops;
13578 netdev->watchdog_timeo = 5 * HZ;
13579 i40e_set_ethtool_ops(netdev);
13581 /* MTU range: 68 - 9706 */
13582 netdev->min_mtu = ETH_MIN_MTU;
13583 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13585 return 0;
13586 }
13588 /**
13589 * i40e_vsi_delete - Delete a VSI from the switch
13590 * @vsi: the VSI being removed
13593 **/
13594 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13595 {
13596 /* remove default VSI is not allowed */
13597 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13598 return;
13600 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13601 }
13603 /**
13604 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13605 * @vsi: the VSI being queried
13607 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
13608 **/
13609 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13610 {
13611 struct i40e_veb *veb;
13612 struct i40e_pf *pf = vsi->back;
13614 /* Uplink is not a bridge so default to VEB */
13615 if (vsi->veb_idx >= I40E_MAX_VEB)
13616 return 1;
13618 veb = pf->veb[vsi->veb_idx];
13619 if (!veb) {
13620 dev_info(&pf->pdev->dev,
13621 "There is no veb associated with the bridge\n");
13625 /* Uplink is a bridge in VEPA mode */
13626 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13627 return 0;
13628 } else {
13629 /* Uplink is a bridge in VEB mode */
13630 return 1;
13631 }
13633 /* VEPA is now default bridge, so return 0 */
13634 return 0;
13635 }
13637 /**
13638 * i40e_add_vsi - Add a VSI to the switch
13639 * @vsi: the VSI being configured
13641 * This initializes a VSI context depending on the VSI type to be added and
13642 * passes it down to the add_vsi aq command.
13643 **/
13644 static int i40e_add_vsi(struct i40e_vsi *vsi)
13645 {
13646 int ret = -ENODEV;
13647 struct i40e_pf *pf = vsi->back;
13648 struct i40e_hw *hw = &pf->hw;
13649 struct i40e_vsi_context ctxt;
13650 struct i40e_mac_filter *f;
13651 struct hlist_node *h;
13652 int bkt;
13654 u8 enabled_tc = 0x1; /* TC0 enabled */
13655 int f_count = 0;
13657 memset(&ctxt, 0, sizeof(ctxt));
13658 switch (vsi->type) {
13659 case I40E_VSI_MAIN:
13660 /* The PF's main VSI is already setup as part of the
13661 * device initialization, so we'll not bother with
13662 * the add_vsi call, but we will retrieve the current
13663 * VSI context.
13664 */
13665 ctxt.seid = pf->main_vsi_seid;
13666 ctxt.pf_num = pf->hw.pf_id;
13667 ctxt.vf_num = 0;
13668 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13669 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13670 if (ret) {
13671 dev_info(&pf->pdev->dev,
13672 "couldn't get PF vsi config, err %s aq_err %s\n",
13673 i40e_stat_str(&pf->hw, ret),
13674 i40e_aq_str(&pf->hw,
13675 pf->hw.aq.asq_last_status));
13676 return -ENOENT;
13677 }
13678 vsi->info = ctxt.info;
13679 vsi->info.valid_sections = 0;
13681 vsi->seid = ctxt.seid;
13682 vsi->id = ctxt.vsi_number;
13684 enabled_tc = i40e_pf_get_tc_map(pf);
13686 /* Source pruning is enabled by default, so the flag is
13687 * negative logic - if it's set, we need to fiddle with
13688 * the VSI to disable source pruning.
13689 */
13690 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13691 memset(&ctxt, 0, sizeof(ctxt));
13692 ctxt.seid = pf->main_vsi_seid;
13693 ctxt.pf_num = pf->hw.pf_id;
13694 ctxt.vf_num = 0;
13695 ctxt.info.valid_sections |=
13696 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13697 ctxt.info.switch_id =
13698 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13699 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13700 if (ret) {
13701 dev_info(&pf->pdev->dev,
13702 "update vsi failed, err %s aq_err %s\n",
13703 i40e_stat_str(&pf->hw, ret),
13704 i40e_aq_str(&pf->hw,
13705 pf->hw.aq.asq_last_status));
13706 ret = -ENOENT;
13707 goto err;
13708 }
13709 }
13711 /* MFP mode setup queue map and update VSI */
13712 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13713 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13714 memset(&ctxt, 0, sizeof(ctxt));
13715 ctxt.seid = pf->main_vsi_seid;
13716 ctxt.pf_num = pf->hw.pf_id;
13717 ctxt.vf_num = 0;
13718 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13719 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13720 if (ret) {
13721 dev_info(&pf->pdev->dev,
13722 "update vsi failed, err %s aq_err %s\n",
13723 i40e_stat_str(&pf->hw, ret),
13724 i40e_aq_str(&pf->hw,
13725 pf->hw.aq.asq_last_status));
13726 ret = -ENOENT;
13727 goto err;
13728 }
13729 /* update the local VSI info queue map */
13730 i40e_vsi_update_queue_map(vsi, &ctxt);
13731 vsi->info.valid_sections = 0;
13732 } else {
13733 /* Default/Main VSI is only enabled for TC0
13734 * reconfigure it to enable all TCs that are
13735 * available on the port in SFP mode.
13736 * For MFP case the iSCSI PF would use this
13737 * flow to enable LAN+iSCSI TC.
13738 */
13739 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13740 if (ret) {
13741 /* Single TC condition is not fatal,
13742 * message and continue
13743 */
13744 dev_info(&pf->pdev->dev,
13745 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13747 i40e_stat_str(&pf->hw, ret),
13748 i40e_aq_str(&pf->hw,
13749 pf->hw.aq.asq_last_status));
13750 }
13751 }
13752 break;
13754 case I40E_VSI_FDIR:
13755 ctxt.pf_num = hw->pf_id;
13756 ctxt.vf_num = 0;
13757 ctxt.uplink_seid = vsi->uplink_seid;
13758 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13759 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13760 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13761 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13762 ctxt.info.valid_sections |=
13763 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13764 ctxt.info.switch_id =
13765 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13766 }
13767 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13768 break;
13770 case I40E_VSI_VMDQ2:
13771 ctxt.pf_num = hw->pf_id;
13772 ctxt.vf_num = 0;
13773 ctxt.uplink_seid = vsi->uplink_seid;
13774 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13775 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13777 /* This VSI is connected to VEB so the switch_id
13778 * should be set to zero by default.
13779 */
13780 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13781 ctxt.info.valid_sections |=
13782 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13783 ctxt.info.switch_id =
13784 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13785 }
13787 /* Setup the VSI tx/rx queue map for TC0 only for now */
13788 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13789 break;
13791 case I40E_VSI_SRIOV:
13792 ctxt.pf_num = hw->pf_id;
13793 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13794 ctxt.uplink_seid = vsi->uplink_seid;
13795 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13796 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13798 /* This VSI is connected to VEB so the switch_id
13799 * should be set to zero by default.
13800 */
13801 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13802 ctxt.info.valid_sections |=
13803 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13804 ctxt.info.switch_id =
13805 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13806 }
13808 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13809 ctxt.info.valid_sections |=
13810 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13811 ctxt.info.queueing_opt_flags |=
13812 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13813 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13814 }
13816 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13817 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13818 if (pf->vf[vsi->vf_id].spoofchk) {
13819 ctxt.info.valid_sections |=
13820 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13821 ctxt.info.sec_flags |=
13822 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13823 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13824 }
13825 /* Setup the VSI tx/rx queue map for TC0 only for now */
13826 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13827 break;
13829 case I40E_VSI_IWARP:
13830 /* send down message to iWARP */
13831 break;
13833 default:
13834 return -ENODEV;
13835 }
13837 if (vsi->type != I40E_VSI_MAIN) {
13838 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13839 if (ret) {
13840 dev_info(&vsi->back->pdev->dev,
13841 "add vsi failed, err %s aq_err %s\n",
13842 i40e_stat_str(&pf->hw, ret),
13843 i40e_aq_str(&pf->hw,
13844 pf->hw.aq.asq_last_status));
13845 ret = -ENOENT;
13846 goto err;
13847 }
13848 vsi->info = ctxt.info;
13849 vsi->info.valid_sections = 0;
13850 vsi->seid = ctxt.seid;
13851 vsi->id = ctxt.vsi_number;
13852 }
13854 vsi->active_filters = 0;
13855 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13856 spin_lock_bh(&vsi->mac_filter_hash_lock);
13857 /* If macvlan filters already exist, force them to get loaded */
13858 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13859 f->state = I40E_FILTER_NEW;
13860 f_count++;
13861 }
13862 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13864 if (f_count) {
13865 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13866 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13867 }
13869 /* Update VSI BW information */
13870 ret = i40e_vsi_get_bw_info(vsi);
13871 if (ret) {
13872 dev_info(&pf->pdev->dev,
13873 "couldn't get vsi bw info, err %s aq_err %s\n",
13874 i40e_stat_str(&pf->hw, ret),
13875 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13876 /* VSI is already added so not tearing that up */
13877 ret = 0;
13878 }
13880 err:
13881 return ret;
13882 }
13884 /**
13885 * i40e_vsi_release - Delete a VSI and free its resources
13886 * @vsi: the VSI being removed
13888 * Returns 0 on success or < 0 on error
13889 **/
13890 int i40e_vsi_release(struct i40e_vsi *vsi)
13891 {
13892 struct i40e_mac_filter *f;
13893 struct hlist_node *h;
13894 struct i40e_veb *veb = NULL;
13895 struct i40e_pf *pf;
13896 u16 uplink_seid;
13897 int i, n, bkt;
13899 pf = vsi->back;
13901 /* release of a VEB-owner or last VSI is not allowed */
13902 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13903 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13904 vsi->seid, vsi->uplink_seid);
13905 return -ENODEV;
13906 }
13907 if (vsi == pf->vsi[pf->lan_vsi] &&
13908 !test_bit(__I40E_DOWN, pf->state)) {
13909 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13912 set_bit(__I40E_VSI_RELEASING, vsi->state);
13913 uplink_seid = vsi->uplink_seid;
13914 if (vsi->type != I40E_VSI_SRIOV) {
13915 if (vsi->netdev_registered) {
13916 vsi->netdev_registered = false;
13917 if (vsi->netdev) {
13918 /* results in a call to i40e_close() */
13919 unregister_netdev(vsi->netdev);
13920 }
13921 } else {
13922 i40e_vsi_close(vsi);
13923 }
13924 i40e_vsi_disable_irq(vsi);
13925 }
13927 spin_lock_bh(&vsi->mac_filter_hash_lock);
13929 /* clear the sync flag on all filters */
13930 if (vsi->netdev) {
13931 __dev_uc_unsync(vsi->netdev, NULL);
13932 __dev_mc_unsync(vsi->netdev, NULL);
13933 }
13935 /* make sure any remaining filters are marked for deletion */
13936 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13937 __i40e_del_filter(vsi, f);
13939 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13941 i40e_sync_vsi_filters(vsi);
13943 i40e_vsi_delete(vsi);
13944 i40e_vsi_free_q_vectors(vsi);
13945 if (vsi->netdev) {
13946 free_netdev(vsi->netdev);
13947 vsi->netdev = NULL;
13948 }
13949 i40e_vsi_clear_rings(vsi);
13950 i40e_vsi_clear(vsi);
13952 /* If this was the last thing on the VEB, except for the
13953 * controlling VSI, remove the VEB, which puts the controlling
13954 * VSI onto the next level down in the switch.
13956 * Well, okay, there's one more exception here: don't remove
13957 * the orphan VEBs yet. We'll wait for an explicit remove request
13958 * from up the network stack.
13959 */
13960 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13961 if (pf->vsi[i] &&
13962 pf->vsi[i]->uplink_seid == uplink_seid &&
13963 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13964 n++; /* count the VSIs */
13965 }
13967 for (i = 0; i < I40E_MAX_VEB; i++) {
13968 if (!pf->veb[i])
13969 continue;
13970 if (pf->veb[i]->uplink_seid == uplink_seid)
13971 n++; /* count the VEBs */
13972 if (pf->veb[i]->seid == uplink_seid)
13973 veb = pf->veb[i];
13974 }
13975 if (n == 0 && veb && veb->uplink_seid != 0)
13976 i40e_veb_release(veb);
13978 return 0;
13979 }
13981 /**
13982 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
13983 * @vsi: ptr to the VSI
13985 * This should only be called after i40e_vsi_mem_alloc() which allocates the
13986 * corresponding SW VSI structure and initializes num_queue_pairs for the
13987 * newly allocated VSI.
13989 * Returns 0 on success or negative on failure
13990 **/
13991 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13992 {
13993 int ret = -ENOENT;
13994 struct i40e_pf *pf = vsi->back;
13996 if (vsi->q_vectors[0]) {
13997 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
14002 if (vsi->base_vector) {
14003 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
14004 vsi->seid, vsi->base_vector);
14005 return -EEXIST;
14006 }
14008 ret = i40e_vsi_alloc_q_vectors(vsi);
14009 if (ret) {
14010 dev_info(&pf->pdev->dev,
14011 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
14012 vsi->num_q_vectors, vsi->seid, ret);
14013 vsi->num_q_vectors = 0;
14014 goto vector_setup_out;
14015 }
14017 /* In Legacy mode, we do not have to get any other vector since we
14018 * piggyback on the misc/ICR0 for queue interrupts.
14019 */
14020 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
14021 return ret;
14022 if (vsi->num_q_vectors)
14023 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
14024 vsi->num_q_vectors, vsi->idx);
14025 if (vsi->base_vector < 0) {
14026 dev_info(&pf->pdev->dev,
14027 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
14028 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
14029 i40e_vsi_free_q_vectors(vsi);
14030 ret = -ENOENT;
14031 goto vector_setup_out;
14032 }
14034 vector_setup_out:
14035 return ret;
14036 }
14038 /**
14039 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
14040 * @vsi: pointer to the vsi.
14042 * This re-allocates a vsi's queue resources.
14044 * Returns pointer to the successfully allocated and configured VSI sw struct
14045 * on success, otherwise returns NULL on failure.
14046 **/
14047 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
14048 {
14049 u16 alloc_queue_pairs;
14050 struct i40e_pf *pf;
14051 u8 enabled_tc;
14052 int ret;
14054 if (!vsi)
14055 return NULL;
14057 pf = vsi->back;
14059 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
14060 i40e_vsi_clear_rings(vsi);
14062 i40e_vsi_free_arrays(vsi, false);
14063 i40e_set_num_rings_in_vsi(vsi);
14064 ret = i40e_vsi_alloc_arrays(vsi, false);
14065 if (ret)
14066 goto err_vsi;
14068 alloc_queue_pairs = vsi->alloc_queue_pairs *
14069 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14071 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14072 if (ret < 0) {
14073 dev_info(&pf->pdev->dev,
14074 "failed to get tracking for %d queues for VSI %d err %d\n",
14075 alloc_queue_pairs, vsi->seid, ret);
14076 goto err_vsi;
14077 }
14078 vsi->base_queue = ret;
14080 /* Update the FW view of the VSI. Force a reset of TC and queue
14081 * layout configurations.
14082 */
14083 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14084 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14085 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14086 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14087 if (vsi->type == I40E_VSI_MAIN)
14088 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
14090 /* assign it some queues */
14091 ret = i40e_alloc_rings(vsi);
14092 if (ret)
14093 goto err_rings;
14095 /* map all of the rings to the q_vectors */
14096 i40e_vsi_map_rings_to_vectors(vsi);
14097 return vsi;
14099 err_rings:
14100 i40e_vsi_free_q_vectors(vsi);
14101 if (vsi->netdev_registered) {
14102 vsi->netdev_registered = false;
14103 unregister_netdev(vsi->netdev);
14104 free_netdev(vsi->netdev);
14105 vsi->netdev = NULL;
14107 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14108 err_vsi:
14109 i40e_vsi_clear(vsi);
14110 return NULL;
14111 }
14113 /**
14114 * i40e_vsi_setup - Set up a VSI by a given type
14115 * @pf: board private structure
14116 * @type: VSI type
14117 * @uplink_seid: the switch element to link to
14118 * @param1: usage depends upon VSI type. For VF types, indicates VF id
14120 * This allocates the sw VSI structure and its queue resources, then add a VSI
14121 * to the identified VEB.
14123 * Returns pointer to the successfully allocated and configure VSI sw struct on
14124 * success, otherwise returns NULL on failure.
14125 **/
14126 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
14127 u16 uplink_seid, u32 param1)
14128 {
14129 struct i40e_vsi *vsi = NULL;
14130 struct i40e_veb *veb = NULL;
14131 u16 alloc_queue_pairs;
14132 int ret, i;
14133 int v_idx;
14135 /* The requested uplink_seid must be either
14136 * - the PF's port seid
14137 * no VEB is needed because this is the PF
14138 * or this is a Flow Director special case VSI
14139 * - seid of an existing VEB
14140 * - seid of a VSI that owns an existing VEB
14141 * - seid of a VSI that doesn't own a VEB
14142 * a new VEB is created and the VSI becomes the owner
14143 * - seid of the PF VSI, which is what creates the first VEB
14144 * this is a special case of the previous
14146 * Find which uplink_seid we were given and create a new VEB if needed
14147 */
14148 for (i = 0; i < I40E_MAX_VEB; i++) {
14149 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
14150 veb = pf->veb[i];
14151 break;
14152 }
14153 }
14155 if (!veb && uplink_seid != pf->mac_seid) {
14157 for (i = 0; i < pf->num_alloc_vsi; i++) {
14158 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
14159 vsi = pf->vsi[i];
14160 break;
14161 }
14162 }
14163 if (!vsi) {
14164 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14165 uplink_seid);
14166 return NULL;
14167 }
14169 if (vsi->uplink_seid == pf->mac_seid)
14170 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14171 vsi->tc_config.enabled_tc);
14172 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14173 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14174 vsi->tc_config.enabled_tc);
14175 if (veb) {
14176 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14177 dev_info(&vsi->back->pdev->dev,
14178 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14181 /* We come up by default in VEPA mode if SRIOV is not
14182 * already enabled, in which case we can't force VEPA
14183 * mode.
14184 */
14185 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
14186 veb->bridge_mode = BRIDGE_MODE_VEPA;
14187 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
14188 }
14189 i40e_config_bridge_mode(veb);
14190 }
14191 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14192 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14193 veb = pf->veb[i];
14194 }
14195 if (!veb) {
14196 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14197 return NULL;
14198 }
14200 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14201 uplink_seid = veb->seid;
14202 }
14204 /* get vsi sw struct */
14205 v_idx = i40e_vsi_mem_alloc(pf, type);
14206 if (v_idx < 0)
14207 goto err_alloc;
14208 vsi = pf->vsi[v_idx];
14209 if (!vsi)
14210 goto err_alloc;
14211 vsi->type = type;
14212 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14214 if (type == I40E_VSI_MAIN)
14215 pf->lan_vsi = v_idx;
14216 else if (type == I40E_VSI_SRIOV)
14217 vsi->vf_id = param1;
14218 /* assign it some queues */
14219 alloc_queue_pairs = vsi->alloc_queue_pairs *
14220 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14222 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14223 if (ret < 0) {
14224 dev_info(&pf->pdev->dev,
14225 "failed to get tracking for %d queues for VSI %d err=%d\n",
14226 alloc_queue_pairs, vsi->seid, ret);
14227 goto err_vsi;
14228 }
14229 vsi->base_queue = ret;
14231 /* get a VSI from the hardware */
14232 vsi->uplink_seid = uplink_seid;
14233 ret = i40e_add_vsi(vsi);
14234 if (ret)
14235 goto err_vsi;
14237 switch (vsi->type) {
14238 /* setup the netdev if needed */
14239 case I40E_VSI_MAIN:
14240 case I40E_VSI_VMDQ2:
14241 ret = i40e_config_netdev(vsi);
14242 if (ret)
14243 goto err_netdev;
14244 ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
14245 if (ret)
14246 goto err_netdev;
14247 ret = register_netdev(vsi->netdev);
14248 if (ret)
14249 goto err_netdev;
14250 vsi->netdev_registered = true;
14251 netif_carrier_off(vsi->netdev);
14252 #ifdef CONFIG_I40E_DCB
14253 /* Setup DCB netlink interface */
14254 i40e_dcbnl_setup(vsi);
14255 #endif /* CONFIG_I40E_DCB */
14256 fallthrough;
14257 case I40E_VSI_FDIR:
14258 /* set up vectors and rings if needed */
14259 ret = i40e_vsi_setup_vectors(vsi);
14260 if (ret)
14261 goto err_msix;
14263 ret = i40e_alloc_rings(vsi);
14264 if (ret)
14265 goto err_rings;
14267 /* map all of the rings to the q_vectors */
14268 i40e_vsi_map_rings_to_vectors(vsi);
14270 i40e_vsi_reset_stats(vsi);
14271 break;
14272 default:
14273 /* no netdev or rings for the other VSI types */
14274 break;
14275 }
14277 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
14278 (vsi->type == I40E_VSI_VMDQ2)) {
14279 ret = i40e_vsi_config_rss(vsi);
14280 if (ret)
14281 goto err_config;
14282 }
14283 return vsi;
err_config:
i40e_vsi_clear_rings(vsi);
err_rings:
14284 i40e_vsi_free_q_vectors(vsi);
14285 err_msix:
14286 if (vsi->netdev_registered) {
14287 vsi->netdev_registered = false;
14288 unregister_netdev(vsi->netdev);
14289 free_netdev(vsi->netdev);
14290 vsi->netdev = NULL;
14291 }
14292 err_netdev:
14293 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14294 err_vsi:
14295 i40e_vsi_clear(vsi);
14296 err_alloc:
14297 return NULL;
14298 }
14300 /**
14301 * i40e_veb_get_bw_info - Query VEB BW information
14302 * @veb: the veb to query
14304 * Query the Tx scheduler BW configuration data for given VEB
14305 **/
14306 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14307 {
14308 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14309 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14310 struct i40e_pf *pf = veb->pf;
14311 struct i40e_hw *hw = &pf->hw;
14312 u32 tc_bw_max;
14313 int ret = 0;
14314 int i;
14316 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14317 &bw_data, NULL);
14318 if (ret) {
14319 dev_info(&pf->pdev->dev,
14320 "query veb bw config failed, err %s aq_err %s\n",
14321 i40e_stat_str(&pf->hw, ret),
14322 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14323 goto out;
14324 }
14326 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14327 &ets_data, NULL);
14328 if (ret) {
14329 dev_info(&pf->pdev->dev,
14330 "query veb bw ets config failed, err %s aq_err %s\n",
14331 i40e_stat_str(&pf->hw, ret),
14332 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14333 goto out;
14334 }
14336 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14337 veb->bw_max_quanta = ets_data.tc_bw_max;
14338 veb->is_abs_credits = bw_data.absolute_credits_enable;
14339 veb->enabled_tc = ets_data.tc_valid_bits;
14340 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14341 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14342 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14343 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14344 veb->bw_tc_limit_credits[i] =
14345 le16_to_cpu(bw_data.tc_bw_limits[i]);
14346 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
14347 }
14349 out:
14350 return ret;
14351 }
14353 /**
14354 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14355 * @pf: board private structure
14357 * On error: returns error code (negative)
14357 * On error: returns error code (negative)
14358 * On success: returns veb index in PF (positive)
14359 **/
14360 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14361 {
14362 int ret = -ENOENT;
14363 struct i40e_veb *veb;
14364 int i;
14366 /* Need to protect the allocation of switch elements at the PF level */
14367 mutex_lock(&pf->switch_mutex);
14369 /* VEB list may be fragmented if VEB creation/destruction has
14370 * been happening. We can afford to do a quick scan to look
14371 * for any free slots in the list.
14373 * find next empty veb slot, looping back around if necessary
14374 */
14375 i = 0;
14376 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14377 i++;
14378 if (i >= I40E_MAX_VEB) {
14379 ret = -ENOMEM;
14380 goto err_alloc_veb; /* out of VEB slots! */
14381 }
14383 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14384 if (!veb) {
14385 ret = -ENOMEM;
14386 goto err_alloc_veb;
14387 }
14388 veb->pf = pf;
14389 veb->idx = i;
14390 veb->enabled_tc = 1;
14392 pf->veb[i] = veb;
14393 ret = i;
14394 err_alloc_veb:
14395 mutex_unlock(&pf->switch_mutex);
14396 return ret;
14397 }
14399 /**
14400 * i40e_switch_branch_release - Delete a branch of the switch tree
14401 * @branch: where to start deleting
14403 * This uses recursion to find the tips of the branch to be
14404 * removed, deleting until we get back to and can delete this VEB.
14405 **/
14406 static void i40e_switch_branch_release(struct i40e_veb *branch)
14407 {
14408 struct i40e_pf *pf = branch->pf;
14409 u16 branch_seid = branch->seid;
14410 u16 veb_idx = branch->idx;
14411 int i;
14413 /* release any VEBs on this VEB - RECURSION */
14414 for (i = 0; i < I40E_MAX_VEB; i++) {
14415 if (!pf->veb[i])
14416 continue;
14417 if (pf->veb[i]->uplink_seid == branch->seid)
14418 i40e_switch_branch_release(pf->veb[i]);
14419 }
14421 /* Release the VSIs on this VEB, but not the owner VSI.
14423 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
14424 * the VEB itself, so don't use (*branch) after this loop.
14425 */
14426 for (i = 0; i < pf->num_alloc_vsi; i++) {
14427 if (!pf->vsi[i])
14428 continue;
14429 if (pf->vsi[i]->uplink_seid == branch_seid &&
14430 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14431 i40e_vsi_release(pf->vsi[i]);
14432 }
14433 }
14435 /* There's one corner case where the VEB might not have been
14436 * removed, so double check it here and remove it if needed.
14437 * This case happens if the veb was created from the debugfs
14438 * commands and no VSIs were added to it.
14439 */
14440 if (pf->veb[veb_idx])
14441 i40e_veb_release(pf->veb[veb_idx]);
14442 }
14444 /**
14445 * i40e_veb_clear - remove veb struct
14446 * @veb: the veb to remove
14447 **/
14448 static void i40e_veb_clear(struct i40e_veb *veb)
14449 {
14450 if (!veb)
14451 return;
14453 if (veb->pf) {
14454 struct i40e_pf *pf = veb->pf;
14456 mutex_lock(&pf->switch_mutex);
14457 if (pf->veb[veb->idx] == veb)
14458 pf->veb[veb->idx] = NULL;
14459 mutex_unlock(&pf->switch_mutex);
14460 }
14462 kfree(veb);
14463 }
14465 /**
14466 * i40e_veb_release - Delete a VEB and free its resources
14467 * @veb: the VEB being removed
14468 **/
14469 void i40e_veb_release(struct i40e_veb *veb)
14470 {
14471 struct i40e_vsi *vsi = NULL;
14472 struct i40e_pf *pf;
14473 int i, n = 0;
14475 pf = veb->pf;
14477 /* find the remaining VSI and check for extras */
14478 for (i = 0; i < pf->num_alloc_vsi; i++) {
14479 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
14480 n++;
14481 vsi = pf->vsi[i];
14482 }
14483 }
14484 if (n != 1) {
14485 dev_info(&pf->pdev->dev,
14486 "can't remove VEB %d with %d VSIs left\n",
14491 /* move the remaining VSI to uplink veb */
14492 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14493 if (veb->uplink_seid) {
14494 vsi->uplink_seid = veb->uplink_seid;
14495 if (veb->uplink_seid == pf->mac_seid)
14496 vsi->veb_idx = I40E_NO_VEB;
14497 else
14498 vsi->veb_idx = veb->veb_idx;
14499 } else {
14500 /* floating VEB */
14501 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14502 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14503 }
14505 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14506 i40e_veb_clear(veb);
14507 }
14509 /**
14510 * i40e_add_veb - create the VEB in the switch
14511 * @veb: the VEB to be instantiated
14512 * @vsi: the controlling VSI
14513 **/
14514 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14515 {
14516 struct i40e_pf *pf = veb->pf;
14517 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
14518 int ret;
14520 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
14521 veb->enabled_tc, false,
14522 &veb->seid, enable_stats, NULL);
14524 /* get a VEB from the hardware */
14525 if (ret) {
14526 dev_info(&pf->pdev->dev,
14527 "couldn't add VEB, err %s aq_err %s\n",
14528 i40e_stat_str(&pf->hw, ret),
14529 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14530 return -EPERM;
14531 }
14533 /* get statistics counter */
14534 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14535 &veb->stats_idx, NULL, NULL, NULL);
14536 if (ret) {
14537 dev_info(&pf->pdev->dev,
14538 "couldn't get VEB statistics idx, err %s aq_err %s\n",
14539 i40e_stat_str(&pf->hw, ret),
14540 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14541 return -EPERM;
14542 }
14543 ret = i40e_veb_get_bw_info(veb);
14544 if (ret) {
14545 dev_info(&pf->pdev->dev,
14546 "couldn't get VEB bw info, err %s aq_err %s\n",
14547 i40e_stat_str(&pf->hw, ret),
14548 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14549 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14550 return -ENOENT;
14551 }
14553 vsi->uplink_seid = veb->seid;
14554 vsi->veb_idx = veb->idx;
14555 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14557 return 0;
14558 }
14560 /**
14561 * i40e_veb_setup - Set up a VEB
14562 * @pf: board private structure
14563 * @flags: VEB setup flags
14564 * @uplink_seid: the switch element to link to
14565 * @vsi_seid: the initial VSI seid
14566 * @enabled_tc: Enabled TC bit-map
14568 * This allocates the sw VEB structure and links it into the switch
14569 * It is possible and legal for this to be a duplicate of an already
14570 * existing VEB. It is also possible for both uplink and vsi seids
14571 * to be zero, in order to create a floating VEB.
14573 * Returns pointer to the successfully allocated VEB sw struct on
14574 * success, otherwise returns NULL on failure.
14575 **/
14576 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14577 u16 uplink_seid, u16 vsi_seid,
14578 u8 enabled_tc)
14579 {
14580 struct i40e_veb *veb, *uplink_veb = NULL;
14581 int vsi_idx, veb_idx;
14582 int ret;
14584 /* if one seid is 0, the other must be 0 to create a floating relay */
14585 if ((uplink_seid == 0 || vsi_seid == 0) &&
14586 (uplink_seid + vsi_seid != 0)) {
14587 dev_info(&pf->pdev->dev,
14588 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14589 uplink_seid, vsi_seid);
14590 return NULL;
14591 }
14593 /* make sure there is such a vsi and uplink */
14594 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14595 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14596 break;
14597 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14598 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14603 if (uplink_seid && uplink_seid != pf->mac_seid) {
14604 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14605 if (pf->veb[veb_idx] &&
14606 pf->veb[veb_idx]->seid == uplink_seid) {
14607 uplink_veb = pf->veb[veb_idx];
14608 break;
14609 }
14610 }
14611 if (!uplink_veb) {
14612 dev_info(&pf->pdev->dev,
14613 "uplink seid %d not found\n", uplink_seid);
14618 /* get veb sw struct */
14619 veb_idx = i40e_veb_mem_alloc(pf);
14620 if (veb_idx < 0)
14621 goto err_alloc;
14622 veb = pf->veb[veb_idx];
14623 veb->flags = flags;
14624 veb->uplink_seid = uplink_seid;
14625 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14626 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14628 /* create the VEB in the switch */
14629 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14630 if (ret)
14631 goto err_veb;
14632 if (vsi_idx == pf->lan_vsi)
14633 pf->lan_veb = veb->idx;
14635 return veb;
14637 err_veb:
14638 i40e_veb_clear(veb);
14639 err_alloc:
14640 return NULL;
14641 }
14643 /**
14644 * i40e_setup_pf_switch_element - set PF vars based on switch type
14645 * @pf: board private structure
14646 * @ele: element we are building info from
14647 * @num_reported: total number of elements
14648 * @printconfig: should we print the contents
14650 * helper function to assist in extracting a few useful SEID values.
14651 **/
14652 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14653 struct i40e_aqc_switch_config_element_resp *ele,
14654 u16 num_reported, bool printconfig)
14655 {
14656 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14657 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14658 u8 element_type = ele->element_type;
14659 u16 seid = le16_to_cpu(ele->seid);
14661 if (printconfig)
14662 dev_info(&pf->pdev->dev,
14663 "type=%d seid=%d uplink=%d downlink=%d\n",
14664 element_type, seid, uplink_seid, downlink_seid);
14666 switch (element_type) {
14667 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14668 pf->mac_seid = seid;
14669 break;
14670 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14672 if (uplink_seid != pf->mac_seid)
14673 break;
14674 if (pf->lan_veb >= I40E_MAX_VEB) {
14675 int v;
14677 /* find existing or else empty VEB */
14678 for (v = 0; v < I40E_MAX_VEB; v++) {
14679 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14680 pf->lan_veb = v;
14681 break;
14682 }
14683 }
14684 if (pf->lan_veb >= I40E_MAX_VEB) {
14685 v = i40e_veb_mem_alloc(pf);
14686 if (v < 0)
14687 break;
14688 pf->lan_veb = v;
14689 }
14690 }
14691 if (pf->lan_veb >= I40E_MAX_VEB)
14692 break;
14694 pf->veb[pf->lan_veb]->seid = seid;
14695 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14696 pf->veb[pf->lan_veb]->pf = pf;
14697 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14698 break;
14699 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14700 if (num_reported != 1)
14701 break;
14702 /* This is immediately after a reset so we can assume this is
14703 * the PF's main VSI
14704 */
14705 pf->mac_seid = uplink_seid;
14706 pf->pf_seid = downlink_seid;
14707 pf->main_vsi_seid = seid;
14708 if (printconfig)
14709 dev_info(&pf->pdev->dev,
14710 "pf_seid=%d main_vsi_seid=%d\n",
14711 pf->pf_seid, pf->main_vsi_seid);
14712 break;
14713 case I40E_SWITCH_ELEMENT_TYPE_PF:
14714 case I40E_SWITCH_ELEMENT_TYPE_VF:
14715 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14716 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14717 case I40E_SWITCH_ELEMENT_TYPE_PE:
14718 case I40E_SWITCH_ELEMENT_TYPE_PA:
14719 /* ignore these for now */
14720 break;
14721 default:
14722 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14723 element_type, seid);
14724 break;
14725 }
14726 }
14728 /**
14729 * i40e_fetch_switch_configuration - Get switch config from firmware
14730 * @pf: board private structure
14731 * @printconfig: should we print the contents
14733 * Get the current switch configuration from the device and
14734 * extract a few useful SEID values.
14735 **/
14736 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14737 {
14738 struct i40e_aqc_get_switch_config_resp *sw_config;
14739 u16 next_seid = 0;
14740 int ret = 0;
14741 u8 *aq_buf;
14742 int i;
14744 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14745 if (!aq_buf)
14746 return -ENOMEM;
14748 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
14749 do {
14750 u16 num_reported, num_total;
14752 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14753 I40E_AQ_LARGE_BUF,
14754 &next_seid, NULL);
14755 if (ret) {
14756 dev_info(&pf->pdev->dev,
14757 "get switch config failed err %s aq_err %s\n",
14758 i40e_stat_str(&pf->hw, ret),
14759 i40e_aq_str(&pf->hw,
14760 pf->hw.aq.asq_last_status));
14761 kfree(aq_buf);
14762 return -ENOENT;
14763 }
14765 num_reported = le16_to_cpu(sw_config->header.num_reported);
14766 num_total = le16_to_cpu(sw_config->header.num_total);
14768 if (printconfig)
14769 dev_info(&pf->pdev->dev,
14770 "header: %d reported %d total\n",
14771 num_reported, num_total);
14773 for (i = 0; i < num_reported; i++) {
14774 struct i40e_aqc_switch_config_element_resp *ele =
14775 &sw_config->element[i];
14777 i40e_setup_pf_switch_element(pf, ele, num_reported,
14778 printconfig);
14779 }
14780 } while (next_seid != 0);
14782 kfree(aq_buf);
14783 return ret;
14784 }
14786 /**
14787 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14788 * @pf: board private structure
14789 * @reinit: if the Main VSI needs to re-initialized.
14790 * @lock_acquired: indicates whether or not the lock has been acquired
14792 * Returns 0 on success, negative value on failure
14793 **/
14794 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
14795 {
14796 u16 flags = 0;
14797 int ret;
14799 /* find out what's out there already */
14800 ret = i40e_fetch_switch_configuration(pf, false);
14801 if (ret) {
14802 dev_info(&pf->pdev->dev,
14803 "couldn't fetch switch config, err %s aq_err %s\n",
14804 i40e_stat_str(&pf->hw, ret),
14805 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14806 return ret;
14807 }
14808 i40e_pf_reset_stats(pf);
14810 /* set the switch config bit for the whole device to
14811 * support limited promisc or true promisc
14812 * when user requests promisc. The default is limited
14813 * promisc.
14814 */
14816 if ((pf->hw.pf_id == 0) &&
14817 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14818 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14819 pf->last_sw_conf_flags = flags;
14820 }
14822 if (pf->hw.pf_id == 0) {
14823 u16 valid_flags;
14825 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14826 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14827 NULL);
14828 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14829 dev_info(&pf->pdev->dev,
14830 "couldn't set switch config bits, err %s aq_err %s\n",
14831 i40e_stat_str(&pf->hw, ret),
14832 i40e_aq_str(&pf->hw,
14833 pf->hw.aq.asq_last_status));
14834 /* not a fatal problem, just keep going */
14835 }
14836 pf->last_sw_conf_valid_flags = valid_flags;
14837 }
14839 /* first time setup */
14840 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14841 struct i40e_vsi *vsi = NULL;
14842 u16 uplink_seid;
14844 /* Set up the PF VSI associated with the PF's main VSI
14845 * that is already in the HW switch
14846 */
14847 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14848 uplink_seid = pf->veb[pf->lan_veb]->seid;
14849 else
14850 uplink_seid = pf->mac_seid;
14851 if (pf->lan_vsi == I40E_NO_VSI)
14852 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14853 else
14854 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14855 if (!vsi) {
14856 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14857 i40e_cloud_filter_exit(pf);
14858 i40e_fdir_teardown(pf);
14859 return -EAGAIN;
14860 }
14861 } else {
14862 /* force a reset of TC and queue layout configurations */
14863 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14865 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14866 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14867 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14868 }
14869 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14871 i40e_fdir_sb_setup(pf);
14873 /* Setup static PF queue filter control settings */
14874 ret = i40e_setup_pf_filter_control(pf);
14876 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14878 /* Failure here should not stop continuing other steps */
14881 /* enable RSS in the HW, even for only one queue, as the stack can use
14884 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14885 i40e_pf_config_rss(pf);
14887 /* fill in link information and enable LSE reporting */
14888 i40e_link_event(pf);
14890 /* Initialize user-specific link properties */
14891 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14892 I40E_AQ_AN_COMPLETED) ? true : false);
14896 if (!lock_acquired)
14899 /* repopulate tunnel port filters */
14900 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
14902 if (!lock_acquired)
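/* Locking note for the function above: udp_tunnel_nic_reset_ntf() must run
 * under RTNL, so the function takes the lock itself only when the caller
 * has not already done so (lock_acquired == false, as in the probe path);
 * reset paths that already hold RTNL pass lock_acquired == true.
 */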
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use. We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big share of the number of queues.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
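/* Illustrative walk-through of the accounting above (hypothetical numbers):
 * with num_tx_qp = 128, rss_size_max = 64, 16 online CPUs and 129 MSI-X
 * vectors, the final branch picks num_lan_qps = min(max(64, 16), 128, 129)
 * = 64, leaving 64 queues. One queue is then reserved for Flow Director
 * (63 left); a request for 32 VFs at 4 qps each is capped to 63 / 4 = 15
 * VFs (3 left); and a VMDq request of 8 VSIs at 2 qps each is capped to
 * 3 / 2 = 1 VSI, so the dev_dbg() line reports remaining=1.
 */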
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the PF.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		       pf->hw.func_caps.num_vsis,
		       pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
		i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += scnprintf(&buf[i], REMAIN(i), " DCB");
	i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
	i += scnprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += scnprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += scnprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}
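/* The summary built above lands in the kernel log as one line, roughly of
 * this shape (illustrative only, flags vary with configuration):
 *   i40e 0000:02:00.0: Features: PF-id[0] VFs: 32 VSIs: 66 QP: 64 RSS
 *   FD_ATR FD_SB NTUPLE DCB VxLAN Geneve PTP VEPA
 */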
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
/**
 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
 * @fec_cfg: FEC option to set in flags
 * @flags: ptr to flags in which we set FEC option
 **/
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
{
	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
		*flags |= I40E_FLAG_RS_FEC;
		*flags &= ~I40E_FLAG_BASE_R_FEC;
	}
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
		*flags |= I40E_FLAG_BASE_R_FEC;
		*flags &= ~I40E_FLAG_RS_FEC;
	}
	if (fec_cfg == 0)
		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}
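/* Quick reference for the mapping above: AUTO sets both the RS and BASE-R
 * FEC flags, an RS request/ability then narrows the selection to RS only,
 * a KR request/ability narrows it to BASE-R only (taking precedence over
 * RS when both are set, since the checks run in order), and fec_cfg == 0
 * clears both flags, i.e. FEC disabled.
 */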
/**
 * i40e_check_recovery_mode - check if we are running transition firmware
 * @pf: board private structure
 *
 * Check registers indicating the firmware runs in recovery mode. Sets the
 * appropriate driver state.
 *
 * Returns true if the recovery mode was detected, false otherwise
 **/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);

	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		set_bit(__I40E_RECOVERY_MODE, pf->state);

		return true;
	}
	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");

	return false;
}
/**
 * i40e_pf_loop_reset - perform reset in a loop
 * @pf: board private structure
 *
 * This function is useful when a NIC is about to enter recovery mode.
 * When a NIC's internal data structures are corrupted the NIC's
 * firmware is going to enter recovery mode.
 * Right after a POR it takes about 7 minutes for firmware to enter
 * recovery mode. Until that time a NIC is in some kind of intermediate
 * state. After that time period the NIC almost surely enters
 * recovery mode. The only way for a driver to detect the intermediate
 * state is to issue a series of PF resets and check the return values.
 * If a PF reset returns success then the firmware could be in recovery
 * mode, so the caller of this code needs to check for recovery mode
 * if this function returns success. There is a small chance that
 * the firmware will hang in the intermediate state forever.
 * Since waiting 7 minutes is quite a long time, this function waits
 * 10 seconds and then gives up by returning an error.
 *
 * Return 0 on success, negative on failure.
 **/
static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
{
	/* wait max 10 seconds for PF reset to succeed */
	const unsigned long time_end = jiffies + 10 * HZ;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
		usleep_range(10000, 20000);
		ret = i40e_pf_reset(hw);
	}

	if (ret == I40E_SUCCESS)
		pf->pfr_count++;
	else
		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);

	return ret;
}
/**
 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
 * @pf: board private structure
 *
 * Check FW registers to determine if FW issued unexpected EMP Reset.
 * Every time an unexpected EMP Reset occurs the FW increments a counter
 * of unexpected EMP Resets. When the counter reaches 10 the FW should
 * enter the Recovery mode.
 *
 * Returns true if FW issued unexpected EMP Reset
 **/
static bool i40e_check_fw_empr(struct i40e_pf *pf)
{
	const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
			   I40E_GL_FWSTS_FWS1B_MASK;
	return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
	       (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
}
/**
 * i40e_handle_resets - handle EMP resets and PF resets
 * @pf: board private structure
 *
 * Handle both EMP resets and PF resets and conclude whether there are
 * any issues regarding these resets. If there are any issues then
 * generate a log entry.
 *
 * Return 0 if NIC is healthy or negative value when there are issues
 * with resets
 **/
static i40e_status i40e_handle_resets(struct i40e_pf *pf)
{
	const i40e_status pfr = i40e_pf_loop_reset(pf);
	const bool is_empr = i40e_check_fw_empr(pf);

	if (is_empr || pfr != I40E_SUCCESS)
		dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");

	return is_empr ? I40E_ERR_RESET_FAILED : pfr;
}
/**
 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
 * @pf: board private structure
 * @hw: ptr to the hardware info
 *
 * This function does a minimal setup of all subsystems needed for running
 * recovery mode.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
{
	struct i40e_vsi *vsi;
	int err;
	int v_idx;

	pci_save_state(pf->pdev);

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

	/* We allocate one VSI which is needed as absolute minimum
	 * in order to register the netdev
	 */
	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
	if (v_idx < 0) {
		err = v_idx;
		goto err_switch_setup;
	}
	pf->lan_vsi = v_idx;
	vsi = pf->vsi[v_idx];
	if (!vsi) {
		err = -EFAULT;
		goto err_switch_setup;
	}
	vsi->alloc_queue_pairs = 1;
	err = i40e_config_netdev(vsi);
	if (err)
		goto err_switch_setup;
	err = register_netdev(vsi->netdev);
	if (err)
		goto err_switch_setup;
	vsi->netdev_registered = true;
	i40e_dbg_pf_init(pf);

	err = i40e_setup_misc_vector_for_recovery_mode(pf);
	if (err)
		goto err_switch_setup;

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;

err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
	i40e_shutdown_adminq(hw);
	iounmap(hw->hw_addr);
	pci_disable_pcie_error_reporting(pf->pdev);
	pci_release_mem_regions(pf->pdev);
	pci_disable_device(pf->pdev);
	kfree(pf);

	return err;
}
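/* In recovery mode the driver deliberately skips the switch, HMC, DCB and
 * PTP setup done by the normal probe path; only the admin queue, a single
 * MAIN VSI and a misc interrupt are brought up, primarily so management
 * tools (e.g. an NVM update utility) can still reach the firmware through
 * this minimal netdev.
 */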
/**
 * i40e_set_subsystem_device_id - set subsystem device id
 * @hw: pointer to the hardware info
 *
 * Set PCI subsystem device id either from a pci_dev structure or
 * a specific FW register.
 **/
static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
{
	struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;

	hw->subsystem_device_id = pdev->subsystem_device ?
		pdev->subsystem_device :
		(ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
}
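/* Some platforms leave the PCI subsystem device ID register zeroed, which
 * is why the helper above uses pdev->subsystem_device only when it is
 * non-zero and otherwise falls back to the value firmware exposes in the
 * I40E_PFPCI_SUBSYSID register.
 */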
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
#ifdef CONFIG_I40E_DCB
	enum i40e_get_fw_lldp_status_resp lldp_status;
	i40e_status status;
#endif /* CONFIG_I40E_DCB */
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: 0x%x\n", err);
		goto err_dma;
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup. This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);
	/* We believe that the highest register to read is
	 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
	 * is not less than that before mapping to prevent a
	 * kernel panic.
	 */
	if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
		dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
			pf->ioremap_len);
		err = -ENOMEM;
		goto err_ioremap;
	}
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	i40e_set_subsystem_device_id(hw);
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
	INIT_LIST_HEAD(&pf->ddp_old_prof);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}
	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);

	err = i40e_set_mac_type(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	err = i40e_handle_resets(pf);
	if (err)
		goto err_pf_reset;

	i40e_check_recovery_mode(pf);

	if (is_kdump_kernel()) {
		hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
		hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
	} else {
		hw->aq.num_arq_entries = I40E_AQ_LEN;
		hw->aq.num_asq_entries = I40E_AQ_LEN;
	}
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
				 hw->aq.api_maj_ver,
				 hw->aq.api_min_ver,
				 I40E_FW_API_VERSION_MAJOR,
				 I40E_FW_MINOR_VERSION(hw));
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
		 hw->subsystem_vendor_id, hw->subsystem_device_id);

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_dbg(&pdev->dev,
			"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
			hw->aq.api_maj_ver,
			hw->aq.api_min_ver,
			I40E_FW_API_VERSION_MAJOR,
			I40E_FW_MINOR_VERSION(hw));
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
			 I40E_FW_MINOR_VERSION(hw));

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);

	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		return i40e_init_recovery_mode(pf, hw);
	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, false, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	i40e_ptp_alloc_pins(pf);
	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

#ifdef CONFIG_I40E_DCB
	status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
	(!status &&
	 lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
		(pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) :
		(pf->flags |= I40E_FLAG_DISABLE_FW_LLDP);
	dev_info(&pdev->dev,
		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
			"FW LLDP is disabled\n" :
			"FW LLDP is enabled\n");

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* Reduce Tx and Rx pairs for kdump
	 * When MSI-X is enabled, it's not allowed to use more TC queue
	 * pairs than MSI-X vectors (pf->num_lan_msix) exist. Thus
	 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
	 */
	if (is_kdump_kernel())
		pf->num_lan_msix = 1;

	pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
	pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
	pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
	pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
	pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
						    UDP_TUNNEL_TYPE_GENEVE;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
		dev_warn(&pf->pdev->dev,
			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
	}

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}
#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware. Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* set the FEC config due to the board capabilities */
	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
	if (val < MAX_FRAME_SIZE_DEFAULT)
		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
			 pf->hw.port, val);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;
	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
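/* The error labels above unwind strictly in reverse order of acquisition:
 * each goto target releases only what had been set up successfully before
 * the failure point, so an early failure (e.g. before the HMC exists)
 * never attempts to tear down later-stage resources.
 */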
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
	 * flags, once they are set, i40e_rebuild should not be called as
	 * i40e_prep_for_reset always returns early.
	 */
	while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		usleep_range(1000, 2000);
	set_bit(__I40E_IN_REMOVE, pf->state);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}
	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		struct i40e_vsi *vsi = pf->vsi[0];

		/* We know that we have allocated only one vsi for this PF,
		 * it was just for registering netdevice, so the interface
		 * could be visible in the 'ifconfig' output
		 */
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);

		goto unmap;
	}

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

unmap:
	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
				i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						pci_channel_state_t error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}
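/* The I40E_GLGEN_RTRIG read above doubles as a sanity check: after a
 * successful slot reset no global reset requests should be pending, so
 * any non-zero value (including the all-ones pattern returned by a dead
 * device) makes the handler report PCI_ERS_RESULT_DISCONNECT.
 */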
/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf);
}
/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (test_bit(__I40E_IN_REMOVE, pf->state))
		return;

	i40e_reset_and_rebuild(pf, false, false);
}
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}
/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}
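/* Note the ordering above: __I40E_SUSPENDED is cleared only after the
 * rebuild has completed, so the service task restarted immediately
 * afterwards never observes a half-restored device.
 */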
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.driver = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	ida_destroy(&i40e_client_ida);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);