// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
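/* Illustrative sketch (not part of the driver): how a static key such as
 * ice_xdp_locking_key is typically consumed on a hot path. The
 * static_branch_unlikely() test compiles to a patched-out branch until the
 * key is enabled (e.g. when there are more XDP queues than CPUs and Tx rings
 * must be shared). The helper name is hypothetical, and the ring's tx_lock
 * spinlock is assumed here.
 */
static inline void example_xdp_ring_lock(struct ice_tx_ring *xdp_ring)
{
	/* a no-op branch while the key is disabled; real lock only if shared */
	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_lock(&xdp_ring->tx_lock);
}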
/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
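/* Illustrative sketch (not part of the driver): the wrap-around arithmetic
 * used by ice_get_tx_pending() above. On a ring of 'count' descriptors with
 * count = 8, head = 6 and tail = 2, the ring has wrapped, so pending =
 * tail + count - head = 4. The helper name is hypothetical.
 */
static inline u16 example_ring_pending(u16 head, u16 tail, u16 count)
{
	if (head == tail)
		return 0;		/* nothing outstanding */
	if (head < tail)
		return tail - head;	/* no wrap */
	return tail + count - head;	/* tail wrapped past the end */
}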
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}
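/* Illustrative sketch (not part of the driver): the XOR flag-diff idiom used
 * by ice_vsi_sync_fltr() above. old ^ new keeps only the bits that toggled,
 * so the promiscuous/allmulti handling reacts once per transition rather
 * than on every service-task pass. The helper name is hypothetical;
 * IFF_PROMISC and IFF_ALLMULTI are the real netdev flags it would be used
 * with.
 */
static inline bool example_flag_toggled(unsigned int old_flags,
					unsigned int new_flags,
					unsigned int flag)
{
	return !!((old_flags ^ new_flags) & flag);
}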
/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents that from happening and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->rss_size = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *an;
	const char *fec;
	const char *fc;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message, and the function should continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf = tlv->tlvinfo;
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
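/* Illustrative sketch (not part of the driver): the LLDP TLV header packing
 * used by ice_set_dflt_mib() above. An LLDP TLV header is 16 bits, a 7-bit
 * type in the upper bits and a 9-bit length in the lower bits, which is why
 * the code shifts the type by ICE_LLDP_TLV_TYPE_S and masks the length with
 * ICE_LLDP_TLV_LEN_M. The helper names and the literal 9/0x1FF values below
 * restate that layout as assumptions; the driver uses its own macros.
 */
static inline u16 example_lldp_typelen(u16 type, u16 len)
{
	return (type << 9) | (len & 0x1FF);	/* 7-bit type, 9-bit length */
}

static inline u16 example_lldp_len(u16 typelen)
{
	return typelen & 0x1FF;			/* recover the 9-bit length */
}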
/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other bookkeeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
 *
 * Calls are separated to allow caller registering for event before sending
 * the command, which mitigates a race between registering and FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}
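/* Illustrative sketch (not part of the driver): the intended
 * prep -> send -> wait sequence for the two helpers above. Registering the
 * task before issuing the command closes the race where firmware responds
 * before the waiter is on pf->aq_wait_list. The opcode and the
 * caller-supplied send_cmd callback are hypothetical placeholders for a
 * real AdminQ command.
 */
static inline int example_send_and_wait(struct ice_pf *pf, u16 example_opcode,
					int (*send_cmd)(struct ice_pf *pf))
{
	struct ice_aq_task task = {};
	int ret, err;

	ice_aq_prep_for_event(pf, &task, example_opcode);

	err = send_cmd(pf);	/* placeholder for the AdminQ command send */

	/* wait even if the send failed so the task is unhooked from
	 * pf->aq_wait_list, then prefer the send error over a timeout
	 */
	ret = ice_aq_wait_for_event(pf, &task, HZ);
	return err ? err : ret;
}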
/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			data.num_msg_proc = i;
			data.num_pending_arq = pending;
			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

			ice_vc_process_vf_msg(pf, &event, &data);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
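/* Illustrative sketch (not part of the driver): in ice_ctrlq_pending() above,
 * ntu is the hardware's producer position read back from the head register,
 * while next_to_clean is the software consumer position; the queue holds
 * unprocessed messages whenever the two differ. The helper name is
 * hypothetical.
 */
static inline bool example_queue_has_work(u16 next_to_clean, u16 hw_head)
{
	return next_to_clean != hw_head;	/* consumer lags producer */
}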
/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 vf->vf_id);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
			}
		}
	}
	mutex_unlock(&pf->vfs.table_lock);

	ice_print_vfs_mdd_events(pf);
}
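/* Illustrative sketch (not part of the driver): the mask-then-shift decode
 * used for the GL_MDET_*/VP_MDET_* registers above, where several event
 * fields are packed into one 32-bit register and each is recovered as
 * (reg & FIELD_M) >> FIELD_S. Newer code could express the same thing with
 * FIELD_GET() from <linux/bitfield.h>. The helper name is hypothetical; the
 * real masks and shifts live in the hardware register headers.
 */
static inline u32 example_get_field(u32 reg, u32 mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}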
1881 * ice_force_phys_link_state - Force the physical link state
1882 * @vsi: VSI to force the physical link state to up/down
1883 * @link_up: true/false indicates to set the physical link to up/down
1885 * Force the physical link state by getting the current PHY capabilities from
1886 * hardware and setting the PHY config based on the determined capabilities. If
1887 * link changes a link event will be triggered because both the Enable Automatic
1888 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1890 * Returns 0 on success, negative on failure
1892 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1894 struct ice_aqc_get_phy_caps_data *pcaps;
1895 struct ice_aqc_set_phy_cfg_data *cfg;
1896 struct ice_port_info *pi;
1900 if (!vsi || !vsi->port_info || !vsi->back)
1902 if (vsi->type != ICE_VSI_PF)
1905 dev = ice_pf_to_dev(vsi->back);
1907 pi = vsi->port_info;
1909 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1913 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1916 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1917 vsi->vsi_num, retcode);
1922 /* No change in link */
1923 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1924 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1927 /* Use the current user PHY configuration. The current user PHY
1928 * configuration is initialized during probe from PHY capabilities
1929 * software mode, and updated on set PHY configuration.
1931 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1937 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1939 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1941 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1943 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1945 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1946 vsi->vsi_num, retcode);
1957 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1958 * @pi: port info structure
1960 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1962 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1964 struct ice_aqc_get_phy_caps_data *pcaps;
1965 struct ice_pf *pf = pi->hw->back;
1968 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1972 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1976 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1980 pf->nvm_phy_type_hi = pcaps->phy_type_high;
1981 pf->nvm_phy_type_lo = pcaps->phy_type_low;
1989 * ice_init_link_dflt_override - Initialize link default override
1990 * @pi: port info structure
1992 * Initialize link default override and PHY total port shutdown during probe
1994 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1996 struct ice_link_default_override_tlv *ldo;
1997 struct ice_pf *pf = pi->hw->back;
1999 ldo = &pf->link_dflt_override;
2000 if (ice_get_link_default_override(ldo, pi))
2003 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2006 /* Enable Total Port Shutdown (override/replace link-down-on-close
2007 * ethtool private flag) for ports with Port Disable bit set.
2009 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2010 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2014 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2015 * @pi: port info structure
2017 * If default override is enabled, initialize the user PHY cfg speed and FEC
2018 * settings using the default override mask from the NVM.
2020 * The PHY should only be configured with the default override settings the
2021 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2022 * is used to indicate that the user PHY cfg default override is initialized
2023 * and the PHY has not been configured with the default override settings. The
2024 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2027 * This function should be called only if the FW doesn't support default
2028 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2030 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2032 struct ice_link_default_override_tlv *ldo;
2033 struct ice_aqc_set_phy_cfg_data *cfg;
2034 struct ice_phy_info *phy = &pi->phy;
2035 struct ice_pf *pf = pi->hw->back;
2037 ldo = &pf->link_dflt_override;
2039 /* If link default override is enabled, use to mask NVM PHY capabilities
2040 * for speed and FEC default configuration.
2042 cfg = &phy->curr_user_phy_cfg;
2044 if (ldo->phy_type_low || ldo->phy_type_high) {
2045 cfg->phy_type_low = pf->nvm_phy_type_lo &
2046 cpu_to_le64(ldo->phy_type_low);
2047 cfg->phy_type_high = pf->nvm_phy_type_hi &
2048 cpu_to_le64(ldo->phy_type_high);
2050 cfg->link_fec_opt = ldo->fec_options;
2051 phy->curr_user_fec_req = ICE_FEC_AUTO;
2053 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2057 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2058 * @pi: port info structure
2060 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2061 * mode to default. The PHY defaults are from get PHY capabilities topology
2062 * with media so call when media is first available. An error is returned if
2063 * called when media is not available. The PHY initialization completed state is
2066 * These configurations are used when setting PHY
2067 * configuration. The user PHY configuration is updated on set PHY
2068 * configuration. Returns 0 on success, negative on failure
2070 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2072 struct ice_aqc_get_phy_caps_data *pcaps;
2073 struct ice_phy_info *phy = &pi->phy;
2074 struct ice_pf *pf = pi->hw->back;
2077 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2080 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2084 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2085 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2088 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2091 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2095 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2097 /* check if lenient mode is supported and enabled */
2098 if (ice_fw_supports_link_override(pi->hw) &&
2099 !(pcaps->module_compliance_enforcement &
2100 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2101 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2103 /* if the FW supports default PHY configuration mode, then the driver
2104 * does not have to apply link override settings. If not,
2105 * initialize user PHY configuration with link override values
2107 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2108 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2109 ice_init_phy_cfg_dflt_override(pi);
2114 /* if link default override is not enabled, set user flow control and
2115 * FEC settings based on what get_phy_caps returned
2117 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2118 pcaps->link_fec_options);
2119 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2122 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2123 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
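/* Note (illustrative, an added clarification): curr_user_speed_req is
 * initialized to ICE_AQ_LINK_SPEED_M, i.e. all speed bits set, so until the
 * user restricts speeds (e.g. via ethtool) ice_configure_phy() masks this
 * full speed set against what the topology actually reports.
 */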
2130 * ice_configure_phy - configure PHY
2133 * Set the PHY configuration. If the current PHY configuration is the same as
2134 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2135 * configure the PHY based on the get PHY capabilities for topology with media.
2137 static int ice_configure_phy(struct ice_vsi *vsi)
2139 struct device *dev = ice_pf_to_dev(vsi->back);
2140 struct ice_port_info *pi = vsi->port_info;
2141 struct ice_aqc_get_phy_caps_data *pcaps;
2142 struct ice_aqc_set_phy_cfg_data *cfg;
2143 struct ice_phy_info *phy = &pi->phy;
2144 struct ice_pf *pf = vsi->back;
2147 /* Ensure we have media as we cannot configure a medialess port */
2148 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2151 ice_print_topo_conflict(vsi);
2153 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2154 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2157 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2158 return ice_force_phys_link_state(vsi, true);
2160 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2164 /* Get current PHY config */
2165 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2168 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2173 /* If PHY enable link is configured and configuration has not changed,
2174 * there's nothing to do
2176 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2177 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2180 /* Use PHY topology as baseline for configuration */
2181 memset(pcaps, 0, sizeof(*pcaps));
2182 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2183 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2186 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2189 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2194 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2200 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2202 /* Speed - If default override pending, use curr_user_phy_cfg set in
2203 * ice_init_phy_cfg_dflt_override.
2205 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2206 vsi->back->state)) {
2207 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2208 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2210 u64 phy_low = 0, phy_high = 0;
2212 ice_update_phy_type(&phy_low, &phy_high,
2213 pi->phy.curr_user_speed_req);
2214 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2215 cfg->phy_type_high = pcaps->phy_type_high &
2216 cpu_to_le64(phy_high);
2219 /* Can't provide what was requested; use PHY capabilities */
2220 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2221 cfg->phy_type_low = pcaps->phy_type_low;
2222 cfg->phy_type_high = pcaps->phy_type_high;
2226 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2228 /* Can't provide what was requested; use PHY capabilities */
2229 if (cfg->link_fec_opt !=
2230 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2231 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2232 cfg->link_fec_opt = pcaps->link_fec_options;
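/* Worked example (illustrative; the bit values are placeholders, not the
 * real ICE_AQC_PHY_FEC_* encodings): if the user requested RS-FEC only,
 * say link_fec_opt = 0x10, but the module only advertises
 * link_fec_options = 0x01 (BASE-R), then 0x10 != (0x10 & 0x01), the request
 * cannot be honored, and the driver falls back to the module's advertised
 * FEC options with auto-FEC enabled where the PHY supports it.
 */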
2235 /* Flow Control - always supported; no need to check against capabilities */
2238 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2240 /* Enable link and link update */
2241 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2243 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2245 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2255 * ice_check_media_subtask - Check for media
2256 * @pf: pointer to PF struct
2258 * If media is available, then initialize the PHY user configuration if it has
2259 * not been done already, and configure the PHY if the interface is up.
2261 static void ice_check_media_subtask(struct ice_pf *pf)
2263 struct ice_port_info *pi;
2264 struct ice_vsi *vsi;
2267 /* No need to check for media if it's already present */
2268 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2271 vsi = ice_get_main_vsi(pf);
2275 /* Refresh link info and check if media is present */
2276 pi = vsi->port_info;
2277 err = ice_update_link_info(pi);
2281 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2283 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2284 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2285 ice_init_phy_user_cfg(pi);
2287 /* PHY settings are reset on media insertion, reconfigure
2288 * PHY to preserve settings.
2290 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2291 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2294 err = ice_configure_phy(vsi);
2296 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2298 /* A Link Status Event will be generated; the event handler
2299 * will complete bringing the interface up
2305 * ice_service_task - manage and run subtasks
2306 * @work: pointer to work_struct contained by the PF struct
2308 static void ice_service_task(struct work_struct *work)
2310 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2311 unsigned long start_time = jiffies;
2315 /* process reset requests first */
2316 ice_reset_subtask(pf);
2318 /* bail if a reset/recovery cycle is pending or rebuild failed */
2319 if (ice_is_reset_in_progress(pf->state) ||
2320 test_bit(ICE_SUSPENDED, pf->state) ||
2321 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2322 ice_service_task_complete(pf);
2326 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2327 struct iidc_event *event;
2329 event = kzalloc(sizeof(*event), GFP_KERNEL);
2331 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2332 /* report the entire OICR value to AUX driver */
2333 swap(event->reg, pf->oicr_err_reg);
2334 ice_send_event_to_aux(pf, event);
2339 /* unplug aux dev per request; if an unplug request came in
2340 * while processing a plug request, this will handle it
2342 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2343 ice_unplug_aux_dev(pf);
2345 /* Plug aux device per request */
2346 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2347 ice_plug_aux_dev(pf);
2349 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2350 struct iidc_event *event;
2352 event = kzalloc(sizeof(*event), GFP_KERNEL);
2354 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2355 ice_send_event_to_aux(pf, event);
2360 ice_clean_adminq_subtask(pf);
2361 ice_check_media_subtask(pf);
2362 ice_check_for_hang_subtask(pf);
2363 ice_sync_fltr_subtask(pf);
2364 ice_handle_mdd_event(pf);
2365 ice_watchdog_subtask(pf);
2367 if (ice_is_safe_mode(pf)) {
2368 ice_service_task_complete(pf);
2372 ice_process_vflr_event(pf);
2373 ice_clean_mailboxq_subtask(pf);
2374 ice_clean_sbq_subtask(pf);
2375 ice_sync_arfs_fltrs(pf);
2376 ice_flush_fdir_ctx(pf);
2378 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2379 ice_service_task_complete(pf);
2381 /* If the tasks have taken longer than one service timer period
2382 * or there is more work to be done, reset the service timer to
2383 * schedule the service task now.
2385 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2386 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2387 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2388 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2389 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2390 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2391 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2392 mod_timer(&pf->serv_tmr, jiffies);
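/* Example (illustrative): if a subtask left one of the *_PENDING bits set
 * again before ice_service_task_complete(), mod_timer(..., jiffies) expires
 * the timer immediately so the service task is rescheduled right away
 * instead of waiting a full pf->serv_tmr_period (HZ, roughly one second, as
 * set in ice_init_pf()).
 */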
2396 * ice_set_ctrlq_len - helper function to set controlq length
2397 * @hw: pointer to the HW instance
2399 static void ice_set_ctrlq_len(struct ice_hw *hw)
2401 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2402 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2403 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2404 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2405 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2406 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2407 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2408 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2409 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2410 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2411 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2412 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2416 * ice_schedule_reset - schedule a reset
2417 * @pf: board private structure
2418 * @reset: reset being requested
2420 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2422 struct device *dev = ice_pf_to_dev(pf);
2424 /* bail out if earlier reset has failed */
2425 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2426 dev_dbg(dev, "earlier reset has failed\n");
2429 /* bail if reset/recovery already in progress */
2430 if (ice_is_reset_in_progress(pf->state)) {
2431 dev_dbg(dev, "Reset already in progress\n");
2437 set_bit(ICE_PFR_REQ, pf->state);
2439 case ICE_RESET_CORER:
2440 set_bit(ICE_CORER_REQ, pf->state);
2442 case ICE_RESET_GLOBR:
2443 set_bit(ICE_GLOBR_REQ, pf->state);
2449 ice_service_task_schedule(pf);
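/* Usage sketch (illustrative): callers request a reset and let the service
 * task carry it out asynchronously, e.g.
 *
 *	if (ice_schedule_reset(pf, ICE_RESET_PFR))
 *		dev_dbg(ice_pf_to_dev(pf), "PFR request not scheduled\n");
 */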
2454 * ice_irq_affinity_notify - Callback for affinity changes
2455 * @notify: context as to what irq was changed
2456 * @mask: the new affinity mask
2458 * This is a callback function used by the irq_set_affinity_notifier function
2459 * so that we may register to receive changes to the irq affinity masks.
2462 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2463 const cpumask_t *mask)
2465 struct ice_q_vector *q_vector =
2466 container_of(notify, struct ice_q_vector, affinity_notify);
2468 cpumask_copy(&q_vector->affinity_mask, mask);
2472 * ice_irq_affinity_release - Callback for affinity notifier release
2473 * @ref: internal core kernel usage
2475 * This is a callback function used by the irq_set_affinity_notifier function
2476 * to inform the current notification subscriber that they will no longer
2477 * receive notifications.
2479 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2482 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2483 * @vsi: the VSI being configured
2485 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2487 struct ice_hw *hw = &vsi->back->hw;
2490 ice_for_each_q_vector(vsi, i)
2491 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2498 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2499 * @vsi: the VSI being configured
2500 * @basename: name for the vector
2502 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2504 int q_vectors = vsi->num_q_vectors;
2505 struct ice_pf *pf = vsi->back;
2512 dev = ice_pf_to_dev(pf);
2513 for (vector = 0; vector < q_vectors; vector++) {
2514 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2516 irq_num = q_vector->irq.virq;
2518 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2519 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2520 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2522 } else if (q_vector->rx.rx_ring) {
2523 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2524 "%s-%s-%d", basename, "rx", rx_int_idx++);
2525 } else if (q_vector->tx.tx_ring) {
2526 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2527 "%s-%s-%d", basename, "tx", tx_int_idx++);
2529 /* skip this unused q_vector */
2532 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2533 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2534 IRQF_SHARED, q_vector->name,
2537 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2538 0, q_vector->name, q_vector);
2540 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2545 /* register for affinity change notifications */
2546 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2547 struct irq_affinity_notify *affinity_notify;
2549 affinity_notify = &q_vector->affinity_notify;
2550 affinity_notify->notify = ice_irq_affinity_notify;
2551 affinity_notify->release = ice_irq_affinity_release;
2552 irq_set_affinity_notifier(irq_num, affinity_notify);
2555 /* assign the mask for this irq */
2556 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2559 err = ice_set_cpu_rx_rmap(vsi);
2561 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2562 vsi->vsi_num, ERR_PTR(err));
2566 vsi->irqs_ready = true;
2571 irq_num = vsi->q_vectors[vector]->irq.virq;
2572 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2573 irq_set_affinity_notifier(irq_num, NULL);
2574 irq_set_affinity_hint(irq_num, NULL);
2575 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2581 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2582 * @vsi: VSI to setup Tx rings used by XDP
2584 * Return 0 on success and negative value on error
2586 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2588 struct device *dev = ice_pf_to_dev(vsi->back);
2589 struct ice_tx_desc *tx_desc;
2592 ice_for_each_xdp_txq(vsi, i) {
2593 u16 xdp_q_idx = vsi->alloc_txq + i;
2594 struct ice_ring_stats *ring_stats;
2595 struct ice_tx_ring *xdp_ring;
2597 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2599 goto free_xdp_rings;
2601 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2603 ice_free_tx_ring(xdp_ring);
2604 goto free_xdp_rings;
2607 xdp_ring->ring_stats = ring_stats;
2608 xdp_ring->q_index = xdp_q_idx;
2609 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2610 xdp_ring->vsi = vsi;
2611 xdp_ring->netdev = NULL;
2612 xdp_ring->dev = dev;
2613 xdp_ring->count = vsi->num_tx_desc;
2614 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2615 if (ice_setup_tx_ring(xdp_ring))
2616 goto free_xdp_rings;
2617 ice_set_ring_xdp(xdp_ring);
2618 spin_lock_init(&xdp_ring->tx_lock);
2619 for (j = 0; j < xdp_ring->count; j++) {
2620 tx_desc = ICE_TX_DESC(xdp_ring, j);
2621 tx_desc->cmd_type_offset_bsz = 0;
2628 for (; i >= 0; i--) {
2629 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2630 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2631 vsi->xdp_rings[i]->ring_stats = NULL;
2632 ice_free_tx_ring(vsi->xdp_rings[i]);
2639 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2640 * @vsi: VSI to set the bpf prog on
2641 * @prog: the bpf prog pointer
2643 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2645 struct bpf_prog *old_prog;
2648 old_prog = xchg(&vsi->xdp_prog, prog);
2649 ice_for_each_rxq(vsi, i)
2650 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2653 bpf_prog_put(old_prog);
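/* Design note (illustrative): xchg() atomically publishes the new program
 * and returns the previous one; each Rx ring is then updated through its
 * own xdp_prog pointer via WRITE_ONCE(), and the previous program, if any,
 * is released with bpf_prog_put(). Hot-path readers are assumed to pair
 * this with READ_ONCE().
 */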
2657 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2658 * @vsi: VSI to bring up Tx rings used by XDP
2659 * @prog: bpf program that will be assigned to VSI
2661 * Return 0 on success and negative value on error
2663 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2665 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2666 int xdp_rings_rem = vsi->num_xdp_txq;
2667 struct ice_pf *pf = vsi->back;
2668 struct ice_qs_cfg xdp_qs_cfg = {
2669 .qs_mutex = &pf->avail_q_mutex,
2670 .pf_map = pf->avail_txqs,
2671 .pf_map_size = pf->max_pf_txqs,
2672 .q_count = vsi->num_xdp_txq,
2673 .scatter_count = ICE_MAX_SCATTER_TXQS,
2674 .vsi_map = vsi->txq_map,
2675 .vsi_map_offset = vsi->alloc_txq,
2676 .mapping_mode = ICE_VSI_MAP_CONTIG
2682 dev = ice_pf_to_dev(pf);
2683 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2684 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2685 if (!vsi->xdp_rings)
2688 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2689 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2692 if (static_key_enabled(&ice_xdp_locking_key))
2693 netdev_warn(vsi->netdev,
2694 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2696 if (ice_xdp_alloc_setup_rings(vsi))
2697 goto clear_xdp_rings;
2699 /* follow the logic from ice_vsi_map_rings_to_vectors */
2700 ice_for_each_q_vector(vsi, v_idx) {
2701 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2702 int xdp_rings_per_v, q_id, q_base;
2704 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2705 vsi->num_q_vectors - v_idx);
2706 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2708 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2709 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2711 xdp_ring->q_vector = q_vector;
2712 xdp_ring->next = q_vector->tx.tx_ring;
2713 q_vector->tx.tx_ring = xdp_ring;
2715 xdp_rings_rem -= xdp_rings_per_v;
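/* Worked example (illustrative): with num_xdp_txq = 8 and num_q_vectors = 3
 * the loop assigns DIV_ROUND_UP(8, 3) = 3 rings to vector 0,
 * DIV_ROUND_UP(5, 2) = 3 rings to vector 1 and DIV_ROUND_UP(2, 1) = 2 rings
 * to vector 2, i.e. a 3/3/2 split that never differs by more than one ring
 * per vector.
 */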
2718 ice_for_each_rxq(vsi, i) {
2719 if (static_key_enabled(&ice_xdp_locking_key)) {
2720 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2722 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2723 struct ice_tx_ring *ring;
2725 ice_for_each_tx_ring(ring, q_vector->tx) {
2726 if (ice_ring_is_xdp(ring)) {
2727 vsi->rx_rings[i]->xdp_ring = ring;
2732 ice_tx_xsk_pool(vsi, i);
2735 /* omit the scheduler update if in reset path; XDP queues will be
2736 * taken into account at the end of ice_vsi_rebuild, where
2737 * ice_cfg_vsi_lan is being called
2739 if (ice_is_reset_in_progress(pf->state))
2742 /* tell the Tx scheduler that right now we have additional queues */
2745 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2746 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2748 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2751 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2753 goto clear_xdp_rings;
2756 /* assign the prog only when it's not already present on VSI;
2757 * this flow is a subject of both ethtool -L and ndo_bpf flows;
2758 * VSI rebuild that happens under ethtool -L can expose us to
2759 * the bpf_prog refcount issues as we would be swapping same
2760 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2761 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2762 * this is not harmful as dev_xdp_install bumps the refcount
2763 * before calling the op exposed by the driver;
2765 if (!ice_is_xdp_ena_vsi(vsi))
2766 ice_vsi_assign_bpf_prog(vsi, prog);
2770 ice_for_each_xdp_txq(vsi, i)
2771 if (vsi->xdp_rings[i]) {
2772 kfree_rcu(vsi->xdp_rings[i], rcu);
2773 vsi->xdp_rings[i] = NULL;
2777 mutex_lock(&pf->avail_q_mutex);
2778 ice_for_each_xdp_txq(vsi, i) {
2779 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2780 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2782 mutex_unlock(&pf->avail_q_mutex);
2784 devm_kfree(dev, vsi->xdp_rings);
2789 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2790 * @vsi: VSI to remove XDP rings
2792 * Detach XDP rings from irq vectors, clean up the PF bitmap and free resources
2795 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2797 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2798 struct ice_pf *pf = vsi->back;
2801 /* q_vectors are freed in the reset path so there's no point in detaching
2802 * rings; when a rebuild is triggered outside of reset, the reset bits in
2803 * pf->state won't be set, so additionally check the first q_vector against NULL
2806 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2809 ice_for_each_q_vector(vsi, v_idx) {
2810 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2811 struct ice_tx_ring *ring;
2813 ice_for_each_tx_ring(ring, q_vector->tx)
2814 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2817 /* restore the value of last node prior to XDP setup */
2818 q_vector->tx.tx_ring = ring;
2822 mutex_lock(&pf->avail_q_mutex);
2823 ice_for_each_xdp_txq(vsi, i) {
2824 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2825 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2827 mutex_unlock(&pf->avail_q_mutex);
2829 ice_for_each_xdp_txq(vsi, i)
2830 if (vsi->xdp_rings[i]) {
2831 if (vsi->xdp_rings[i]->desc) {
2833 ice_free_tx_ring(vsi->xdp_rings[i]);
2835 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2836 vsi->xdp_rings[i]->ring_stats = NULL;
2837 kfree_rcu(vsi->xdp_rings[i], rcu);
2838 vsi->xdp_rings[i] = NULL;
2841 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2842 vsi->xdp_rings = NULL;
2844 if (static_key_enabled(&ice_xdp_locking_key))
2845 static_branch_dec(&ice_xdp_locking_key);
2847 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2850 ice_vsi_assign_bpf_prog(vsi, NULL);
2852 /* notify Tx scheduler that we destroyed XDP queues and bring
2853 * back the old number of child nodes
2855 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2856 max_txqs[i] = vsi->num_txq;
2858 /* change number of XDP Tx queues to 0 */
2859 vsi->num_xdp_txq = 0;
2861 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2866 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2867 * @vsi: VSI to schedule napi on
2869 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2873 ice_for_each_rxq(vsi, i) {
2874 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2876 if (rx_ring->xsk_pool)
2877 napi_schedule(&rx_ring->q_vector->napi);
2882 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2883 * @vsi: VSI to determine the count of XDP Tx qs
2885 * returns 0 if Tx qs count is higher than at least half of CPU count, -ENOMEM otherwise
2888 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2890 u16 avail = ice_get_avail_txq_count(vsi->back);
2891 u16 cpus = num_possible_cpus();
2893 if (avail < cpus / 2)
2896 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2898 if (vsi->num_xdp_txq < cpus)
2899 static_branch_inc(&ice_xdp_locking_key);
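/* Worked example (illustrative): on a 16-CPU system with 24 available Tx
 * queues, num_xdp_txq = min(24, 16) = 16 and every CPU gets a dedicated XDP
 * ring. With only 10 available queues, 10 is not below half of 16, so XDP
 * is still allowed, but num_xdp_txq = 10 < 16 enables ice_xdp_locking_key
 * and rings are shared under the per-ring tx_lock.
 */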
2905 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2906 * @vsi: Pointer to VSI structure
2908 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2910 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2911 return ICE_RXBUF_1664;
2913 return ICE_RXBUF_3072;
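/* Example (illustrative): ice_xdp_setup_prog() computes
 * frame_size = netdev->mtu + ICE_ETH_PKT_HDR_PAD and rejects programs
 * without frags support when the padded frame exceeds this limit, so the
 * effective maximum MTU for single-buffer XDP is ICE_RXBUF_3072 (or
 * ICE_RXBUF_1664 in legacy-rx mode) minus the header padding.
 */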
2917 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2918 * @vsi: VSI to setup XDP for
2919 * @prog: XDP program
2920 * @extack: netlink extended ack
2923 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2924 struct netlink_ext_ack *extack)
2926 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2927 bool if_running = netif_running(vsi->netdev);
2928 int ret = 0, xdp_ring_err = 0;
2930 if (prog && !prog->aux->xdp_has_frags) {
2931 if (frame_size > ice_max_xdp_frame_size(vsi)) {
2932 NL_SET_ERR_MSG_MOD(extack,
2933 "MTU is too large for linear frames and XDP prog does not support frags");
2938 /* hot swap progs and avoid toggling link */
2939 if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
2940 ice_vsi_assign_bpf_prog(vsi, prog);
2944 /* need to stop netdev while setting up the program for Rx rings */
2945 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2946 ret = ice_down(vsi);
2948 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2953 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2954 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2956 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2958 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2960 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2962 xdp_features_set_redirect_target(vsi->netdev, true);
2963 /* reallocate Rx queues that are used for zero-copy */
2964 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2966 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2967 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2968 xdp_features_clear_redirect_target(vsi->netdev);
2969 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2971 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2972 /* reallocate Rx queues that were used for zero-copy */
2973 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2975 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2982 ice_vsi_rx_napi_schedule(vsi);
2984 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2988 * ice_xdp_safe_mode - XDP handler for safe mode
2992 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2993 struct netdev_bpf *xdp)
2995 NL_SET_ERR_MSG_MOD(xdp->extack,
2996 "Please provide working DDP firmware package in order to use XDP\n"
2997 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3002 * ice_xdp - implements XDP handler
3006 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3008 struct ice_netdev_priv *np = netdev_priv(dev);
3009 struct ice_vsi *vsi = np->vsi;
3011 if (vsi->type != ICE_VSI_PF) {
3012 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3016 switch (xdp->command) {
3017 case XDP_SETUP_PROG:
3018 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3019 case XDP_SETUP_XSK_POOL:
3020 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3028 * ice_ena_misc_vector - enable the non-queue interrupts
3029 * @pf: board private structure
3031 static void ice_ena_misc_vector(struct ice_pf *pf)
3033 struct ice_hw *hw = &pf->hw;
3036 /* Disable anti-spoof detection interrupt to prevent spurious event
3037 * interrupts during a function reset. Anti-spoof functionality is still supported.
3040 val = rd32(hw, GL_MDCK_TX_TDPU);
3041 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3042 wr32(hw, GL_MDCK_TX_TDPU, val);
3044 /* clear things first */
3045 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
3046 rd32(hw, PFINT_OICR); /* read to clear */
3048 val = (PFINT_OICR_ECC_ERR_M |
3049 PFINT_OICR_MAL_DETECT_M |
3051 PFINT_OICR_PCI_EXCEPTION_M |
3053 PFINT_OICR_HMC_ERR_M |
3054 PFINT_OICR_PE_PUSH_M |
3055 PFINT_OICR_PE_CRITERR_M);
3057 wr32(hw, PFINT_OICR_ENA, val);
3059 /* SW_ITR_IDX = 0, but don't change INTENA */
3060 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3061 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3065 * ice_misc_intr - misc interrupt handler
3066 * @irq: interrupt number
3067 * @data: pointer to the PF structure
3069 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3071 struct ice_pf *pf = (struct ice_pf *)data;
3072 struct ice_hw *hw = &pf->hw;
3076 dev = ice_pf_to_dev(pf);
3077 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3078 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3079 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3081 oicr = rd32(hw, PFINT_OICR);
3082 ena_mask = rd32(hw, PFINT_OICR_ENA);
3084 if (oicr & PFINT_OICR_SWINT_M) {
3085 ena_mask &= ~PFINT_OICR_SWINT_M;
3089 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3090 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3091 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3093 if (oicr & PFINT_OICR_VFLR_M) {
3094 /* disable any further VFLR event notifications */
3095 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3096 u32 reg = rd32(hw, PFINT_OICR_ENA);
3098 reg &= ~PFINT_OICR_VFLR_M;
3099 wr32(hw, PFINT_OICR_ENA, reg);
3101 ena_mask &= ~PFINT_OICR_VFLR_M;
3102 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3106 if (oicr & PFINT_OICR_GRST_M) {
3109 /* we have a reset warning */
3110 ena_mask &= ~PFINT_OICR_GRST_M;
3111 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3112 GLGEN_RSTAT_RESET_TYPE_S;
3114 if (reset == ICE_RESET_CORER)
3116 else if (reset == ICE_RESET_GLOBR)
3118 else if (reset == ICE_RESET_EMPR)
3121 dev_dbg(dev, "Invalid reset type %d\n", reset);
3123 /* If a reset cycle isn't already in progress, we set a bit in
3124 * pf->state so that the service task can start a reset/rebuild.
3126 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3127 if (reset == ICE_RESET_CORER)
3128 set_bit(ICE_CORER_RECV, pf->state);
3129 else if (reset == ICE_RESET_GLOBR)
3130 set_bit(ICE_GLOBR_RECV, pf->state);
3132 set_bit(ICE_EMPR_RECV, pf->state);
3134 /* There are a couple of different bits at play here.
3135 * hw->reset_ongoing indicates whether the hardware is
3136 * in reset. This is set to true when a reset interrupt
3137 * is received and set back to false after the driver
3138 * has determined that the hardware is out of reset.
3140 * ICE_RESET_OICR_RECV in pf->state indicates
3141 * that a post reset rebuild is required before the
3142 * driver is operational again. This is set above.
3144 * As this is the start of the reset/rebuild cycle, set
3145 * both to indicate that.
3147 hw->reset_ongoing = true;
3151 if (oicr & PFINT_OICR_TSYN_TX_M) {
3152 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3153 if (!hw->reset_ongoing)
3154 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3157 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3158 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3159 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3161 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3163 if (hw->func_caps.ts_func_info.src_tmr_owned) {
3164 /* Save EVENTs from GLTSYN register */
3165 pf->ptp.ext_ts_irq |= gltsyn_stat &
3166 (GLTSYN_STAT_EVENT0_M |
3167 GLTSYN_STAT_EVENT1_M |
3168 GLTSYN_STAT_EVENT2_M);
3170 set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
3174 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3175 if (oicr & ICE_AUX_CRIT_ERR) {
3176 pf->oicr_err_reg |= oicr;
3177 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3178 ena_mask &= ~ICE_AUX_CRIT_ERR;
3181 /* Report any remaining unexpected interrupts */
3184 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3185 /* If a critical error is pending there is no choice but to reset the device. */
3188 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3189 PFINT_OICR_ECC_ERR_M)) {
3190 set_bit(ICE_PFR_REQ, pf->state);
3194 return IRQ_WAKE_THREAD;
3198 * ice_misc_intr_thread_fn - misc interrupt thread function
3199 * @irq: interrupt number
3200 * @data: pointer to the PF structure
3202 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3204 struct ice_pf *pf = data;
3209 if (ice_is_reset_in_progress(pf->state))
3212 ice_service_task_schedule(pf);
3214 if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
3215 ice_ptp_extts_event(pf);
3217 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3218 /* Process outstanding Tx timestamps. If there is more work,
3219 * re-arm the interrupt to trigger again.
3221 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3222 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3227 ice_irq_dynamic_ena(hw, NULL, NULL);
3233 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3234 * @hw: pointer to HW structure
3236 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3238 /* disable Admin queue Interrupt causes */
3239 wr32(hw, PFINT_FW_CTL,
3240 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3242 /* disable Mailbox queue Interrupt causes */
3243 wr32(hw, PFINT_MBX_CTL,
3244 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3246 wr32(hw, PFINT_SB_CTL,
3247 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3249 /* disable Control queue Interrupt causes */
3250 wr32(hw, PFINT_OICR_CTL,
3251 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3257 * ice_free_irq_msix_misc - Unroll misc vector setup
3258 * @pf: board private structure
3260 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3262 int misc_irq_num = pf->oicr_irq.virq;
3263 struct ice_hw *hw = &pf->hw;
3265 ice_dis_ctrlq_interrupts(hw);
3267 /* disable OICR interrupt */
3268 wr32(hw, PFINT_OICR_ENA, 0);
3271 synchronize_irq(misc_irq_num);
3272 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3274 ice_free_irq(pf, pf->oicr_irq);
3278 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3279 * @hw: pointer to HW structure
3280 * @reg_idx: HW vector index to associate the control queue interrupts with
3282 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3286 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3287 PFINT_OICR_CTL_CAUSE_ENA_M);
3288 wr32(hw, PFINT_OICR_CTL, val);
3290 /* enable Admin queue Interrupt causes */
3291 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3292 PFINT_FW_CTL_CAUSE_ENA_M);
3293 wr32(hw, PFINT_FW_CTL, val);
3295 /* enable Mailbox queue Interrupt causes */
3296 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3297 PFINT_MBX_CTL_CAUSE_ENA_M);
3298 wr32(hw, PFINT_MBX_CTL, val);
3300 /* This enables Sideband queue Interrupt causes */
3301 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3302 PFINT_SB_CTL_CAUSE_ENA_M);
3303 wr32(hw, PFINT_SB_CTL, val);
3309 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3310 * @pf: board private structure
3312 * This sets up the handler for MSIX 0, which is used to manage the
3313 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3314 * when in MSI or Legacy interrupt mode.
3316 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3318 struct device *dev = ice_pf_to_dev(pf);
3319 struct ice_hw *hw = &pf->hw;
3320 struct msi_map oicr_irq;
3323 if (!pf->int_name[0])
3324 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3325 dev_driver_string(dev), dev_name(dev));
3327 /* Do not request IRQ but do enable OICR interrupt since settings are
3328 * lost during reset. Note that this function is called only during
3329 * rebuild path and not while reset is in progress.
3331 if (ice_is_reset_in_progress(pf->state))
3334 /* reserve one vector in irq_tracker for misc interrupts */
3335 oicr_irq = ice_alloc_irq(pf, false);
3336 if (oicr_irq.index < 0)
3337 return oicr_irq.index;
3339 pf->oicr_irq = oicr_irq;
3340 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3341 ice_misc_intr_thread_fn, 0,
3344 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3346 ice_free_irq(pf, pf->oicr_irq);
3351 ice_ena_misc_vector(pf);
3353 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3354 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3355 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3358 ice_irq_dynamic_ena(hw, NULL, NULL);
3364 * ice_napi_add - register NAPI handler for the VSI
3365 * @vsi: VSI for which NAPI handler is to be registered
3367 * This function is only called in the driver's load path. Registering the NAPI
3368 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3369 * reset/rebuild, etc.)
3371 static void ice_napi_add(struct ice_vsi *vsi)
3378 ice_for_each_q_vector(vsi, v_idx)
3379 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3384 * ice_set_ops - set netdev and ethtool ops for the given netdev
3385 * @vsi: the VSI associated with the new netdev
3387 static void ice_set_ops(struct ice_vsi *vsi)
3389 struct net_device *netdev = vsi->netdev;
3390 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3392 if (ice_is_safe_mode(pf)) {
3393 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3394 ice_set_ethtool_safe_mode_ops(netdev);
3398 netdev->netdev_ops = &ice_netdev_ops;
3399 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3400 ice_set_ethtool_ops(netdev);
3402 if (vsi->type != ICE_VSI_PF)
3405 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3406 NETDEV_XDP_ACT_XSK_ZEROCOPY |
3407 NETDEV_XDP_ACT_RX_SG;
3408 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3412 * ice_set_netdev_features - set features for the given netdev
3413 * @netdev: netdev instance
3415 static void ice_set_netdev_features(struct net_device *netdev)
3417 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3418 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3419 netdev_features_t csumo_features;
3420 netdev_features_t vlano_features;
3421 netdev_features_t dflt_features;
3422 netdev_features_t tso_features;
3424 if (ice_is_safe_mode(pf)) {
3426 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3427 netdev->hw_features = netdev->features;
3431 dflt_features = NETIF_F_SG |
3436 csumo_features = NETIF_F_RXCSUM |
3441 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3442 NETIF_F_HW_VLAN_CTAG_TX |
3443 NETIF_F_HW_VLAN_CTAG_RX;
3445 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3447 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3449 tso_features = NETIF_F_TSO |
3453 NETIF_F_GSO_UDP_TUNNEL |
3454 NETIF_F_GSO_GRE_CSUM |
3455 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3456 NETIF_F_GSO_PARTIAL |
3457 NETIF_F_GSO_IPXIP4 |
3458 NETIF_F_GSO_IPXIP6 |
3461 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3462 NETIF_F_GSO_GRE_CSUM;
3463 /* set features that user can change */
3464 netdev->hw_features = dflt_features | csumo_features |
3465 vlano_features | tso_features;
3467 /* add support for HW_CSUM on packets with MPLS header */
3468 netdev->mpls_features = NETIF_F_HW_CSUM |
3472 /* enable features */
3473 netdev->features |= netdev->hw_features;
3475 netdev->hw_features |= NETIF_F_HW_TC;
3476 netdev->hw_features |= NETIF_F_LOOPBACK;
3478 /* encap and VLAN devices inherit default, csumo and tso features */
3479 netdev->hw_enc_features |= dflt_features | csumo_features |
3481 netdev->vlan_features |= dflt_features | csumo_features |
3484 /* advertise support but don't enable by default since only one type of
3485 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3486 * type turns on the other has to be turned off. This is enforced by the
3487 * ice_fix_features() ndo callback.
3490 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3491 NETIF_F_HW_VLAN_STAG_TX;
3493 /* Leave CRC / FCS stripping enabled by default, but allow the value to
3494 * be changed at runtime
3496 netdev->hw_features |= NETIF_F_RXFCS;
3498 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3502 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3503 * @lut: Lookup table
3504 * @rss_table_size: Lookup table size
3505 * @rss_size: Range of queue number for hashing
3507 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3511 for (i = 0; i < rss_table_size; i++)
3512 lut[i] = i % rss_size;
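/* Worked example (illustrative): rss_table_size = 8 and rss_size = 3 yield
 * lut = { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading hash buckets round-robin
 * across the first three queues.
 */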
3516 * ice_pf_vsi_setup - Set up a PF VSI
3517 * @pf: board private structure
3518 * @pi: pointer to the port_info instance
3520 * Returns pointer to the successfully allocated VSI software struct
3521 * on success, otherwise returns NULL on failure.
3523 static struct ice_vsi *
3524 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3526 struct ice_vsi_cfg_params params = {};
3528 params.type = ICE_VSI_PF;
3530 params.flags = ICE_VSI_FLAG_INIT;
3532 return ice_vsi_setup(pf, ¶ms);
3535 static struct ice_vsi *
3536 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3537 struct ice_channel *ch)
3539 struct ice_vsi_cfg_params params = {};
3541 params.type = ICE_VSI_CHNL;
3544 params.flags = ICE_VSI_FLAG_INIT;
3546 return ice_vsi_setup(pf, ¶ms);
3550 * ice_ctrl_vsi_setup - Set up a control VSI
3551 * @pf: board private structure
3552 * @pi: pointer to the port_info instance
3554 * Returns pointer to the successfully allocated VSI software struct
3555 * on success, otherwise returns NULL on failure.
3557 static struct ice_vsi *
3558 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3560 struct ice_vsi_cfg_params params = {};
3562 params.type = ICE_VSI_CTRL;
3564 params.flags = ICE_VSI_FLAG_INIT;
3566 return ice_vsi_setup(pf, ¶ms);
3570 * ice_lb_vsi_setup - Set up a loopback VSI
3571 * @pf: board private structure
3572 * @pi: pointer to the port_info instance
3574 * Returns pointer to the successfully allocated VSI software struct
3575 * on success, otherwise returns NULL on failure.
3578 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3580 struct ice_vsi_cfg_params params = {};
3582 params.type = ICE_VSI_LB;
3584 params.flags = ICE_VSI_FLAG_INIT;
3586 return ice_vsi_setup(pf, ¶ms);
3590 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3591 * @netdev: network interface to be adjusted
3593 * @vid: VLAN ID to be added
3595 * net_device_ops implementation for adding VLAN IDs
3598 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3600 struct ice_netdev_priv *np = netdev_priv(netdev);
3601 struct ice_vsi_vlan_ops *vlan_ops;
3602 struct ice_vsi *vsi = np->vsi;
3603 struct ice_vlan vlan;
3606 /* VLAN 0 is added by default during load/reset */
3610 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3611 usleep_range(1000, 2000);
3613 /* Add multicast promisc rule for the VLAN ID to be added if
3614 * all-multicast is currently enabled.
3616 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3617 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3618 ICE_MCAST_VLAN_PROMISC_BITS,
3624 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3626 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3627 * packets aren't pruned by the device's internal switch on Rx
3629 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3630 ret = vlan_ops->add_vlan(vsi, &vlan);
3634 /* If all-multicast is currently enabled and this VLAN ID is the only one
3635 * besides VLAN-0 we have to update the look-up type of the multicast promisc
3636 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3638 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3639 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3640 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3641 ICE_MCAST_PROMISC_BITS, 0);
3642 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3643 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3647 clear_bit(ICE_CFG_BUSY, vsi->state);
3653 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3654 * @netdev: network interface to be adjusted
3656 * @vid: VLAN ID to be removed
3658 * net_device_ops implementation for removing VLAN IDs
3661 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3663 struct ice_netdev_priv *np = netdev_priv(netdev);
3664 struct ice_vsi_vlan_ops *vlan_ops;
3665 struct ice_vsi *vsi = np->vsi;
3666 struct ice_vlan vlan;
3669 /* don't allow removal of VLAN 0 */
3673 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3674 usleep_range(1000, 2000);
3676 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3677 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3679 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3681 vsi->current_netdev_flags |= IFF_ALLMULTI;
3684 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3686 /* Make sure VLAN delete is successful before updating VLAN information */
3689 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3690 ret = vlan_ops->del_vlan(vsi, &vlan);
3694 /* Remove multicast promisc rule for the removed VLAN ID if
3695 * all-multicast is enabled.
3697 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3698 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3699 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3701 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3702 /* Update look-up type of multicast promisc rule for VLAN 0
3703 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3704 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3706 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3707 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3708 ICE_MCAST_VLAN_PROMISC_BITS,
3710 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3711 ICE_MCAST_PROMISC_BITS, 0);
3716 clear_bit(ICE_CFG_BUSY, vsi->state);
3722 * ice_rep_indr_tc_block_unbind
3723 * @cb_priv: indirection block private data
3725 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3727 struct ice_indr_block_priv *indr_priv = cb_priv;
3729 list_del(&indr_priv->list);
3734 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3735 * @vsi: VSI struct which has the netdev
3737 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3739 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3741 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3742 ice_rep_indr_tc_block_unbind);
3746 * ice_tc_indir_block_register - Register TC indirect block notifications
3747 * @vsi: VSI struct which has the netdev
3749 * Returns 0 on success, negative value on failure
3751 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3753 struct ice_netdev_priv *np;
3755 if (!vsi || !vsi->netdev)
3758 np = netdev_priv(vsi->netdev);
3760 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3761 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3765 * ice_get_avail_q_count - Get count of available (unused) queues
3766 * @pf_qmap: bitmap to get queue use count from
3767 * @lock: pointer to a mutex that protects access to pf_qmap
3768 * @size: size of the bitmap
3771 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3777 for_each_clear_bit(bit, pf_qmap, size)
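/* Example (illustrative): with size = 8 and pf_qmap = 0b00001111 (queues
 * 0-3 in use), for_each_clear_bit() visits bits 4-7 and the function
 * returns 4 available queues.
 */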
3785 * ice_get_avail_txq_count - Get count of available Tx queues
3786 * @pf: pointer to an ice_pf instance
3788 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3790 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3795 * ice_get_avail_rxq_count - Get count of available Rx queues
3796 * @pf: pointer to an ice_pf instance
3798 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3800 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3805 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3806 * @pf: board private structure to initialize
3808 static void ice_deinit_pf(struct ice_pf *pf)
3810 ice_service_task_stop(pf);
3811 mutex_destroy(&pf->lag_mutex);
3812 mutex_destroy(&pf->adev_mutex);
3813 mutex_destroy(&pf->sw_mutex);
3814 mutex_destroy(&pf->tc_mutex);
3815 mutex_destroy(&pf->avail_q_mutex);
3816 mutex_destroy(&pf->vfs.table_lock);
3818 if (pf->avail_txqs) {
3819 bitmap_free(pf->avail_txqs);
3820 pf->avail_txqs = NULL;
3823 if (pf->avail_rxqs) {
3824 bitmap_free(pf->avail_rxqs);
3825 pf->avail_rxqs = NULL;
3829 ptp_clock_unregister(pf->ptp.clock);
3833 * ice_set_pf_caps - set PF's capability flags
3834 * @pf: pointer to the PF instance
3836 static void ice_set_pf_caps(struct ice_pf *pf)
3838 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3840 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3841 if (func_caps->common_cap.rdma)
3842 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3843 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3844 if (func_caps->common_cap.dcb)
3845 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3846 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3847 if (func_caps->common_cap.sr_iov_1_1) {
3848 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3849 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3852 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3853 if (func_caps->common_cap.rss_table_size)
3854 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3856 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3857 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3860 /* ctrl_vsi_idx will be set to a valid value when flow director
3861 * is set up by ice_init_fdir
3863 pf->ctrl_vsi_idx = ICE_NO_VSI;
3864 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3865 /* force guaranteed filter pool for PF */
3866 ice_alloc_fd_guar_item(&pf->hw, &unused,
3867 func_caps->fd_fltr_guar);
3868 /* force shared filter pool for PF */
3869 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3870 func_caps->fd_fltr_best_effort);
3873 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3874 if (func_caps->common_cap.ieee_1588)
3875 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3877 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3878 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3882 * ice_init_pf - Initialize general software structures (struct ice_pf)
3883 * @pf: board private structure to initialize
3885 static int ice_init_pf(struct ice_pf *pf)
3887 ice_set_pf_caps(pf);
3889 mutex_init(&pf->sw_mutex);
3890 mutex_init(&pf->tc_mutex);
3891 mutex_init(&pf->adev_mutex);
3892 mutex_init(&pf->lag_mutex);
3894 INIT_HLIST_HEAD(&pf->aq_wait_list);
3895 spin_lock_init(&pf->aq_wait_lock);
3896 init_waitqueue_head(&pf->aq_wait_queue);
3898 init_waitqueue_head(&pf->reset_wait_queue);
3900 /* setup service timer and periodic service task */
3901 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3902 pf->serv_tmr_period = HZ;
3903 INIT_WORK(&pf->serv_task, ice_service_task);
3904 clear_bit(ICE_SERVICE_SCHED, pf->state);
3906 mutex_init(&pf->avail_q_mutex);
3907 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3908 if (!pf->avail_txqs)
3911 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3912 if (!pf->avail_rxqs) {
3913 bitmap_free(pf->avail_txqs);
3914 pf->avail_txqs = NULL;
3918 mutex_init(&pf->vfs.table_lock);
3919 hash_init(pf->vfs.table);
3920 ice_mbx_init_snapshot(&pf->hw);
3926 * ice_is_wol_supported - check if WoL is supported
3927 * @hw: pointer to hardware info
3929 * Check if WoL is supported based on the HW configuration.
3930 * Returns true if NVM supports and enables WoL for this port, false otherwise
3932 bool ice_is_wol_supported(struct ice_hw *hw)
3936 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3937 * word) indicates WoL is not supported on the corresponding PF ID.
3939 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3942 return !(BIT(hw->port_info->lport) & wol_ctrl);
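/* Worked example (illustrative): if wol_ctrl reads back as 0x5 (bits 0 and
 * 2 set), WoL is reported as unsupported on lports 0 and 2 and supported on
 * lports 1 and 3, since a set bit means "not supported" for that PF ID.
 */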
3946 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3947 * @vsi: VSI being changed
3948 * @new_rx: new number of Rx queues
3949 * @new_tx: new number of Tx queues
3950 * @locked: is adev device_lock held
3952 * Only change the number of queues if new_tx or new_rx is non-zero.
3954 * Returns 0 on success.
3956 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
3958 struct ice_pf *pf = vsi->back;
3959 int err = 0, timeout = 50;
3961 if (!new_rx && !new_tx)
3964 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3968 usleep_range(1000, 2000);
3972 vsi->req_txq = (u16)new_tx;
3974 vsi->req_rxq = (u16)new_rx;
3976 /* set for the next time the netdev is started */
3977 if (!netif_running(vsi->netdev)) {
3978 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3979 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3984 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3985 ice_pf_dcb_recfg(pf, locked);
3988 clear_bit(ICE_CFG_BUSY, pf->state);
3993 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3994 * @pf: PF to configure
3996 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3997 * VSI can still Tx/Rx VLAN tagged packets.
3999 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4001 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4002 struct ice_vsi_ctx *ctxt;
4009 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4014 ctxt->info = vsi->info;
4016 ctxt->info.valid_sections =
4017 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4018 ICE_AQ_VSI_PROP_SECURITY_VALID |
4019 ICE_AQ_VSI_PROP_SW_VALID);
4021 /* disable VLAN anti-spoof */
4022 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4023 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4025 /* disable VLAN pruning and keep all other settings */
4026 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4028 /* allow all VLANs on Tx and don't strip on Rx */
4029 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4030 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4032 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4034 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4035 status, ice_aq_str(hw->adminq.sq_last_status));
4037 vsi->info.sec_flags = ctxt->info.sec_flags;
4038 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4039 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4046 * ice_log_pkg_init - log result of DDP package load
4047 * @hw: pointer to hardware info
4048 * @state: state of package load
4050 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4052 struct ice_pf *pf = hw->back;
4055 dev = ice_pf_to_dev(pf);
4058 case ICE_DDP_PKG_SUCCESS:
4059 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4060 hw->active_pkg_name,
4061 hw->active_pkg_ver.major,
4062 hw->active_pkg_ver.minor,
4063 hw->active_pkg_ver.update,
4064 hw->active_pkg_ver.draft);
4066 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4067 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4068 hw->active_pkg_name,
4069 hw->active_pkg_ver.major,
4070 hw->active_pkg_ver.minor,
4071 hw->active_pkg_ver.update,
4072 hw->active_pkg_ver.draft);
4074 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4075 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4076 hw->active_pkg_name,
4077 hw->active_pkg_ver.major,
4078 hw->active_pkg_ver.minor,
4079 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4081 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4082 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4083 hw->active_pkg_name,
4084 hw->active_pkg_ver.major,
4085 hw->active_pkg_ver.minor,
4086 hw->active_pkg_ver.update,
4087 hw->active_pkg_ver.draft,
4094 case ICE_DDP_PKG_FW_MISMATCH:
4095 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4097 case ICE_DDP_PKG_INVALID_FILE:
4098 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4100 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4101 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4103 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4104 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4105 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4107 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4108 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4110 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4111 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4113 case ICE_DDP_PKG_LOAD_ERROR:
4114 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4115 /* poll for reset to complete */
4116 if (ice_check_reset(hw))
4117 dev_err(dev, "Error resetting device. Please reload the driver\n");
4119 case ICE_DDP_PKG_ERR:
4121 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4127 * ice_load_pkg - load/reload the DDP Package file
4128 * @firmware: firmware structure when firmware requested or NULL for reload
4129 * @pf: pointer to the PF instance
4131 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4132 * initialize HW tables.
4135 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4137 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4138 struct device *dev = ice_pf_to_dev(pf);
4139 struct ice_hw *hw = &pf->hw;
4141 /* Load DDP Package */
4142 if (firmware && !hw->pkg_copy) {
4143 state = ice_copy_and_init_pkg(hw, firmware->data,
4145 ice_log_pkg_init(hw, state);
4146 } else if (!firmware && hw->pkg_copy) {
4147 /* Reload package during rebuild after CORER/GLOBR reset */
4148 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4149 ice_log_pkg_init(hw, state);
4151 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4154 if (!ice_is_init_pkg_successful(state)) {
4156 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4160 /* A successful package download is the precondition for advanced
4161 * features, hence set the ICE_FLAG_ADV_FEATURES flag
4163 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4167 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4168 * @pf: pointer to the PF structure
4170 * There is no error returned here because the driver should be able to handle
4171 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4172 * specifically with Tx.
4174 static void ice_verify_cacheline_size(struct ice_pf *pf)
4176 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4177 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4178 ICE_CACHE_LINE_BYTES);
4182 * ice_send_version - update firmware with driver version
4185 * Returns 0 on success, else error code
4187 static int ice_send_version(struct ice_pf *pf)
4189 struct ice_driver_ver dv;
4191 dv.major_ver = 0xff;
4192 dv.minor_ver = 0xff;
4193 dv.build_ver = 0xff;
4194 dv.subbuild_ver = 0;
4195 strscpy((char *)dv.driver_string, UTS_RELEASE,
4196 sizeof(dv.driver_string));
4197 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4201 * ice_init_fdir - Initialize flow director VSI and configuration
4202 * @pf: pointer to the PF instance
4204 * returns 0 on success, negative on error
4206 static int ice_init_fdir(struct ice_pf *pf)
4208 struct device *dev = ice_pf_to_dev(pf);
4209 struct ice_vsi *ctrl_vsi;
4212 /* Side Band Flow Director needs to have a control VSI.
4213 * Allocate it and store it in the PF.
4215 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4217 dev_dbg(dev, "could not create control VSI\n");
4221 err = ice_vsi_open_ctrl(ctrl_vsi);
4223 dev_dbg(dev, "could not open control VSI\n");
4227 mutex_init(&pf->hw.fdir_fltr_lock);
4229 err = ice_fdir_create_dflt_rules(pf);
4236 ice_fdir_release_flows(&pf->hw);
4237 ice_vsi_close(ctrl_vsi);
4239 ice_vsi_release(ctrl_vsi);
4240 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4241 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4242 pf->ctrl_vsi_idx = ICE_NO_VSI;
4247 static void ice_deinit_fdir(struct ice_pf *pf)
4249 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4254 ice_vsi_manage_fdir(vsi, false);
4255 ice_vsi_release(vsi);
4256 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4257 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4258 pf->ctrl_vsi_idx = ICE_NO_VSI;
4261 mutex_destroy(&pf->hw.fdir_fltr_lock);
4265 * ice_get_opt_fw_name - return optional firmware file name or NULL
4266 * @pf: pointer to the PF instance
4268 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4270 /* Optional firmware name is the same as the default, with an additional
4271 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4273 struct pci_dev *pdev = pf->pdev;
4274 char *opt_fw_filename;
4277 /* Determine the name of the optional file using the DSN (two
4278 * dwords following the start of the DSN Capability).
4280 dsn = pci_get_dsn(pdev);
4284 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4285 if (!opt_fw_filename)
4288 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4289 ICE_DDP_PKG_PATH, dsn);
4291 return opt_fw_filename;
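/* For illustration, assuming a device whose DSN reads 0x0011223344556677,
 * the snprintf above produces:
 *
 *	intel/ice/ddp/ice-0011223344556677.pkg
 *
 * and ice_request_fw() falls back to the default ICE_DDP_PKG_FILE when no
 * such device-specific package exists in the firmware search path.
 */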
4295 * ice_request_fw - Device initialization routine
4296 * @pf: pointer to the PF instance
4298 static void ice_request_fw(struct ice_pf *pf)
4300 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4301 const struct firmware *firmware = NULL;
4302 struct device *dev = ice_pf_to_dev(pf);
4305 /* optional device-specific DDP (if present) overrides the default DDP
4306 * package file. The kernel logs a debug message if the file doesn't exist,
4307 * and warning messages for other errors.
4309 if (opt_fw_filename) {
4310 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4312 kfree(opt_fw_filename);
4316 /* request for firmware was successful. Download to device */
4317 ice_load_pkg(firmware, pf);
4318 kfree(opt_fw_filename);
4319 release_firmware(firmware);
4324 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4326 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4330 /* request for firmware was successful. Download to device */
4331 ice_load_pkg(firmware, pf);
4332 release_firmware(firmware);
4336 * ice_print_wake_reason - show the wake up cause in the log
4337 * @pf: pointer to the PF struct
4339 static void ice_print_wake_reason(struct ice_pf *pf)
4341 u32 wus = pf->wakeup_reason;
4342 const char *wake_str;
4344 /* if no wake event, nothing to print */
4348 if (wus & PFPM_WUS_LNKC_M)
4349 wake_str = "Link\n";
4350 else if (wus & PFPM_WUS_MAG_M)
4351 wake_str = "Magic Packet\n";
4352 else if (wus & PFPM_WUS_MNG_M)
4353 wake_str = "Management\n";
4354 else if (wus & PFPM_WUS_FW_RST_WK_M)
4355 wake_str = "Firmware Reset\n";
4357 wake_str = "Unknown\n";
4359 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4363 * ice_register_netdev - register netdev
4364 * @vsi: pointer to the VSI struct
4366 static int ice_register_netdev(struct ice_vsi *vsi)
4370 if (!vsi || !vsi->netdev)
4373 err = register_netdev(vsi->netdev);
4377 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4378 netif_carrier_off(vsi->netdev);
4379 netif_tx_stop_all_queues(vsi->netdev);
4384 static void ice_unregister_netdev(struct ice_vsi *vsi)
4386 if (!vsi || !vsi->netdev)
4389 unregister_netdev(vsi->netdev);
4390 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4394 * ice_cfg_netdev - Allocate, configure and register a netdev
4395 * @vsi: the VSI associated with the new netdev
4397 * Returns 0 on success, negative value on failure
4399 static int ice_cfg_netdev(struct ice_vsi *vsi)
4401 struct ice_netdev_priv *np;
4402 struct net_device *netdev;
4403 u8 mac_addr[ETH_ALEN];
4405 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4410 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4411 vsi->netdev = netdev;
4412 np = netdev_priv(netdev);
4415 ice_set_netdev_features(netdev);
4418 if (vsi->type == ICE_VSI_PF) {
4419 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4420 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4421 eth_hw_addr_set(netdev, mac_addr);
4424 netdev->priv_flags |= IFF_UNICAST_FLT;
4426 /* Setup netdev TC information */
4427 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4429 netdev->max_mtu = ICE_MAX_MTU;
4434 static void ice_decfg_netdev(struct ice_vsi *vsi)
4436 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4437 free_netdev(vsi->netdev);
4441 static int ice_start_eth(struct ice_vsi *vsi)
4445 err = ice_init_mac_fltr(vsi->back);
4449 err = ice_vsi_open(vsi);
4451 ice_fltr_remove_all(vsi);
4456 static void ice_stop_eth(struct ice_vsi *vsi)
4458 ice_fltr_remove_all(vsi);
4462 static int ice_init_eth(struct ice_pf *pf)
4464 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4470 /* init channel list */
4471 INIT_LIST_HEAD(&vsi->ch_list);
4473 err = ice_cfg_netdev(vsi);
4476 /* Setup DCB netlink interface */
4477 ice_dcbnl_setup(vsi);
4479 err = ice_init_mac_fltr(pf);
4481 goto err_init_mac_fltr;
4483 err = ice_devlink_create_pf_port(pf);
4485 goto err_devlink_create_pf_port;
4487 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
4489 err = ice_register_netdev(vsi);
4491 goto err_register_netdev;
4493 err = ice_tc_indir_block_register(vsi);
4495 goto err_tc_indir_block_register;
4501 err_tc_indir_block_register:
4502 ice_unregister_netdev(vsi);
4503 err_register_netdev:
4504 ice_devlink_destroy_pf_port(pf);
4505 err_devlink_create_pf_port:
4507 ice_decfg_netdev(vsi);
4511 static void ice_deinit_eth(struct ice_pf *pf)
4513 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4519 ice_unregister_netdev(vsi);
4520 ice_devlink_destroy_pf_port(pf);
4521 ice_tc_indir_block_unregister(vsi);
4522 ice_decfg_netdev(vsi);
4526 * ice_wait_for_fw - wait for full FW readiness
4527 * @hw: pointer to the hardware structure
4528 * @timeout: milliseconds that can elapse before timing out
4530 static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4535 while (elapsed <= timeout) {
4536 fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4538 /* firmware was not yet loaded, we have to wait more */
4550 static int ice_init_dev(struct ice_pf *pf)
4552 struct device *dev = ice_pf_to_dev(pf);
4553 struct ice_hw *hw = &pf->hw;
4556 err = ice_init_hw(hw);
4558 dev_err(dev, "ice_init_hw failed: %d\n", err);
4562 /* Some cards require longer initialization times
4563 * due to the necessity of loading FW from an external source.
4564 * This can take as long as half a minute.
4566 if (ice_is_pf_c827(hw)) {
4567 err = ice_wait_for_fw(hw, 30000);
4569 dev_err(dev, "ice_wait_for_fw timed out");
4574 ice_init_feature_support(pf);
4578 /* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4579 * set in pf->flags, which will cause ice_is_safe_mode to return true
4582 if (ice_is_safe_mode(pf)) {
4583 /* we already got function/device capabilities but these don't
4584 * reflect what the driver needs to do in safe mode. Instead of
4585 * adding conditional logic everywhere to ignore these
4586 * device/function capabilities, override them.
4588 ice_set_safe_mode_caps(hw);
4591 err = ice_init_pf(pf);
4593 dev_err(dev, "ice_init_pf failed: %d\n", err);
4597 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4598 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4599 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4600 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4601 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4602 pf->hw.udp_tunnel_nic.tables[0].n_entries =
4603 pf->hw.tnl.valid_count[TNL_VXLAN];
4604 pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4605 UDP_TUNNEL_TYPE_VXLAN;
4607 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4608 pf->hw.udp_tunnel_nic.tables[1].n_entries =
4609 pf->hw.tnl.valid_count[TNL_GENEVE];
4610 pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4611 UDP_TUNNEL_TYPE_GENEVE;
4614 err = ice_init_interrupt_scheme(pf);
4616 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4618 goto err_init_interrupt_scheme;
4621 /* In case of MSIX we are going to setup the misc vector right here
4622 * to handle admin queue events etc. In case of legacy and MSI
4623 * the misc functionality and queue processing are combined in
4624 * the same vector and that gets setup at open.
4626 err = ice_req_irq_msix_misc(pf);
4628 dev_err(dev, "setup of misc vector failed: %d\n", err);
4629 goto err_req_irq_msix_misc;
4634 err_req_irq_msix_misc:
4635 ice_clear_interrupt_scheme(pf);
4636 err_init_interrupt_scheme:
4643 static void ice_deinit_dev(struct ice_pf *pf)
4645 ice_free_irq_msix_misc(pf);
4647 ice_deinit_hw(&pf->hw);
4649 /* Service task is already stopped, so call reset directly. */
4650 ice_reset(&pf->hw, ICE_RESET_PFR);
4651 pci_wait_for_pending_transaction(pf->pdev);
4652 ice_clear_interrupt_scheme(pf);
4655 static void ice_init_features(struct ice_pf *pf)
4657 struct device *dev = ice_pf_to_dev(pf);
4659 if (ice_is_safe_mode(pf))
4662 /* initialize DDP driven features */
4663 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4664 ice_ptp_init(pf);
4666 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4667 ice_gnss_init(pf);
4669 /* Note: Flow director init failure is non-fatal to load */
4670 if (ice_init_fdir(pf))
4671 dev_err(dev, "could not initialize flow director\n");
4673 /* Note: DCB init failure is non-fatal to load */
4674 if (ice_init_pf_dcb(pf, false)) {
4675 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4676 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4678 ice_cfg_lldp_mib_change(&pf->hw, true);
4681 if (ice_init_lag(pf))
4682 dev_warn(dev, "Failed to init link aggregation support\n");
4685 static void ice_deinit_features(struct ice_pf *pf)
4687 if (ice_is_safe_mode(pf))
4691 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4692 ice_cfg_lldp_mib_change(&pf->hw, false);
4693 ice_deinit_fdir(pf);
4694 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4695 ice_gnss_exit(pf);
4696 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4697 ice_ptp_release(pf);
4700 static void ice_init_wakeup(struct ice_pf *pf)
4702 /* Save wakeup reason register for later use */
4703 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4705 /* check for a power management event */
4706 ice_print_wake_reason(pf);
4708 /* clear wake status, all bits */
4709 wr32(&pf->hw, PFPM_WUS, U32_MAX);
4711 /* Disable WoL at init, wait for user to enable */
4712 device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4715 static int ice_init_link(struct ice_pf *pf)
4717 struct device *dev = ice_pf_to_dev(pf);
4720 err = ice_init_link_events(pf->hw.port_info);
4722 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4726 /* not a fatal error if this fails */
4727 err = ice_init_nvm_phy_type(pf->hw.port_info);
4729 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4731 /* not a fatal error if this fails */
4732 err = ice_update_link_info(pf->hw.port_info);
4734 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4736 ice_init_link_dflt_override(pf->hw.port_info);
4738 ice_check_link_cfg_err(pf,
4739 pf->hw.port_info->phy.link_info.link_cfg_err);
4741 /* if media available, initialize PHY settings */
4742 if (pf->hw.port_info->phy.link_info.link_info &
4743 ICE_AQ_MEDIA_AVAILABLE) {
4744 /* not a fatal error if this fails */
4745 err = ice_init_phy_user_cfg(pf->hw.port_info);
4747 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4749 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4750 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4753 ice_configure_phy(vsi);
4756 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4762 static int ice_init_pf_sw(struct ice_pf *pf)
4764 bool dvm = ice_is_dvm_ena(&pf->hw);
4765 struct ice_vsi *vsi;
4768 /* create switch struct for the switch element created by FW on boot */
4769 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4774 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4776 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4778 pf->first_sw->pf = pf;
4780 /* record the sw_id available for later use */
4781 pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4783 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4785 goto err_aq_set_port_params;
4787 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4790 goto err_pf_vsi_setup;
4796 err_aq_set_port_params:
4797 kfree(pf->first_sw);
4801 static void ice_deinit_pf_sw(struct ice_pf *pf)
4803 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4808 ice_vsi_release(vsi);
4809 kfree(pf->first_sw);
4812 static int ice_alloc_vsis(struct ice_pf *pf)
4814 struct device *dev = ice_pf_to_dev(pf);
4816 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4817 if (!pf->num_alloc_vsi)
4820 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4822 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4823 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4824 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4827 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4832 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4833 sizeof(*pf->vsi_stats), GFP_KERNEL);
4834 if (!pf->vsi_stats) {
4835 devm_kfree(dev, pf->vsi);
4842 static void ice_dealloc_vsis(struct ice_pf *pf)
4844 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4845 pf->vsi_stats = NULL;
4847 pf->num_alloc_vsi = 0;
4848 devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4852 static int ice_init_devlink(struct ice_pf *pf)
4856 err = ice_devlink_register_params(pf);
4860 ice_devlink_init_regions(pf);
4861 ice_devlink_register(pf);
4866 static void ice_deinit_devlink(struct ice_pf *pf)
4868 ice_devlink_unregister(pf);
4869 ice_devlink_destroy_regions(pf);
4870 ice_devlink_unregister_params(pf);
4873 static int ice_init(struct ice_pf *pf)
4877 err = ice_init_dev(pf);
4881 err = ice_alloc_vsis(pf);
4883 goto err_alloc_vsis;
4885 err = ice_init_pf_sw(pf);
4887 goto err_init_pf_sw;
4889 ice_init_wakeup(pf);
4891 err = ice_init_link(pf);
4895 err = ice_send_version(pf);
4899 ice_verify_cacheline_size(pf);
4901 if (ice_is_safe_mode(pf))
4902 ice_set_safe_mode_vlan_cfg(pf);
4904 /* print PCI link speed and width */
4905 pcie_print_link_status(pf->pdev);
4907 /* ready to go, so clear down state bit */
4908 clear_bit(ICE_DOWN, pf->state);
4909 clear_bit(ICE_SERVICE_DIS, pf->state);
4911 /* since everything is good, start the service timer */
4912 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4917 ice_deinit_pf_sw(pf);
4919 ice_dealloc_vsis(pf);
4925 static void ice_deinit(struct ice_pf *pf)
4927 set_bit(ICE_SERVICE_DIS, pf->state);
4928 set_bit(ICE_DOWN, pf->state);
4930 ice_deinit_pf_sw(pf);
4931 ice_dealloc_vsis(pf);
4936 * ice_load - load PF by initializing HW and starting the VSI
4937 * @pf: pointer to the pf instance
4939 int ice_load(struct ice_pf *pf)
4941 struct ice_vsi_cfg_params params = {};
4942 struct ice_vsi *vsi;
4945 err = ice_init_dev(pf);
4949 vsi = ice_get_main_vsi(pf);
4951 params = ice_vsi_to_params(vsi);
4952 params.flags = ICE_VSI_FLAG_INIT;
4955 err = ice_vsi_cfg(vsi, &params);
4959 err = ice_start_eth(ice_get_main_vsi(pf));
4964 err = ice_init_rdma(pf);
4968 ice_init_features(pf);
4969 ice_service_task_restart(pf);
4971 clear_bit(ICE_DOWN, pf->state);
4976 ice_vsi_close(ice_get_main_vsi(pf));
4979 ice_vsi_decfg(ice_get_main_vsi(pf));
4987 * ice_unload - unload PF by stopping the VSI and deinitializing HW
4988 * @pf: pointer to the pf instance
4990 void ice_unload(struct ice_pf *pf)
4992 ice_deinit_features(pf);
4993 ice_deinit_rdma(pf);
4995 ice_stop_eth(ice_get_main_vsi(pf));
4996 ice_vsi_decfg(ice_get_main_vsi(pf));
5002 * ice_probe - Device initialization routine
5003 * @pdev: PCI device information struct
5004 * @ent: entry in ice_pci_tbl
5006 * Returns 0 on success, negative on failure
5009 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5011 struct device *dev = &pdev->dev;
5016 if (pdev->is_virtfn) {
5017 dev_err(dev, "can't probe a virtual function\n");
5021 /* when under a kdump kernel initiate a reset before enabling the
5022 * device in order to clear out any pending DMA transactions. These
5023 * transactions can cause some systems to machine check when doing
5024 * the pcim_enable_device() below.
5026 if (is_kdump_kernel()) {
5027 pci_save_state(pdev);
5028 pci_clear_master(pdev);
5029 err = pcie_flr(pdev);
5032 pci_restore_state(pdev);
5035 /* this driver uses devres, see
5036 * Documentation/driver-api/driver-model/devres.rst
5038 err = pcim_enable_device(pdev);
5042 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5044 dev_err(dev, "BAR0 I/O map error %d\n", err);
5048 pf = ice_allocate_pf(dev);
5052 /* initialize Auxiliary index to invalid value */
5055 /* set up for high or low DMA */
5056 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5058 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5062 pci_set_master(pdev);
5065 pci_set_drvdata(pdev, pf);
5066 set_bit(ICE_DOWN, pf->state);
5067 /* Disable service task until DOWN bit is cleared */
5068 set_bit(ICE_SERVICE_DIS, pf->state);
5071 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5072 pci_save_state(pdev);
5075 hw->port_info = NULL;
5076 hw->vendor_id = pdev->vendor;
5077 hw->device_id = pdev->device;
5078 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5079 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5080 hw->subsystem_device_id = pdev->subsystem_device;
5081 hw->bus.device = PCI_SLOT(pdev->devfn);
5082 hw->bus.func = PCI_FUNC(pdev->devfn);
5083 ice_set_ctrlq_len(hw);
5085 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5087 #ifndef CONFIG_DYNAMIC_DEBUG
5089 hw->debug_mask = debug;
5096 err = ice_init_eth(pf);
5100 err = ice_init_rdma(pf);
5104 err = ice_init_devlink(pf);
5106 goto err_init_devlink;
5108 ice_init_features(pf);
5113 ice_deinit_rdma(pf);
5119 pci_disable_device(pdev);
5124 * ice_set_wake - enable or disable Wake on LAN
5125 * @pf: pointer to the PF struct
5127 * Simple helper for WoL control
5129 static void ice_set_wake(struct ice_pf *pf)
5131 struct ice_hw *hw = &pf->hw;
5132 bool wol = pf->wol_ena;
5134 /* clear wake state, otherwise new wake events won't fire */
5135 wr32(hw, PFPM_WUS, U32_MAX);
5137 /* enable / disable APM wake up, no RMW needed */
5138 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5140 /* set magic packet filter enabled */
5141 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5145 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5146 * @pf: pointer to the PF struct
5148 * Issue firmware command to enable multicast magic wake, making
5149 * sure that any locally administered address (LAA) is used for
5150 * wake, and that PF reset doesn't undo the LAA.
5152 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5154 struct device *dev = ice_pf_to_dev(pf);
5155 struct ice_hw *hw = &pf->hw;
5156 u8 mac_addr[ETH_ALEN];
5157 struct ice_vsi *vsi;
5164 vsi = ice_get_main_vsi(pf);
5168 /* Get current MAC address in case it's an LAA */
5170 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5172 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5174 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5175 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5176 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5178 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5180 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5181 status, ice_aq_str(hw->adminq.sq_last_status));
5185 * ice_remove - Device removal routine
5186 * @pdev: PCI device information struct
5188 static void ice_remove(struct pci_dev *pdev)
5190 struct ice_pf *pf = pci_get_drvdata(pdev);
5193 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5194 if (!ice_is_reset_in_progress(pf->state))
5199 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5200 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5204 ice_service_task_stop(pf);
5205 ice_aq_cancel_waiting_tasks(pf);
5206 set_bit(ICE_DOWN, pf->state);
5208 if (!ice_is_safe_mode(pf))
5209 ice_remove_arfs(pf);
5210 ice_deinit_features(pf);
5211 ice_deinit_devlink(pf);
5212 ice_deinit_rdma(pf);
5216 ice_vsi_release_all(pf);
5218 ice_setup_mc_magic_wake(pf);
5221 pci_disable_device(pdev);
5225 * ice_shutdown - PCI callback for shutting down device
5226 * @pdev: PCI device information struct
5228 static void ice_shutdown(struct pci_dev *pdev)
5230 struct ice_pf *pf = pci_get_drvdata(pdev);
5234 if (system_state == SYSTEM_POWER_OFF) {
5235 pci_wake_from_d3(pdev, pf->wol_ena);
5236 pci_set_power_state(pdev, PCI_D3hot);
5242 * ice_prepare_for_shutdown - prep for PCI shutdown
5243 * @pf: board private structure
5245 * Inform or close all dependent features in prep for PCI device shutdown
5247 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5249 struct ice_hw *hw = &pf->hw;
5252 /* Notify VFs of impending reset */
5253 if (ice_check_sq_alive(hw, &hw->mailboxq))
5254 ice_vc_notify_reset(pf);
5256 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5258 /* disable the VSIs and their queues that are not already DOWN */
5259 ice_pf_dis_all_vsi(pf, false);
5261 ice_for_each_vsi(pf, v)
5263 pf->vsi[v]->vsi_num = 0;
5265 ice_shutdown_all_ctrlq(hw);
5269 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5270 * @pf: board private structure to reinitialize
5272 * This routine reinitializes the interrupt scheme that was cleared during
5273 * the power management suspend callback.
5275 * This should be called during resume routine to re-allocate the q_vectors
5276 * and reacquire interrupts.
5278 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5280 struct device *dev = ice_pf_to_dev(pf);
5283 /* Since we clear MSIX flag during suspend, we need to
5284 * set it back during resume...
5287 ret = ice_init_interrupt_scheme(pf);
5289 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5293 /* Remap vectors and rings, after successfully re-initializing interrupts */
5294 ice_for_each_vsi(pf, v) {
5298 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5301 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5304 ret = ice_req_irq_msix_misc(pf);
5306 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5316 ice_vsi_free_q_vectors(pf->vsi[v]);
5322 * ice_suspend - PM callback for suspending the device
5323 * @dev: generic device information structure
5325 * Power Management callback to quiesce the device and prepare
5326 * for D3 transition.
5328 static int __maybe_unused ice_suspend(struct device *dev)
5330 struct pci_dev *pdev = to_pci_dev(dev);
5334 pf = pci_get_drvdata(pdev);
5336 if (!ice_pf_state_is_nominal(pf)) {
5337 dev_err(dev, "Device is not ready, no need to suspend it\n");
5341 /* Stop watchdog tasks until resume completion.
5342 * Even though it is most likely that the service task is
5343 * disabled if the device is suspended or down, the service task's
5344 * state is controlled by a different state bit, and we should
5345 * store and honor whatever state that bit is in at this point.
5347 disabled = ice_service_task_stop(pf);
5349 ice_unplug_aux_dev(pf);
5351 /* Already suspended? Then there is nothing to do */
5352 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5354 ice_service_task_restart(pf);
5358 if (test_bit(ICE_DOWN, pf->state) ||
5359 ice_is_reset_in_progress(pf->state)) {
5360 dev_err(dev, "can't suspend device in reset or already down\n");
5362 ice_service_task_restart(pf);
5366 ice_setup_mc_magic_wake(pf);
5368 ice_prepare_for_shutdown(pf);
5372 /* Free vectors, clear the interrupt scheme and release IRQs
5373 * for proper hibernation, especially with a large number of CPUs.
5374 * Otherwise hibernation might fail when mapping all the vectors back
5375 * to CPU0.
5377 ice_free_irq_msix_misc(pf);
5378 ice_for_each_vsi(pf, v) {
5381 ice_vsi_free_q_vectors(pf->vsi[v]);
5383 ice_clear_interrupt_scheme(pf);
5385 pci_save_state(pdev);
5386 pci_wake_from_d3(pdev, pf->wol_ena);
5387 pci_set_power_state(pdev, PCI_D3hot);
5392 * ice_resume - PM callback for waking up from D3
5393 * @dev: generic device information structure
5395 static int __maybe_unused ice_resume(struct device *dev)
5397 struct pci_dev *pdev = to_pci_dev(dev);
5398 enum ice_reset_req reset_type;
5403 pci_set_power_state(pdev, PCI_D0);
5404 pci_restore_state(pdev);
5405 pci_save_state(pdev);
5407 if (!pci_device_is_present(pdev))
5410 ret = pci_enable_device_mem(pdev);
5412 dev_err(dev, "Cannot enable device after suspend\n");
5416 pf = pci_get_drvdata(pdev);
5419 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5420 ice_print_wake_reason(pf);
5422 /* We cleared the interrupt scheme when we suspended, so we need to
5423 * restore it now to resume device functionality.
5425 ret = ice_reinit_interrupt_scheme(pf);
5427 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5429 clear_bit(ICE_DOWN, pf->state);
5430 /* Now perform PF reset and rebuild */
5431 reset_type = ICE_RESET_PFR;
5432 /* re-enable service task for reset, but allow reset to schedule it */
5433 clear_bit(ICE_SERVICE_DIS, pf->state);
5435 if (ice_schedule_reset(pf, reset_type))
5436 dev_err(dev, "Reset during resume failed.\n");
5438 clear_bit(ICE_SUSPENDED, pf->state);
5439 ice_service_task_restart(pf);
5441 /* Restart the service task */
5442 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5446 #endif /* CONFIG_PM */
5449 * ice_pci_err_detected - warning that PCI error has been detected
5450 * @pdev: PCI device information struct
5451 * @err: the type of PCI error
5453 * Called to warn that something happened on the PCI bus and the error handling
5454 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5456 static pci_ers_result_t
5457 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5459 struct ice_pf *pf = pci_get_drvdata(pdev);
5462 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", __func__, err);
5464 return PCI_ERS_RESULT_DISCONNECT;
5467 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5468 ice_service_task_stop(pf);
5470 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5471 set_bit(ICE_PFR_REQ, pf->state);
5472 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5476 return PCI_ERS_RESULT_NEED_RESET;
5480 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5481 * @pdev: PCI device information struct
5483 * Called to determine if the driver can recover from the PCI slot reset by
5484 * using a register read to determine if the device is recoverable.
5486 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5488 struct ice_pf *pf = pci_get_drvdata(pdev);
5489 pci_ers_result_t result;
5493 err = pci_enable_device_mem(pdev);
5495 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5497 result = PCI_ERS_RESULT_DISCONNECT;
5499 pci_set_master(pdev);
5500 pci_restore_state(pdev);
5501 pci_save_state(pdev);
5502 pci_wake_from_d3(pdev, false);
5504 /* Check for life */
5505 reg = rd32(&pf->hw, GLGEN_RTRIG);
5507 result = PCI_ERS_RESULT_RECOVERED;
5509 result = PCI_ERS_RESULT_DISCONNECT;
5516 * ice_pci_err_resume - restart operations after PCI error recovery
5517 * @pdev: PCI device information struct
5519 * Called to allow the driver to bring things back up after PCI error and/or
5520 * reset recovery have finished
5522 static void ice_pci_err_resume(struct pci_dev *pdev)
5524 struct ice_pf *pf = pci_get_drvdata(pdev);
5527 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5532 if (test_bit(ICE_SUSPENDED, pf->state)) {
5533 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5538 ice_restore_all_vfs_msi_state(pdev);
5540 ice_do_reset(pf, ICE_RESET_PFR);
5541 ice_service_task_restart(pf);
5542 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5546 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5547 * @pdev: PCI device information struct
5549 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5551 struct ice_pf *pf = pci_get_drvdata(pdev);
5553 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5554 ice_service_task_stop(pf);
5556 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5557 set_bit(ICE_PFR_REQ, pf->state);
5558 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5564 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5565 * @pdev: PCI device information struct
5567 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5569 ice_pci_err_resume(pdev);
5572 /* ice_pci_tbl - PCI Device ID Table
5574 * Wildcard entries (PCI_ANY_ID) should come last
5575 * Last entry must be all 0s
5577 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5578 * Class, Class Mask, private data (not used) }
5580 static const struct pci_device_id ice_pci_tbl[] = {
5581 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5582 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5583 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5584 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5585 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5586 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5587 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5588 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5589 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5590 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5591 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5592 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5593 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5594 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5595 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5596 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5597 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5598 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5599 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5600 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5601 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5602 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5603 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5604 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5605 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5606 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5607 /* required last entry */
5610 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5612 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5614 static const struct pci_error_handlers ice_pci_err_handler = {
5615 .error_detected = ice_pci_err_detected,
5616 .slot_reset = ice_pci_err_slot_reset,
5617 .reset_prepare = ice_pci_err_reset_prepare,
5618 .reset_done = ice_pci_err_reset_done,
5619 .resume = ice_pci_err_resume
5622 static struct pci_driver ice_driver = {
5623 .name = KBUILD_MODNAME,
5624 .id_table = ice_pci_tbl,
5626 .remove = ice_remove,
5628 .driver.pm = &ice_pm_ops,
5629 #endif /* CONFIG_PM */
5630 .shutdown = ice_shutdown,
5631 .sriov_configure = ice_sriov_configure,
5632 .err_handler = &ice_pci_err_handler
5636 * ice_module_init - Driver registration routine
5638 * ice_module_init is the first routine called when the driver is
5639 * loaded. All it does is register with the PCI subsystem.
5641 static int __init ice_module_init(void)
5643 int status = -ENOMEM;
5645 pr_info("%s\n", ice_driver_string);
5646 pr_info("%s\n", ice_copyright);
5648 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5650 pr_err("Failed to create workqueue\n");
5654 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5656 pr_err("Failed to create LAG workqueue\n");
5660 status = pci_register_driver(&ice_driver);
5662 pr_err("failed to register PCI driver, err %d\n", status);
5663 goto err_dest_lag_wq;
5669 destroy_workqueue(ice_lag_wq);
5671 destroy_workqueue(ice_wq);
5674 module_init(ice_module_init);
5677 * ice_module_exit - Driver exit cleanup routine
5679 * ice_module_exit is called just before the driver is removed from memory.
5682 static void __exit ice_module_exit(void)
5684 pci_unregister_driver(&ice_driver);
5685 destroy_workqueue(ice_wq);
5686 destroy_workqueue(ice_lag_wq);
5687 pr_info("module unloaded\n");
5689 module_exit(ice_module_exit);
5692 * ice_set_mac_address - NDO callback to set MAC address
5693 * @netdev: network interface device structure
5694 * @pi: pointer to an address structure
5696 * Returns 0 on success, negative on failure
5698 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5700 struct ice_netdev_priv *np = netdev_priv(netdev);
5701 struct ice_vsi *vsi = np->vsi;
5702 struct ice_pf *pf = vsi->back;
5703 struct ice_hw *hw = &pf->hw;
5704 struct sockaddr *addr = pi;
5705 u8 old_mac[ETH_ALEN];
5710 mac = (u8 *)addr->sa_data;
5712 if (!is_valid_ether_addr(mac))
5713 return -EADDRNOTAVAIL;
5715 if (test_bit(ICE_DOWN, pf->state) ||
5716 ice_is_reset_in_progress(pf->state)) {
5717 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5722 if (ice_chnl_dmac_fltr_cnt(pf)) {
5723 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5728 netif_addr_lock_bh(netdev);
5729 ether_addr_copy(old_mac, netdev->dev_addr);
5730 /* change the netdev's MAC address */
5731 eth_hw_addr_set(netdev, mac);
5732 netif_addr_unlock_bh(netdev);
5734 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5735 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5736 if (err && err != -ENOENT) {
5737 err = -EADDRNOTAVAIL;
5738 goto err_update_filters;
5741 /* Add filter for new MAC. If filter exists, return success */
5742 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5743 if (err == -EEXIST) {
5744 /* Although this MAC filter is already present in hardware it's
5745 * possible in some cases (e.g. bonding) that dev_addr was
5746 * modified outside of the driver and needs to be restored back
5747 * to this value.
5749 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5753 /* error if the new filter addition failed */
5754 err = -EADDRNOTAVAIL;
5759 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5761 netif_addr_lock_bh(netdev);
5762 eth_hw_addr_set(netdev, old_mac);
5763 netif_addr_unlock_bh(netdev);
5767 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5770 /* write new MAC address to the firmware */
5771 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5772 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5774 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5781 * ice_set_rx_mode - NDO callback to set the netdev filters
5782 * @netdev: network interface device structure
5784 static void ice_set_rx_mode(struct net_device *netdev)
5786 struct ice_netdev_priv *np = netdev_priv(netdev);
5787 struct ice_vsi *vsi = np->vsi;
5789 if (!vsi || ice_is_switchdev_running(vsi->back))
5792 /* Set the flags to synchronize filters;
5793 * ndo_set_rx_mode may be triggered even without a change in netdev
5794 * flags
5796 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5797 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5798 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5800 /* schedule our worker thread which will take care of
5801 * applying the new filter changes
5803 ice_service_task_schedule(vsi->back);
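/* A note on the deferral above: .ndo_set_rx_mode runs in atomic context,
 * under the netdev address lock, while actually programming MAC filters
 * requires AdminQ commands that may sleep, so the filter sync itself is
 * left to the service task.
 */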
5807 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5808 * @netdev: network interface device structure
5809 * @queue_index: Queue ID
5810 * @maxrate: maximum bandwidth in Mbps
5813 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5815 struct ice_netdev_priv *np = netdev_priv(netdev);
5816 struct ice_vsi *vsi = np->vsi;
5821 /* Validate maxrate requested is within permitted range */
5822 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5823 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5824 maxrate, queue_index);
5828 q_handle = vsi->tx_rings[queue_index]->q_handle;
5829 tc = ice_dcb_get_tc(vsi, queue_index);
5831 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
5833 netdev_err(netdev, "Invalid VSI for given queue %d\n",
5838 /* Set BW back to default when the user sets maxrate to 0 */
5840 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5841 q_handle, ICE_MAX_BW);
5843 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5844 q_handle, ICE_MAX_BW, maxrate * 1000);
5846 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5853 * ice_fdb_add - add an entry to the hardware database
5854 * @ndm: the input from the stack
5855 * @tb: pointer to array of nladdr (unused)
5856 * @dev: the net device pointer
5857 * @addr: the MAC address entry being added
5858 * @vid: VLAN ID
5859 * @flags: instructions from stack about fdb operation
5860 * @extack: netlink extended ack
5863 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5864 struct net_device *dev, const unsigned char *addr, u16 vid,
5865 u16 flags, struct netlink_ext_ack __always_unused *extack)
5870 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5873 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5874 netdev_err(dev, "FDB only supports static addresses\n");
5878 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5879 err = dev_uc_add_excl(dev, addr);
5880 else if (is_multicast_ether_addr(addr))
5881 err = dev_mc_add_excl(dev, addr);
5885 /* Only return duplicate errors if NLM_F_EXCL is set */
5886 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5893 * ice_fdb_del - delete an entry from the hardware database
5894 * @ndm: the input from the stack
5895 * @tb: pointer to array of nladdr (unused)
5896 * @dev: the net device pointer
5897 * @addr: the MAC address entry being removed
5898 * @vid: VLAN ID
5899 * @extack: netlink extended ack
5902 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5903 struct net_device *dev, const unsigned char *addr,
5904 __always_unused u16 vid, struct netlink_ext_ack *extack)
5908 if (ndm->ndm_state & NUD_PERMANENT) {
5909 netdev_err(dev, "FDB only supports static addresses\n");
5913 if (is_unicast_ether_addr(addr))
5914 err = dev_uc_del(dev, addr);
5915 else if (is_multicast_ether_addr(addr))
5916 err = dev_mc_del(dev, addr);
5923 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5924 NETIF_F_HW_VLAN_CTAG_TX | \
5925 NETIF_F_HW_VLAN_STAG_RX | \
5926 NETIF_F_HW_VLAN_STAG_TX)
5928 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5929 NETIF_F_HW_VLAN_STAG_RX)
5931 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5932 NETIF_F_HW_VLAN_STAG_FILTER)
5935 * ice_fix_features - fix the netdev features flags based on device limitations
5936 * @netdev: ptr to the netdev that flags are being fixed on
5937 * @features: features that need to be checked and possibly fixed
5939 * Make sure any fixups are made to features in this callback. This enables the
5940 * driver to not have to check unsupported configurations throughout the driver
5941 * because that's the responsibility of this callback.
5943 * Single VLAN Mode (SVM) Supported Features:
5944 * NETIF_F_HW_VLAN_CTAG_FILTER
5945 * NETIF_F_HW_VLAN_CTAG_RX
5946 * NETIF_F_HW_VLAN_CTAG_TX
5948 * Double VLAN Mode (DVM) Supported Features:
5949 * NETIF_F_HW_VLAN_CTAG_FILTER
5950 * NETIF_F_HW_VLAN_CTAG_RX
5951 * NETIF_F_HW_VLAN_CTAG_TX
5953 * NETIF_F_HW_VLAN_STAG_FILTER
5954 * NETIF_F_HW_VLAN_STAG_RX
5955 * NETIF_F_HW_VLAN_STAG_TX
5957 * Features that need fixing:
5958 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
5959 * These are mutually exclusive as the VSI context cannot support multiple
5960 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
5961 * is not done, then default to clearing the requested STAG offload
5962 * settings.
5964 * All supported filtering has to be enabled or disabled together. For
5965 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5966 * together. If this is not done, then default to VLAN filtering disabled.
5967 * These are mutually exclusive as there is currently no way to
5968 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
5969 * filtering.
5971 static netdev_features_t
5972 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5974 struct ice_netdev_priv *np = netdev_priv(netdev);
5975 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5976 bool cur_ctag, cur_stag, req_ctag, req_stag;
5978 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5979 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5980 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5982 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5983 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5984 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5986 if (req_vlan_fltr != cur_vlan_fltr) {
5987 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5988 if (req_ctag && req_stag) {
5989 features |= NETIF_VLAN_FILTERING_FEATURES;
5990 } else if (!req_ctag && !req_stag) {
5991 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5992 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
5993 (!cur_stag && req_stag && !cur_ctag)) {
5994 features |= NETIF_VLAN_FILTERING_FEATURES;
5995 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
5996 } else if ((cur_ctag && !req_ctag && cur_stag) ||
5997 (cur_stag && !req_stag && cur_ctag)) {
5998 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5999 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6002 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6003 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6005 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6006 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6010 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6011 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6012 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6013 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6014 NETIF_F_HW_VLAN_STAG_TX);
6017 if (!(netdev->features & NETIF_F_RXFCS) &&
6018 (features & NETIF_F_RXFCS) &&
6019 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6020 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6021 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6022 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
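/* Worked example of the filtering fixup above: in DVM with both filters
 * currently off, a request to enable only NETIF_F_HW_VLAN_CTAG_FILTER
 * matches the "(!cur_ctag && req_ctag && !cur_stag)" branch, so both CTAG
 * and STAG filtering are turned on and a warning is logged, since
 * filtering cannot be enabled per VLAN ethertype.
 */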
6029 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6030 * @vsi: PF's VSI
6031 * @features: features used to determine VLAN offload settings
6033 * First, determine the vlan_ethertype based on the VLAN offload bits in
6034 * features. Then determine if stripping and insertion should be enabled or
6035 * disabled. Finally enable or disable VLAN stripping and insertion.
6038 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6040 bool enable_stripping = true, enable_insertion = true;
6041 struct ice_vsi_vlan_ops *vlan_ops;
6042 int strip_err = 0, insert_err = 0;
6043 u16 vlan_ethertype = 0;
6045 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6047 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6048 vlan_ethertype = ETH_P_8021AD;
6049 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6050 vlan_ethertype = ETH_P_8021Q;
6052 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6053 enable_stripping = false;
6054 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6055 enable_insertion = false;
6057 if (enable_stripping)
6058 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6060 strip_err = vlan_ops->dis_stripping(vsi);
6062 if (enable_insertion)
6063 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6065 insert_err = vlan_ops->dis_insertion(vsi);
6067 if (strip_err || insert_err)
6074 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6075 * @vsi: PF's VSI
6076 * @features: features used to determine VLAN filtering settings
6078 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6079 * netdev features.
6082 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6084 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6087 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6088 * if either bit is set
6091 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6092 err = vlan_ops->ena_rx_filtering(vsi);
6094 err = vlan_ops->dis_rx_filtering(vsi);
6100 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6101 * @netdev: ptr to the netdev being adjusted
6102 * @features: the feature set that the stack is suggesting
6104 * Only update VLAN settings if the requested_vlan_features are different than
6105 * the current_vlan_features.
6108 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6110 netdev_features_t current_vlan_features, requested_vlan_features;
6111 struct ice_netdev_priv *np = netdev_priv(netdev);
6112 struct ice_vsi *vsi = np->vsi;
6115 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6116 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6117 if (current_vlan_features ^ requested_vlan_features) {
6118 if ((features & NETIF_F_RXFCS) &&
6119 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6120 dev_err(ice_pf_to_dev(vsi->back),
6121 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6125 err = ice_set_vlan_offload_features(vsi, features);
6130 current_vlan_features = netdev->features &
6131 NETIF_VLAN_FILTERING_FEATURES;
6132 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6133 if (current_vlan_features ^ requested_vlan_features) {
6134 err = ice_set_vlan_filtering_features(vsi, features);
6143 * ice_set_loopback - turn on/off loopback mode on underlying PF
6144 * @vsi: pointer to the VSI struct
6145 * @ena: flag to indicate the on/off setting
6147 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6149 bool if_running = netif_running(vsi->netdev);
6152 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6153 ret = ice_down(vsi);
6155 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6159 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6161 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6169 * ice_set_features - set the netdev feature flags
6170 * @netdev: ptr to the netdev being adjusted
6171 * @features: the feature set that the stack is suggesting
6174 ice_set_features(struct net_device *netdev, netdev_features_t features)
6176 netdev_features_t changed = netdev->features ^ features;
6177 struct ice_netdev_priv *np = netdev_priv(netdev);
6178 struct ice_vsi *vsi = np->vsi;
6179 struct ice_pf *pf = vsi->back;
6182 /* Don't set any netdev advanced features with device in Safe Mode */
6183 if (ice_is_safe_mode(pf)) {
6184 dev_err(ice_pf_to_dev(pf),
6185 "Device is in Safe Mode - not enabling advanced netdev features\n");
6189 /* Do not change setting during reset */
6190 if (ice_is_reset_in_progress(pf->state)) {
6191 dev_err(ice_pf_to_dev(pf),
6192 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6196 /* Multiple features can be changed in one call so keep features in
6197 * separate if/else statements to guarantee each feature is checked
6199 if (changed & NETIF_F_RXHASH)
6200 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6202 ret = ice_set_vlan_features(netdev, features);
6206 /* Turn on receive of FCS aka CRC, and after setting this
6207 * flag the packet data will have the 4 byte CRC appended
6209 if (changed & NETIF_F_RXFCS) {
6210 if ((features & NETIF_F_RXFCS) &&
6211 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6212 dev_err(ice_pf_to_dev(vsi->back),
6213 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6217 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6218 ret = ice_down_up(vsi);
6223 if (changed & NETIF_F_NTUPLE) {
6224 bool ena = !!(features & NETIF_F_NTUPLE);
6226 ice_vsi_manage_fdir(vsi, ena);
6227 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6230 /* don't turn off hw_tc_offload when ADQ is already enabled */
6231 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6232 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6236 if (changed & NETIF_F_HW_TC) {
6237 bool ena = !!(features & NETIF_F_HW_TC);
6239 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6240 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6243 if (changed & NETIF_F_LOOPBACK)
6244 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6250 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6251 * @vsi: VSI to setup VLAN properties for
6253 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6257 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6261 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6265 return ice_vsi_add_vlan_zero(vsi);
6269 * ice_vsi_cfg_lan - Setup the VSI lan related config
6270 * @vsi: the VSI being configured
6272 * Return 0 on success and negative value on error
6274 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6278 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6279 ice_set_rx_mode(vsi->netdev);
6281 err = ice_vsi_vlan_setup(vsi);
6285 ice_vsi_cfg_dcb_rings(vsi);
6287 err = ice_vsi_cfg_lan_txqs(vsi);
6288 if (!err && ice_is_xdp_ena_vsi(vsi))
6289 err = ice_vsi_cfg_xdp_txqs(vsi);
6291 err = ice_vsi_cfg_rxqs(vsi);
6296 /* THEORY OF MODERATION:
6297 * The ice driver hardware works differently than the hardware that DIMLIB was
6298 * originally made for. ice hardware doesn't have packet count limits that
6299 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6300 * which is hard-coded to a limit of 250,000 ints/second.
6301 * If not using dynamic moderation, the INTRL value can be modified
6302 * by ethtool rx-usecs-high.
6305 /* the throttle rate for interrupts, basically worst case delay before
6306 * an initial interrupt fires, value is stored in microseconds.
6311 /* Make a different profile for Rx that doesn't allow quite so aggressive
6312 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6313 * second).
6315 static const struct ice_dim rx_profile[] = {
6316 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6317 {8}, /* 125,000 ints/s */
6318 {16}, /* 62,500 ints/s */
6319 {62}, /* 16,129 ints/s */
6320 {126} /* 7,936 ints/s */
6323 /* The transmit profile, which has the same sorts of values
6324 * as the previous struct
6326 static const struct ice_dim tx_profile[] = {
6327 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6328 {8}, /* 125,000 ints/s */
6329 {40}, /* 25,000 ints/s */
6330 {128}, /* 7,812 ints/s */
6331 {256} /* 3,906 ints/s */
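/* The ints/s figures above follow from the ITR being a microsecond delay
 * before the next interrupt may fire: rate = 1,000,000 / itr_us, e.g.
 * 8 us -> 125,000 ints/s, before the hard-coded 250K INTRL cap mentioned
 * in the theory comment applies.
 */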
6334 static void ice_tx_dim_work(struct work_struct *work)
6336 struct ice_ring_container *rc;
6340 dim = container_of(work, struct dim, work);
6343 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6345 /* look up the values in our local table */
6346 itr = tx_profile[dim->profile_ix].itr;
6348 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6349 ice_write_itr(rc, itr);
6351 dim->state = DIM_START_MEASURE;
6354 static void ice_rx_dim_work(struct work_struct *work)
6356 struct ice_ring_container *rc;
6360 dim = container_of(work, struct dim, work);
6363 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6365 /* look up the values in our local table */
6366 itr = rx_profile[dim->profile_ix].itr;
6368 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6369 ice_write_itr(rc, itr);
6371 dim->state = DIM_START_MEASURE;
6374 #define ICE_DIM_DEFAULT_PROFILE_IX 1
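/* Index 1 selects the {8} entry of both profiles above, i.e. roughly
 * 125,000 ints/s as a middle-of-the-road starting point until DIM has
 * gathered enough measurements to move up or down the table.
 */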
6377 * ice_init_moderation - set up interrupt moderation
6378 * @q_vector: the vector containing rings to be configured
6380 * Set up interrupt moderation registers, with the intent to do the right thing
6381 * when called from reset or from probe, whether or not dynamic moderation
6382 * is enabled. Take special care to write all the registers in both
6383 * dynamic and non-dynamic moderation modes in order to make sure hardware
6384 * is in a known state.
6386 static void ice_init_moderation(struct ice_q_vector *q_vector)
6388 struct ice_ring_container *rc;
6389 bool tx_dynamic, rx_dynamic;
6392 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6393 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6394 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6396 tx_dynamic = ITR_IS_DYNAMIC(rc);
6398 /* set the initial TX ITR to match the above */
6399 ice_write_itr(rc, tx_dynamic ?
6400 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6403 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6404 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6405 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6407 rx_dynamic = ITR_IS_DYNAMIC(rc);
6409 /* set the initial RX ITR to match the above */
6410 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6413 ice_set_q_vector_intrl(q_vector);
6417 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6418 * @vsi: the VSI being configured
6420 static void ice_napi_enable_all(struct ice_vsi *vsi)
6427 ice_for_each_q_vector(vsi, q_idx) {
6428 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6430 ice_init_moderation(q_vector);
6432 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6433 napi_enable(&q_vector->napi);
6438 * ice_up_complete - Finish the last steps of bringing up a connection
6439 * @vsi: The VSI being configured
6441 * Return 0 on success and negative value on error
6443 static int ice_up_complete(struct ice_vsi *vsi)
6445 struct ice_pf *pf = vsi->back;
6448 ice_vsi_cfg_msix(vsi);
6450 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6451 * Tx queue group list was configured and the context bits were
6452 * programmed using ice_vsi_cfg_txqs
6454 err = ice_vsi_start_all_rx_rings(vsi);
6458 clear_bit(ICE_VSI_DOWN, vsi->state);
6459 ice_napi_enable_all(vsi);
6460 ice_vsi_ena_irq(vsi);
6462 if (vsi->port_info &&
6463 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6464 vsi->netdev && vsi->type == ICE_VSI_PF) {
6465 ice_print_link_msg(vsi, true);
6466 netif_tx_start_all_queues(vsi->netdev);
6467 netif_carrier_on(vsi->netdev);
6468 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6471 /* Perform an initial read of the statistics registers now to
6472 * set the baseline so counters are ready when interface is up
6474 ice_update_eth_stats(vsi);
6476 if (vsi->type == ICE_VSI_PF)
6477 ice_service_task_schedule(pf);
6483 * ice_up - Bring the connection back up after being down
6484 * @vsi: VSI being configured
6486 int ice_up(struct ice_vsi *vsi)
6490 err = ice_vsi_cfg_lan(vsi);
6492 err = ice_up_complete(vsi);
6498 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6499 * @syncp: pointer to u64_stats_sync
6500 * @stats: stats that pkts and bytes count will be taken from
6501 * @pkts: packets stats counter
6502 * @bytes: bytes stats counter
6504 * This function fetches stats from the ring considering the atomic operations
6505 * that need to be performed to read u64 values on a 32 bit machine.
6508 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6509 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6514 start = u64_stats_fetch_begin(syncp);
6516 *bytes = stats.bytes;
6517 } while (u64_stats_fetch_retry(syncp, start));
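/* On 64-bit kernels the u64_stats seqcount is compiled out and the loop
 * above executes exactly once; on 32-bit kernels u64_stats_fetch_retry()
 * forces a re-read if a writer updated the counters mid-fetch.
 */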
6521 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6522 * @vsi: the VSI to be updated
6523 * @vsi_stats: the stats struct to be updated
6524 * @rings: rings to work on
6525 * @count: number of rings
6528 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6529 struct rtnl_link_stats64 *vsi_stats,
6530 struct ice_tx_ring **rings, u16 count)
6534 for (i = 0; i < count; i++) {
6535 struct ice_tx_ring *ring;
6536 u64 pkts = 0, bytes = 0;
6538 ring = READ_ONCE(rings[i]);
6539 if (!ring || !ring->ring_stats)
6541 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6542 ring->ring_stats->stats, &pkts,
6544 vsi_stats->tx_packets += pkts;
6545 vsi_stats->tx_bytes += bytes;
6546 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6547 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6548 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6553 * ice_update_vsi_ring_stats - Update VSI stats counters
6554 * @vsi: the VSI to be updated
6556 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6558 struct rtnl_link_stats64 *net_stats, *stats_prev;
6559 struct rtnl_link_stats64 *vsi_stats;
6563 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6567 /* reset non-netdev (extended) stats */
6568 vsi->tx_restart = 0;
6570 vsi->tx_linearize = 0;
6571 vsi->rx_buf_failed = 0;
6572 vsi->rx_page_failed = 0;
6576 /* update Tx rings counters */
6577 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6580 /* update Rx rings counters */
6581 ice_for_each_rxq(vsi, i) {
6582 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6583 struct ice_ring_stats *ring_stats;
6585 ring_stats = ring->ring_stats;
6586 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6587 ring_stats->stats, &pkts,
6589 vsi_stats->rx_packets += pkts;
6590 vsi_stats->rx_bytes += bytes;
6591 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6592 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6593 }
6595 /* update XDP Tx rings counters */
6596 if (ice_is_xdp_ena_vsi(vsi))
6597 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6598 vsi->num_xdp_txq);
6600 rcu_read_unlock();
6602 net_stats = &vsi->net_stats;
6603 stats_prev = &vsi->net_stats_prev;
6605 /* clear prev counters after reset */
6606 if (vsi_stats->tx_packets < stats_prev->tx_packets ||
6607 vsi_stats->rx_packets < stats_prev->rx_packets) {
6608 stats_prev->tx_packets = 0;
6609 stats_prev->tx_bytes = 0;
6610 stats_prev->rx_packets = 0;
6611 stats_prev->rx_bytes = 0;
6612 }
6614 /* update netdev counters */
6615 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6616 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6617 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6618 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6620 stats_prev->tx_packets = vsi_stats->tx_packets;
6621 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6622 stats_prev->rx_packets = vsi_stats->rx_packets;
6623 stats_prev->rx_bytes = vsi_stats->rx_bytes;
6625 kfree(vsi_stats);
6626 }
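/* Editor's note: the prev/cur snapshot dance above keeps the netdev
 * counters monotonic even though ring counters restart from zero on VSI
 * rebuild. A standalone sketch of the same accumulation step (hypothetical
 * names, not driver code):
 *
 *	static void demo_accumulate(uint64_t *total, uint64_t *prev,
 *				    uint64_t cur)
 *	{
 *		if (cur < *prev)   // source counter was reset: drop baseline
 *			*prev = 0;
 *		*total += cur - *prev;
 *		*prev = cur;
 *	}
 */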
6628 /**
6629 * ice_update_vsi_stats - Update VSI stats counters
6630 * @vsi: the VSI to be updated
6631 */
6632 void ice_update_vsi_stats(struct ice_vsi *vsi)
6634 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6635 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6636 struct ice_pf *pf = vsi->back;
6638 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6639 test_bit(ICE_CFG_BUSY, pf->state))
6640 return;
6642 /* get stats as recorded by Tx/Rx rings */
6643 ice_update_vsi_ring_stats(vsi);
6645 /* get VSI stats as recorded by the hardware */
6646 ice_update_eth_stats(vsi);
6648 cur_ns->tx_errors = cur_es->tx_errors;
6649 cur_ns->rx_dropped = cur_es->rx_discards;
6650 cur_ns->tx_dropped = cur_es->tx_discards;
6651 cur_ns->multicast = cur_es->rx_multicast;
6653 /* update some more netdev stats if this is main VSI */
6654 if (vsi->type == ICE_VSI_PF) {
6655 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6656 cur_ns->rx_errors = pf->stats.crc_errors +
6657 pf->stats.illegal_bytes +
6658 pf->stats.rx_len_errors +
6659 pf->stats.rx_undersize +
6660 pf->hw_csum_rx_error +
6661 pf->stats.rx_jabber +
6662 pf->stats.rx_fragments +
6663 pf->stats.rx_oversize;
6664 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6665 /* record drops from the port level */
6666 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6667 }
6668 }
6670 /**
6671 * ice_update_pf_stats - Update PF port stats counters
6672 * @pf: PF whose stats need to be updated
6673 */
6674 void ice_update_pf_stats(struct ice_pf *pf)
6675 {
6676 struct ice_hw_port_stats *prev_ps, *cur_ps;
6677 struct ice_hw *hw = &pf->hw;
6678 u16 fd_ctr_base;
6679 u8 port;
6681 port = hw->port_info->lport;
6682 prev_ps = &pf->stats_prev;
6683 cur_ps = &pf->stats;
6685 if (ice_is_reset_in_progress(pf->state))
6686 pf->stat_prev_loaded = false;
6688 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6689 &prev_ps->eth.rx_bytes,
6690 &cur_ps->eth.rx_bytes);
6692 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6693 &prev_ps->eth.rx_unicast,
6694 &cur_ps->eth.rx_unicast);
6696 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6697 &prev_ps->eth.rx_multicast,
6698 &cur_ps->eth.rx_multicast);
6700 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6701 &prev_ps->eth.rx_broadcast,
6702 &cur_ps->eth.rx_broadcast);
6704 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6705 &prev_ps->eth.rx_discards,
6706 &cur_ps->eth.rx_discards);
6708 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6709 &prev_ps->eth.tx_bytes,
6710 &cur_ps->eth.tx_bytes);
6712 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6713 &prev_ps->eth.tx_unicast,
6714 &cur_ps->eth.tx_unicast);
6716 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6717 &prev_ps->eth.tx_multicast,
6718 &cur_ps->eth.tx_multicast);
6720 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6721 &prev_ps->eth.tx_broadcast,
6722 &cur_ps->eth.tx_broadcast);
6724 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6725 &prev_ps->tx_dropped_link_down,
6726 &cur_ps->tx_dropped_link_down);
6728 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6729 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6731 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6732 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6734 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6735 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6737 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6738 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6740 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6741 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6743 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6744 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6746 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6747 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6749 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6750 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6752 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6753 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6755 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6756 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6758 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6759 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6761 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6762 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6764 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6765 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6767 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6768 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6770 fd_ctr_base = hw->fd_ctr_base;
6772 ice_stat_update40(hw,
6773 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6774 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6775 &cur_ps->fd_sb_match);
6776 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6777 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6779 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6780 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6782 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6783 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6785 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6786 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6788 ice_update_dcb_stats(pf);
6790 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6791 &prev_ps->crc_errors, &cur_ps->crc_errors);
6793 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6794 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6796 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6797 &prev_ps->mac_local_faults,
6798 &cur_ps->mac_local_faults);
6800 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6801 &prev_ps->mac_remote_faults,
6802 &cur_ps->mac_remote_faults);
6804 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6805 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6807 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6808 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6810 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6811 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6813 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6814 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6816 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6817 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6819 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6821 pf->stat_prev_loaded = true;
6822 }
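/* Editor's note: ice_stat_update40()/ice_stat_update32() implement a
 * rollover-safe delta against the previous snapshot; stat_prev_loaded
 * suppresses the first delta after reset so a fresh baseline can be
 * established. A sketch of the 40-bit case under those assumptions
 * (hypothetical names, not driver code):
 *
 *	static uint64_t demo_delta40(uint64_t prev, uint64_t cur)
 *	{
 *		const uint64_t mask = (1ULL << 40) - 1;
 *
 *		return (cur - prev) & mask;  // modular math absorbs wraparound
 *	}
 *
 * The caller adds the returned delta to a running 64-bit total and then
 * stores cur as the new prev.
 */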
6824 /**
6825 * ice_get_stats64 - get statistics for network device structure
6826 * @netdev: network interface device structure
6827 * @stats: main device statistics structure
6828 */
6830 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6831 {
6832 struct ice_netdev_priv *np = netdev_priv(netdev);
6833 struct rtnl_link_stats64 *vsi_stats;
6834 struct ice_vsi *vsi = np->vsi;
6836 vsi_stats = &vsi->net_stats;
6838 if (!vsi->num_txq || !vsi->num_rxq)
6839 return;
6841 /* netdev packet/byte stats come from ring counter. These are obtained
6842 * by summing up ring counters (done by ice_update_vsi_ring_stats).
6843 * But, only call the update routine and read the registers if VSI is
6844 * not down.
6845 */
6846 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6847 ice_update_vsi_ring_stats(vsi);
6848 stats->tx_packets = vsi_stats->tx_packets;
6849 stats->tx_bytes = vsi_stats->tx_bytes;
6850 stats->rx_packets = vsi_stats->rx_packets;
6851 stats->rx_bytes = vsi_stats->rx_bytes;
6853 /* The rest of the stats can be read from the hardware but instead we
6854 * just return values that the watchdog task has already obtained from
6855 * the hardware.
6856 */
6857 stats->multicast = vsi_stats->multicast;
6858 stats->tx_errors = vsi_stats->tx_errors;
6859 stats->tx_dropped = vsi_stats->tx_dropped;
6860 stats->rx_errors = vsi_stats->rx_errors;
6861 stats->rx_dropped = vsi_stats->rx_dropped;
6862 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6863 stats->rx_length_errors = vsi_stats->rx_length_errors;
6864 }
6866 /**
6867 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6868 * @vsi: VSI having NAPI disabled
6869 */
6870 static void ice_napi_disable_all(struct ice_vsi *vsi)
6871 {
6872 int q_idx;
6874 if (!vsi->netdev)
6875 return;
6877 ice_for_each_q_vector(vsi, q_idx) {
6878 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6880 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6881 napi_disable(&q_vector->napi);
6883 cancel_work_sync(&q_vector->tx.dim.work);
6884 cancel_work_sync(&q_vector->rx.dim.work);
6885 }
6886 }
6888 /**
6889 * ice_down - Shutdown the connection
6890 * @vsi: The VSI being stopped
6891 *
6892 * Caller of this function is expected to set the vsi->state ICE_DOWN bit
6893 */
6894 int ice_down(struct ice_vsi *vsi)
6895 {
6896 int i, tx_err, rx_err, vlan_err = 0;
6898 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6900 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6901 vlan_err = ice_vsi_del_vlan_zero(vsi);
6902 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6903 netif_carrier_off(vsi->netdev);
6904 netif_tx_disable(vsi->netdev);
6905 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6906 ice_eswitch_stop_all_tx_queues(vsi->back);
6907 }
6909 ice_vsi_dis_irq(vsi);
6911 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6912 if (tx_err)
6913 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6914 vsi->vsi_num, tx_err);
6915 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6916 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6917 if (tx_err)
6918 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6919 vsi->vsi_num, tx_err);
6920 }
6922 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6923 if (rx_err)
6924 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6925 vsi->vsi_num, rx_err);
6927 ice_napi_disable_all(vsi);
6929 ice_for_each_txq(vsi, i)
6930 ice_clean_tx_ring(vsi->tx_rings[i]);
6932 if (ice_is_xdp_ena_vsi(vsi))
6933 ice_for_each_xdp_txq(vsi, i)
6934 ice_clean_tx_ring(vsi->xdp_rings[i]);
6936 ice_for_each_rxq(vsi, i)
6937 ice_clean_rx_ring(vsi->rx_rings[i]);
6939 if (tx_err || rx_err || vlan_err) {
6940 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6941 vsi->vsi_num, vsi->vsw->sw_id);
6942 return -EIO;
6943 }
6945 return 0;
6946 }
6948 /**
6949 * ice_down_up - shutdown the VSI connection and bring it up
6950 * @vsi: the VSI to be reconnected
6951 */
6952 int ice_down_up(struct ice_vsi *vsi)
6953 {
6954 int ret;
6956 /* if DOWN already set, nothing to do */
6957 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
6958 return 0;
6960 ret = ice_down(vsi);
6961 if (ret)
6962 return ret;
6964 ret = ice_vsi_open(vsi);
6965 if (ret) {
6966 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
6967 return ret;
6968 }
6970 return 0;
6971 }
6973 /**
6974 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6975 * @vsi: VSI having resources allocated
6976 *
6977 * Return 0 on success, negative on failure
6978 */
6979 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6980 {
6981 int i, err = 0;
6983 if (!vsi->num_txq) {
6984 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6985 vsi->vsi_num);
6986 return -EINVAL;
6987 }
6989 ice_for_each_txq(vsi, i) {
6990 struct ice_tx_ring *ring = vsi->tx_rings[i];
6992 if (!ring)
6993 return -EINVAL;
6995 if (vsi->netdev)
6996 ring->netdev = vsi->netdev;
6997 err = ice_setup_tx_ring(ring);
6998 if (err)
6999 break;
7000 }
7002 return err;
7003 }
7005 /**
7006 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7007 * @vsi: VSI having resources allocated
7008 *
7009 * Return 0 on success, negative on failure
7010 */
7011 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7012 {
7013 int i, err = 0;
7015 if (!vsi->num_rxq) {
7016 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7017 vsi->vsi_num);
7018 return -EINVAL;
7019 }
7021 ice_for_each_rxq(vsi, i) {
7022 struct ice_rx_ring *ring = vsi->rx_rings[i];
7024 if (!ring)
7025 return -EINVAL;
7027 if (vsi->netdev)
7028 ring->netdev = vsi->netdev;
7029 err = ice_setup_rx_ring(ring);
7030 if (err)
7031 break;
7032 }
7034 return err;
7035 }
7037 /**
7038 * ice_vsi_open_ctrl - open control VSI for use
7039 * @vsi: the VSI to open
7040 *
7041 * Initialization of the Control VSI
7042 *
7043 * Returns 0 on success, negative value on error
7044 */
7045 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7046 {
7047 char int_name[ICE_INT_NAME_STR_LEN];
7048 struct ice_pf *pf = vsi->back;
7049 struct device *dev;
7050 int err;
7052 dev = ice_pf_to_dev(pf);
7053 /* allocate descriptors */
7054 err = ice_vsi_setup_tx_rings(vsi);
7055 if (err)
7056 goto err_setup_tx;
7058 err = ice_vsi_setup_rx_rings(vsi);
7059 if (err)
7060 goto err_setup_rx;
7062 err = ice_vsi_cfg_lan(vsi);
7063 if (err)
7064 goto err_setup_rx;
7066 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7067 dev_driver_string(dev), dev_name(dev));
7068 err = ice_vsi_req_irq_msix(vsi, int_name);
7069 if (err)
7070 goto err_setup_rx;
7072 ice_vsi_cfg_msix(vsi);
7074 err = ice_vsi_start_all_rx_rings(vsi);
7075 if (err)
7076 goto err_up_complete;
7078 clear_bit(ICE_VSI_DOWN, vsi->state);
7079 ice_vsi_ena_irq(vsi);
7081 return 0;
7083 err_up_complete:
7084 ice_down(vsi);
7085 err_setup_rx:
7086 ice_vsi_free_rx_rings(vsi);
7087 err_setup_tx:
7088 ice_vsi_free_tx_rings(vsi);
7090 return err;
7091 }
7093 /**
7094 * ice_vsi_open - Called when a network interface is made active
7095 * @vsi: the VSI to open
7096 *
7097 * Initialization of the VSI
7098 *
7099 * Returns 0 on success, negative value on error
7100 */
7101 int ice_vsi_open(struct ice_vsi *vsi)
7102 {
7103 char int_name[ICE_INT_NAME_STR_LEN];
7104 struct ice_pf *pf = vsi->back;
7105 int err;
7107 /* allocate descriptors */
7108 err = ice_vsi_setup_tx_rings(vsi);
7109 if (err)
7110 goto err_setup_tx;
7112 err = ice_vsi_setup_rx_rings(vsi);
7113 if (err)
7114 goto err_setup_rx;
7116 err = ice_vsi_cfg_lan(vsi);
7117 if (err)
7118 goto err_setup_rx;
7120 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7121 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7122 err = ice_vsi_req_irq_msix(vsi, int_name);
7123 if (err)
7124 goto err_setup_rx;
7126 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7128 if (vsi->type == ICE_VSI_PF) {
7129 /* Notify the stack of the actual queue counts. */
7130 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7131 if (err)
7132 goto err_set_qs;
7134 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7135 if (err)
7136 goto err_set_qs;
7137 }
7139 err = ice_up_complete(vsi);
7140 if (err)
7141 goto err_up_complete;
7143 return 0;
7145 err_up_complete:
7146 ice_down(vsi);
7147 err_set_qs:
7148 ice_vsi_free_irq(vsi);
7149 err_setup_rx:
7150 ice_vsi_free_rx_rings(vsi);
7151 err_setup_tx:
7152 ice_vsi_free_tx_rings(vsi);
7154 return err;
7155 }
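/* Editor's note: both open paths above use the conventional kernel unwind
 * idiom: acquire resources in order and, on failure, jump to the label that
 * releases everything acquired so far (labels unwind in reverse order). A
 * compressed standalone sketch with hypothetical helpers:
 *
 *	static int demo_setup_tx(void) { return 0; }
 *	static int demo_setup_rx(void) { return 0; }
 *	static void demo_free_tx(void) { }
 *
 *	static int demo_open(void)
 *	{
 *		int err;
 *
 *		err = demo_setup_tx();
 *		if (err)
 *			goto err_setup_tx;     // nothing to undo yet
 *		err = demo_setup_rx();
 *		if (err)
 *			goto err_setup_rx;     // must undo Tx
 *		return 0;
 *
 *	err_setup_rx:
 *		demo_free_tx();
 *	err_setup_tx:
 *		return err;
 *	}
 */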
7157 /**
7158 * ice_vsi_release_all - Delete all VSIs
7159 * @pf: PF from which all VSIs are being removed
7160 */
7161 static void ice_vsi_release_all(struct ice_pf *pf)
7162 {
7163 int err, i;
7165 if (!pf->vsi)
7166 return;
7168 ice_for_each_vsi(pf, i) {
7169 if (!pf->vsi[i])
7170 continue;
7172 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7173 continue;
7175 err = ice_vsi_release(pf->vsi[i]);
7176 if (err)
7177 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7178 i, err, pf->vsi[i]->vsi_num);
7179 }
7180 }
7182 /**
7183 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7184 * @pf: pointer to the PF instance
7185 * @type: VSI type to rebuild
7186 *
7187 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7188 */
7189 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7190 {
7191 struct device *dev = ice_pf_to_dev(pf);
7192 int i, err;
7194 ice_for_each_vsi(pf, i) {
7195 struct ice_vsi *vsi = pf->vsi[i];
7197 if (!vsi || vsi->type != type)
7198 continue;
7200 /* rebuild the VSI */
7201 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7202 if (err) {
7203 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7204 err, vsi->idx, ice_vsi_type_str(type));
7205 return err;
7206 }
7208 /* replay filters for the VSI */
7209 err = ice_replay_vsi(&pf->hw, vsi->idx);
7210 if (err) {
7211 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7212 err, vsi->idx, ice_vsi_type_str(type));
7213 return err;
7214 }
7216 /* Re-map HW VSI number, using VSI handle that has been
7217 * previously validated in ice_replay_vsi() call above
7218 */
7219 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7221 /* enable the VSI */
7222 err = ice_ena_vsi(vsi, false);
7223 if (err) {
7224 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7225 err, vsi->idx, ice_vsi_type_str(type));
7226 return err;
7227 }
7229 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7230 ice_vsi_type_str(type));
7231 }
7233 return 0;
7234 }
7236 /**
7237 * ice_update_pf_netdev_link - Update PF netdev link status
7238 * @pf: pointer to the PF instance
7239 */
7240 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7241 {
7242 bool link_up;
7243 int i;
7245 ice_for_each_vsi(pf, i) {
7246 struct ice_vsi *vsi = pf->vsi[i];
7248 if (!vsi || vsi->type != ICE_VSI_PF)
7249 return;
7251 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7252 if (link_up) {
7253 netif_carrier_on(pf->vsi[i]->netdev);
7254 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7255 } else {
7256 netif_carrier_off(pf->vsi[i]->netdev);
7257 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7258 }
7259 }
7260 }
7262 /**
7263 * ice_rebuild - rebuild after reset
7264 * @pf: PF to rebuild
7265 * @reset_type: type of reset
7266 *
7267 * Do not rebuild VF VSI in this flow because that is already handled via
7268 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
7269 * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
7270 * to reset/rebuild all the VF VSI twice.
7271 */
7272 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7273 {
7274 struct device *dev = ice_pf_to_dev(pf);
7275 struct ice_hw *hw = &pf->hw;
7276 bool dvm;
7277 int err;
7279 if (test_bit(ICE_DOWN, pf->state))
7280 goto clear_recovery;
7282 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7284 #define ICE_EMP_RESET_SLEEP_MS 5000
7285 if (reset_type == ICE_RESET_EMPR) {
7286 /* If an EMP reset has occurred, any previously pending flash
7287 * update will have completed. We no longer know whether or
7288 * not the NVM update EMP reset is restricted.
7290 pf->fw_emp_reset_disabled = false;
7292 msleep(ICE_EMP_RESET_SLEEP_MS);
7295 err = ice_init_all_ctrlq(hw);
7296 if (err) {
7297 dev_err(dev, "control queues init failed %d\n", err);
7298 goto err_init_ctrlq;
7299 }
7301 /* if DDP was previously loaded successfully */
7302 if (!ice_is_safe_mode(pf)) {
7303 /* reload the SW DB of filter tables */
7304 if (reset_type == ICE_RESET_PFR)
7305 ice_fill_blk_tbls(hw);
7307 /* Reload DDP Package after CORER/GLOBR reset */
7308 ice_load_pkg(NULL, pf);
7309 }
7311 err = ice_clear_pf_cfg(hw);
7312 if (err) {
7313 dev_err(dev, "clear PF configuration failed %d\n", err);
7314 goto err_init_ctrlq;
7315 }
7317 ice_clear_pxe_mode(hw);
7319 err = ice_init_nvm(hw);
7320 if (err) {
7321 dev_err(dev, "ice_init_nvm failed %d\n", err);
7322 goto err_init_ctrlq;
7323 }
7325 err = ice_get_caps(hw);
7326 if (err) {
7327 dev_err(dev, "ice_get_caps failed %d\n", err);
7328 goto err_init_ctrlq;
7329 }
7331 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7332 if (err) {
7333 dev_err(dev, "set_mac_cfg failed %d\n", err);
7334 goto err_init_ctrlq;
7335 }
7337 dvm = ice_is_dvm_ena(hw);
7339 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7340 if (err)
7341 goto err_init_ctrlq;
7343 err = ice_sched_init_port(hw->port_info);
7344 if (err)
7345 goto err_sched_init_port;
7347 /* start misc vector */
7348 err = ice_req_irq_msix_misc(pf);
7349 if (err) {
7350 dev_err(dev, "misc vector setup failed: %d\n", err);
7351 goto err_sched_init_port;
7352 }
7354 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7355 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7356 if (!rd32(hw, PFQF_FD_SIZE)) {
7357 u16 unused, guar, b_effort;
7359 guar = hw->func_caps.fd_fltr_guar;
7360 b_effort = hw->func_caps.fd_fltr_best_effort;
7362 /* force guaranteed filter pool for PF */
7363 ice_alloc_fd_guar_item(hw, &unused, guar);
7364 /* force shared filter pool for PF */
7365 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7366 }
7367 }
7369 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7370 ice_dcb_rebuild(pf);
7372 /* If the PF previously had enabled PTP, PTP init needs to happen before
7373 * the VSI rebuild. If not, this causes the PTP link status events to
7374 * fail.
7375 */
7376 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7377 ice_ptp_reset(pf);
7379 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7380 ice_gnss_init(pf);
7382 /* rebuild PF VSI */
7383 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7384 if (err) {
7385 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7386 goto err_vsi_rebuild;
7387 }
7389 /* configure PTP timestamping after VSI rebuild */
7390 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7391 ice_ptp_cfg_timestamp(pf, false);
7393 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7394 if (err) {
7395 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7396 goto err_vsi_rebuild;
7397 }
7399 if (reset_type == ICE_RESET_PFR) {
7400 err = ice_rebuild_channels(pf);
7401 if (err) {
7402 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7403 err);
7404 goto err_vsi_rebuild;
7405 }
7406 }
7408 /* If Flow Director is active */
7409 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7410 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7411 if (err) {
7412 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7413 goto err_vsi_rebuild;
7414 }
7416 /* replay HW Flow Director recipes */
7418 ice_fdir_replay_flows(hw);
7420 /* replay Flow Director filters */
7421 ice_fdir_replay_fltrs(pf);
7423 ice_rebuild_arfs(pf);
7424 }
7426 ice_update_pf_netdev_link(pf);
7428 /* tell the firmware we are up */
7429 err = ice_send_version(pf);
7430 if (err) {
7431 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7432 err);
7433 goto err_vsi_rebuild;
7434 }
7436 ice_replay_post(hw);
7438 /* if we get here, reset flow is successful */
7439 clear_bit(ICE_RESET_FAILED, pf->state);
7441 ice_plug_aux_dev(pf);
7442 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7443 ice_lag_rebuild(pf);
7444 return;
7445 err_vsi_rebuild:
7446 ice_vsi_release_all(pf);
7447 err_sched_init_port:
7448 ice_sched_cleanup_all(hw);
7449 err_init_ctrlq:
7450 ice_shutdown_all_ctrlq(hw);
7451 set_bit(ICE_RESET_FAILED, pf->state);
7452 clear_recovery:
7453 /* set this bit in PF state to control service task scheduling */
7454 set_bit(ICE_NEEDS_RESTART, pf->state);
7455 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7456 }
7458 /**
7459 * ice_change_mtu - NDO callback to change the MTU
7460 * @netdev: network interface device structure
7461 * @new_mtu: new value for maximum frame size
7462 *
7463 * Returns 0 on success, negative on failure
7464 */
7465 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7466 {
7467 struct ice_netdev_priv *np = netdev_priv(netdev);
7468 struct ice_vsi *vsi = np->vsi;
7469 struct ice_pf *pf = vsi->back;
7470 struct bpf_prog *prog;
7471 u8 count = 0;
7472 int err = 0;
7474 if (new_mtu == (int)netdev->mtu) {
7475 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7476 return 0;
7477 }
7479 prog = vsi->xdp_prog;
7480 if (prog && !prog->aux->xdp_has_frags) {
7481 int frame_size = ice_max_xdp_frame_size(vsi);
7483 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7484 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7485 frame_size - ICE_ETH_PKT_HDR_PAD);
7486 return -EINVAL;
7487 }
7488 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7489 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7490 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7491 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7492 return -EINVAL;
7493 }
7494 }
7496 /* if a reset is in progress, wait for some time for it to complete */
7497 do {
7498 if (ice_is_reset_in_progress(pf->state)) {
7499 count++;
7500 usleep_range(1000, 2000);
7501 } else {
7502 break;
7503 }
7505 } while (count < 100);
7507 if (count == 100) {
7508 netdev_err(netdev, "can't change MTU. Device is busy\n");
7509 return -EBUSY;
7510 }
7512 netdev->mtu = (unsigned int)new_mtu;
7513 err = ice_down_up(vsi);
7514 if (err)
7515 return err;
7517 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7518 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7520 return err;
7521 }
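/* Editor's note: the bounded poll above is a common pattern for waiting
 * out a transient condition without blocking forever. A standalone sketch
 * (hypothetical names; the driver's bound is 100 iterations of 1-2 ms):
 *
 *	static int demo_wait_not_busy(int (*busy)(void))
 *	{
 *		int count = 0;
 *
 *		while (busy() && count < 100) {
 *			count++;
 *			// real code sleeps ~1-2 ms per iteration here
 *		}
 *		return count < 100 ? 0 : -1;  // -1: still busy, caller bails
 *	}
 */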
7523 /**
7524 * ice_eth_ioctl - Access the hwtstamp interface
7525 * @netdev: network interface device structure
7526 * @ifr: interface request data
7527 * @cmd: ioctl command
7528 */
7529 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7530 {
7531 struct ice_netdev_priv *np = netdev_priv(netdev);
7532 struct ice_pf *pf = np->vsi->back;
7534 switch (cmd) {
7535 case SIOCGHWTSTAMP:
7536 return ice_ptp_get_ts_config(pf, ifr);
7537 case SIOCSHWTSTAMP:
7538 return ice_ptp_set_ts_config(pf, ifr);
7539 default:
7540 return -EOPNOTSUPP;
7541 }
7542 }
7544 /**
7545 * ice_aq_str - convert AQ err code to a string
7546 * @aq_err: the AQ error code to convert
7547 */
7548 const char *ice_aq_str(enum ice_aq_err aq_err)
7549 {
7550 switch (aq_err) {
7551 case ICE_AQ_RC_OK:
7552 return "OK";
7553 case ICE_AQ_RC_EPERM:
7554 return "ICE_AQ_RC_EPERM";
7555 case ICE_AQ_RC_ENOENT:
7556 return "ICE_AQ_RC_ENOENT";
7557 case ICE_AQ_RC_ENOMEM:
7558 return "ICE_AQ_RC_ENOMEM";
7559 case ICE_AQ_RC_EBUSY:
7560 return "ICE_AQ_RC_EBUSY";
7561 case ICE_AQ_RC_EEXIST:
7562 return "ICE_AQ_RC_EEXIST";
7563 case ICE_AQ_RC_EINVAL:
7564 return "ICE_AQ_RC_EINVAL";
7565 case ICE_AQ_RC_ENOSPC:
7566 return "ICE_AQ_RC_ENOSPC";
7567 case ICE_AQ_RC_ENOSYS:
7568 return "ICE_AQ_RC_ENOSYS";
7569 case ICE_AQ_RC_EMODE:
7570 return "ICE_AQ_RC_EMODE";
7571 case ICE_AQ_RC_ENOSEC:
7572 return "ICE_AQ_RC_ENOSEC";
7573 case ICE_AQ_RC_EBADSIG:
7574 return "ICE_AQ_RC_EBADSIG";
7575 case ICE_AQ_RC_ESVN:
7576 return "ICE_AQ_RC_ESVN";
7577 case ICE_AQ_RC_EBADMAN:
7578 return "ICE_AQ_RC_EBADMAN";
7579 case ICE_AQ_RC_EBADBUF:
7580 return "ICE_AQ_RC_EBADBUF";
7581 }
7583 return "ICE_AQ_RC_UNKNOWN";
7584 }
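/* Editor's note: a typical (hypothetical) call site pairs the kernel errno
 * with the firmware admin-queue status string when logging failures, e.g.:
 *
 *	err = ice_aq_set_rss_lut(hw, &params);
 *	if (err)
 *		dev_err(dev, "AQ command failed: %d, fw status %s\n",
 *			err, ice_aq_str(hw->adminq.sq_last_status));
 */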
7586 /**
7587 * ice_set_rss_lut - Set RSS LUT
7588 * @vsi: Pointer to VSI structure
7589 * @lut: Lookup table
7590 * @lut_size: Lookup table size
7591 *
7592 * Returns 0 on success, negative on failure
7593 */
7594 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7595 {
7596 struct ice_aq_get_set_rss_lut_params params = {};
7597 struct ice_hw *hw = &vsi->back->hw;
7598 int status;
7600 if (!lut)
7601 return -EINVAL;
7603 params.vsi_handle = vsi->idx;
7604 params.lut_size = lut_size;
7605 params.lut_type = vsi->rss_lut_type;
7606 params.lut = lut;
7608 status = ice_aq_set_rss_lut(hw, &params);
7609 if (status)
7610 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7611 status, ice_aq_str(hw->adminq.sq_last_status));
7613 return status;
7614 }
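/* Editor's note: callers typically populate the LUT round-robin across the
 * active Rx queues before handing it to ice_set_rss_lut(); this mirrors
 * the driver's ice_fill_rss_lut() helper. A minimal sketch (hypothetical
 * name):
 *
 *	static void demo_fill_rss_lut(uint8_t *lut, uint16_t lut_size,
 *				      uint16_t rss_size)
 *	{
 *		uint16_t i;
 *
 *		for (i = 0; i < lut_size; i++)
 *			lut[i] = i % rss_size;  // spread entries over queues
 *	}
 */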
7616 /**
7617 * ice_set_rss_key - Set RSS key
7618 * @vsi: Pointer to the VSI structure
7619 * @seed: RSS hash seed
7620 *
7621 * Returns 0 on success, negative on failure
7622 */
7623 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7624 {
7625 struct ice_hw *hw = &vsi->back->hw;
7626 int status;
7628 if (!seed)
7629 return -EINVAL;
7631 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7632 if (status)
7633 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7634 status, ice_aq_str(hw->adminq.sq_last_status));
7636 return status;
7637 }
7639 /**
7640 * ice_get_rss_lut - Get RSS LUT
7641 * @vsi: Pointer to VSI structure
7642 * @lut: Buffer to store the lookup table entries
7643 * @lut_size: Size of buffer to store the lookup table entries
7644 *
7645 * Returns 0 on success, negative on failure
7646 */
7647 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7648 {
7649 struct ice_aq_get_set_rss_lut_params params = {};
7650 struct ice_hw *hw = &vsi->back->hw;
7651 int status;
7653 if (!lut)
7654 return -EINVAL;
7656 params.vsi_handle = vsi->idx;
7657 params.lut_size = lut_size;
7658 params.lut_type = vsi->rss_lut_type;
7659 params.lut = lut;
7661 status = ice_aq_get_rss_lut(hw, &params);
7662 if (status)
7663 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7664 status, ice_aq_str(hw->adminq.sq_last_status));
7666 return status;
7667 }
7669 /**
7670 * ice_get_rss_key - Get RSS key
7671 * @vsi: Pointer to VSI structure
7672 * @seed: Buffer to store the key in
7673 *
7674 * Returns 0 on success, negative on failure
7675 */
7676 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7677 {
7678 struct ice_hw *hw = &vsi->back->hw;
7679 int status;
7681 if (!seed)
7682 return -EINVAL;
7684 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7685 if (status)
7686 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7687 status, ice_aq_str(hw->adminq.sq_last_status));
7689 return status;
7690 }
7692 /**
7693 * ice_bridge_getlink - Get the hardware bridge mode
7694 * @skb: skb buff
7695 * @pid: process ID
7696 * @seq: RTNL message seq
7697 * @dev: the netdev being configured
7698 * @filter_mask: filter mask passed in
7699 * @nlflags: netlink flags passed in
7700 *
7701 * Return the bridge mode (VEB/VEPA)
7702 */
7703 static int
7704 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7705 struct net_device *dev, u32 filter_mask, int nlflags)
7706 {
7707 struct ice_netdev_priv *np = netdev_priv(dev);
7708 struct ice_vsi *vsi = np->vsi;
7709 struct ice_pf *pf = vsi->back;
7710 u16 bmode;
7712 bmode = pf->first_sw->bridge_mode;
7714 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7715 filter_mask, NULL);
7716 }
7718 /**
7719 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7720 * @vsi: Pointer to VSI structure
7721 * @bmode: Hardware bridge mode (VEB/VEPA)
7722 *
7723 * Returns 0 on success, negative on failure
7724 */
7725 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7726 {
7727 struct ice_aqc_vsi_props *vsi_props;
7728 struct ice_hw *hw = &vsi->back->hw;
7729 struct ice_vsi_ctx *ctxt;
7730 int ret;
7732 vsi_props = &vsi->info;
7734 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7735 if (!ctxt)
7736 return -ENOMEM;
7738 ctxt->info = vsi->info;
7740 if (bmode == BRIDGE_MODE_VEB)
7741 /* change from VEPA to VEB mode */
7742 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7743 else
7744 /* change from VEB to VEPA mode */
7745 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7746 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7748 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7749 if (ret) {
7750 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7751 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7752 goto out;
7753 }
7754 /* Update sw flags for book keeping */
7755 vsi_props->sw_flags = ctxt->info.sw_flags;
7757 out:
7758 kfree(ctxt);
7759 return ret;
7760 }
7762 /**
7763 * ice_bridge_setlink - Set the hardware bridge mode
7764 * @dev: the netdev being configured
7765 * @nlh: RTNL message
7766 * @flags: bridge setlink flags
7767 * @extack: netlink extended ack
7768 *
7769 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7770 * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
7771 * not already set) for all VSIs connected to this switch. It also updates the
7772 * unicast switch filter rules for the corresponding switch of the netdev.
7773 */
7774 static int
7775 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7776 u16 __always_unused flags,
7777 struct netlink_ext_ack __always_unused *extack)
7778 {
7779 struct ice_netdev_priv *np = netdev_priv(dev);
7780 struct ice_pf *pf = np->vsi->back;
7781 struct nlattr *attr, *br_spec;
7782 struct ice_hw *hw = &pf->hw;
7783 struct ice_sw *pf_sw;
7784 int rem, v, err = 0;
7786 pf_sw = pf->first_sw;
7787 /* find the attribute in the netlink message */
7788 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7789 if (!br_spec)
7790 return -EINVAL;
7791 nla_for_each_nested(attr, br_spec, rem) {
7792 __u16 mode;
7793 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7794 continue;
7795 mode = nla_get_u16(attr);
7796 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7797 return -EINVAL;
7798 /* Continue if bridge mode is not being flipped */
7799 if (mode == pf_sw->bridge_mode)
7800 continue;
7801 /* Iterates through the PF VSI list and update the loopback
7802 * mode of the VSI
7803 */
7804 ice_for_each_vsi(pf, v) {
7805 if (!pf->vsi[v])
7806 continue;
7807 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7808 if (err)
7809 return err;
7810 }
7812 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7813 /* Update the unicast switch filter rules for the corresponding
7814 * switch of the netdev
7815 */
7816 err = ice_update_sw_rule_bridge_mode(hw);
7817 if (err) {
7818 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7819 mode, err,
7820 ice_aq_str(hw->adminq.sq_last_status));
7821 /* revert hw->evb_veb */
7822 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7823 return err;
7824 }
7826 pf_sw->bridge_mode = mode;
7827 }
7829 return 0;
7830 }
7832 /**
7833 * ice_tx_timeout - Respond to a Tx Hang
7834 * @netdev: network interface device structure
7835 * @txqueue: Tx queue
7836 */
7837 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7838 {
7839 struct ice_netdev_priv *np = netdev_priv(netdev);
7840 struct ice_tx_ring *tx_ring = NULL;
7841 struct ice_vsi *vsi = np->vsi;
7842 struct ice_pf *pf = vsi->back;
7843 u32 i;
7845 pf->tx_timeout_count++;
7847 /* Check if PFC is enabled for the TC to which the queue belongs
7848 * to. If yes then Tx timeout is not caused by a hung queue, no
7849 * need to reset and rebuild
7850 */
7851 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7852 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7853 txqueue);
7854 return;
7855 }
7857 /* now that we have an index, find the tx_ring struct */
7858 ice_for_each_txq(vsi, i)
7859 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7860 if (txqueue == vsi->tx_rings[i]->q_index) {
7861 tx_ring = vsi->tx_rings[i];
7862 break;
7863 }
7865 /* Reset recovery level if enough time has elapsed after last timeout.
7866 * Also ensure no new reset action happens before next timeout period.
7868 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7869 pf->tx_timeout_recovery_level = 1;
7870 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7871 netdev->watchdog_timeo)))
7872 return;
7874 if (tx_ring) {
7875 struct ice_hw *hw = &pf->hw;
7876 u32 head, val = 0;
7878 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7879 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7880 /* Read interrupt register */
7881 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7883 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7884 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7885 head, tx_ring->next_to_use, val);
7886 }
7888 pf->tx_timeout_last_recovery = jiffies;
7889 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7890 pf->tx_timeout_recovery_level, txqueue);
7892 switch (pf->tx_timeout_recovery_level) {
7893 case 1:
7894 set_bit(ICE_PFR_REQ, pf->state);
7895 break;
7896 case 2:
7897 set_bit(ICE_CORER_REQ, pf->state);
7898 break;
7899 case 3:
7900 set_bit(ICE_GLOBR_REQ, pf->state);
7901 break;
7902 default:
7903 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7904 set_bit(ICE_DOWN, pf->state);
7905 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7906 set_bit(ICE_SERVICE_DIS, pf->state);
7907 break;
7908 }
7910 ice_service_task_schedule(pf);
7911 pf->tx_timeout_recovery_level++;
7912 }
7914 /**
7915 * ice_setup_tc_cls_flower - flower classifier offloads
7916 * @np: net device to configure
7917 * @filter_dev: device on which filter is added
7918 * @cls_flower: offload data
7919 */
7920 static int
7921 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7922 struct net_device *filter_dev,
7923 struct flow_cls_offload *cls_flower)
7924 {
7925 struct ice_vsi *vsi = np->vsi;
7927 if (cls_flower->common.chain_index)
7928 return -EOPNOTSUPP;
7930 switch (cls_flower->command) {
7931 case FLOW_CLS_REPLACE:
7932 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7933 case FLOW_CLS_DESTROY:
7934 return ice_del_cls_flower(vsi, cls_flower);
7935 default:
7936 return -EINVAL;
7937 }
7938 }
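/* Editor's note: an example (hypothetical interface name and addresses) of
 * the user-space command that reaches this handler via FLOW_CLS_REPLACE,
 * steering a matching flow to traffic class 1 in hardware:
 *
 *	tc filter add dev eth0 protocol ip ingress prio 1 \
 *		flower dst_ip 192.168.1.10/32 ip_proto tcp dst_port 80 \
 *		skip_sw hw_tc 1
 */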
7940 /**
7941 * ice_setup_tc_block_cb - callback handler registered for TC block
7942 * @type: TC SETUP type
7943 * @type_data: TC flower offload data that contains user input
7944 * @cb_priv: netdev private data
7945 */
7946 static int
7947 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7948 {
7949 struct ice_netdev_priv *np = cb_priv;
7951 switch (type) {
7952 case TC_SETUP_CLSFLOWER:
7953 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7954 type_data);
7955 default:
7956 return -EOPNOTSUPP;
7957 }
7958 }
7960 /**
7961 * ice_validate_mqprio_qopt - Validate TCF input parameters
7962 * @vsi: Pointer to VSI
7963 * @mqprio_qopt: input parameters for mqprio queue configuration
7964 *
7965 * This function validates MQPRIO params, such as qcount (power of 2 wherever
7966 * needed), and makes sure the user doesn't specify qcount and BW rate limit
7967 * for TCs, which are more than "num_tc"
7968 */
7969 static int
7970 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7971 struct tc_mqprio_qopt_offload *mqprio_qopt)
7972 {
7973 int non_power_of_2_qcount = 0;
7974 struct ice_pf *pf = vsi->back;
7975 int max_rss_q_cnt = 0;
7976 u64 sum_min_rate = 0;
7977 struct device *dev;
7978 int i, speed;
7979 u8 num_tc;
7981 if (vsi->type != ICE_VSI_PF)
7982 return -EINVAL;
7984 if (mqprio_qopt->qopt.offset[0] != 0 ||
7985 mqprio_qopt->qopt.num_tc < 1 ||
7986 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7987 return -EINVAL;
7989 dev = ice_pf_to_dev(pf);
7990 vsi->ch_rss_size = 0;
7991 num_tc = mqprio_qopt->qopt.num_tc;
7992 speed = ice_get_link_speed_kbps(vsi);
7994 for (i = 0; num_tc; i++) {
7995 int qcount = mqprio_qopt->qopt.count[i];
7996 u64 max_rate, min_rate, rem;
7998 if (!qcount)
7999 return -EINVAL;
8001 if (is_power_of_2(qcount)) {
8002 if (non_power_of_2_qcount &&
8003 qcount > non_power_of_2_qcount) {
8004 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8005 qcount, non_power_of_2_qcount);
8006 return -EPERM;
8007 }
8008 if (qcount > max_rss_q_cnt)
8009 max_rss_q_cnt = qcount;
8010 } else {
8011 if (non_power_of_2_qcount &&
8012 qcount != non_power_of_2_qcount) {
8013 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8014 qcount, non_power_of_2_qcount);
8015 return -EPERM;
8016 }
8017 if (qcount < max_rss_q_cnt) {
8018 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8019 qcount, max_rss_q_cnt);
8020 return -EPERM;
8021 }
8022 max_rss_q_cnt = qcount;
8023 non_power_of_2_qcount = qcount;
8024 }
8026 /* TC command takes input in K/N/Gbps or K/M/Gbit etc but
8027 * converts the bandwidth rate limit into Bytes/s when
8028 * passing it down to the driver. So convert input bandwidth
8029 * from Bytes/s to Kbps
8030 */
8031 max_rate = mqprio_qopt->max_rate[i];
8032 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8034 /* min_rate is minimum guaranteed rate and it can't be zero */
8035 min_rate = mqprio_qopt->min_rate[i];
8036 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8037 sum_min_rate += min_rate;
8039 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8040 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8041 min_rate, ICE_MIN_BW_LIMIT);
8042 return -EINVAL;
8043 }
8045 if (max_rate && max_rate > speed) {
8046 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8047 i, max_rate, speed);
8048 return -EINVAL;
8049 }
8051 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8052 if (rem) {
8053 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8054 i, ICE_MIN_BW_LIMIT);
8055 return -EINVAL;
8056 }
8058 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8059 if (rem) {
8060 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8061 i, ICE_MIN_BW_LIMIT);
8062 return -EINVAL;
8063 }
8065 /* min_rate can't be more than max_rate, except when max_rate
8066 * is zero (implies max_rate sought is max line rate). In such
8067 * a case min_rate can be more than max.
8068 */
8069 if (max_rate && min_rate > max_rate) {
8070 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8071 min_rate, max_rate);
8072 return -EINVAL;
8073 }
8075 if (i >= mqprio_qopt->qopt.num_tc - 1)
8076 break;
8077 if (mqprio_qopt->qopt.offset[i + 1] !=
8078 (mqprio_qopt->qopt.offset[i] + qcount))
8079 return -EINVAL;
8080 }
8081 if (vsi->num_rxq <
8082 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8083 return -EINVAL;
8084 if (vsi->num_txq <
8085 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8086 return -EINVAL;
8088 if (sum_min_rate && sum_min_rate > (u64)speed) {
8089 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8090 sum_min_rate, speed);
8091 return -EINVAL;
8092 }
8094 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8095 vsi->ch_rss_size = max_rss_q_cnt;
8097 return 0;
8098 }
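/* Editor's note: an example (hypothetical interface name) of an mqprio
 * request that satisfies the checks above: both queue counts are powers of
 * two, offsets are contiguous (4@0, 8@4), and the rates are multiples of
 * ICE_MIN_BW_LIMIT:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 \
 *		queues 4@0 8@4 hw 1 mode channel \
 *		shaper bw_rlimit max_rate 4Gbit 8Gbit
 */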
8100 /**
8101 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8102 * @pf: ptr to PF device
8103 * @vsi: ptr to VSI
8104 */
8105 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8106 {
8107 struct device *dev = ice_pf_to_dev(pf);
8108 bool added = false;
8109 struct ice_hw *hw;
8110 int flow;
8112 if (!(vsi->num_gfltr || vsi->num_bfltr))
8113 return -EINVAL;
8115 hw = &pf->hw;
8116 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8117 struct ice_fd_hw_prof *prof;
8118 int tun, status;
8119 u64 entry_h;
8121 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8122 hw->fdir_prof[flow]->cnt))
8123 continue;
8125 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8126 enum ice_flow_priority prio;
8127 u64 prof_id;
8129 /* add this VSI to FDir profile for this flow */
8130 prio = ICE_FLOW_PRIO_NORMAL;
8131 prof = hw->fdir_prof[flow];
8132 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8133 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8134 prof->vsi_h[0], vsi->idx,
8135 prio, prof->fdir_seg[tun],
8136 &entry_h);
8137 if (status) {
8138 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8139 vsi->idx, flow);
8140 continue;
8141 }
8143 prof->entry_h[prof->cnt][tun] = entry_h;
8144 }
8146 /* store VSI for filter replay and delete */
8147 prof->vsi_h[prof->cnt] = vsi->idx;
8148 prof->cnt++;
8150 added = true;
8151 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8152 flow);
8153 }
8155 if (!added)
8156 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8158 return 0;
8159 }
8161 /**
8162 * ice_add_channel - add a channel by adding VSI
8163 * @pf: ptr to PF device
8164 * @sw_id: underlying HW switching element ID
8165 * @ch: ptr to channel structure
8166 *
8167 * Add a channel (VSI) using add_vsi and queue_map
8168 */
8169 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8170 {
8171 struct device *dev = ice_pf_to_dev(pf);
8172 struct ice_vsi *vsi;
8174 if (ch->type != ICE_VSI_CHNL) {
8175 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8176 return -EINVAL;
8177 }
8179 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8180 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8181 dev_err(dev, "create chnl VSI failure\n");
8182 return -EINVAL;
8183 }
8185 ice_add_vsi_to_fdir(pf, vsi);
8187 ch->sw_id = sw_id;
8188 ch->vsi_num = vsi->vsi_num;
8189 ch->info.mapping_flags = vsi->info.mapping_flags;
8190 ch->ch_vsi = vsi;
8191 /* set the back pointer of channel for newly created VSI */
8192 vsi->ch = ch;
8194 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8195 sizeof(vsi->info.q_mapping));
8196 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8197 sizeof(vsi->info.tc_mapping));
8199 return 0;
8200 }
8202 /**
8203 * ice_chnl_cfg_res - Configure channel specific resources
8204 * @vsi: the VSI being setup
8205 * @ch: ptr to channel structure
8206 *
8207 * Configure channel specific resources such as rings, vector.
8208 */
8209 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8210 {
8211 int i;
8213 for (i = 0; i < ch->num_txq; i++) {
8214 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8215 struct ice_ring_container *rc;
8216 struct ice_tx_ring *tx_ring;
8217 struct ice_rx_ring *rx_ring;
8219 tx_ring = vsi->tx_rings[ch->base_q + i];
8220 rx_ring = vsi->rx_rings[ch->base_q + i];
8221 if (!tx_ring || !rx_ring)
8222 continue;
8224 /* setup ring being channel enabled */
8225 tx_ring->ch = ch;
8226 rx_ring->ch = ch;
8228 /* following code block sets up vector specific attributes */
8229 tx_q_vector = tx_ring->q_vector;
8230 rx_q_vector = rx_ring->q_vector;
8231 if (!tx_q_vector && !rx_q_vector)
8232 continue;
8234 if (tx_q_vector) {
8235 tx_q_vector->ch = ch;
8236 /* setup Tx and Rx ITR setting if DIM is off */
8237 rc = &tx_q_vector->tx;
8238 if (!ITR_IS_DYNAMIC(rc))
8239 ice_write_itr(rc, rc->itr_setting);
8240 }
8241 if (rx_q_vector) {
8242 rx_q_vector->ch = ch;
8243 /* setup Tx and Rx ITR setting if DIM is off */
8244 rc = &rx_q_vector->rx;
8245 if (!ITR_IS_DYNAMIC(rc))
8246 ice_write_itr(rc, rc->itr_setting);
8247 }
8248 }
8250 /* it is safe to assume that, if channel has non-zero num_t[r]xq, then
8251 * the GLINT_ITR register would have been written to perform in-context
8252 * update, hence perform flush
8253 */
8254 if (ch->num_txq || ch->num_rxq)
8255 ice_flush(&vsi->back->hw);
8256 }
8258 /**
8259 * ice_cfg_chnl_all_res - configure channel resources
8260 * @vsi: ptr to main_vsi
8261 * @ch: ptr to channel structure
8262 *
8263 * This function configures channel specific resources such as flow-director
8264 * counter index, and other resources such as queues, vectors, ITR settings
8265 */
8266 static void
8267 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8268 {
8269 /* configure channel (aka ADQ) resources such as queues, vectors,
8270 * ITR settings for channel specific vectors and anything else
8271 */
8272 ice_chnl_cfg_res(vsi, ch);
8273 }
8275 /**
8276 * ice_setup_hw_channel - setup new channel
8277 * @pf: ptr to PF device
8278 * @vsi: the VSI being setup
8279 * @ch: ptr to channel structure
8280 * @sw_id: underlying HW switching element ID
8281 * @type: type of channel to be created (VMDq2/VF)
8282 *
8283 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8284 * and configures Tx rings accordingly
8285 */
8286 static int
8287 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8288 struct ice_channel *ch, u16 sw_id, u8 type)
8289 {
8290 struct device *dev = ice_pf_to_dev(pf);
8291 int ret;
8293 ch->base_q = vsi->next_base_q;
8294 ch->type = type;
8296 ret = ice_add_channel(pf, sw_id, ch);
8297 if (ret) {
8298 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8299 return ret;
8300 }
8302 /* configure/setup ADQ specific resources */
8303 ice_cfg_chnl_all_res(vsi, ch);
8305 /* make sure to update the next_base_q so that subsequent channel's
8306 * (aka ADQ) VSI queue map is correct
8307 */
8308 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8309 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8310 ch->num_rxq);
8312 return 0;
8313 }
8315 /**
8316 * ice_setup_channel - setup new channel using uplink element
8317 * @pf: ptr to PF device
8318 * @vsi: the VSI being setup
8319 * @ch: ptr to channel structure
8320 *
8321 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8322 * and uplink switching element
8323 */
8324 static bool
8325 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8326 struct ice_channel *ch)
8327 {
8328 struct device *dev = ice_pf_to_dev(pf);
8329 u16 sw_id;
8330 int ret;
8332 if (vsi->type != ICE_VSI_PF) {
8333 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8334 return false;
8335 }
8337 sw_id = pf->first_sw->sw_id;
8339 /* create channel (VSI) */
8340 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8341 if (ret) {
8342 dev_err(dev, "failed to setup hw_channel\n");
8343 return false;
8344 }
8345 dev_dbg(dev, "successfully created channel()\n");
8347 return ch->ch_vsi ? true : false;
8348 }
8350 /**
8351 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8352 * @vsi: VSI to be configured
8353 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8354 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8355 */
8356 static int
8357 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8358 {
8359 int err;
8361 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8362 if (err)
8363 return err;
8365 return ice_set_max_bw_limit(vsi, max_tx_rate);
8366 }
8368 /**
8369 * ice_create_q_channel - function to create channel
8370 * @vsi: VSI to be configured
8371 * @ch: ptr to channel (it contains channel specific params)
8372 *
8373 * This function creates channel (VSI) using num_queues specified by user,
8374 * reconfigs RSS if needed.
8375 */
8376 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8377 {
8378 struct ice_pf *pf = vsi->back;
8379 struct device *dev;
8381 if (!ch)
8382 return -EINVAL;
8384 dev = ice_pf_to_dev(pf);
8385 if (!ch->num_txq || !ch->num_rxq) {
8386 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8387 return -EINVAL;
8388 }
8390 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8391 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8392 vsi->cnt_q_avail, ch->num_txq);
8393 return -EINVAL;
8394 }
8396 if (!ice_setup_channel(pf, vsi, ch)) {
8397 dev_info(dev, "Failed to setup channel\n");
8398 return -EINVAL;
8399 }
8400 /* configure BW rate limit */
8401 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8402 int ret;
8404 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8405 ch->min_tx_rate);
8406 if (ret)
8407 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8408 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8409 else
8410 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8411 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8412 }
8414 vsi->cnt_q_avail -= ch->num_txq;
8416 return 0;
8417 }
8419 /**
8420 * ice_rem_all_chnl_fltrs - removes all channel filters
8421 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8422 *
8423 * Remove all advanced switch filters only if they are channel specific
8424 * tc-flower based filters
8425 */
8426 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8427 {
8428 struct ice_tc_flower_fltr *fltr;
8429 struct hlist_node *node;
8431 /* to remove all channel filters, iterate an ordered list of filters */
8432 hlist_for_each_entry_safe(fltr, node,
8433 &pf->tc_flower_fltr_list,
8434 tc_flower_node) {
8435 struct ice_rule_query_data rule;
8436 int status;
8438 /* for now process only channel specific filters */
8439 if (!ice_is_chnl_fltr(fltr))
8440 continue;
8442 rule.rid = fltr->rid;
8443 rule.rule_id = fltr->rule_id;
8444 rule.vsi_handle = fltr->dest_vsi_handle;
8445 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8446 if (status) {
8447 if (status == -ENOENT)
8448 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8449 fltr->rule_id);
8450 else
8451 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8452 status);
8453 } else if (fltr->dest_vsi) {
8454 /* update advanced switch filter count */
8455 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8456 u32 flags = fltr->flags;
8458 fltr->dest_vsi->num_chnl_fltr--;
8459 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8460 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8461 pf->num_dmac_chnl_fltrs--;
8462 }
8463 }
8465 hlist_del(&fltr->tc_flower_node);
8466 kfree(fltr);
8467 }
8468 }
8470 /**
8471 * ice_remove_q_channels - Remove queue channels for the TCs
8472 * @vsi: VSI to be configured
8473 * @rem_fltr: delete advanced switch filter or not
8474 *
8475 * Remove queue channels for the TCs
8476 */
8477 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8478 {
8479 struct ice_channel *ch, *ch_tmp;
8480 struct ice_pf *pf = vsi->back;
8481 int i;
8483 /* remove all tc-flower based filter if they are channel filters only */
8484 if (rem_fltr)
8485 ice_rem_all_chnl_fltrs(pf);
8487 /* remove ntuple filters since queue configuration is being changed */
8488 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8489 struct ice_hw *hw = &pf->hw;
8491 mutex_lock(&hw->fdir_fltr_lock);
8492 ice_fdir_del_all_fltrs(vsi);
8493 mutex_unlock(&hw->fdir_fltr_lock);
8496 /* perform cleanup for channels if they exist */
8497 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8498 struct ice_vsi *ch_vsi;
8500 list_del(&ch->list);
8501 ch_vsi = ch->ch_vsi;
8502 if (!ch_vsi) {
8503 kfree(ch);
8504 continue;
8505 }
8507 /* Reset queue contexts */
8508 for (i = 0; i < ch->num_rxq; i++) {
8509 struct ice_tx_ring *tx_ring;
8510 struct ice_rx_ring *rx_ring;
8512 tx_ring = vsi->tx_rings[ch->base_q + i];
8513 rx_ring = vsi->rx_rings[ch->base_q + i];
8514 if (tx_ring) {
8515 tx_ring->ch = NULL;
8516 if (tx_ring->q_vector)
8517 tx_ring->q_vector->ch = NULL;
8518 }
8519 if (rx_ring) {
8520 rx_ring->ch = NULL;
8521 if (rx_ring->q_vector)
8522 rx_ring->q_vector->ch = NULL;
8523 }
8524 }
8526 /* Release FD resources for the channel VSI */
8527 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8529 /* clear the VSI from scheduler tree */
8530 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8532 /* Delete VSI from FW, PF and HW VSI arrays */
8533 ice_vsi_delete(ch->ch_vsi);
8535 /* free the channel */
8536 kfree(ch);
8537 }
8539 /* clear the channel VSI map which is stored in main VSI */
8540 ice_for_each_chnl_tc(i)
8541 vsi->tc_map_vsi[i] = NULL;
8543 /* reset main VSI's all TC information */
8544 vsi->all_enatc = 0;
8545 vsi->all_numtc = 0;
8546 }
8548 /**
8549 * ice_rebuild_channels - rebuild channel
8550 * @pf: ptr to PF
8551 *
8552 * Recreate channel VSIs and replay filters
8553 */
8554 static int ice_rebuild_channels(struct ice_pf *pf)
8555 {
8556 struct device *dev = ice_pf_to_dev(pf);
8557 struct ice_vsi *main_vsi;
8558 bool rem_adv_fltr = true;
8559 struct ice_channel *ch;
8560 struct ice_vsi *vsi;
8561 int tc_idx = 0;
8562 int i, err;
8564 main_vsi = ice_get_main_vsi(pf);
8565 if (!main_vsi)
8566 return 0;
8568 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8569 main_vsi->old_numtc == 1)
8570 return 0; /* nothing to be done */
8572 /* reconfigure main VSI based on old value of TC and cached values
8573 * for MQPRIO opt
8574 */
8575 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8576 if (err) {
8577 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8578 main_vsi->old_ena_tc, main_vsi->vsi_num);
8579 return err;
8580 }
8582 /* rebuild ADQ VSIs */
8583 ice_for_each_vsi(pf, i) {
8584 enum ice_vsi_type type;
8586 vsi = pf->vsi[i];
8587 if (!vsi || vsi->type != ICE_VSI_CHNL)
8588 continue;
8589 type = vsi->type;
8592 /* rebuild ADQ VSI */
8593 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8594 if (err) {
8595 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8596 ice_vsi_type_str(type), vsi->idx, err);
8597 goto cleanup;
8598 }
8600 /* Re-map HW VSI number, using VSI handle that has been
8601 * previously validated in ice_replay_vsi() call above
8602 */
8603 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8605 /* replay filters for the VSI */
8606 err = ice_replay_vsi(&pf->hw, vsi->idx);
8607 if (err) {
8608 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8609 ice_vsi_type_str(type), err, vsi->idx);
8610 rem_adv_fltr = false;
8611 goto cleanup;
8612 }
8613 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8614 ice_vsi_type_str(type), vsi->idx);
8616 /* store ADQ VSI at correct TC index in main VSI's
8617 * map of TC to VSI
8618 */
8619 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8620 }
8622 /* ADQ VSI(s) has been rebuilt successfully, so setup
8623 * channel for main VSI's Tx and Rx rings
8624 */
8625 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8626 struct ice_vsi *ch_vsi;
8628 ch_vsi = ch->ch_vsi;
8629 if (!ch_vsi)
8630 continue;
8632 /* reconfig channel resources */
8633 ice_cfg_chnl_all_res(main_vsi, ch);
8635 /* replay BW rate limit if it is non-zero */
8636 if (!ch->max_tx_rate && !ch->min_tx_rate)
8637 continue;
8639 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8640 ch->min_tx_rate);
8641 if (err)
8642 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8643 err, ch->max_tx_rate, ch->min_tx_rate,
8646 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8647 ch->max_tx_rate, ch->min_tx_rate,
8651 /* reconfig RSS for main VSI */
8652 if (main_vsi->ch_rss_size)
8653 ice_vsi_cfg_rss_lut_key(main_vsi);
8655 return 0;
8657 cleanup:
8658 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8659 return err;
8660 }
8662 /**
8663 * ice_create_q_channels - Add queue channel for the given TCs
8664 * @vsi: VSI to be configured
8665 *
8666 * Configures queue channel mapping to the given TCs
8667 */
8668 static int ice_create_q_channels(struct ice_vsi *vsi)
8669 {
8670 struct ice_pf *pf = vsi->back;
8671 struct ice_channel *ch;
8672 int ret = 0, i;
8674 ice_for_each_chnl_tc(i) {
8675 if (!(vsi->all_enatc & BIT(i)))
8676 continue;
8678 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8679 if (!ch) {
8680 ret = -ENOMEM;
8681 goto err_free;
8682 }
8683 INIT_LIST_HEAD(&ch->list);
8684 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8685 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8686 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8687 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8688 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8690 /* convert to Kbits/s */
8691 if (ch->max_tx_rate)
8692 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8693 ICE_BW_KBPS_DIVISOR);
8694 if (ch->min_tx_rate)
8695 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8696 ICE_BW_KBPS_DIVISOR);
8698 ret = ice_create_q_channel(vsi, ch);
8699 if (ret) {
8700 dev_err(ice_pf_to_dev(pf),
8701 "failed creating channel TC:%d\n", i);
8702 kfree(ch);
8703 goto err_free;
8704 }
8705 list_add_tail(&ch->list, &vsi->ch_list);
8706 vsi->tc_map_vsi[i] = ch->ch_vsi;
8707 dev_dbg(ice_pf_to_dev(pf),
8708 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8709 }
8711 return 0;
8713 err_free:
8714 ice_remove_q_channels(vsi, false);
8716 return ret;
8717 }
8718 /**
8719 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8720 * @netdev: net device to configure
8721 * @type_data: TC offload data
8722 */
8723 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8724 {
8725 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8726 struct ice_netdev_priv *np = netdev_priv(netdev);
8727 struct ice_vsi *vsi = np->vsi;
8728 struct ice_pf *pf = vsi->back;
8729 u16 mode, ena_tc_qdisc = 0;
8730 int cur_txq, cur_rxq;
8731 u8 hw = 0, num_tcf;
8732 struct device *dev;
8733 int ret, i;
8735 dev = ice_pf_to_dev(pf);
8736 num_tcf = mqprio_qopt->qopt.num_tc;
8737 hw = mqprio_qopt->qopt.hw;
8738 mode = mqprio_qopt->mode;
8739 if (!hw) {
8740 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8741 vsi->ch_rss_size = 0;
8742 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8743 goto config_tcf;
8744 }
8746 /* Generate queue region map for number of TCF requested */
8747 for (i = 0; i < num_tcf; i++)
8748 ena_tc_qdisc |= BIT(i);
8750 switch (mode) {
8751 case TC_MQPRIO_MODE_CHANNEL:
8753 if (pf->hw.port_info->is_custom_tx_enabled) {
8754 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8755 return -EBUSY;
8756 }
8757 ice_tear_down_devlink_rate_tree(pf);
8759 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8760 if (ret) {
8761 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8762 ret);
8763 return ret;
8764 }
8765 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8766 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8767 /* don't assume state of hw_tc_offload during driver load
8768 * and set the flag for TC flower filter if hw_tc_offload
8769 * already ON
8770 */
8771 if (vsi->netdev->features & NETIF_F_HW_TC)
8772 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8773 break;
8774 default:
8775 return -EINVAL;
8776 }
8778 config_tcf:
8780 /* Requesting same TCF configuration as already enabled */
8781 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8782 mode != TC_MQPRIO_MODE_CHANNEL)
8783 return 0;
8785 /* Pause VSI queues */
8786 ice_dis_vsi(vsi, true);
8788 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8789 ice_remove_q_channels(vsi, true);
8791 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8792 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8793 num_online_cpus());
8794 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8795 num_online_cpus());
8796 } else {
8797 /* logic to rebuild VSI, same like ethtool -L */
8798 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8800 for (i = 0; i < num_tcf; i++) {
8801 if (!(ena_tc_qdisc & BIT(i)))
8802 continue;
8804 offset = vsi->mqprio_qopt.qopt.offset[i];
8805 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8806 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8807 }
8808 vsi->req_txq = offset + qcount_tx;
8809 vsi->req_rxq = offset + qcount_rx;
8811 /* store away original rss_size info, so that it gets reused
8812 * from ice_vsi_rebuild during tc-qdisc delete stage - to
8813 * determine, what should be the rss_size for main VSI
8814 */
8815 vsi->orig_rss_size = vsi->rss_size;
8816 }
8818 /* save current values of Tx and Rx queues before calling VSI rebuild
8819 * for fallback option
8820 */
8821 cur_txq = vsi->num_txq;
8822 cur_rxq = vsi->num_rxq;
8824 /* proceed with rebuild main VSI using correct number of queues */
8825 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
8826 if (ret) {
8827 /* fallback to current number of queues */
8828 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8829 vsi->req_txq = cur_txq;
8830 vsi->req_rxq = cur_rxq;
8831 clear_bit(ICE_RESET_FAILED, pf->state);
8832 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
8833 dev_err(dev, "Rebuild of main VSI failed again\n");
8834 return ret;
8835 }
8836 }
8838 vsi->all_numtc = num_tcf;
8839 vsi->all_enatc = ena_tc_qdisc;
8840 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8841 if (ret) {
8842 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8843 vsi->vsi_num);
8844 goto exit;
8845 }
8847 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8848 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8849 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8851 /* set TC0 rate limit if specified */
8852 if (max_tx_rate || min_tx_rate) {
8853 /* convert to Kbits/s */
8854 if (max_tx_rate)
8855 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8856 if (min_tx_rate)
8857 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8859 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8860 if (!ret) {
8861 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8862 max_tx_rate, min_tx_rate, vsi->vsi_num);
8863 } else {
8864 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8865 max_tx_rate, min_tx_rate, vsi->vsi_num);
8866 goto exit;
8867 }
8868 }
8869 ret = ice_create_q_channels(vsi);
8870 if (ret) {
8871 netdev_err(netdev, "failed configuring queue channels\n");
8872 goto exit;
8873 } else {
8874 netdev_dbg(netdev, "successfully configured channels\n");
8875 }
8876 }
8878 if (vsi->ch_rss_size)
8879 ice_vsi_cfg_rss_lut_key(vsi);
8881 exit:
8882 /* if error, reset the all_numtc and all_enatc */
8883 if (ret) {
8884 vsi->all_numtc = 0;
8885 vsi->all_enatc = 0;
8886 }
8887 /* resume VSI */
8888 ice_ena_vsi(vsi, true);
8890 return ret;
8891 }
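/* Editor's note: the hw == 0 path above handles qdisc deletion, e.g.
 * (hypothetical interface name):
 *
 *	tc qdisc del dev eth0 root
 *
 * which tears down the channels and rebuilds the main VSI with the default
 * queue configuration, reusing vsi->orig_rss_size saved earlier to pick the
 * main VSI's rss_size.
 */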
8893 static LIST_HEAD(ice_block_cb_list);
8895 static int
8896 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8897 void *type_data)
8898 {
8899 struct ice_netdev_priv *np = netdev_priv(netdev);
8900 struct ice_pf *pf = np->vsi->back;
8901 bool locked = false;
8902 int err;
8904 switch (type) {
8905 case TC_SETUP_BLOCK:
8906 return flow_block_cb_setup_simple(type_data,
8907 &ice_block_cb_list,
8908 ice_setup_tc_block_cb,
8909 np, np, true);
8910 case TC_SETUP_QDISC_MQPRIO:
8911 if (ice_is_eswitch_mode_switchdev(pf)) {
8912 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
8917 mutex_lock(&pf->adev_mutex);
8918 device_lock(&pf->adev->dev);
8920 if (pf->adev->dev.driver) {
8921 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
8927 /* set up the traffic classifier for the receive side */
8928 mutex_lock(&pf->tc_mutex);
8929 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8930 mutex_unlock(&pf->tc_mutex);
8934 device_unlock(&pf->adev->dev);
8935 mutex_unlock(&pf->adev_mutex);
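/* Usage note (illustrative): if the auxiliary RDMA driver (irdma) is bound,
 * pf->adev->dev.driver is non-NULL and the MQPRIO request is rejected above;
 * unloading it first, e.g. "modprobe -r irdma", permits the qdisc change.
 */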
8944 static struct ice_indr_block_priv *
8945 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8946 struct net_device *netdev)
8948 struct ice_indr_block_priv *cb_priv;
8950 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8951 if (!cb_priv->netdev)
8953 if (cb_priv->netdev == netdev)
8960 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8963 struct ice_indr_block_priv *priv = indr_priv;
8964 struct ice_netdev_priv *np = priv->np;
8967 case TC_SETUP_CLSFLOWER:
8968 return ice_setup_tc_cls_flower(np, priv->netdev,
8969 (struct flow_cls_offload *)
8977 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
8978 struct ice_netdev_priv *np,
8979 struct flow_block_offload *f, void *data,
8980 void (*cleanup)(struct flow_block_cb *block_cb))
8982 struct ice_indr_block_priv *indr_priv;
8983 struct flow_block_cb *block_cb;
8985 if (!ice_is_tunnel_supported(netdev) &&
8986 !(is_vlan_dev(netdev) &&
8987 vlan_dev_real_dev(netdev) == np->vsi->netdev))
8990 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8993 switch (f->command) {
8994 case FLOW_BLOCK_BIND:
8995 indr_priv = ice_indr_block_priv_lookup(np, netdev);
8999 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9003 indr_priv->netdev = netdev;
9005 list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9008 flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9009 indr_priv, indr_priv,
9010 ice_rep_indr_tc_block_unbind,
9011 f, netdev, sch, data, np,
9014 if (IS_ERR(block_cb)) {
9015 list_del(&indr_priv->list);
9017 return PTR_ERR(block_cb);
9019 flow_block_cb_add(block_cb, f);
9020 list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9022 case FLOW_BLOCK_UNBIND:
9023 indr_priv = ice_indr_block_priv_lookup(np, netdev);
9027 block_cb = flow_block_cb_lookup(f->block,
9028 ice_indr_setup_block_cb,
9033 flow_indr_block_cb_remove(block_cb, f);
9035 list_del(&block_cb->driver_list);
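/* Illustration (hypothetical commands): indirect blocks let the PF offload
 * filters installed on devices it does not own, such as a VLAN stacked on
 * the uplink or a supported tunnel device:
 *   ip link add link eth0 name eth0.100 type vlan id 100
 *   tc qdisc add dev eth0.100 clsact
 * Filters added on eth0.100 ingress then reach this driver through the
 * indirect callback registered with flow_indr_dev_register().
 */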
9044 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9045 void *cb_priv, enum tc_setup_type type, void *type_data,
9047 void (*cleanup)(struct flow_block_cb *block_cb))
9050 case TC_SETUP_BLOCK:
9051 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9060 * ice_open - Called when a network interface becomes active
9061 * @netdev: network interface device structure
9063 * The open entry point is called when a network interface is made
9064 * active by the system (IFF_UP). At this point all resources needed
9065 * for transmit and receive operations are allocated, the interrupt
9066 * handler is registered with the OS, the netdev watchdog is enabled,
9067 * and the stack is notified that the interface is ready.
9069 * Returns 0 on success, negative value on failure
9071 int ice_open(struct net_device *netdev)
9073 struct ice_netdev_priv *np = netdev_priv(netdev);
9074 struct ice_pf *pf = np->vsi->back;
9076 if (ice_is_reset_in_progress(pf->state)) {
9077 netdev_err(netdev, "can't open net device while reset is in progress");
9081 return ice_open_internal(netdev);
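/* Usage note (illustrative): ice_open() is not called directly; the stack
 * invokes it through ndo_open when the interface is brought up, e.g.
 * "ip link set dev eth0 up" (hypothetical interface name).
 */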
9085 * ice_open_internal - Called when a network interface becomes active
9086 * @netdev: network interface device structure
9088 * Internal ice_open implementation. Should not be used directly except by ice_open and reset flows.
9091 * Returns 0 on success, negative value on failure
9093 int ice_open_internal(struct net_device *netdev)
9095 struct ice_netdev_priv *np = netdev_priv(netdev);
9096 struct ice_vsi *vsi = np->vsi;
9097 struct ice_pf *pf = vsi->back;
9098 struct ice_port_info *pi;
9101 if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9102 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9106 netif_carrier_off(netdev);
9108 pi = vsi->port_info;
9109 err = ice_update_link_info(pi);
9111 netdev_err(netdev, "Failed to get link info, error %d\n", err);
9115 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9117 /* set the PHY up if media is present; otherwise turn the PHY off */
9118 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9119 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9120 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9121 err = ice_init_phy_user_cfg(pi);
9123 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9129 err = ice_configure_phy(vsi);
9131 netdev_err(netdev, "Failed to set physical link up, error %d\n",
9136 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9137 ice_set_link(vsi, false);
9140 err = ice_vsi_open(vsi);
9142 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9143 vsi->vsi_num, vsi->vsw->sw_id);
9145 /* update existing tunnel information */
9146 udp_tunnel_get_rx_info(netdev);
9152 * ice_stop - Disables a network interface
9153 * @netdev: network interface device structure
9155 * The stop entry point is called when an interface is de-activated by the OS,
9156 * and the netdevice enters the DOWN state. The hardware is still under the
9157 * driver's control, but the netdev interface is disabled.
9159 * Returns success only - not allowed to fail
9161 int ice_stop(struct net_device *netdev)
9163 struct ice_netdev_priv *np = netdev_priv(netdev);
9164 struct ice_vsi *vsi = np->vsi;
9165 struct ice_pf *pf = vsi->back;
9167 if (ice_is_reset_in_progress(pf->state)) {
9168 netdev_err(netdev, "can't stop net device while reset is in progress");
9172 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9173 int link_err = ice_force_phys_link_state(vsi, false);
9176 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9177 vsi->vsi_num, link_err);
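/* Usage note (illustrative): the link-down-on-close behavior is controlled
 * by an ethtool private flag, toggled from user space with something like
 *   ethtool --set-priv-flags eth0 link-down-on-close on
 * (hypothetical interface name).
 */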
9188 * ice_features_check - Validate encapsulated packet conforms to limits
9189 * @skb: skb to check
9190 * @netdev: This port's netdev
9191 * @features: Offload features that the stack believes apply
9193 static netdev_features_t
9194 ice_features_check(struct sk_buff *skb,
9195 struct net_device __always_unused *netdev,
9196 netdev_features_t features)
9198 bool gso = skb_is_gso(skb);
9201 /* No point in doing any of this if neither checksum nor GSO is
9202 * being requested for this frame. We can rule out both by just
9203 * checking for CHECKSUM_PARTIAL */
9205 if (skb->ip_summed != CHECKSUM_PARTIAL)
9208 /* We cannot support GSO if the MSS is going to be less than
9209 * 64 bytes. If it is then we need to drop support for GSO. */
9211 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9212 features &= ~NETIF_F_GSO_MASK;
9214 len = skb_network_offset(skb);
9215 if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9216 goto out_rm_features;
9218 len = skb_network_header_len(skb);
9219 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9220 goto out_rm_features;
9222 if (skb->encapsulation) {
9223 /* this must work for VXLAN frames AND IPIP/SIT frames, and in
9224 * the case of IPIP frames, the transport header pointer is
9225 * after the inner header! So check to make sure that this
9226 * is a GRE or UDP_TUNNEL frame before doing that math. */
9228 if (gso && (skb_shinfo(skb)->gso_type &
9229 (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9230 len = skb_inner_network_header(skb) -
9231 skb_transport_header(skb);
9232 if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9233 goto out_rm_features;
9236 len = skb_inner_network_header_len(skb);
9237 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9238 goto out_rm_features;
9243 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
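/* Illustration only, not part of the driver: a minimal user-space sketch of
 * the bound-and-parity test applied to each header length above. The limit
 * below is a hypothetical stand-in for the ICE_TXD_*LEN_MAX constants.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool hdr_len_ok(unsigned int len, unsigned int max)
{
	/* mirrors "len > MAX || len & 0x1": the header must fit the
	 * descriptor field and be an even number of bytes
	 */
	return len <= max && !(len & 0x1);
}

int main(void)
{
	printf("%d\n", hdr_len_ok(14, 127));	/* 1: Ethernet header, even */
	printf("%d\n", hdr_len_ok(15, 127));	/* 0: odd length is rejected */
	return 0;
}
#endif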
9246 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9247 .ndo_open = ice_open,
9248 .ndo_stop = ice_stop,
9249 .ndo_start_xmit = ice_start_xmit,
9250 .ndo_set_mac_address = ice_set_mac_address,
9251 .ndo_validate_addr = eth_validate_addr,
9252 .ndo_change_mtu = ice_change_mtu,
9253 .ndo_get_stats64 = ice_get_stats64,
9254 .ndo_tx_timeout = ice_tx_timeout,
9255 .ndo_bpf = ice_xdp_safe_mode,
9258 static const struct net_device_ops ice_netdev_ops = {
9259 .ndo_open = ice_open,
9260 .ndo_stop = ice_stop,
9261 .ndo_start_xmit = ice_start_xmit,
9262 .ndo_select_queue = ice_select_queue,
9263 .ndo_features_check = ice_features_check,
9264 .ndo_fix_features = ice_fix_features,
9265 .ndo_set_rx_mode = ice_set_rx_mode,
9266 .ndo_set_mac_address = ice_set_mac_address,
9267 .ndo_validate_addr = eth_validate_addr,
9268 .ndo_change_mtu = ice_change_mtu,
9269 .ndo_get_stats64 = ice_get_stats64,
9270 .ndo_set_tx_maxrate = ice_set_tx_maxrate,
9271 .ndo_eth_ioctl = ice_eth_ioctl,
9272 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9273 .ndo_set_vf_mac = ice_set_vf_mac,
9274 .ndo_get_vf_config = ice_get_vf_cfg,
9275 .ndo_set_vf_trust = ice_set_vf_trust,
9276 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
9277 .ndo_set_vf_link_state = ice_set_vf_link_state,
9278 .ndo_get_vf_stats = ice_get_vf_stats,
9279 .ndo_set_vf_rate = ice_set_vf_bw,
9280 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9281 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9282 .ndo_setup_tc = ice_setup_tc,
9283 .ndo_set_features = ice_set_features,
9284 .ndo_bridge_getlink = ice_bridge_getlink,
9285 .ndo_bridge_setlink = ice_bridge_setlink,
9286 .ndo_fdb_add = ice_fdb_add,
9287 .ndo_fdb_del = ice_fdb_del,
9288 #ifdef CONFIG_RFS_ACCEL
9289 .ndo_rx_flow_steer = ice_rx_flow_steer,
9291 .ndo_tx_timeout = ice_tx_timeout,
9293 .ndo_xdp_xmit = ice_xdp_xmit,
9294 .ndo_xsk_wakeup = ice_xsk_wakeup,