// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * full kernel with this file. If more than one is done it will cause
 * a linker error with loads of errors.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

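/* Usage sketch (editor's illustration, not part of the driver): the debug
 * level can be given at load time or changed later through the module's
 * sysfs parameter node; the value below is a placeholder, not a
 * recommended setting.
 *
 *	# modprobe ice debug=0x07
 *	# echo 0x07 > /sys/module/ice/parameters/debug
 */
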
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

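/* Illustrative note (editor's sketch of the static-key pattern, not a copy of
 * the driver's XDP path): a static key like this is meant to be tested in the
 * hot path with static_branch_unlikely() and flipped with static_branch_inc()/
 * static_branch_dec() when XDP Tx rings must be shared between CPUs, e.g.
 *
 *	if (static_branch_unlikely(&ice_xdp_locking_key))
 *		spin_lock(&xdp_ring->tx_lock);
 */
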
/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

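/* Illustrative usage (editor's sketch): code that only sees struct ice_hw can
 * still log against the underlying PCI device, e.g.
 *
 *	dev_warn(ice_hw_to_dev(hw), "something happened\n");
 */
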
static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

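/* Worked example of the wraparound arithmetic above (illustrative numbers):
 * with ring->count = 256, head = 250 and tail = 10 the ring has wrapped, so
 * pending = tail + count - head = 10 + 256 - 250 = 16 descriptors; with
 * head = 10 and tail = 250 it is simply tail - head = 240.
 */
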
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents this and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->rss_size = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message, and the driver should continue on with rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

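/* Illustrative note on the TLV header math above (editor's sketch): an LLDP
 * TLV header is one 16-bit word holding a 7-bit type in bits 15:9 and a 9-bit
 * length in bits 8:0. For an organizationally specific TLV (type 127)
 * carrying the 25-octet IEEE ETS payload, typelen = (127 << 9) | 25; masking
 * with ICE_LLDP_TLV_LEN_M/_S recovers 25, and "len + 2" then skips the
 * payload plus the two header octets. The numbers come from IEEE
 * 802.1AB/802.1Qaz, not from driver defines.
 */
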
/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. The actual wait is done by a call to ice_aq_wait_for_event().
 *
 * Calls are separated to allow the caller to register for the event before
 * sending the command, which mitigates a race between registering and the
 * firmware responding.
 *
 * To obtain only the descriptor contents, pass a task->event with a null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

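/* Usage sketch (editor's illustration, not a caller from this file): register
 * first, then issue the firmware command, then wait. The opcode and timeout
 * are placeholders.
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, opcode);
 *	err = <send the AdminQ command that triggers the event>;
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, HZ);
 */
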
/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			data.num_msg_proc = i;
			data.num_pending_arq = pending;
			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

			ice_vc_process_vf_msg(pf, &event, &data);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

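	/* Side note (editor's illustration): each "(reg & FOO_M) >> FOO_S"
	 * pair above is the open-coded form of the bitfield helper from
	 * <linux/bitfield.h>, e.g.
	 *
	 *	u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
	 *
	 * assuming the mask macro is a contiguous, compile-time-constant
	 * bitmask.
	 */
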
	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 vf->vf_id);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
			}
		}
	}
	mutex_unlock(&pf->vfs.table_lock);

	ice_print_vfs_mdd_events(pf);
}

/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * link changes, a link event will be triggered because both the Enable
 * Automatic Link Update and LESM Enable bits are set when setting the PHY
 * capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = ice_pf_to_dev(vsi->back);

	pi = vsi->port_info;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	/* Use the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	kfree(cfg);
out:
	kfree(pcaps);
	return retcode;
}

/**
 * ice_init_nvm_phy_type - Initialize the NVM PHY type
 * @pi: port info structure
 *
 * Initialize nvm_phy_type_[low|high] for link lenient mode support
 */
static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_pf *pf = pi->hw->back;
	int err;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
				  pcaps, NULL);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		goto out;
	}

	pf->nvm_phy_type_hi = pcaps->phy_type_high;
	pf->nvm_phy_type_lo = pcaps->phy_type_low;

out:
	kfree(pcaps);
	return err;
}

/**
 * ice_init_link_dflt_override - Initialize link default override
 * @pi: port info structure
 *
 * Initialize link default override and PHY total port shutdown during probe
 */
static void ice_init_link_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;
	if (ice_get_link_default_override(ldo, pi))
		return;

	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
		return;

	/* Enable Total Port Shutdown (override/replace link-down-on-close
	 * ethtool private flag) for ports with Port Disable bit set.
	 */
	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
}

/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY
 * is configured.
 *
 * This function should be called only if the FW doesn't support default
 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;

	/* If link default override is enabled, use it to mask NVM PHY
	 * capabilities for speed and FEC default configuration.
	 */
	cfg = &phy->curr_user_phy_cfg;

	if (ldo->phy_type_low || ldo->phy_type_high) {
		cfg->phy_type_low = pf->nvm_phy_type_lo &
				    cpu_to_le64(ldo->phy_type_low);
		cfg->phy_type_high = pf->nvm_phy_type_hi &
				     cpu_to_le64(ldo->phy_type_high);
	}
	cfg->link_fec_opt = ldo->fec_options;
	phy->curr_user_fec_req = ICE_FEC_AUTO;

	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}

2056 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2057 * @pi: port info structure
2059 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2060 * mode to default. The PHY defaults are from get PHY capabilities topology
2061 * with media so call when media is first available. An error is returned if
2062 * called when media is not available. The PHY initialization completed state is
2065 * These configurations are used when setting PHY
2066 * configuration. The user PHY configuration is updated on set PHY
2067 * configuration. Returns 0 on success, negative on failure
2069 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2071 struct ice_aqc_get_phy_caps_data *pcaps;
2072 struct ice_phy_info *phy = &pi->phy;
2073 struct ice_pf *pf = pi->hw->back;
2076 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2079 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2083 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2084 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2087 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2090 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2094 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2096 /* check if lenient mode is supported and enabled */
2097 if (ice_fw_supports_link_override(pi->hw) &&
2098 !(pcaps->module_compliance_enforcement &
2099 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2100 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2102 /* if the FW supports default PHY configuration mode, then the driver
2103 * does not have to apply link override settings. If not,
2104 * initialize user PHY configuration with link override values
2106 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2107 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2108 ice_init_phy_cfg_dflt_override(pi);
2109 goto out;
2110 }
2111 }
2113 /* if link default override is not enabled, set user flow control and
2114 * FEC settings based on what get_phy_caps returned
2116 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2117 pcaps->link_fec_options);
2118 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2120 out:
2121 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2122 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2123 err_out:
2124 kfree(pcaps);
2125 return err;
2126 }
2129 * ice_configure_phy - configure PHY
2130 * @vsi: VSI of PHY
2132 * Set the PHY configuration. If the current PHY configuration is the same as
2133 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise,
2134 * configure the PHY based on the get PHY capabilities for topology with media
2135 * response.
2136 static int ice_configure_phy(struct ice_vsi *vsi)
2138 struct device *dev = ice_pf_to_dev(vsi->back);
2139 struct ice_port_info *pi = vsi->port_info;
2140 struct ice_aqc_get_phy_caps_data *pcaps;
2141 struct ice_aqc_set_phy_cfg_data *cfg;
2142 struct ice_phy_info *phy = &pi->phy;
2143 struct ice_pf *pf = vsi->back;
2144 int err;
2146 /* Ensure we have media as we cannot configure a medialess port */
2147 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2148 return -EPERM;
2150 ice_print_topo_conflict(vsi);
2152 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2153 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2154 return -EPERM;
2156 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2157 return ice_force_phys_link_state(vsi, true);
2159 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2160 if (!pcaps)
2161 return -ENOMEM;
2163 /* Get current PHY config */
2164 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2165 NULL);
2166 if (err) {
2167 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2168 vsi->vsi_num, err);
2169 goto done;
2170 }
2172 /* If PHY enable link is configured and configuration has not changed,
2173 * there's nothing to do
2175 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2176 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2177 goto done;
2179 /* Use PHY topology as baseline for configuration */
2180 memset(pcaps, 0, sizeof(*pcaps));
2181 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2182 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2183 pcaps, NULL);
2184 else
2185 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2186 pcaps, NULL);
2187 if (err) {
2188 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2189 vsi->vsi_num, err);
2190 goto done;
2191 }
2193 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2194 if (!cfg) {
2195 err = -ENOMEM;
2196 goto done;
2197 }
2199 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2201 /* Speed - If default override pending, use curr_user_phy_cfg set in
2202 * ice_init_phy_cfg_dflt_override.
2203 */
2204 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2205 vsi->back->state)) {
2206 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2207 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2208 } else {
2209 u64 phy_low = 0, phy_high = 0;
2211 ice_update_phy_type(&phy_low, &phy_high,
2212 pi->phy.curr_user_speed_req);
2213 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2214 cfg->phy_type_high = pcaps->phy_type_high &
2215 cpu_to_le64(phy_high);
2216 }
2218 /* Can't provide what was requested; use PHY capabilities */
2219 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2220 cfg->phy_type_low = pcaps->phy_type_low;
2221 cfg->phy_type_high = pcaps->phy_type_high;
2222 }
2224 /* FEC */
2225 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2227 /* Can't provide what was requested; use PHY capabilities */
2228 if (cfg->link_fec_opt !=
2229 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2230 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2231 cfg->link_fec_opt = pcaps->link_fec_options;
2232 }
2234 /* Flow Control - always supported; no need to check against
2235 * capabilities
2236 */
2237 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2239 /* Enable link and link update */
2240 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2242 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2243 if (err)
2244 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2245 vsi->vsi_num, err);
2247 kfree(cfg);
2248 done:
2249 kfree(pcaps);
2250 return err;
2251 }
2254 * ice_check_media_subtask - Check for media
2255 * @pf: pointer to PF struct
2257 * If media is available, then initialize the PHY user configuration if it
2258 * has not been done yet, and configure the PHY if the interface is up.
2260 static void ice_check_media_subtask(struct ice_pf *pf)
2262 struct ice_port_info *pi;
2263 struct ice_vsi *vsi;
2264 int err;
2266 /* No need to check for media if it's already present */
2267 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2268 return;
2270 vsi = ice_get_main_vsi(pf);
2271 if (!vsi)
2272 return;
2274 /* Refresh link info and check if media is present */
2275 pi = vsi->port_info;
2276 err = ice_update_link_info(pi);
2277 if (err)
2278 return;
2280 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2282 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2283 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2284 ice_init_phy_user_cfg(pi);
2286 /* PHY settings are reset on media insertion, reconfigure
2287 * PHY to preserve settings.
2289 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2290 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2291 return;
2293 err = ice_configure_phy(vsi);
2294 if (!err)
2295 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2297 /* A Link Status Event will be generated; the event handler
2298 * will complete bringing the interface up
2304 * ice_service_task - manage and run subtasks
2305 * @work: pointer to work_struct contained by the PF struct
2307 static void ice_service_task(struct work_struct *work)
2309 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2310 unsigned long start_time = jiffies;
2314 /* process reset requests first */
2315 ice_reset_subtask(pf);
2317 /* bail if a reset/recovery cycle is pending or rebuild failed */
2318 if (ice_is_reset_in_progress(pf->state) ||
2319 test_bit(ICE_SUSPENDED, pf->state) ||
2320 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2321 ice_service_task_complete(pf);
2322 return;
2323 }
2325 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2326 struct iidc_event *event;
2328 event = kzalloc(sizeof(*event), GFP_KERNEL);
2329 if (event) {
2330 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2331 /* report the entire OICR value to AUX driver */
2332 swap(event->reg, pf->oicr_err_reg);
2333 ice_send_event_to_aux(pf, event);
2334 kfree(event);
2335 }
2336 }
2338 /* unplug aux dev per request, if an unplug request came in
2339 * while processing a plug request, this will handle it
2341 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2342 ice_unplug_aux_dev(pf);
2344 /* Plug aux device per request */
2345 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2346 ice_plug_aux_dev(pf);
2348 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2349 struct iidc_event *event;
2351 event = kzalloc(sizeof(*event), GFP_KERNEL);
2352 if (event) {
2353 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2354 ice_send_event_to_aux(pf, event);
2355 kfree(event);
2356 }
2357 }
2359 ice_clean_adminq_subtask(pf);
2360 ice_check_media_subtask(pf);
2361 ice_check_for_hang_subtask(pf);
2362 ice_sync_fltr_subtask(pf);
2363 ice_handle_mdd_event(pf);
2364 ice_watchdog_subtask(pf);
2366 if (ice_is_safe_mode(pf)) {
2367 ice_service_task_complete(pf);
2368 return;
2369 }
2371 ice_process_vflr_event(pf);
2372 ice_clean_mailboxq_subtask(pf);
2373 ice_clean_sbq_subtask(pf);
2374 ice_sync_arfs_fltrs(pf);
2375 ice_flush_fdir_ctx(pf);
2377 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2378 ice_service_task_complete(pf);
2380 /* If the tasks have taken longer than one service timer period
2381 * or there is more work to be done, reset the service timer to
2382 * schedule the service task now.
2384 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2385 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2386 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2387 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2388 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2389 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2390 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2391 mod_timer(&pf->serv_tmr, jiffies);
2395 * ice_set_ctrlq_len - helper function to set controlq length
2396 * @hw: pointer to the HW instance
2398 static void ice_set_ctrlq_len(struct ice_hw *hw)
2400 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2401 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2402 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2403 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2404 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2405 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2406 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2407 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2408 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2409 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2410 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2411 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2415 * ice_schedule_reset - schedule a reset
2416 * @pf: board private structure
2417 * @reset: reset being requested
2419 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2421 struct device *dev = ice_pf_to_dev(pf);
2423 /* bail out if earlier reset has failed */
2424 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2425 dev_dbg(dev, "earlier reset has failed\n");
2426 return -EIO;
2427 }
2428 /* bail if reset/recovery already in progress */
2429 if (ice_is_reset_in_progress(pf->state)) {
2430 dev_dbg(dev, "Reset already in progress\n");
2431 return -EBUSY;
2432 }
2434 switch (reset) {
2435 case ICE_RESET_PFR:
2436 set_bit(ICE_PFR_REQ, pf->state);
2437 break;
2438 case ICE_RESET_CORER:
2439 set_bit(ICE_CORER_REQ, pf->state);
2440 break;
2441 case ICE_RESET_GLOBR:
2442 set_bit(ICE_GLOBR_REQ, pf->state);
2443 break;
2444 default:
2445 return -EINVAL;
2446 }
2448 ice_service_task_schedule(pf);
2449 return 0;
2450 }
2453 * ice_irq_affinity_notify - Callback for affinity changes
2454 * @notify: context as to what irq was changed
2455 * @mask: the new affinity mask
2457 * This is a callback function used by the irq_set_affinity_notifier function
2458 * so that we may register to receive changes to the irq affinity masks.
2461 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2462 const cpumask_t *mask)
2464 struct ice_q_vector *q_vector =
2465 container_of(notify, struct ice_q_vector, affinity_notify);
2467 cpumask_copy(&q_vector->affinity_mask, mask);
2471 * ice_irq_affinity_release - Callback for affinity notifier release
2472 * @ref: internal core kernel usage
2474 * This is a callback function used by the irq_set_affinity_notifier function
2475 * to inform the current notification subscriber that they will no longer
2476 * receive notifications.
2478 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2481 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2482 * @vsi: the VSI being configured
2484 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2486 struct ice_hw *hw = &vsi->back->hw;
2487 int i;
2489 ice_for_each_q_vector(vsi, i)
2490 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2492 ice_flush(hw);
2494 return 0;
2495 }
2497 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2498 * @vsi: the VSI being configured
2499 * @basename: name for the vector
2501 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2503 int q_vectors = vsi->num_q_vectors;
2504 struct ice_pf *pf = vsi->back;
2505 struct device *dev;
2506 int rx_int_idx = 0;
2507 int tx_int_idx = 0;
2508 int vector, err;
2509 int irq_num;
2511 dev = ice_pf_to_dev(pf);
2512 for (vector = 0; vector < q_vectors; vector++) {
2513 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2515 irq_num = q_vector->irq.virq;
2517 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2518 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2519 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2520 tx_int_idx++;
2521 } else if (q_vector->rx.rx_ring) {
2522 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2523 "%s-%s-%d", basename, "rx", rx_int_idx++);
2524 } else if (q_vector->tx.tx_ring) {
2525 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2526 "%s-%s-%d", basename, "tx", tx_int_idx++);
2527 } else {
2528 /* skip this unused q_vector */
2529 continue;
2530 }
2531 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2532 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2533 IRQF_SHARED, q_vector->name,
2534 q_vector);
2535 else
2536 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2537 0, q_vector->name, q_vector);
2538 if (err) {
2539 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2540 err);
2541 goto free_q_irqs;
2542 }
2544 /* register for affinity change notifications */
2545 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2546 struct irq_affinity_notify *affinity_notify;
2548 affinity_notify = &q_vector->affinity_notify;
2549 affinity_notify->notify = ice_irq_affinity_notify;
2550 affinity_notify->release = ice_irq_affinity_release;
2551 irq_set_affinity_notifier(irq_num, affinity_notify);
2552 }
2554 /* assign the mask for this irq */
2555 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2556 }
2558 err = ice_set_cpu_rx_rmap(vsi);
2559 if (err) {
2560 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2561 vsi->vsi_num, ERR_PTR(err));
2562 goto free_q_irqs;
2563 }
2565 vsi->irqs_ready = true;
2566 return 0;
2568 free_q_irqs:
2569 while (vector--) {
2570 irq_num = vsi->q_vectors[vector]->irq.virq;
2571 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2572 irq_set_affinity_notifier(irq_num, NULL);
2573 irq_set_affinity_hint(irq_num, NULL);
2574 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2575 }
2576 return err;
2577 }
2580 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2581 * @vsi: VSI to setup Tx rings used by XDP
2583 * Return 0 on success and negative value on error
2585 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2587 struct device *dev = ice_pf_to_dev(vsi->back);
2588 struct ice_tx_desc *tx_desc;
2589 int i, j;
2591 ice_for_each_xdp_txq(vsi, i) {
2592 u16 xdp_q_idx = vsi->alloc_txq + i;
2593 struct ice_ring_stats *ring_stats;
2594 struct ice_tx_ring *xdp_ring;
2596 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2597 if (!xdp_ring)
2598 goto free_xdp_rings;
2600 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2601 if (!ring_stats) {
2602 ice_free_tx_ring(xdp_ring);
2603 goto free_xdp_rings;
2604 }
2606 xdp_ring->ring_stats = ring_stats;
2607 xdp_ring->q_index = xdp_q_idx;
2608 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2609 xdp_ring->vsi = vsi;
2610 xdp_ring->netdev = NULL;
2611 xdp_ring->dev = dev;
2612 xdp_ring->count = vsi->num_tx_desc;
2613 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2614 if (ice_setup_tx_ring(xdp_ring))
2615 goto free_xdp_rings;
2616 ice_set_ring_xdp(xdp_ring);
2617 spin_lock_init(&xdp_ring->tx_lock);
2618 for (j = 0; j < xdp_ring->count; j++) {
2619 tx_desc = ICE_TX_DESC(xdp_ring, j);
2620 tx_desc->cmd_type_offset_bsz = 0;
2621 }
2622 }
2624 return 0;
2626 free_xdp_rings:
2627 for (; i >= 0; i--) {
2628 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2629 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2630 vsi->xdp_rings[i]->ring_stats = NULL;
2631 ice_free_tx_ring(vsi->xdp_rings[i]);
2632 }
2633 }
2634 return -ENOMEM;
2635 }
2638 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2639 * @vsi: VSI to set the bpf prog on
2640 * @prog: the bpf prog pointer
2642 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2644 struct bpf_prog *old_prog;
2645 int i;
2647 old_prog = xchg(&vsi->xdp_prog, prog);
2648 ice_for_each_rxq(vsi, i)
2649 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2651 if (old_prog)
2652 bpf_prog_put(old_prog);
2653 }
2656 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2657 * @vsi: VSI to bring up Tx rings used by XDP
2658 * @prog: bpf program that will be assigned to VSI
2660 * Return 0 on success and negative value on error
2662 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2664 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2665 int xdp_rings_rem = vsi->num_xdp_txq;
2666 struct ice_pf *pf = vsi->back;
2667 struct ice_qs_cfg xdp_qs_cfg = {
2668 .qs_mutex = &pf->avail_q_mutex,
2669 .pf_map = pf->avail_txqs,
2670 .pf_map_size = pf->max_pf_txqs,
2671 .q_count = vsi->num_xdp_txq,
2672 .scatter_count = ICE_MAX_SCATTER_TXQS,
2673 .vsi_map = vsi->txq_map,
2674 .vsi_map_offset = vsi->alloc_txq,
2675 .mapping_mode = ICE_VSI_MAP_CONTIG
2676 };
2677 struct device *dev;
2678 int status, i, v_idx;
2681 dev = ice_pf_to_dev(pf);
2682 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2683 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2684 if (!vsi->xdp_rings)
2685 return -ENOMEM;
2687 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2688 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2689 goto err_map_xdp;
2691 if (static_key_enabled(&ice_xdp_locking_key))
2692 netdev_warn(vsi->netdev,
2693 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2695 if (ice_xdp_alloc_setup_rings(vsi))
2696 goto clear_xdp_rings;
2698 /* follow the logic from ice_vsi_map_rings_to_vectors */
2699 ice_for_each_q_vector(vsi, v_idx) {
2700 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2701 int xdp_rings_per_v, q_id, q_base;
2703 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2704 vsi->num_q_vectors - v_idx);
2705 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2707 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2708 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2710 xdp_ring->q_vector = q_vector;
2711 xdp_ring->next = q_vector->tx.tx_ring;
2712 q_vector->tx.tx_ring = xdp_ring;
2714 xdp_rings_rem -= xdp_rings_per_v;
2715 }
2717 ice_for_each_rxq(vsi, i) {
2718 if (static_key_enabled(&ice_xdp_locking_key)) {
2719 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2720 } else {
2721 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2722 struct ice_tx_ring *ring;
2724 ice_for_each_tx_ring(ring, q_vector->tx) {
2725 if (ice_ring_is_xdp(ring)) {
2726 vsi->rx_rings[i]->xdp_ring = ring;
2727 break;
2728 }
2729 }
2730 }
2731 ice_tx_xsk_pool(vsi, i);
2732 }
2734 /* omit the scheduler update if in reset path; XDP queues will be
2735 * taken into account at the end of ice_vsi_rebuild, where
2736 * ice_cfg_vsi_lan is being called
2738 if (ice_is_reset_in_progress(pf->state))
2739 return 0;
2741 /* tell the Tx scheduler that right now we have
2742 * additional queues
2743 */
2744 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2745 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2747 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2748 max_txqs);
2749 if (status) {
2750 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2751 status);
2752 goto clear_xdp_rings;
2753 }
2755 /* assign the prog only when it's not already present on VSI;
2756 * this flow is a subject of both ethtool -L and ndo_bpf flows;
2757 * VSI rebuild that happens under ethtool -L can expose us to
2758 * the bpf_prog refcount issues as we would be swapping same
2759 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2760 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2761 * this is not harmful as dev_xdp_install bumps the refcount
2762 * before calling the op exposed by the driver;
2764 if (!ice_is_xdp_ena_vsi(vsi))
2765 ice_vsi_assign_bpf_prog(vsi, prog);
2767 return 0;
2768 clear_xdp_rings:
2769 ice_for_each_xdp_txq(vsi, i)
2770 if (vsi->xdp_rings[i]) {
2771 kfree_rcu(vsi->xdp_rings[i], rcu);
2772 vsi->xdp_rings[i] = NULL;
2773 }
2775 err_map_xdp:
2776 mutex_lock(&pf->avail_q_mutex);
2777 ice_for_each_xdp_txq(vsi, i) {
2778 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2779 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2781 mutex_unlock(&pf->avail_q_mutex);
2783 devm_kfree(dev, vsi->xdp_rings);
2784 return -ENOMEM;
2785 }
2788 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2789 * @vsi: VSI to remove XDP rings
2791 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2792 * resources
2793 */
2794 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2796 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2797 struct ice_pf *pf = vsi->back;
2798 int i, v_idx;
2800 /* q_vectors are freed in reset path so there's no point in detaching
2801 * rings; in case of rebuild being triggered not from reset bits
2802 * in pf->state won't be set, so additionally check first q_vector
2803 * against NULL
2804 */
2805 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2806 goto free_qmap;
2808 ice_for_each_q_vector(vsi, v_idx) {
2809 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2810 struct ice_tx_ring *ring;
2812 ice_for_each_tx_ring(ring, q_vector->tx)
2813 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2814 break;
2816 /* restore the value of last node prior to XDP setup */
2817 q_vector->tx.tx_ring = ring;
2818 }
2820 free_qmap:
2821 mutex_lock(&pf->avail_q_mutex);
2822 ice_for_each_xdp_txq(vsi, i) {
2823 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2824 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2826 mutex_unlock(&pf->avail_q_mutex);
2828 ice_for_each_xdp_txq(vsi, i)
2829 if (vsi->xdp_rings[i]) {
2830 if (vsi->xdp_rings[i]->desc) {
2831 synchronize_rcu();
2832 ice_free_tx_ring(vsi->xdp_rings[i]);
2833 }
2834 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2835 vsi->xdp_rings[i]->ring_stats = NULL;
2836 kfree_rcu(vsi->xdp_rings[i], rcu);
2837 vsi->xdp_rings[i] = NULL;
2838 }
2840 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2841 vsi->xdp_rings = NULL;
2843 if (static_key_enabled(&ice_xdp_locking_key))
2844 static_branch_dec(&ice_xdp_locking_key);
2846 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2847 return 0;
2849 ice_vsi_assign_bpf_prog(vsi, NULL);
2851 /* notify Tx scheduler that we destroyed XDP queues and bring
2852 * back the old number of child nodes
2854 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2855 max_txqs[i] = vsi->num_txq;
2857 /* change number of XDP Tx queues to 0 */
2858 vsi->num_xdp_txq = 0;
2860 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2861 max_txqs);
2862 }
2865 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2866 * @vsi: VSI to schedule napi on
2868 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2869 {
2870 int i;
2872 ice_for_each_rxq(vsi, i) {
2873 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2875 if (rx_ring->xsk_pool)
2876 napi_schedule(&rx_ring->q_vector->napi);
2881 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2882 * @vsi: VSI to determine the count of XDP Tx qs
2884 * returns 0 if Tx qs count is higher than at least half of CPU count,
2885 * -ENOMEM otherwise
2886 */
2887 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2889 u16 avail = ice_get_avail_txq_count(vsi->back);
2890 u16 cpus = num_possible_cpus();
2892 if (avail < cpus / 2)
2893 return -ENOMEM;
2895 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2897 if (vsi->num_xdp_txq < cpus)
2898 static_branch_inc(&ice_xdp_locking_key);
2900 return 0;
2901 }
2904 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2905 * @vsi: Pointer to VSI structure
2907 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2909 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2910 return ICE_RXBUF_1664;
2911 else
2912 return ICE_RXBUF_3072;
2913 }
2916 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2917 * @vsi: VSI to setup XDP for
2918 * @prog: XDP program
2919 * @extack: netlink extended ack
2922 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2923 struct netlink_ext_ack *extack)
2925 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2926 bool if_running = netif_running(vsi->netdev);
2927 int ret = 0, xdp_ring_err = 0;
2929 if (prog && !prog->aux->xdp_has_frags) {
2930 if (frame_size > ice_max_xdp_frame_size(vsi)) {
2931 NL_SET_ERR_MSG_MOD(extack,
2932 "MTU is too large for linear frames and XDP prog does not support frags");
2933 return -EOPNOTSUPP;
2934 }
2935 }
2937 /* hot swap progs and avoid toggling link */
2938 if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
2939 ice_vsi_assign_bpf_prog(vsi, prog);
2940 return 0;
2941 }
2943 /* need to stop netdev while setting up the program for Rx rings */
2944 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2945 ret = ice_down(vsi);
2946 if (ret) {
2947 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2948 return ret;
2949 }
2950 }
2952 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2953 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2954 if (xdp_ring_err) {
2955 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2956 } else {
2957 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2958 if (xdp_ring_err)
2959 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2960 }
2961 xdp_features_set_redirect_target(vsi->netdev, true);
2962 /* reallocate Rx queues that are used for zero-copy */
2963 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2964 if (xdp_ring_err)
2965 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2966 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2967 xdp_features_clear_redirect_target(vsi->netdev);
2968 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2969 if (xdp_ring_err)
2970 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2971 /* reallocate Rx queues that were used for zero-copy */
2972 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2973 if (xdp_ring_err)
2974 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2975 }
2977 if (if_running)
2978 ret = ice_up(vsi);
2980 if (!ret && prog)
2981 ice_vsi_rx_napi_schedule(vsi);
2983 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2987 * ice_xdp_safe_mode - XDP handler for safe mode
2988 * @dev: netdevice
2989 * @xdp: XDP command
2991 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2992 struct netdev_bpf *xdp)
2994 NL_SET_ERR_MSG_MOD(xdp->extack,
2995 "Please provide working DDP firmware package in order to use XDP\n"
2996 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2997 return -EOPNOTSUPP;
2998 }
3001 * ice_xdp - implements XDP handler
3002 * @dev: netdevice
3003 * @xdp: XDP command
3005 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3007 struct ice_netdev_priv *np = netdev_priv(dev);
3008 struct ice_vsi *vsi = np->vsi;
3010 if (vsi->type != ICE_VSI_PF) {
3011 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3012 return -EINVAL;
3013 }
3015 switch (xdp->command) {
3016 case XDP_SETUP_PROG:
3017 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3018 case XDP_SETUP_XSK_POOL:
3019 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3020 xdp->xsk.queue_id);
3021 default:
3022 return -EINVAL;
3023 }
3024 }
3027 * ice_ena_misc_vector - enable the non-queue interrupts
3028 * @pf: board private structure
3030 static void ice_ena_misc_vector(struct ice_pf *pf)
3032 struct ice_hw *hw = &pf->hw;
3035 /* Disable anti-spoof detection interrupt to prevent spurious event
3036 * interrupts during a function reset. Anti-spoof functionality itself
3037 * is unaffected; only its interrupt reporting is masked here.
3038 */
3039 val = rd32(hw, GL_MDCK_TX_TDPU);
3040 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3041 wr32(hw, GL_MDCK_TX_TDPU, val);
3043 /* clear things first */
3044 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
3045 rd32(hw, PFINT_OICR); /* read to clear */
3047 val = (PFINT_OICR_ECC_ERR_M |
3048 PFINT_OICR_MAL_DETECT_M |
3049 PFINT_OICR_GRST_M |
3050 PFINT_OICR_PCI_EXCEPTION_M |
3051 PFINT_OICR_VFLR_M |
3052 PFINT_OICR_HMC_ERR_M |
3053 PFINT_OICR_PE_PUSH_M |
3054 PFINT_OICR_PE_CRITERR_M);
3056 wr32(hw, PFINT_OICR_ENA, val);
3058 /* SW_ITR_IDX = 0, but don't change INTENA */
3059 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3060 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3064 * ice_misc_intr - misc interrupt handler
3065 * @irq: interrupt number
3066 * @data: pointer to a q_vector
3068 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3070 struct ice_pf *pf = (struct ice_pf *)data;
3071 struct ice_hw *hw = &pf->hw;
3072 struct device *dev;
3073 u32 oicr, ena_mask;
3075 dev = ice_pf_to_dev(pf);
3076 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3077 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3078 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3080 oicr = rd32(hw, PFINT_OICR);
3081 ena_mask = rd32(hw, PFINT_OICR_ENA);
3083 if (oicr & PFINT_OICR_SWINT_M) {
3084 ena_mask &= ~PFINT_OICR_SWINT_M;
3085 pf->sw_int_count++;
3086 }
3088 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3089 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3090 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3091 }
3092 if (oicr & PFINT_OICR_VFLR_M) {
3093 /* disable any further VFLR event notifications */
3094 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3095 u32 reg = rd32(hw, PFINT_OICR_ENA);
3097 reg &= ~PFINT_OICR_VFLR_M;
3098 wr32(hw, PFINT_OICR_ENA, reg);
3099 } else {
3100 ena_mask &= ~PFINT_OICR_VFLR_M;
3101 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3102 }
3103 }
3105 if (oicr & PFINT_OICR_GRST_M) {
3106 u32 reset;
3108 /* we have a reset warning */
3109 ena_mask &= ~PFINT_OICR_GRST_M;
3110 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3111 GLGEN_RSTAT_RESET_TYPE_S;
3113 if (reset == ICE_RESET_CORER)
3114 pf->corer_count++;
3115 else if (reset == ICE_RESET_GLOBR)
3116 pf->globr_count++;
3117 else if (reset == ICE_RESET_EMPR)
3118 pf->empr_count++;
3119 else
3120 dev_dbg(dev, "Invalid reset type %d\n", reset);
3122 /* If a reset cycle isn't already in progress, we set a bit in
3123 * pf->state so that the service task can start a reset/rebuild.
3125 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3126 if (reset == ICE_RESET_CORER)
3127 set_bit(ICE_CORER_RECV, pf->state);
3128 else if (reset == ICE_RESET_GLOBR)
3129 set_bit(ICE_GLOBR_RECV, pf->state);
3130 else
3131 set_bit(ICE_EMPR_RECV, pf->state);
3133 /* There are couple of different bits at play here.
3134 * hw->reset_ongoing indicates whether the hardware is
3135 * in reset. This is set to true when a reset interrupt
3136 * is received and set back to false after the driver
3137 * has determined that the hardware is out of reset.
3139 * ICE_RESET_OICR_RECV in pf->state indicates
3140 * that a post reset rebuild is required before the
3141 * driver is operational again. This is set above.
3143 * As this is the start of the reset/rebuild cycle, set
3144 * both to indicate that.
3146 hw->reset_ongoing = true;
3147 }
3148 }
3150 if (oicr & PFINT_OICR_TSYN_TX_M) {
3151 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3152 if (!hw->reset_ongoing)
3153 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3154 }
3156 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3157 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3158 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3160 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3162 if (hw->func_caps.ts_func_info.src_tmr_owned) {
3163 /* Save EVENTs from GLTSYN register */
3164 pf->ptp.ext_ts_irq |= gltsyn_stat &
3165 (GLTSYN_STAT_EVENT0_M |
3166 GLTSYN_STAT_EVENT1_M |
3167 GLTSYN_STAT_EVENT2_M);
3169 set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
3170 }
3171 }
3173 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3174 if (oicr & ICE_AUX_CRIT_ERR) {
3175 pf->oicr_err_reg |= oicr;
3176 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3177 ena_mask &= ~ICE_AUX_CRIT_ERR;
3180 /* Report any remaining unexpected interrupts */
3181 oicr &= ena_mask;
3182 if (oicr) {
3183 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3184 /* If a critical error is pending there is no choice but to
3185 * reset the device.
3186 */
3187 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3188 PFINT_OICR_ECC_ERR_M)) {
3189 set_bit(ICE_PFR_REQ, pf->state);
3190 ice_service_task_schedule(pf);
3191 }
3192 }
3193 return IRQ_WAKE_THREAD;
3197 * ice_misc_intr_thread_fn - misc interrupt thread function
3198 * @irq: interrupt number
3199 * @data: pointer to a q_vector
3201 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3203 struct ice_pf *pf = data;
3204 struct ice_hw *hw = &pf->hw;
3208 if (ice_is_reset_in_progress(pf->state))
3209 return IRQ_HANDLED;
3211 ice_service_task_schedule(pf);
3213 if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
3214 ice_ptp_extts_event(pf);
3216 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3217 /* Process outstanding Tx timestamps. If there is more work,
3218 * re-arm the interrupt to trigger again.
3220 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3221 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3222 ice_flush(hw);
3223 }
3224 }
3226 ice_irq_dynamic_ena(hw, NULL, NULL);
3228 return IRQ_HANDLED;
3229 }
3232 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3233 * @hw: pointer to HW structure
3235 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3237 /* disable Admin queue Interrupt causes */
3238 wr32(hw, PFINT_FW_CTL,
3239 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3241 /* disable Mailbox queue Interrupt causes */
3242 wr32(hw, PFINT_MBX_CTL,
3243 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3244 /* disable Sideband queue Interrupt causes */
3245 wr32(hw, PFINT_SB_CTL,
3246 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3248 /* disable Control queue Interrupt causes */
3249 wr32(hw, PFINT_OICR_CTL,
3250 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3252 ice_flush(hw);
3253 }
3256 * ice_free_irq_msix_misc - Unroll misc vector setup
3257 * @pf: board private structure
3259 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3261 int misc_irq_num = pf->oicr_irq.virq;
3262 struct ice_hw *hw = &pf->hw;
3264 ice_dis_ctrlq_interrupts(hw);
3266 /* disable OICR interrupt */
3267 wr32(hw, PFINT_OICR_ENA, 0);
3268 ice_flush(hw);
3270 synchronize_irq(misc_irq_num);
3271 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3273 ice_free_irq(pf, pf->oicr_irq);
3277 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3278 * @hw: pointer to HW structure
3279 * @reg_idx: HW vector index to associate the control queue interrupts with
3281 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3282 {
3283 u32 val;
3285 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3286 PFINT_OICR_CTL_CAUSE_ENA_M);
3287 wr32(hw, PFINT_OICR_CTL, val);
3289 /* enable Admin queue Interrupt causes */
3290 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3291 PFINT_FW_CTL_CAUSE_ENA_M);
3292 wr32(hw, PFINT_FW_CTL, val);
3294 /* enable Mailbox queue Interrupt causes */
3295 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3296 PFINT_MBX_CTL_CAUSE_ENA_M);
3297 wr32(hw, PFINT_MBX_CTL, val);
3299 /* This enables Sideband queue Interrupt causes */
3300 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3301 PFINT_SB_CTL_CAUSE_ENA_M);
3302 wr32(hw, PFINT_SB_CTL, val);
3304 ice_flush(hw);
3305 }
3308 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3309 * @pf: board private structure
3311 * This sets up the handler for MSIX 0, which is used to manage the
3312 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3313 * when in MSI or Legacy interrupt mode.
3315 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3317 struct device *dev = ice_pf_to_dev(pf);
3318 struct ice_hw *hw = &pf->hw;
3319 struct msi_map oicr_irq;
3320 int err = 0;
3322 if (!pf->int_name[0])
3323 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3324 dev_driver_string(dev), dev_name(dev));
3326 /* Do not request IRQ but do enable OICR interrupt since settings are
3327 * lost during reset. Note that this function is called only during
3328 * rebuild path and not while reset is in progress.
3330 if (ice_is_reset_in_progress(pf->state))
3331 goto skip_req_irq;
3333 /* reserve one vector in irq_tracker for misc interrupts */
3334 oicr_irq = ice_alloc_irq(pf, false);
3335 if (oicr_irq.index < 0)
3336 return oicr_irq.index;
3338 pf->oicr_irq = oicr_irq;
3339 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3340 ice_misc_intr_thread_fn, 0,
3341 pf->int_name, pf);
3342 if (err) {
3343 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3344 pf->int_name, err);
3345 ice_free_irq(pf, pf->oicr_irq);
3346 return err;
3347 }
3349 skip_req_irq:
3350 ice_ena_misc_vector(pf);
3352 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3353 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3354 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3357 ice_irq_dynamic_ena(hw, NULL, NULL);
3359 return 0;
3360 }
3363 * ice_napi_add - register NAPI handler for the VSI
3364 * @vsi: VSI for which NAPI handler is to be registered
3366 * This function is only called in the driver's load path. Registering the NAPI
3367 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3368 * reset/rebuild, etc.)
3370 static void ice_napi_add(struct ice_vsi *vsi)
3371 {
3372 int v_idx;
3374 if (!vsi->netdev)
3375 return;
3377 ice_for_each_q_vector(vsi, v_idx)
3378 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3379 ice_napi_poll);
3380 }
3383 * ice_set_ops - set netdev and ethtools ops for the given netdev
3384 * @vsi: the VSI associated with the new netdev
3386 static void ice_set_ops(struct ice_vsi *vsi)
3388 struct net_device *netdev = vsi->netdev;
3389 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3391 if (ice_is_safe_mode(pf)) {
3392 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3393 ice_set_ethtool_safe_mode_ops(netdev);
3394 return;
3395 }
3397 netdev->netdev_ops = &ice_netdev_ops;
3398 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3399 ice_set_ethtool_ops(netdev);
3401 if (vsi->type != ICE_VSI_PF)
3402 return;
3404 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3405 NETDEV_XDP_ACT_XSK_ZEROCOPY |
3406 NETDEV_XDP_ACT_RX_SG;
3407 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3411 * ice_set_netdev_features - set features for the given netdev
3412 * @netdev: netdev instance
3414 static void ice_set_netdev_features(struct net_device *netdev)
3416 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3417 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3418 netdev_features_t csumo_features;
3419 netdev_features_t vlano_features;
3420 netdev_features_t dflt_features;
3421 netdev_features_t tso_features;
3423 if (ice_is_safe_mode(pf)) {
3424 /* safe mode */
3425 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3426 netdev->hw_features = netdev->features;
3427 return;
3428 }
3430 dflt_features = NETIF_F_SG |
3431 NETIF_F_HIGHDMA |
3432 NETIF_F_NTUPLE |
3433 NETIF_F_RXHASH;
3435 csumo_features = NETIF_F_RXCSUM |
3436 NETIF_F_IP_CSUM |
3437 NETIF_F_SCTP_CRC |
3438 NETIF_F_IPV6_CSUM;
3440 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3441 NETIF_F_HW_VLAN_CTAG_TX |
3442 NETIF_F_HW_VLAN_CTAG_RX;
3444 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3445 if (is_dvm_ena)
3446 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3448 tso_features = NETIF_F_TSO |
3449 NETIF_F_TSO_ECN |
3450 NETIF_F_TSO6 |
3451 NETIF_F_GSO_GRE |
3452 NETIF_F_GSO_UDP_TUNNEL |
3453 NETIF_F_GSO_GRE_CSUM |
3454 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3455 NETIF_F_GSO_PARTIAL |
3456 NETIF_F_GSO_IPXIP4 |
3457 NETIF_F_GSO_IPXIP6 |
3458 NETIF_F_GSO_UDP_L4;
3460 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3461 NETIF_F_GSO_GRE_CSUM;
3462 /* set features that user can change */
3463 netdev->hw_features = dflt_features | csumo_features |
3464 vlano_features | tso_features;
3466 /* add support for HW_CSUM on packets with MPLS header */
3467 netdev->mpls_features = NETIF_F_HW_CSUM |
3468 NETIF_F_TSO |
3469 NETIF_F_TSO6;
3471 /* enable features */
3472 netdev->features |= netdev->hw_features;
3474 netdev->hw_features |= NETIF_F_HW_TC;
3475 netdev->hw_features |= NETIF_F_LOOPBACK;
3477 /* encap and VLAN devices inherit default, csumo and tso features */
3478 netdev->hw_enc_features |= dflt_features | csumo_features |
3479 tso_features;
3480 netdev->vlan_features |= dflt_features | csumo_features |
3481 tso_features;
3483 /* advertise support but don't enable by default since only one type of
3484 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3485 * type turns on the other has to be turned off. This is enforced by the
3486 * ice_fix_features() ndo callback.
3488 if (is_dvm_ena)
3489 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3490 NETIF_F_HW_VLAN_STAG_TX;
3492 /* Leave CRC / FCS stripping enabled by default, but allow the value to
3493 * be changed at runtime
3495 netdev->hw_features |= NETIF_F_RXFCS;
3497 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3501 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3502 * @lut: Lookup table
3503 * @rss_table_size: Lookup table size
3504 * @rss_size: Range of queue number for hashing
3506 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3507 {
3508 u16 i;
3510 for (i = 0; i < rss_table_size; i++)
3511 lut[i] = i % rss_size;
3515 * ice_pf_vsi_setup - Set up a PF VSI
3516 * @pf: board private structure
3517 * @pi: pointer to the port_info instance
3519 * Returns pointer to the successfully allocated VSI software struct
3520 * on success, otherwise returns NULL on failure.
3522 static struct ice_vsi *
3523 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3525 struct ice_vsi_cfg_params params = {};
3527 params.type = ICE_VSI_PF;
3528 params.pi = pi;
3529 params.flags = ICE_VSI_FLAG_INIT;
3531 return ice_vsi_setup(pf, &params);
3534 static struct ice_vsi *
3535 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3536 struct ice_channel *ch)
3538 struct ice_vsi_cfg_params params = {};
3540 params.type = ICE_VSI_CHNL;
3541 params.pi = pi;
3542 params.ch = ch;
3543 params.flags = ICE_VSI_FLAG_INIT;
3545 return ice_vsi_setup(pf, &params);
3549 * ice_ctrl_vsi_setup - Set up a control VSI
3550 * @pf: board private structure
3551 * @pi: pointer to the port_info instance
3553 * Returns pointer to the successfully allocated VSI software struct
3554 * on success, otherwise returns NULL on failure.
3556 static struct ice_vsi *
3557 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3559 struct ice_vsi_cfg_params params = {};
3561 params.type = ICE_VSI_CTRL;
3562 params.pi = pi;
3563 params.flags = ICE_VSI_FLAG_INIT;
3565 return ice_vsi_setup(pf, &params);
3569 * ice_lb_vsi_setup - Set up a loopback VSI
3570 * @pf: board private structure
3571 * @pi: pointer to the port_info instance
3573 * Returns pointer to the successfully allocated VSI software struct
3574 * on success, otherwise returns NULL on failure.
3577 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3579 struct ice_vsi_cfg_params params = {};
3581 params.type = ICE_VSI_LB;
3582 params.pi = pi;
3583 params.flags = ICE_VSI_FLAG_INIT;
3585 return ice_vsi_setup(pf, &params);
3589 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3590 * @netdev: network interface to be adjusted
3591 * @proto: VLAN protocol (TPID)
3592 * @vid: VLAN ID to be added
3594 * net_device_ops implementation for adding VLAN IDs
3597 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3599 struct ice_netdev_priv *np = netdev_priv(netdev);
3600 struct ice_vsi_vlan_ops *vlan_ops;
3601 struct ice_vsi *vsi = np->vsi;
3602 struct ice_vlan vlan;
3603 int ret;
3605 /* VLAN 0 is added by default during load/reset */
3606 if (!vid)
3607 return 0;
3609 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3610 usleep_range(1000, 2000);
3612 /* Add multicast promisc rule for the VLAN ID to be added if
3613 * all-multicast is currently enabled.
3615 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3616 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3617 ICE_MCAST_VLAN_PROMISC_BITS,
3618 vid);
3619 if (ret)
3620 goto finish;
3621 }
3623 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3625 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3626 * packets aren't pruned by the device's internal switch on Rx
3628 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3629 ret = vlan_ops->add_vlan(vsi, &vlan);
3630 if (ret)
3631 goto finish;
3633 /* If all-multicast is currently enabled and this VLAN ID is only one
3634 * besides VLAN-0 we have to update look-up type of multicast promisc
3635 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3637 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3638 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3639 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3640 ICE_MCAST_PROMISC_BITS, 0);
3641 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3642 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3643 }
3645 finish:
3646 clear_bit(ICE_CFG_BUSY, vsi->state);
3648 return ret;
3649 }
3652 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3653 * @netdev: network interface to be adjusted
3654 * @proto: VLAN protocol (TPID)
3655 * @vid: VLAN ID to be removed
3657 * net_device_ops implementation for removing VLAN IDs
3660 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3662 struct ice_netdev_priv *np = netdev_priv(netdev);
3663 struct ice_vsi_vlan_ops *vlan_ops;
3664 struct ice_vsi *vsi = np->vsi;
3665 struct ice_vlan vlan;
3666 int ret;
3668 /* don't allow removal of VLAN 0 */
3669 if (!vid)
3670 return 0;
3672 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3673 usleep_range(1000, 2000);
3675 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3676 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3677 if (ret) {
3678 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3679 vsi->vsi_num);
3680 vsi->current_netdev_flags |= IFF_ALLMULTI;
3681 }
3683 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3685 /* Make sure VLAN delete is successful before updating VLAN
3686 * information
3687 */
3688 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3689 ret = vlan_ops->del_vlan(vsi, &vlan);
3690 if (ret)
3691 goto finish;
3693 /* Remove multicast promisc rule for the removed VLAN ID if
3694 * all-multicast is enabled.
3696 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3697 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3698 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3700 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3701 /* Update look-up type of multicast promisc rule for VLAN 0
3702 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3703 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3705 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3706 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3707 ICE_MCAST_VLAN_PROMISC_BITS,
3709 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3710 ICE_MCAST_PROMISC_BITS, 0);
3711 }
3712 }
3714 finish:
3715 clear_bit(ICE_CFG_BUSY, vsi->state);
3717 return ret;
3718 }
3721 * ice_rep_indr_tc_block_unbind
3722 * @cb_priv: indirection block private data
3724 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3726 struct ice_indr_block_priv *indr_priv = cb_priv;
3728 list_del(&indr_priv->list);
3729 kfree(indr_priv);
3730 }
3733 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3734 * @vsi: VSI struct which has the netdev
3736 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3738 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3740 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3741 ice_rep_indr_tc_block_unbind);
3745 * ice_tc_indir_block_register - Register TC indirect block notifications
3746 * @vsi: VSI struct which has the netdev
3748 * Returns 0 on success, negative value on failure
3750 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3752 struct ice_netdev_priv *np;
3754 if (!vsi || !vsi->netdev)
3755 return -EINVAL;
3757 np = netdev_priv(vsi->netdev);
3759 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3760 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3764 * ice_get_avail_q_count - Get count of queues in use
3765 * @pf_qmap: bitmap to get queue use count from
3766 * @lock: pointer to a mutex that protects access to pf_qmap
3767 * @size: size of the bitmap
3770 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3771 {
3772 unsigned long bit;
3773 u16 count = 0;
3775 mutex_lock(lock);
3776 for_each_clear_bit(bit, pf_qmap, size)
3777 count++;
3778 mutex_unlock(lock);
3780 return count;
3781 }
3784 * ice_get_avail_txq_count - Get count of Tx queues in use
3785 * @pf: pointer to an ice_pf instance
3787 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3789 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3790 pf->max_pf_txqs);
3791 }
3794 * ice_get_avail_rxq_count - Get count of Rx queues in use
3795 * @pf: pointer to an ice_pf instance
3797 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3799 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3800 pf->max_pf_rxqs);
3801 }
3804 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3805 * @pf: board private structure to initialize
3807 static void ice_deinit_pf(struct ice_pf *pf)
3809 ice_service_task_stop(pf);
3810 mutex_destroy(&pf->lag_mutex);
3811 mutex_destroy(&pf->adev_mutex);
3812 mutex_destroy(&pf->sw_mutex);
3813 mutex_destroy(&pf->tc_mutex);
3814 mutex_destroy(&pf->avail_q_mutex);
3815 mutex_destroy(&pf->vfs.table_lock);
3817 if (pf->avail_txqs) {
3818 bitmap_free(pf->avail_txqs);
3819 pf->avail_txqs = NULL;
3822 if (pf->avail_rxqs) {
3823 bitmap_free(pf->avail_rxqs);
3824 pf->avail_rxqs = NULL;
3825 }
3827 if (pf->ptp.clock) {
3828 ptp_clock_unregister(pf->ptp.clock);
3829 pf->ptp.clock = NULL;
3830 }
3831 }
3832 * ice_set_pf_caps - set PFs capability flags
3833 * @pf: pointer to the PF instance
3835 static void ice_set_pf_caps(struct ice_pf *pf)
3837 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3839 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3840 if (func_caps->common_cap.rdma)
3841 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3842 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3843 if (func_caps->common_cap.dcb)
3844 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3845 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3846 if (func_caps->common_cap.sr_iov_1_1) {
3847 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3848 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3849 ICE_MAX_SRIOV_VFS);
3850 }
3851 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3852 if (func_caps->common_cap.rss_table_size)
3853 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3855 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3856 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3857 u16 unused;
3859 /* ctrl_vsi_idx will be set to a valid value when flow director
3860 * is setup by ice_init_fdir
3862 pf->ctrl_vsi_idx = ICE_NO_VSI;
3863 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3864 /* force guaranteed filter pool for PF */
3865 ice_alloc_fd_guar_item(&pf->hw, &unused,
3866 func_caps->fd_fltr_guar);
3867 /* force shared filter pool for PF */
3868 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3869 func_caps->fd_fltr_best_effort);
3870 }
3872 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3873 if (func_caps->common_cap.ieee_1588)
3874 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3876 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3877 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3881 * ice_init_pf - Initialize general software structures (struct ice_pf)
3882 * @pf: board private structure to initialize
3884 static int ice_init_pf(struct ice_pf *pf)
3886 ice_set_pf_caps(pf);
3888 mutex_init(&pf->sw_mutex);
3889 mutex_init(&pf->tc_mutex);
3890 mutex_init(&pf->adev_mutex);
3891 mutex_init(&pf->lag_mutex);
3893 INIT_HLIST_HEAD(&pf->aq_wait_list);
3894 spin_lock_init(&pf->aq_wait_lock);
3895 init_waitqueue_head(&pf->aq_wait_queue);
3897 init_waitqueue_head(&pf->reset_wait_queue);
3899 /* setup service timer and periodic service task */
3900 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3901 pf->serv_tmr_period = HZ;
3902 INIT_WORK(&pf->serv_task, ice_service_task);
3903 clear_bit(ICE_SERVICE_SCHED, pf->state);
3905 mutex_init(&pf->avail_q_mutex);
3906 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3907 if (!pf->avail_txqs)
3908 return -ENOMEM;
3910 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3911 if (!pf->avail_rxqs) {
3912 bitmap_free(pf->avail_txqs);
3913 pf->avail_txqs = NULL;
3914 return -ENOMEM;
3915 }
3917 mutex_init(&pf->vfs.table_lock);
3918 hash_init(pf->vfs.table);
3919 ice_mbx_init_snapshot(&pf->hw);
3921 return 0;
3922 }
3925 * ice_is_wol_supported - check if WoL is supported
3926 * @hw: pointer to hardware info
3928 * Check if WoL is supported based on the HW configuration.
3929 * Returns true if NVM supports and enables WoL for this port, false otherwise
3931 bool ice_is_wol_supported(struct ice_hw *hw)
3932 {
3933 u16 wol_ctrl;
3935 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3936 * word) indicates WoL is not supported on the corresponding PF ID.
3938 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3939 return false;
3941 return !(BIT(hw->port_info->lport) & wol_ctrl);
3945 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3946 * @vsi: VSI being changed
3947 * @new_rx: new number of Rx queues
3948 * @new_tx: new number of Tx queues
3949 * @locked: is adev device_lock held
3951 * Only change the number of queues if new_tx, or new_rx is non-0.
3953 * Returns 0 on success.
3955 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
3957 struct ice_pf *pf = vsi->back;
3958 int err = 0, timeout = 50;
3960 if (!new_rx && !new_tx)
3961 return -EINVAL;
3963 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3964 timeout--;
3965 if (!timeout)
3966 return -EBUSY;
3967 usleep_range(1000, 2000);
3968 }
3970 if (new_tx)
3971 vsi->req_txq = (u16)new_tx;
3972 if (new_rx)
3973 vsi->req_rxq = (u16)new_rx;
3975 /* set for the next time the netdev is started */
3976 if (!netif_running(vsi->netdev)) {
3977 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3978 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3979 goto done;
3980 }
3982 ice_vsi_close(vsi);
3983 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3984 ice_pf_dcb_recfg(pf, locked);
3985 ice_vsi_open(vsi);
3986 done:
3987 clear_bit(ICE_CFG_BUSY, pf->state);
3988 return err;
3989 }
3992 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3993 * @pf: PF to configure
3995 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3996 * VSI can still Tx/Rx VLAN tagged packets.
3998 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4000 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4001 struct ice_vsi_ctx *ctxt;
4002 struct ice_hw *hw;
4003 int status;
4005 if (!vsi)
4006 return;
4008 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4009 if (!ctxt)
4010 return;
4012 hw = &pf->hw;
4013 ctxt->info = vsi->info;
4015 ctxt->info.valid_sections =
4016 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4017 ICE_AQ_VSI_PROP_SECURITY_VALID |
4018 ICE_AQ_VSI_PROP_SW_VALID);
4020 /* disable VLAN anti-spoof */
4021 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4022 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4024 /* disable VLAN pruning and keep all other settings */
4025 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4027 /* allow all VLANs on Tx and don't strip on Rx */
4028 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4029 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4031 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4032 if (status) {
4033 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4034 status, ice_aq_str(hw->adminq.sq_last_status));
4035 } else {
4036 vsi->info.sec_flags = ctxt->info.sec_flags;
4037 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4038 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4039 }
4041 kfree(ctxt);
4042 }
4045 * ice_log_pkg_init - log result of DDP package load
4046 * @hw: pointer to hardware info
4047 * @state: state of package load
4049 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4051 struct ice_pf *pf = hw->back;
4054 dev = ice_pf_to_dev(pf);
4056 switch (state) {
4057 case ICE_DDP_PKG_SUCCESS:
4058 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4059 hw->active_pkg_name,
4060 hw->active_pkg_ver.major,
4061 hw->active_pkg_ver.minor,
4062 hw->active_pkg_ver.update,
4063 hw->active_pkg_ver.draft);
4064 break;
4065 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4066 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4067 hw->active_pkg_name,
4068 hw->active_pkg_ver.major,
4069 hw->active_pkg_ver.minor,
4070 hw->active_pkg_ver.update,
4071 hw->active_pkg_ver.draft);
4072 break;
4073 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4074 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4075 hw->active_pkg_name,
4076 hw->active_pkg_ver.major,
4077 hw->active_pkg_ver.minor,
4078 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4079 break;
4080 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4081 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4082 hw->active_pkg_name,
4083 hw->active_pkg_ver.major,
4084 hw->active_pkg_ver.minor,
4085 hw->active_pkg_ver.update,
4086 hw->active_pkg_ver.draft,
4087 hw->pkg_name,
4088 hw->pkg_ver.major,
4089 hw->pkg_ver.minor,
4090 hw->pkg_ver.update,
4091 hw->pkg_ver.draft);
4092 break;
4093 case ICE_DDP_PKG_FW_MISMATCH:
4094 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4095 break;
4096 case ICE_DDP_PKG_INVALID_FILE:
4097 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4098 break;
4099 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4100 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4101 break;
4102 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4103 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4104 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4105 break;
4106 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4107 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4108 break;
4109 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4110 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4111 break;
4112 case ICE_DDP_PKG_LOAD_ERROR:
4113 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4114 /* poll for reset to complete */
4115 if (ice_check_reset(hw))
4116 dev_err(dev, "Error resetting device. Please reload the driver\n");
4117 break;
4118 case ICE_DDP_PKG_ERR:
4119 default:
4120 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4121 break;
4122 }
4123 }
4126 * ice_load_pkg - load/reload the DDP Package file
4127 * @firmware: firmware structure when firmware requested or NULL for reload
4128 * @pf: pointer to the PF instance
4130 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4131 * initialize HW tables.
4134 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4136 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4137 struct device *dev = ice_pf_to_dev(pf);
4138 struct ice_hw *hw = &pf->hw;
4140 /* Load DDP Package */
4141 if (firmware && !hw->pkg_copy) {
4142 state = ice_copy_and_init_pkg(hw, firmware->data,
4143 firmware->size);
4144 ice_log_pkg_init(hw, state);
4145 } else if (!firmware && hw->pkg_copy) {
4146 /* Reload package during rebuild after CORER/GLOBR reset */
4147 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4148 ice_log_pkg_init(hw, state);
4149 } else {
4150 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4151 return;
4152 }
4153 if (!ice_is_init_pkg_successful(state)) {
4154 /* safe mode */
4155 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4156 return;
4157 }
4159 /* Successful download package is the precondition for advanced
4160 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4162 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4166 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4167 * @pf: pointer to the PF structure
4169 * There is no error returned here because the driver should be able to handle
4170 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4171 * specifically with Tx.
4173 static void ice_verify_cacheline_size(struct ice_pf *pf)
4175 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4176 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4177 ICE_CACHE_LINE_BYTES);
4181 * ice_send_version - update firmware with driver version
4182 * @pf: PF struct
4184 * Returns 0 on success, else error code
4186 static int ice_send_version(struct ice_pf *pf)
4188 struct ice_driver_ver dv;
4190 dv.major_ver = 0xff;
4191 dv.minor_ver = 0xff;
4192 dv.build_ver = 0xff;
4193 dv.subbuild_ver = 0;
4194 strscpy((char *)dv.driver_string, UTS_RELEASE,
4195 sizeof(dv.driver_string));
4196 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4200 * ice_init_fdir - Initialize flow director VSI and configuration
4201 * @pf: pointer to the PF instance
4203 * returns 0 on success, negative on error
4205 static int ice_init_fdir(struct ice_pf *pf)
4207 struct device *dev = ice_pf_to_dev(pf);
4208 struct ice_vsi *ctrl_vsi;
4211 /* Side Band Flow Director needs to have a control VSI.
4212 * Allocate it and store it in the PF.
4214 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4215 if (!ctrl_vsi) {
4216 dev_dbg(dev, "could not create control VSI\n");
4217 return -ENOMEM;
4218 }
4220 err = ice_vsi_open_ctrl(ctrl_vsi);
4221 if (err) {
4222 dev_dbg(dev, "could not open control VSI\n");
4223 goto err_vsi_open;
4224 }
4226 mutex_init(&pf->hw.fdir_fltr_lock);
4228 err = ice_fdir_create_dflt_rules(pf);
4229 if (err)
4230 goto err_fdir_rule;
4232 return 0;
4234 err_fdir_rule:
4235 ice_fdir_release_flows(&pf->hw);
4236 ice_vsi_close(ctrl_vsi);
4237 err_vsi_open:
4238 ice_vsi_release(ctrl_vsi);
4239 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4240 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4241 pf->ctrl_vsi_idx = ICE_NO_VSI;
4246 static void ice_deinit_fdir(struct ice_pf *pf)
4248 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4253 ice_vsi_manage_fdir(vsi, false);
4254 ice_vsi_release(vsi);
4255 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4256 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4257 pf->ctrl_vsi_idx = ICE_NO_VSI;
	mutex_destroy(&pf->hw.fdir_fltr_lock);
4264 * ice_get_opt_fw_name - return optional firmware file name or NULL
4265 * @pf: pointer to the PF instance
4267 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4269 /* Optional firmware name same as default with additional dash
 * followed by an EUI-64 identifier (PCIe Device Serial Number)
4272 struct pci_dev *pdev = pf->pdev;
4273 char *opt_fw_filename;
4276 /* Determine the name of the optional file using the DSN (two
4277 * dwords following the start of the DSN Capability).
4279 dsn = pci_get_dsn(pdev);
4283 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4284 if (!opt_fw_filename)
4287 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4288 ICE_DDP_PKG_PATH, dsn);
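	/* e.g. a (hypothetical) DSN of 0x1234567890abcdef yields the lookup
	 * path "intel/ice/ddp/ice-1234567890abcdef.pkg"
	 */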
4290 return opt_fw_filename;
 * ice_request_fw - request the DDP package file and load it onto the device
4295 * @pf: pointer to the PF instance
4297 static void ice_request_fw(struct ice_pf *pf)
4299 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4300 const struct firmware *firmware = NULL;
4301 struct device *dev = ice_pf_to_dev(pf);
	/* the optional device-specific DDP (if present) overrides the default
	 * DDP package file. The kernel logs a debug message if the file doesn't
	 * exist, and warning messages for other errors.
	 */
4308 if (opt_fw_filename) {
4309 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4311 kfree(opt_fw_filename);
4315 /* request for firmware was successful. Download to device */
4316 ice_load_pkg(firmware, pf);
4317 kfree(opt_fw_filename);
4318 release_firmware(firmware);
4323 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4325 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4329 /* request for firmware was successful. Download to device */
4330 ice_load_pkg(firmware, pf);
4331 release_firmware(firmware);
4335 * ice_print_wake_reason - show the wake up cause in the log
4336 * @pf: pointer to the PF struct
4338 static void ice_print_wake_reason(struct ice_pf *pf)
4340 u32 wus = pf->wakeup_reason;
4341 const char *wake_str;
4343 /* if no wake event, nothing to print */
4347 if (wus & PFPM_WUS_LNKC_M)
4348 wake_str = "Link\n";
4349 else if (wus & PFPM_WUS_MAG_M)
4350 wake_str = "Magic Packet\n";
4351 else if (wus & PFPM_WUS_MNG_M)
4352 wake_str = "Management\n";
4353 else if (wus & PFPM_WUS_FW_RST_WK_M)
4354 wake_str = "Firmware Reset\n";
4356 wake_str = "Unknown\n";
4358 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4362 * ice_register_netdev - register netdev
4363 * @vsi: pointer to the VSI struct
4365 static int ice_register_netdev(struct ice_vsi *vsi)
4369 if (!vsi || !vsi->netdev)
4372 err = register_netdev(vsi->netdev);
4376 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4377 netif_carrier_off(vsi->netdev);
4378 netif_tx_stop_all_queues(vsi->netdev);
4383 static void ice_unregister_netdev(struct ice_vsi *vsi)
4385 if (!vsi || !vsi->netdev)
4388 unregister_netdev(vsi->netdev);
4389 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4393 * ice_cfg_netdev - Allocate, configure and register a netdev
4394 * @vsi: the VSI associated with the new netdev
4396 * Returns 0 on success, negative value on failure
4398 static int ice_cfg_netdev(struct ice_vsi *vsi)
4400 struct ice_netdev_priv *np;
4401 struct net_device *netdev;
4402 u8 mac_addr[ETH_ALEN];
4404 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4409 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4410 vsi->netdev = netdev;
4411 np = netdev_priv(netdev);
4414 ice_set_netdev_features(netdev);
4417 if (vsi->type == ICE_VSI_PF) {
4418 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4419 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4420 eth_hw_addr_set(netdev, mac_addr);
4423 netdev->priv_flags |= IFF_UNICAST_FLT;
4425 /* Setup netdev TC information */
4426 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4428 netdev->max_mtu = ICE_MAX_MTU;
4433 static void ice_decfg_netdev(struct ice_vsi *vsi)
4435 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4436 free_netdev(vsi->netdev);
4440 static int ice_start_eth(struct ice_vsi *vsi)
4444 err = ice_init_mac_fltr(vsi->back);
4448 err = ice_vsi_open(vsi);
4450 ice_fltr_remove_all(vsi);
4455 static void ice_stop_eth(struct ice_vsi *vsi)
4457 ice_fltr_remove_all(vsi);
4461 static int ice_init_eth(struct ice_pf *pf)
4463 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4469 /* init channel list */
4470 INIT_LIST_HEAD(&vsi->ch_list);
4472 err = ice_cfg_netdev(vsi);
4475 /* Setup DCB netlink interface */
4476 ice_dcbnl_setup(vsi);
4478 err = ice_init_mac_fltr(pf);
4480 goto err_init_mac_fltr;
4482 err = ice_devlink_create_pf_port(pf);
4484 goto err_devlink_create_pf_port;
4486 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
4488 err = ice_register_netdev(vsi);
4490 goto err_register_netdev;
4492 err = ice_tc_indir_block_register(vsi);
4494 goto err_tc_indir_block_register;
4500 err_tc_indir_block_register:
4501 ice_unregister_netdev(vsi);
4502 err_register_netdev:
4503 ice_devlink_destroy_pf_port(pf);
4504 err_devlink_create_pf_port:
4506 ice_decfg_netdev(vsi);
4510 static void ice_deinit_eth(struct ice_pf *pf)
4512 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4518 ice_unregister_netdev(vsi);
4519 ice_devlink_destroy_pf_port(pf);
4520 ice_tc_indir_block_unregister(vsi);
4521 ice_decfg_netdev(vsi);
4525 * ice_wait_for_fw - wait for full FW readiness
4526 * @hw: pointer to the hardware structure
4527 * @timeout: milliseconds that can elapse before timing out
4529 static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4534 while (elapsed <= timeout) {
4535 fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4537 /* firmware was not yet loaded, we have to wait more */
4549 static int ice_init_dev(struct ice_pf *pf)
4551 struct device *dev = ice_pf_to_dev(pf);
4552 struct ice_hw *hw = &pf->hw;
4555 err = ice_init_hw(hw);
4557 dev_err(dev, "ice_init_hw failed: %d\n", err);
	/* Some cards require longer initialization times
	 * due to the need to load FW from an external source.
	 * This can take up to half a minute.
	 */
4565 if (ice_is_pf_c827(hw)) {
4566 err = ice_wait_for_fw(hw, 30000);
4568 dev_err(dev, "ice_wait_for_fw timed out");
4573 ice_init_feature_support(pf);
	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
	 * set in pf->flags, which will cause ice_is_safe_mode to return true.
	 */
4581 if (ice_is_safe_mode(pf)) {
4582 /* we already got function/device capabilities but these don't
4583 * reflect what the driver needs to do in safe mode. Instead of
4584 * adding conditional logic everywhere to ignore these
4585 * device/function capabilities, override them.
4587 ice_set_safe_mode_caps(hw);
4590 err = ice_init_pf(pf);
4592 dev_err(dev, "ice_init_pf failed: %d\n", err);
4596 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4597 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4598 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4599 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4600 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4601 pf->hw.udp_tunnel_nic.tables[0].n_entries =
4602 pf->hw.tnl.valid_count[TNL_VXLAN];
4603 pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4604 UDP_TUNNEL_TYPE_VXLAN;
4606 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4607 pf->hw.udp_tunnel_nic.tables[1].n_entries =
4608 pf->hw.tnl.valid_count[TNL_GENEVE];
4609 pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4610 UDP_TUNNEL_TYPE_GENEVE;
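	/* the table sizes mirror how many VXLAN/GENEVE ports the loaded DDP
	 * package reports the hardware can track at once (an assumption based
	 * on the tnl.valid_count values populated while parsing the package)
	 */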
4613 err = ice_init_interrupt_scheme(pf);
4615 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4617 goto err_init_interrupt_scheme;
	/* In case of MSIX we are going to set up the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI,
	 * the misc functionality and queue processing are combined in
	 * the same vector, and that gets set up at open.
	 */
4625 err = ice_req_irq_msix_misc(pf);
4627 dev_err(dev, "setup of misc vector failed: %d\n", err);
4628 goto err_req_irq_msix_misc;
4633 err_req_irq_msix_misc:
4634 ice_clear_interrupt_scheme(pf);
4635 err_init_interrupt_scheme:
4642 static void ice_deinit_dev(struct ice_pf *pf)
4644 ice_free_irq_msix_misc(pf);
4646 ice_deinit_hw(&pf->hw);
4648 /* Service task is already stopped, so call reset directly. */
4649 ice_reset(&pf->hw, ICE_RESET_PFR);
4650 pci_wait_for_pending_transaction(pf->pdev);
4651 ice_clear_interrupt_scheme(pf);
4654 static void ice_init_features(struct ice_pf *pf)
4656 struct device *dev = ice_pf_to_dev(pf);
4658 if (ice_is_safe_mode(pf))
4661 /* initialize DDP driven features */
4662 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4665 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4668 /* Note: Flow director init failure is non-fatal to load */
4669 if (ice_init_fdir(pf))
4670 dev_err(dev, "could not initialize flow director\n");
4672 /* Note: DCB init failure is non-fatal to load */
4673 if (ice_init_pf_dcb(pf, false)) {
4674 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4675 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4677 ice_cfg_lldp_mib_change(&pf->hw, true);
4680 if (ice_init_lag(pf))
4681 dev_warn(dev, "Failed to init link aggregation support\n");
4684 static void ice_deinit_features(struct ice_pf *pf)
4687 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4688 ice_cfg_lldp_mib_change(&pf->hw, false);
4689 ice_deinit_fdir(pf);
4690 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4692 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4693 ice_ptp_release(pf);
4696 static void ice_init_wakeup(struct ice_pf *pf)
4698 /* Save wakeup reason register for later use */
4699 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4701 /* check for a power management event */
4702 ice_print_wake_reason(pf);
4704 /* clear wake status, all bits */
4705 wr32(&pf->hw, PFPM_WUS, U32_MAX);
4707 /* Disable WoL at init, wait for user to enable */
4708 device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4711 static int ice_init_link(struct ice_pf *pf)
4713 struct device *dev = ice_pf_to_dev(pf);
4716 err = ice_init_link_events(pf->hw.port_info);
4718 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4722 /* not a fatal error if this fails */
4723 err = ice_init_nvm_phy_type(pf->hw.port_info);
4725 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4727 /* not a fatal error if this fails */
4728 err = ice_update_link_info(pf->hw.port_info);
4730 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4732 ice_init_link_dflt_override(pf->hw.port_info);
4734 ice_check_link_cfg_err(pf,
4735 pf->hw.port_info->phy.link_info.link_cfg_err);
4737 /* if media available, initialize PHY settings */
4738 if (pf->hw.port_info->phy.link_info.link_info &
4739 ICE_AQ_MEDIA_AVAILABLE) {
4740 /* not a fatal error if this fails */
4741 err = ice_init_phy_user_cfg(pf->hw.port_info);
4743 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4745 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4746 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4749 ice_configure_phy(vsi);
4752 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4758 static int ice_init_pf_sw(struct ice_pf *pf)
4760 bool dvm = ice_is_dvm_ena(&pf->hw);
4761 struct ice_vsi *vsi;
4764 /* create switch struct for the switch element created by FW on boot */
4765 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4770 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4772 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4774 pf->first_sw->pf = pf;
4776 /* record the sw_id available for later use */
4777 pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4779 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4781 goto err_aq_set_port_params;
4783 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4786 goto err_pf_vsi_setup;
4792 err_aq_set_port_params:
4793 kfree(pf->first_sw);
4797 static void ice_deinit_pf_sw(struct ice_pf *pf)
4799 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4804 ice_vsi_release(vsi);
4805 kfree(pf->first_sw);
4808 static int ice_alloc_vsis(struct ice_pf *pf)
4810 struct device *dev = ice_pf_to_dev(pf);
4812 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4813 if (!pf->num_alloc_vsi)
4816 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4818 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4819 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4820 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4823 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4828 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4829 sizeof(*pf->vsi_stats), GFP_KERNEL);
4830 if (!pf->vsi_stats) {
4831 devm_kfree(dev, pf->vsi);
4838 static void ice_dealloc_vsis(struct ice_pf *pf)
4840 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4841 pf->vsi_stats = NULL;
4843 pf->num_alloc_vsi = 0;
4844 devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4848 static int ice_init_devlink(struct ice_pf *pf)
4852 err = ice_devlink_register_params(pf);
4856 ice_devlink_init_regions(pf);
4857 ice_devlink_register(pf);
4862 static void ice_deinit_devlink(struct ice_pf *pf)
4864 ice_devlink_unregister(pf);
4865 ice_devlink_destroy_regions(pf);
4866 ice_devlink_unregister_params(pf);
4869 static int ice_init(struct ice_pf *pf)
4873 err = ice_init_dev(pf);
4877 err = ice_alloc_vsis(pf);
4879 goto err_alloc_vsis;
4881 err = ice_init_pf_sw(pf);
4883 goto err_init_pf_sw;
4885 ice_init_wakeup(pf);
4887 err = ice_init_link(pf);
4891 err = ice_send_version(pf);
4895 ice_verify_cacheline_size(pf);
4897 if (ice_is_safe_mode(pf))
4898 ice_set_safe_mode_vlan_cfg(pf);
4900 /* print PCI link speed and width */
4901 pcie_print_link_status(pf->pdev);
4903 /* ready to go, so clear down state bit */
4904 clear_bit(ICE_DOWN, pf->state);
4905 clear_bit(ICE_SERVICE_DIS, pf->state);
4907 /* since everything is good, start the service timer */
4908 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4913 ice_deinit_pf_sw(pf);
4915 ice_dealloc_vsis(pf);
4921 static void ice_deinit(struct ice_pf *pf)
4923 set_bit(ICE_SERVICE_DIS, pf->state);
4924 set_bit(ICE_DOWN, pf->state);
4926 ice_deinit_pf_sw(pf);
4927 ice_dealloc_vsis(pf);
 * ice_load - load the PF by initializing HW and starting the VSI
 * @pf: pointer to the PF instance
4935 int ice_load(struct ice_pf *pf)
4937 struct ice_vsi_cfg_params params = {};
4938 struct ice_vsi *vsi;
4941 err = ice_init_dev(pf);
4945 vsi = ice_get_main_vsi(pf);
4947 params = ice_vsi_to_params(vsi);
4948 params.flags = ICE_VSI_FLAG_INIT;
	err = ice_vsi_cfg(vsi, &params);
4955 err = ice_start_eth(ice_get_main_vsi(pf));
4960 err = ice_init_rdma(pf);
4964 ice_init_features(pf);
4965 ice_service_task_restart(pf);
4967 clear_bit(ICE_DOWN, pf->state);
4972 ice_vsi_close(ice_get_main_vsi(pf));
4975 ice_vsi_decfg(ice_get_main_vsi(pf));
 * ice_unload - unload the PF by stopping the VSI and deinitializing HW
 * @pf: pointer to the PF instance
4986 void ice_unload(struct ice_pf *pf)
4988 ice_deinit_features(pf);
4989 ice_deinit_rdma(pf);
4991 ice_stop_eth(ice_get_main_vsi(pf));
4992 ice_vsi_decfg(ice_get_main_vsi(pf));
4998 * ice_probe - Device initialization routine
4999 * @pdev: PCI device information struct
5000 * @ent: entry in ice_pci_tbl
5002 * Returns 0 on success, negative on failure
5005 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5007 struct device *dev = &pdev->dev;
5012 if (pdev->is_virtfn) {
5013 dev_err(dev, "can't probe a virtual function\n");
5017 /* this driver uses devres, see
5018 * Documentation/driver-api/driver-model/devres.rst
5020 err = pcim_enable_device(pdev);
5024 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5026 dev_err(dev, "BAR0 I/O map error %d\n", err);
5030 pf = ice_allocate_pf(dev);
5034 /* initialize Auxiliary index to invalid value */
5037 /* set up for high or low DMA */
5038 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5040 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5044 pci_set_master(pdev);
5047 pci_set_drvdata(pdev, pf);
5048 set_bit(ICE_DOWN, pf->state);
5049 /* Disable service task until DOWN bit is cleared */
5050 set_bit(ICE_SERVICE_DIS, pf->state);
5053 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5054 pci_save_state(pdev);
5057 hw->port_info = NULL;
5058 hw->vendor_id = pdev->vendor;
5059 hw->device_id = pdev->device;
5060 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5061 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5062 hw->subsystem_device_id = pdev->subsystem_device;
5063 hw->bus.device = PCI_SLOT(pdev->devfn);
5064 hw->bus.func = PCI_FUNC(pdev->devfn);
5065 ice_set_ctrlq_len(hw);
5067 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5069 #ifndef CONFIG_DYNAMIC_DEBUG
5071 hw->debug_mask = debug;
5078 err = ice_init_eth(pf);
5082 err = ice_init_rdma(pf);
5086 err = ice_init_devlink(pf);
5088 goto err_init_devlink;
5090 ice_init_features(pf);
5095 ice_deinit_rdma(pf);
5101 pci_disable_device(pdev);
5106 * ice_set_wake - enable or disable Wake on LAN
5107 * @pf: pointer to the PF struct
5109 * Simple helper for WoL control
5111 static void ice_set_wake(struct ice_pf *pf)
5113 struct ice_hw *hw = &pf->hw;
5114 bool wol = pf->wol_ena;
5116 /* clear wake state, otherwise new wake events won't fire */
5117 wr32(hw, PFPM_WUS, U32_MAX);
5119 /* enable / disable APM wake up, no RMW needed */
5120 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5122 /* set magic packet filter enabled */
5123 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5127 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5128 * @pf: pointer to the PF struct
5130 * Issue firmware command to enable multicast magic wake, making
5131 * sure that any locally administered address (LAA) is used for
5132 * wake, and that PF reset doesn't undo the LAA.
5134 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5136 struct device *dev = ice_pf_to_dev(pf);
5137 struct ice_hw *hw = &pf->hw;
5138 u8 mac_addr[ETH_ALEN];
5139 struct ice_vsi *vsi;
5146 vsi = ice_get_main_vsi(pf);
5150 /* Get current MAC address in case it's an LAA */
5152 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5154 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5156 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5157 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5158 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5160 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5162 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5163 status, ice_aq_str(hw->adminq.sq_last_status));
5167 * ice_remove - Device removal routine
5168 * @pdev: PCI device information struct
5170 static void ice_remove(struct pci_dev *pdev)
5172 struct ice_pf *pf = pci_get_drvdata(pdev);
5175 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5176 if (!ice_is_reset_in_progress(pf->state))
5181 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5182 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5186 ice_service_task_stop(pf);
5187 ice_aq_cancel_waiting_tasks(pf);
5188 set_bit(ICE_DOWN, pf->state);
5190 if (!ice_is_safe_mode(pf))
5191 ice_remove_arfs(pf);
5192 ice_deinit_features(pf);
5193 ice_deinit_devlink(pf);
5194 ice_deinit_rdma(pf);
5198 ice_vsi_release_all(pf);
5200 ice_setup_mc_magic_wake(pf);
5203 pci_disable_device(pdev);
5207 * ice_shutdown - PCI callback for shutting down device
5208 * @pdev: PCI device information struct
5210 static void ice_shutdown(struct pci_dev *pdev)
5212 struct ice_pf *pf = pci_get_drvdata(pdev);
5216 if (system_state == SYSTEM_POWER_OFF) {
5217 pci_wake_from_d3(pdev, pf->wol_ena);
5218 pci_set_power_state(pdev, PCI_D3hot);
5224 * ice_prepare_for_shutdown - prep for PCI shutdown
5225 * @pf: board private structure
5227 * Inform or close all dependent features in prep for PCI device shutdown
5229 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5231 struct ice_hw *hw = &pf->hw;
5234 /* Notify VFs of impending reset */
5235 if (ice_check_sq_alive(hw, &hw->mailboxq))
5236 ice_vc_notify_reset(pf);
5238 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5240 /* disable the VSIs and their queues that are not already DOWN */
5241 ice_pf_dis_all_vsi(pf, false);
	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			pf->vsi[v]->vsi_num = 0;
5247 ice_shutdown_all_ctrlq(hw);
5251 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5252 * @pf: board private structure to reinitialize
 * This routine reinitializes the interrupt scheme that was cleared during
 * the power management suspend callback.
 *
 * This should be called during the resume routine to re-allocate the
 * q_vectors and reacquire interrupts.
 */
5260 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5262 struct device *dev = ice_pf_to_dev(pf);
5265 /* Since we clear MSIX flag during suspend, we need to
5266 * set it back during resume...
5269 ret = ice_init_interrupt_scheme(pf);
5271 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5275 /* Remap vectors and rings, after successful re-init interrupts */
5276 ice_for_each_vsi(pf, v) {
5280 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5283 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5286 ret = ice_req_irq_msix_misc(pf);
5288 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5298 ice_vsi_free_q_vectors(pf->vsi[v]);
 * ice_suspend - PM suspend callback
 * @dev: generic device information structure
5307 * Power Management callback to quiesce the device and prepare
5308 * for D3 transition.
5310 static int __maybe_unused ice_suspend(struct device *dev)
5312 struct pci_dev *pdev = to_pci_dev(dev);
5316 pf = pci_get_drvdata(pdev);
5318 if (!ice_pf_state_is_nominal(pf)) {
5319 dev_err(dev, "Device is not ready, no need to suspend it\n");
5323 /* Stop watchdog tasks until resume completion.
5324 * Even though it is most likely that the service task is
5325 * disabled if the device is suspended or down, the service task's
5326 * state is controlled by a different state bit, and we should
5327 * store and honor whatever state that bit is in at this point.
5329 disabled = ice_service_task_stop(pf);
5331 ice_unplug_aux_dev(pf);
	/* Already suspended? Then there is nothing to do */
5334 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5336 ice_service_task_restart(pf);
5340 if (test_bit(ICE_DOWN, pf->state) ||
5341 ice_is_reset_in_progress(pf->state)) {
5342 dev_err(dev, "can't suspend device in reset or already down\n");
5344 ice_service_task_restart(pf);
5348 ice_setup_mc_magic_wake(pf);
5350 ice_prepare_for_shutdown(pf);
	/* Free vectors, clear the interrupt scheme and release IRQs
	 * for proper hibernation, especially with a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back.
	 */
5359 ice_free_irq_msix_misc(pf);
5360 ice_for_each_vsi(pf, v) {
5363 ice_vsi_free_q_vectors(pf->vsi[v]);
5365 ice_clear_interrupt_scheme(pf);
5367 pci_save_state(pdev);
5368 pci_wake_from_d3(pdev, pf->wol_ena);
5369 pci_set_power_state(pdev, PCI_D3hot);
5374 * ice_resume - PM callback for waking up from D3
5375 * @dev: generic device information structure
5377 static int __maybe_unused ice_resume(struct device *dev)
5379 struct pci_dev *pdev = to_pci_dev(dev);
5380 enum ice_reset_req reset_type;
5385 pci_set_power_state(pdev, PCI_D0);
5386 pci_restore_state(pdev);
5387 pci_save_state(pdev);
5389 if (!pci_device_is_present(pdev))
5392 ret = pci_enable_device_mem(pdev);
5394 dev_err(dev, "Cannot enable device after suspend\n");
5398 pf = pci_get_drvdata(pdev);
5401 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5402 ice_print_wake_reason(pf);
5404 /* We cleared the interrupt scheme when we suspended, so we need to
5405 * restore it now to resume device functionality.
5407 ret = ice_reinit_interrupt_scheme(pf);
5409 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5411 clear_bit(ICE_DOWN, pf->state);
5412 /* Now perform PF reset and rebuild */
5413 reset_type = ICE_RESET_PFR;
5414 /* re-enable service task for reset, but allow reset to schedule it */
5415 clear_bit(ICE_SERVICE_DIS, pf->state);
5417 if (ice_schedule_reset(pf, reset_type))
5418 dev_err(dev, "Reset during resume failed.\n");
5420 clear_bit(ICE_SUSPENDED, pf->state);
5421 ice_service_task_restart(pf);
5423 /* Restart the service task */
5424 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5428 #endif /* CONFIG_PM */
5431 * ice_pci_err_detected - warning that PCI error has been detected
5432 * @pdev: PCI device information struct
5433 * @err: the type of PCI error
5435 * Called to warn that something happened on the PCI bus and the error handling
5436 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5438 static pci_ers_result_t
5439 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5441 struct ice_pf *pf = pci_get_drvdata(pdev);
5444 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5446 return PCI_ERS_RESULT_DISCONNECT;
5449 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5450 ice_service_task_stop(pf);
5452 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5453 set_bit(ICE_PFR_REQ, pf->state);
5454 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5458 return PCI_ERS_RESULT_NEED_RESET;
5462 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5463 * @pdev: PCI device information struct
5465 * Called to determine if the driver can recover from the PCI slot reset by
5466 * using a register read to determine if the device is recoverable.
5468 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5470 struct ice_pf *pf = pci_get_drvdata(pdev);
5471 pci_ers_result_t result;
5475 err = pci_enable_device_mem(pdev);
5477 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5479 result = PCI_ERS_RESULT_DISCONNECT;
5481 pci_set_master(pdev);
5482 pci_restore_state(pdev);
5483 pci_save_state(pdev);
5484 pci_wake_from_d3(pdev, false);
5486 /* Check for life */
5487 reg = rd32(&pf->hw, GLGEN_RTRIG);
5489 result = PCI_ERS_RESULT_RECOVERED;
5491 result = PCI_ERS_RESULT_DISCONNECT;
5498 * ice_pci_err_resume - restart operations after PCI error recovery
5499 * @pdev: PCI device information struct
5501 * Called to allow the driver to bring things back up after PCI error and/or
5502 * reset recovery have finished
5504 static void ice_pci_err_resume(struct pci_dev *pdev)
5506 struct ice_pf *pf = pci_get_drvdata(pdev);
5509 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5514 if (test_bit(ICE_SUSPENDED, pf->state)) {
5515 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5520 ice_restore_all_vfs_msi_state(pdev);
5522 ice_do_reset(pf, ICE_RESET_PFR);
5523 ice_service_task_restart(pf);
5524 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5528 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5529 * @pdev: PCI device information struct
5531 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5533 struct ice_pf *pf = pci_get_drvdata(pdev);
5535 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5536 ice_service_task_stop(pf);
5538 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5539 set_bit(ICE_PFR_REQ, pf->state);
5540 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5546 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5547 * @pdev: PCI device information struct
5549 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5551 ice_pci_err_resume(pdev);
5554 /* ice_pci_tbl - PCI Device ID Table
5556 * Wildcard entries (PCI_ANY_ID) should come last
5557 * Last entry must be all 0s
5559 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5560 * Class, Class Mask, private data (not used) }
5562 static const struct pci_device_id ice_pci_tbl[] = {
5563 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5564 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5565 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5566 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5567 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5568 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5569 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5570 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5571 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5572 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5573 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5574 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5575 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5576 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5577 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5578 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5579 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5580 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5581 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5582 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5583 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5584 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5585 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5586 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5587 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5588 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5589 /* required last entry */
5592 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5594 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5596 static const struct pci_error_handlers ice_pci_err_handler = {
5597 .error_detected = ice_pci_err_detected,
5598 .slot_reset = ice_pci_err_slot_reset,
5599 .reset_prepare = ice_pci_err_reset_prepare,
5600 .reset_done = ice_pci_err_reset_done,
5601 .resume = ice_pci_err_resume
5604 static struct pci_driver ice_driver = {
5605 .name = KBUILD_MODNAME,
5606 .id_table = ice_pci_tbl,
5608 .remove = ice_remove,
5610 .driver.pm = &ice_pm_ops,
5611 #endif /* CONFIG_PM */
5612 .shutdown = ice_shutdown,
5613 .sriov_configure = ice_sriov_configure,
5614 .err_handler = &ice_pci_err_handler
5618 * ice_module_init - Driver registration routine
5620 * ice_module_init is the first routine called when the driver is
5621 * loaded. All it does is register with the PCI subsystem.
5623 static int __init ice_module_init(void)
5625 int status = -ENOMEM;
5627 pr_info("%s\n", ice_driver_string);
5628 pr_info("%s\n", ice_copyright);
5630 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5632 pr_err("Failed to create workqueue\n");
5636 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5638 pr_err("Failed to create LAG workqueue\n");
5642 status = pci_register_driver(&ice_driver);
5644 pr_err("failed to register PCI driver, err %d\n", status);
5645 goto err_dest_lag_wq;
5651 destroy_workqueue(ice_lag_wq);
5653 destroy_workqueue(ice_wq);
5656 module_init(ice_module_init);
5659 * ice_module_exit - Driver exit cleanup routine
 * ice_module_exit is called just before the driver is removed from memory.
 */
5664 static void __exit ice_module_exit(void)
5666 pci_unregister_driver(&ice_driver);
5667 destroy_workqueue(ice_wq);
5668 destroy_workqueue(ice_lag_wq);
5669 pr_info("module unloaded\n");
5671 module_exit(ice_module_exit);
5674 * ice_set_mac_address - NDO callback to set MAC address
5675 * @netdev: network interface device structure
5676 * @pi: pointer to an address structure
5678 * Returns 0 on success, negative on failure
5680 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5682 struct ice_netdev_priv *np = netdev_priv(netdev);
5683 struct ice_vsi *vsi = np->vsi;
5684 struct ice_pf *pf = vsi->back;
5685 struct ice_hw *hw = &pf->hw;
5686 struct sockaddr *addr = pi;
5687 u8 old_mac[ETH_ALEN];
5692 mac = (u8 *)addr->sa_data;
5694 if (!is_valid_ether_addr(mac))
5695 return -EADDRNOTAVAIL;
5697 if (test_bit(ICE_DOWN, pf->state) ||
5698 ice_is_reset_in_progress(pf->state)) {
5699 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5704 if (ice_chnl_dmac_fltr_cnt(pf)) {
5705 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5710 netif_addr_lock_bh(netdev);
5711 ether_addr_copy(old_mac, netdev->dev_addr);
5712 /* change the netdev's MAC address */
5713 eth_hw_addr_set(netdev, mac);
5714 netif_addr_unlock_bh(netdev);
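	/* the netdev MAC is swapped under the addr lock so readers never see
	 * a half-written address; old_mac is kept so the change can be rolled
	 * back if the filter updates below fail
	 */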
5716 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5717 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5718 if (err && err != -ENOENT) {
5719 err = -EADDRNOTAVAIL;
5720 goto err_update_filters;
5723 /* Add filter for new MAC. If filter exists, return success */
5724 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5725 if (err == -EEXIST) {
5726 /* Although this MAC filter is already present in hardware it's
5727 * possible in some cases (e.g. bonding) that dev_addr was
5728 * modified outside of the driver and needs to be restored back
5731 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5735 /* error if the new filter addition failed */
5736 err = -EADDRNOTAVAIL;
5741 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5743 netif_addr_lock_bh(netdev);
5744 eth_hw_addr_set(netdev, old_mac);
5745 netif_addr_unlock_bh(netdev);
5749 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5752 /* write new MAC address to the firmware */
5753 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5754 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5756 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5763 * ice_set_rx_mode - NDO callback to set the netdev filters
5764 * @netdev: network interface device structure
5766 static void ice_set_rx_mode(struct net_device *netdev)
5768 struct ice_netdev_priv *np = netdev_priv(netdev);
5769 struct ice_vsi *vsi = np->vsi;
5771 if (!vsi || ice_is_switchdev_running(vsi->back))
	/* Set the flags to synchronize filters.
	 * ndo_set_rx_mode may be triggered even without a change in netdev
	 * flags.
	 */
5778 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5779 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5780 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5782 /* schedule our worker thread which will take care of
5783 * applying the new filter changes
5785 ice_service_task_schedule(vsi->back);
5789 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5790 * @netdev: network interface device structure
5791 * @queue_index: Queue ID
5792 * @maxrate: maximum bandwidth in Mbps
5795 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5797 struct ice_netdev_priv *np = netdev_priv(netdev);
5798 struct ice_vsi *vsi = np->vsi;
5803 /* Validate maxrate requested is within permitted range */
5804 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5805 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5806 maxrate, queue_index);
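	/* Note: maxrate from the stack is in Mbps while the scheduler limits
	 * appear to be kept in Kbps (an assumption about ICE_SCHED_MAX_BW),
	 * hence the / 1000 in the check above and the * 1000 when programming
	 * the limit below
	 */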
5810 q_handle = vsi->tx_rings[queue_index]->q_handle;
5811 tc = ice_dcb_get_tc(vsi, queue_index);
5813 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
5815 netdev_err(netdev, "Invalid VSI for given queue %d\n",
	/* Set BW back to default when the user sets maxrate to 0 */
5822 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5823 q_handle, ICE_MAX_BW);
5825 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5826 q_handle, ICE_MAX_BW, maxrate * 1000);
5828 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5835 * ice_fdb_add - add an entry to the hardware database
5836 * @ndm: the input from the stack
5837 * @tb: pointer to array of nladdr (unused)
5838 * @dev: the net device pointer
5839 * @addr: the MAC address entry being added
5841 * @flags: instructions from stack about fdb operation
5842 * @extack: netlink extended ack
5845 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5846 struct net_device *dev, const unsigned char *addr, u16 vid,
5847 u16 flags, struct netlink_ext_ack __always_unused *extack)
5852 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5855 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5856 netdev_err(dev, "FDB only supports static addresses\n");
5860 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5861 err = dev_uc_add_excl(dev, addr);
5862 else if (is_multicast_ether_addr(addr))
5863 err = dev_mc_add_excl(dev, addr);
5867 /* Only return duplicate errors if NLM_F_EXCL is set */
5868 if (err == -EEXIST && !(flags & NLM_F_EXCL))
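	/* this hook is exercised from userspace via iproute2, e.g. something
	 * like (hypothetical values):
	 *	bridge fdb add 01:00:5e:00:00:01 dev eth0 permanent
	 */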
5875 * ice_fdb_del - delete an entry from the hardware database
5876 * @ndm: the input from the stack
5877 * @tb: pointer to array of nladdr (unused)
5878 * @dev: the net device pointer
 * @addr: the MAC address entry being removed
5881 * @extack: netlink extended ack
5884 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5885 struct net_device *dev, const unsigned char *addr,
5886 __always_unused u16 vid, struct netlink_ext_ack *extack)
5890 if (ndm->ndm_state & NUD_PERMANENT) {
5891 netdev_err(dev, "FDB only supports static addresses\n");
5895 if (is_unicast_ether_addr(addr))
5896 err = dev_uc_del(dev, addr);
5897 else if (is_multicast_ether_addr(addr))
5898 err = dev_mc_del(dev, addr);
5905 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5906 NETIF_F_HW_VLAN_CTAG_TX | \
5907 NETIF_F_HW_VLAN_STAG_RX | \
5908 NETIF_F_HW_VLAN_STAG_TX)
5910 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5911 NETIF_F_HW_VLAN_STAG_RX)
5913 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5914 NETIF_F_HW_VLAN_STAG_FILTER)
5917 * ice_fix_features - fix the netdev features flags based on device limitations
5918 * @netdev: ptr to the netdev that flags are being fixed on
5919 * @features: features that need to be checked and possibly fixed
 * Make sure any fixups are made to features in this callback. This enables the
 * driver to avoid checking for unsupported configurations everywhere else,
 * because that is the responsibility of this callback.
 *
5925 * Single VLAN Mode (SVM) Supported Features:
5926 * NETIF_F_HW_VLAN_CTAG_FILTER
5927 * NETIF_F_HW_VLAN_CTAG_RX
5928 * NETIF_F_HW_VLAN_CTAG_TX
5930 * Double VLAN Mode (DVM) Supported Features:
5931 * NETIF_F_HW_VLAN_CTAG_FILTER
5932 * NETIF_F_HW_VLAN_CTAG_RX
5933 * NETIF_F_HW_VLAN_CTAG_TX
5935 * NETIF_F_HW_VLAN_STAG_FILTER
 * NETIF_F_HW_VLAN_STAG_RX
 * NETIF_F_HW_VLAN_STAG_TX
5939 * Features that need fixing:
5940 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
 * These are mutually exclusive as the VSI context cannot support multiple
 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
 * is not done, then default to clearing the requested STAG offload settings.
 *
5946 * All supported filtering has to be enabled or disabled together. For
5947 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5948 * together. If this is not done, then default to VLAN filtering disabled.
5949 * These are mutually exclusive as there is currently no way to
 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
 * managed filtering.
 */
5953 static netdev_features_t
5954 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5956 struct ice_netdev_priv *np = netdev_priv(netdev);
5957 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5958 bool cur_ctag, cur_stag, req_ctag, req_stag;
5960 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5961 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5962 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5964 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5965 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5966 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5968 if (req_vlan_fltr != cur_vlan_fltr) {
5969 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5970 if (req_ctag && req_stag) {
5971 features |= NETIF_VLAN_FILTERING_FEATURES;
5972 } else if (!req_ctag && !req_stag) {
5973 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5974 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
5975 (!cur_stag && req_stag && !cur_ctag)) {
5976 features |= NETIF_VLAN_FILTERING_FEATURES;
5977 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
5978 } else if ((cur_ctag && !req_ctag && cur_stag) ||
5979 (cur_stag && !req_stag && cur_ctag)) {
5980 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5981 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
5984 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
5985 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
5987 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
5988 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5992 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
5993 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
5994 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
5995 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
5996 NETIF_F_HW_VLAN_STAG_TX);
5999 if (!(netdev->features & NETIF_F_RXFCS) &&
6000 (features & NETIF_F_RXFCS) &&
6001 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6002 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6003 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6004 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6011 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6013 * @features: features used to determine VLAN offload settings
6015 * First, determine the vlan_ethertype based on the VLAN offload bits in
6016 * features. Then determine if stripping and insertion should be enabled or
6017 * disabled. Finally enable or disable VLAN stripping and insertion.
6020 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6022 bool enable_stripping = true, enable_insertion = true;
6023 struct ice_vsi_vlan_ops *vlan_ops;
6024 int strip_err = 0, insert_err = 0;
6025 u16 vlan_ethertype = 0;
6027 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6029 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6030 vlan_ethertype = ETH_P_8021AD;
6031 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6032 vlan_ethertype = ETH_P_8021Q;
6034 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6035 enable_stripping = false;
6036 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6037 enable_insertion = false;
6039 if (enable_stripping)
6040 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6042 strip_err = vlan_ops->dis_stripping(vsi);
6044 if (enable_insertion)
6045 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6047 insert_err = vlan_ops->dis_insertion(vsi);
6049 if (strip_err || insert_err)
6056 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6058 * @features: features used to determine VLAN filtering settings
6060 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6064 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6066 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6069 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6070 * if either bit is set
6073 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6074 err = vlan_ops->ena_rx_filtering(vsi);
6076 err = vlan_ops->dis_rx_filtering(vsi);
6082 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6083 * @netdev: ptr to the netdev being adjusted
6084 * @features: the feature set that the stack is suggesting
 * Only update VLAN settings if the requested_vlan_features are different from
 * the current_vlan_features.
6090 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6092 netdev_features_t current_vlan_features, requested_vlan_features;
6093 struct ice_netdev_priv *np = netdev_priv(netdev);
6094 struct ice_vsi *vsi = np->vsi;
6097 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6098 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6099 if (current_vlan_features ^ requested_vlan_features) {
6100 if ((features & NETIF_F_RXFCS) &&
6101 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6102 dev_err(ice_pf_to_dev(vsi->back),
6103 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6107 err = ice_set_vlan_offload_features(vsi, features);
6112 current_vlan_features = netdev->features &
6113 NETIF_VLAN_FILTERING_FEATURES;
6114 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6115 if (current_vlan_features ^ requested_vlan_features) {
6116 err = ice_set_vlan_filtering_features(vsi, features);
6125 * ice_set_loopback - turn on/off loopback mode on underlying PF
6127 * @ena: flag to indicate the on/off setting
6129 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6131 bool if_running = netif_running(vsi->netdev);
6134 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6135 ret = ice_down(vsi);
6137 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6141 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6143 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6151 * ice_set_features - set the netdev feature flags
6152 * @netdev: ptr to the netdev being adjusted
6153 * @features: the feature set that the stack is suggesting
6156 ice_set_features(struct net_device *netdev, netdev_features_t features)
6158 netdev_features_t changed = netdev->features ^ features;
6159 struct ice_netdev_priv *np = netdev_priv(netdev);
6160 struct ice_vsi *vsi = np->vsi;
6161 struct ice_pf *pf = vsi->back;
6164 /* Don't set any netdev advanced features with device in Safe Mode */
6165 if (ice_is_safe_mode(pf)) {
6166 dev_err(ice_pf_to_dev(pf),
6167 "Device is in Safe Mode - not enabling advanced netdev features\n");
6171 /* Do not change setting during reset */
6172 if (ice_is_reset_in_progress(pf->state)) {
6173 dev_err(ice_pf_to_dev(pf),
6174 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6178 /* Multiple features can be changed in one call so keep features in
6179 * separate if/else statements to guarantee each feature is checked
6181 if (changed & NETIF_F_RXHASH)
6182 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6184 ret = ice_set_vlan_features(netdev, features);
6188 /* Turn on receive of FCS aka CRC, and after setting this
6189 * flag the packet data will have the 4 byte CRC appended
6191 if (changed & NETIF_F_RXFCS) {
6192 if ((features & NETIF_F_RXFCS) &&
6193 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6194 dev_err(ice_pf_to_dev(vsi->back),
6195 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6199 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6200 ret = ice_down_up(vsi);
6205 if (changed & NETIF_F_NTUPLE) {
6206 bool ena = !!(features & NETIF_F_NTUPLE);
6208 ice_vsi_manage_fdir(vsi, ena);
6209 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6212 /* don't turn off hw_tc_offload when ADQ is already enabled */
6213 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6214 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6218 if (changed & NETIF_F_HW_TC) {
6219 bool ena = !!(features & NETIF_F_HW_TC);
6221 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6222 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6225 if (changed & NETIF_F_LOOPBACK)
6226 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6232 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6233 * @vsi: VSI to setup VLAN properties for
6235 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6239 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6243 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6247 return ice_vsi_add_vlan_zero(vsi);
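	/* presumably ice_vsi_add_vlan_zero() installs the VLAN 0 filter so
	 * untagged traffic keeps flowing once Rx VLAN filtering is enabled
	 */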
6251 * ice_vsi_cfg_lan - Setup the VSI lan related config
6252 * @vsi: the VSI being configured
6254 * Return 0 on success and negative value on error
6256 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6260 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6261 ice_set_rx_mode(vsi->netdev);
6263 err = ice_vsi_vlan_setup(vsi);
6267 ice_vsi_cfg_dcb_rings(vsi);
6269 err = ice_vsi_cfg_lan_txqs(vsi);
6270 if (!err && ice_is_xdp_ena_vsi(vsi))
6271 err = ice_vsi_cfg_xdp_txqs(vsi);
6273 err = ice_vsi_cfg_rxqs(vsi);
6278 /* THEORY OF MODERATION:
 * The ice driver hardware works differently from the hardware that DIMLIB was
6280 * originally made for. ice hardware doesn't have packet count limits that
6281 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6282 * which is hard-coded to a limit of 250,000 ints/second.
6283 * If not using dynamic moderation, the INTRL value can be modified
6284 * by ethtool rx-usecs-high.
/* The throttle rate for interrupts: basically the worst-case delay before
 * an initial interrupt fires; the value is stored in microseconds.
 */
/* Make a different profile for Rx that doesn't allow quite so aggressive
 * moderation at the high end (it maxes out at 126us, or about 8k interrupts
 * a second).
 */
6297 static const struct ice_dim rx_profile[] = {
6298 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6299 {8}, /* 125,000 ints/s */
6300 {16}, /* 62,500 ints/s */
6301 {62}, /* 16,129 ints/s */
6302 {126} /* 7,936 ints/s */
6305 /* The transmit profile, which has the same sorts of values
6306 * as the previous struct
6308 static const struct ice_dim tx_profile[] = {
6309 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6310 {8}, /* 125,000 ints/s */
	{40},	/* 25,000 ints/s */
6312 {128}, /* 7,812 ints/s */
6313 {256} /* 3,906 ints/s */
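/* A worked example of the tables above (a sketch): each entry is the
 * interrupt interval in microseconds, so the expected rate is roughly
 * 1,000,000 / itr. For tx_profile[2] = {40} that is 25,000 ints/s; for
 * rx_profile[4] = {126} it is ~7,936 ints/s, the ~8k ceiling noted above.
 */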
6316 static void ice_tx_dim_work(struct work_struct *work)
6318 struct ice_ring_container *rc;
6322 dim = container_of(work, struct dim, work);
6325 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6327 /* look up the values in our local table */
6328 itr = tx_profile[dim->profile_ix].itr;
6330 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6331 ice_write_itr(rc, itr);
6333 dim->state = DIM_START_MEASURE;
6336 static void ice_rx_dim_work(struct work_struct *work)
6338 struct ice_ring_container *rc;
6342 dim = container_of(work, struct dim, work);
6345 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6347 /* look up the values in our local table */
6348 itr = rx_profile[dim->profile_ix].itr;
6350 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6351 ice_write_itr(rc, itr);
6353 dim->state = DIM_START_MEASURE;
6356 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6359 * ice_init_moderation - set up interrupt moderation
6360 * @q_vector: the vector containing rings to be configured
6362 * Set up interrupt moderation registers, with the intent to do the right thing
 * when called from reset or from probe, whether or not dynamic moderation is
 * enabled. Take special care to write all the registers in either case, in
 * order to make sure hardware is in a known state.
 */
6368 static void ice_init_moderation(struct ice_q_vector *q_vector)
6370 struct ice_ring_container *rc;
6371 bool tx_dynamic, rx_dynamic;
6374 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6375 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6376 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6378 tx_dynamic = ITR_IS_DYNAMIC(rc);
6380 /* set the initial TX ITR to match the above */
6381 ice_write_itr(rc, tx_dynamic ?
6382 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6385 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6386 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6387 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6389 rx_dynamic = ITR_IS_DYNAMIC(rc);
6391 /* set the initial RX ITR to match the above */
6392 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6395 ice_set_q_vector_intrl(q_vector);
6399 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6400 * @vsi: the VSI being configured
6402 static void ice_napi_enable_all(struct ice_vsi *vsi)
6409 ice_for_each_q_vector(vsi, q_idx) {
6410 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6412 ice_init_moderation(q_vector);
6414 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6415 napi_enable(&q_vector->napi);
6420 * ice_up_complete - Finish the last steps of bringing up a connection
6421 * @vsi: The VSI being configured
6423 * Return 0 on success and negative value on error
6425 static int ice_up_complete(struct ice_vsi *vsi)
6427 struct ice_pf *pf = vsi->back;
6430 ice_vsi_cfg_msix(vsi);
6432 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6433 * Tx queue group list was configured and the context bits were
6434 * programmed using ice_vsi_cfg_txqs
6436 err = ice_vsi_start_all_rx_rings(vsi);
6440 clear_bit(ICE_VSI_DOWN, vsi->state);
6441 ice_napi_enable_all(vsi);
6442 ice_vsi_ena_irq(vsi);
6444 if (vsi->port_info &&
6445 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6446 vsi->netdev && vsi->type == ICE_VSI_PF) {
6447 ice_print_link_msg(vsi, true);
6448 netif_tx_start_all_queues(vsi->netdev);
6449 netif_carrier_on(vsi->netdev);
6450 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6453 /* Perform an initial read of the statistics registers now to
6454 * set the baseline so counters are ready when interface is up
6456 ice_update_eth_stats(vsi);
6458 if (vsi->type == ICE_VSI_PF)
6459 ice_service_task_schedule(pf);
6465 * ice_up - Bring the connection back up after being down
6466 * @vsi: VSI being configured
6468 int ice_up(struct ice_vsi *vsi)
6472 err = ice_vsi_cfg_lan(vsi);
6474 err = ice_up_complete(vsi);
6480 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6481 * @syncp: pointer to u64_stats_sync
6482 * @stats: stats that pkts and bytes count will be taken from
6483 * @pkts: packets stats counter
6484 * @bytes: bytes stats counter
 * This function fetches stats from the ring, taking into account the atomic
 * operations that need to be performed to read u64 values on 32-bit machines.
6490 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6491 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6496 start = u64_stats_fetch_begin(syncp);
6498 *bytes = stats.bytes;
6499 } while (u64_stats_fetch_retry(syncp, start));
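/* Typical call pattern (as used for the Tx/Rx rings below):
 *
 *	ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
 *				     ring->ring_stats->stats, &pkts, &bytes);
 *
 * The begin/retry pair re-reads the counters if a writer bumped the sequence
 * count mid-read; on 64-bit kernels the loop body runs exactly once.
 */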
6503 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6504 * @vsi: the VSI to be updated
6505 * @vsi_stats: the stats struct to be updated
6506 * @rings: rings to work on
6507 * @count: number of rings
6510 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6511 struct rtnl_link_stats64 *vsi_stats,
6512 struct ice_tx_ring **rings, u16 count)
6516 for (i = 0; i < count; i++) {
6517 struct ice_tx_ring *ring;
6518 u64 pkts = 0, bytes = 0;
6520 ring = READ_ONCE(rings[i]);
6521 if (!ring || !ring->ring_stats)
6523 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6524 ring->ring_stats->stats, &pkts,
6526 vsi_stats->tx_packets += pkts;
6527 vsi_stats->tx_bytes += bytes;
6528 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6529 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6530 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6535 * ice_update_vsi_ring_stats - Update VSI stats counters
6536 * @vsi: the VSI to be updated
6538 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6540 struct rtnl_link_stats64 *net_stats, *stats_prev;
6541 struct rtnl_link_stats64 *vsi_stats;
6545 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6549 /* reset non-netdev (extended) stats */
6550 vsi->tx_restart = 0;
6552 vsi->tx_linearize = 0;
6553 vsi->rx_buf_failed = 0;
6554 vsi->rx_page_failed = 0;
6558 /* update Tx rings counters */
6559 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6562 /* update Rx rings counters */
6563 ice_for_each_rxq(vsi, i) {
6564 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6565 struct ice_ring_stats *ring_stats;
6567 ring_stats = ring->ring_stats;
6568 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6569 ring_stats->stats, &pkts,
6571 vsi_stats->rx_packets += pkts;
6572 vsi_stats->rx_bytes += bytes;
6573 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6574 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6577 /* update XDP Tx rings counters */
6578 if (ice_is_xdp_ena_vsi(vsi))
6579 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6584 net_stats = &vsi->net_stats;
6585 stats_prev = &vsi->net_stats_prev;
6587 /* clear prev counters after reset */
6588 if (vsi_stats->tx_packets < stats_prev->tx_packets ||
6589 vsi_stats->rx_packets < stats_prev->rx_packets) {
6590 stats_prev->tx_packets = 0;
6591 stats_prev->tx_bytes = 0;
6592 stats_prev->rx_packets = 0;
6593 stats_prev->rx_bytes = 0;
6596 /* update netdev counters */
6597 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6598 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6599 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6600 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6602 stats_prev->tx_packets = vsi_stats->tx_packets;
6603 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6604 stats_prev->rx_packets = vsi_stats->rx_packets;
6605 stats_prev->rx_bytes = vsi_stats->rx_bytes;
6607 kfree(vsi_stats);
6608 }
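/* Worked example of the reset handling above (illustrative numbers):
 * if the rings were reset while prev recorded tx_packets = 1000 and the
 * fresh sum is only 10, the unsigned delta 10 - 1000 would wrap to a
 * huge value; zeroing the prev counters first means net_stats simply
 * grows by the post-reset count of 10.
 */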
6611 * ice_update_vsi_stats - Update VSI stats counters
6612 * @vsi: the VSI to be updated
6614 void ice_update_vsi_stats(struct ice_vsi *vsi)
6616 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6617 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6618 struct ice_pf *pf = vsi->back;
6620 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6621 test_bit(ICE_CFG_BUSY, pf->state))
6622 return;
6624 /* get stats as recorded by Tx/Rx rings */
6625 ice_update_vsi_ring_stats(vsi);
6627 /* get VSI stats as recorded by the hardware */
6628 ice_update_eth_stats(vsi);
6630 cur_ns->tx_errors = cur_es->tx_errors;
6631 cur_ns->rx_dropped = cur_es->rx_discards;
6632 cur_ns->tx_dropped = cur_es->tx_discards;
6633 cur_ns->multicast = cur_es->rx_multicast;
6635 /* update some more netdev stats if this is main VSI */
6636 if (vsi->type == ICE_VSI_PF) {
6637 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6638 cur_ns->rx_errors = pf->stats.crc_errors +
6639 pf->stats.illegal_bytes +
6640 pf->stats.rx_len_errors +
6641 pf->stats.rx_undersize +
6642 pf->hw_csum_rx_error +
6643 pf->stats.rx_jabber +
6644 pf->stats.rx_fragments +
6645 pf->stats.rx_oversize;
6646 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6647 /* record drops from the port level */
6648 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6653 * ice_update_pf_stats - Update PF port stats counters
6654 * @pf: PF whose stats need to be updated
6656 void ice_update_pf_stats(struct ice_pf *pf)
6658 struct ice_hw_port_stats *prev_ps, *cur_ps;
6659 struct ice_hw *hw = &pf->hw;
6663 port = hw->port_info->lport;
6664 prev_ps = &pf->stats_prev;
6665 cur_ps = &pf->stats;
6667 if (ice_is_reset_in_progress(pf->state))
6668 pf->stat_prev_loaded = false;
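/* Note on the helpers used below: ice_stat_update40()/ice_stat_update32()
 * (implemented elsewhere in the driver, in ice_common.c) read a hardware
 * counter that is only 40/32 bits wide, diff it against *prev and
 * accumulate the delta into *cur; when stat_prev_loaded is false the read
 * only establishes a new baseline. A rough sketch of the idea (not the
 * exact implementation):
 *	new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
 *	if (prev_loaded)
 *		*cur += (new_data - *prev) & (BIT_ULL(40) - 1);
 *	*prev = new_data;
 */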
6670 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6671 &prev_ps->eth.rx_bytes,
6672 &cur_ps->eth.rx_bytes);
6674 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6675 &prev_ps->eth.rx_unicast,
6676 &cur_ps->eth.rx_unicast);
6678 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6679 &prev_ps->eth.rx_multicast,
6680 &cur_ps->eth.rx_multicast);
6682 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6683 &prev_ps->eth.rx_broadcast,
6684 &cur_ps->eth.rx_broadcast);
6686 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6687 &prev_ps->eth.rx_discards,
6688 &cur_ps->eth.rx_discards);
6690 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6691 &prev_ps->eth.tx_bytes,
6692 &cur_ps->eth.tx_bytes);
6694 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6695 &prev_ps->eth.tx_unicast,
6696 &cur_ps->eth.tx_unicast);
6698 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6699 &prev_ps->eth.tx_multicast,
6700 &cur_ps->eth.tx_multicast);
6702 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6703 &prev_ps->eth.tx_broadcast,
6704 &cur_ps->eth.tx_broadcast);
6706 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6707 &prev_ps->tx_dropped_link_down,
6708 &cur_ps->tx_dropped_link_down);
6710 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6711 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6713 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6714 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6716 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6717 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6719 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6720 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6722 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6723 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6725 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6726 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6728 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6729 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6731 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6732 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6734 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6735 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6737 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6738 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6740 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6741 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6743 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6744 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6746 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6747 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6749 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6750 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6752 fd_ctr_base = hw->fd_ctr_base;
6754 ice_stat_update40(hw,
6755 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6756 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6757 &cur_ps->fd_sb_match);
6758 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6759 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6761 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6762 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6764 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6765 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6767 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6768 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6770 ice_update_dcb_stats(pf);
6772 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6773 &prev_ps->crc_errors, &cur_ps->crc_errors);
6775 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6776 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6778 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6779 &prev_ps->mac_local_faults,
6780 &cur_ps->mac_local_faults);
6782 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6783 &prev_ps->mac_remote_faults,
6784 &cur_ps->mac_remote_faults);
6786 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6787 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6789 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6790 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6792 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6793 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6795 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6796 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6798 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6799 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6801 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6803 pf->stat_prev_loaded = true;
6804 }
6807 * ice_get_stats64 - get statistics for network device structure
6808 * @netdev: network interface device structure
6809 * @stats: main device statistics structure
6812 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6814 struct ice_netdev_priv *np = netdev_priv(netdev);
6815 struct rtnl_link_stats64 *vsi_stats;
6816 struct ice_vsi *vsi = np->vsi;
6818 vsi_stats = &vsi->net_stats;
6820 if (!vsi->num_txq || !vsi->num_rxq)
6821 return;
6823 /* netdev packet/byte stats come from ring counters. These are obtained
6824 * by summing up the ring counters (done by ice_update_vsi_ring_stats).
6825 * But, only call the update routine and read the registers if the VSI is
6826 * not down.
6827 */
6828 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6829 ice_update_vsi_ring_stats(vsi);
6830 stats->tx_packets = vsi_stats->tx_packets;
6831 stats->tx_bytes = vsi_stats->tx_bytes;
6832 stats->rx_packets = vsi_stats->rx_packets;
6833 stats->rx_bytes = vsi_stats->rx_bytes;
6835 /* The rest of the stats can be read from the hardware but instead we
6836 * just return values that the watchdog task has already obtained from
6837 * the hardware.
6838 */
6839 stats->multicast = vsi_stats->multicast;
6840 stats->tx_errors = vsi_stats->tx_errors;
6841 stats->tx_dropped = vsi_stats->tx_dropped;
6842 stats->rx_errors = vsi_stats->rx_errors;
6843 stats->rx_dropped = vsi_stats->rx_dropped;
6844 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6845 stats->rx_length_errors = vsi_stats->rx_length_errors;
6846 }
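/* This routine is exposed to the stack via the driver's net_device_ops
 * table (ice_netdev_ops) as the .ndo_get_stats64 callback, i.e. roughly:
 *	.ndo_get_stats64 = ice_get_stats64,
 * so tools such as "ip -s link" read the values accumulated here.
 */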
6849 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6850 * @vsi: VSI having NAPI disabled
6852 static void ice_napi_disable_all(struct ice_vsi *vsi)
6859 ice_for_each_q_vector(vsi, q_idx) {
6860 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6862 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6863 napi_disable(&q_vector->napi);
6865 cancel_work_sync(&q_vector->tx.dim.work);
6866 cancel_work_sync(&q_vector->rx.dim.work);
6871 * ice_down - Shutdown the connection
6872 * @vsi: The VSI being stopped
6874 * The caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
6876 int ice_down(struct ice_vsi *vsi)
6878 int i, tx_err, rx_err, vlan_err = 0;
6880 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6882 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6883 vlan_err = ice_vsi_del_vlan_zero(vsi);
6884 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6885 netif_carrier_off(vsi->netdev);
6886 netif_tx_disable(vsi->netdev);
6887 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6888 ice_eswitch_stop_all_tx_queues(vsi->back);
6891 ice_vsi_dis_irq(vsi);
6893 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6894 if (tx_err)
6895 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6896 vsi->vsi_num, tx_err);
6897 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6898 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6899 if (tx_err)
6900 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6901 vsi->vsi_num, tx_err);
6902 }
6904 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6905 if (rx_err)
6906 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6907 vsi->vsi_num, rx_err);
6909 ice_napi_disable_all(vsi);
6911 ice_for_each_txq(vsi, i)
6912 ice_clean_tx_ring(vsi->tx_rings[i]);
6914 if (ice_is_xdp_ena_vsi(vsi))
6915 ice_for_each_xdp_txq(vsi, i)
6916 ice_clean_tx_ring(vsi->xdp_rings[i]);
6918 ice_for_each_rxq(vsi, i)
6919 ice_clean_rx_ring(vsi->rx_rings[i]);
6921 if (tx_err || rx_err || vlan_err) {
6922 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6923 vsi->vsi_num, vsi->vsw->sw_id);
6924 return -EIO;
6925 }
6927 return 0;
6928 }
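/* Caller-side sketch: ice_down_up() below shows the expected pattern.
 * The DOWN bit is set before the call, which is what the WARN_ON at the
 * top of ice_down() asserts:
 *	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
 *		return 0;
 *	ret = ice_down(vsi);
 */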
6931 * ice_down_up - shutdown the VSI connection and bring it up
6932 * @vsi: the VSI to be reconnected
6934 int ice_down_up(struct ice_vsi *vsi)
6938 /* if DOWN already set, nothing to do */
6939 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
6940 return 0;
6942 ret = ice_down(vsi);
6943 if (ret)
6944 return ret;
6946 ret = ice_up(vsi);
6947 if (ret) {
6948 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
6949 return ret;
6950 }
6952 return 0;
6953 }
6956 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6957 * @vsi: VSI having resources allocated
6959 * Return 0 on success, negative on failure
6961 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6965 if (!vsi->num_txq) {
6966 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6967 vsi->vsi_num);
6968 return -EINVAL;
6969 }
6971 ice_for_each_txq(vsi, i) {
6972 struct ice_tx_ring *ring = vsi->tx_rings[i];
6978 ring->netdev = vsi->netdev;
6979 err = ice_setup_tx_ring(ring);
6988 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6989 * @vsi: VSI having resources allocated
6991 * Return 0 on success, negative on failure
6993 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6997 if (!vsi->num_rxq) {
6998 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6999 vsi->vsi_num);
7000 return -EINVAL;
7001 }
7003 ice_for_each_rxq(vsi, i) {
7004 struct ice_rx_ring *ring = vsi->rx_rings[i];
7010 ring->netdev = vsi->netdev;
7011 err = ice_setup_rx_ring(ring);
7020 * ice_vsi_open_ctrl - open control VSI for use
7021 * @vsi: the VSI to open
7023 * Initialization of the Control VSI
7025 * Returns 0 on success, negative value on error
7027 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7029 char int_name[ICE_INT_NAME_STR_LEN];
7030 struct ice_pf *pf = vsi->back;
7034 dev = ice_pf_to_dev(pf);
7035 /* allocate descriptors */
7036 err = ice_vsi_setup_tx_rings(vsi);
7037 if (err)
7038 goto err_setup_tx;
7040 err = ice_vsi_setup_rx_rings(vsi);
7041 if (err)
7042 goto err_setup_rx;
7044 err = ice_vsi_cfg_lan(vsi);
7045 if (err)
7046 goto err_setup_rx;
7048 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7049 dev_driver_string(dev), dev_name(dev));
7050 err = ice_vsi_req_irq_msix(vsi, int_name);
7051 if (err)
7052 goto err_setup_rx;
7054 ice_vsi_cfg_msix(vsi);
7056 err = ice_vsi_start_all_rx_rings(vsi);
7057 if (err)
7058 goto err_up_complete;
7060 clear_bit(ICE_VSI_DOWN, vsi->state);
7061 ice_vsi_ena_irq(vsi);
7063 return 0;
7065 err_up_complete:
7066 ice_down(vsi);
7067 err_setup_rx:
7068 ice_vsi_free_rx_rings(vsi);
7069 err_setup_tx:
7070 ice_vsi_free_tx_rings(vsi);
7072 return err;
7073 }
7076 * ice_vsi_open - Called when a network interface is made active
7077 * @vsi: the VSI to open
7079 * Initialization of the VSI
7081 * Returns 0 on success, negative value on error
7083 int ice_vsi_open(struct ice_vsi *vsi)
7085 char int_name[ICE_INT_NAME_STR_LEN];
7086 struct ice_pf *pf = vsi->back;
7089 /* allocate descriptors */
7090 err = ice_vsi_setup_tx_rings(vsi);
7091 if (err)
7092 goto err_setup_tx;
7094 err = ice_vsi_setup_rx_rings(vsi);
7095 if (err)
7096 goto err_setup_rx;
7098 err = ice_vsi_cfg_lan(vsi);
7099 if (err)
7100 goto err_setup_rx;
7102 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7103 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7104 err = ice_vsi_req_irq_msix(vsi, int_name);
7105 if (err)
7106 goto err_setup_rx;
7108 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7110 if (vsi->type == ICE_VSI_PF) {
7111 /* Notify the stack of the actual queue counts. */
7112 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7113 if (err)
7114 goto err_set_qs;
7116 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7117 if (err)
7118 goto err_set_qs;
7119 }
7121 err = ice_up_complete(vsi);
7122 if (err)
7123 goto err_up_complete;
7125 return 0;
7127 err_up_complete:
7128 ice_down(vsi);
7129 err_set_qs:
7130 ice_vsi_free_irq(vsi);
7131 err_setup_rx:
7132 ice_vsi_free_rx_rings(vsi);
7133 err_setup_tx:
7134 ice_vsi_free_tx_rings(vsi);
7136 return err;
7137 }
7140 * ice_vsi_release_all - Delete all VSIs
7141 * @pf: PF from which all VSIs are being removed
7143 static void ice_vsi_release_all(struct ice_pf *pf)
7150 ice_for_each_vsi(pf, i) {
7151 if (!pf->vsi[i])
7152 continue;
7154 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7155 continue;
7157 err = ice_vsi_release(pf->vsi[i]);
7159 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7160 i, err, pf->vsi[i]->vsi_num);
7165 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7166 * @pf: pointer to the PF instance
7167 * @type: VSI type to rebuild
7169 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7171 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7173 struct device *dev = ice_pf_to_dev(pf);
7176 ice_for_each_vsi(pf, i) {
7177 struct ice_vsi *vsi = pf->vsi[i];
7179 if (!vsi || vsi->type != type)
7180 continue;
7182 /* rebuild the VSI */
7183 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7185 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7186 err, vsi->idx, ice_vsi_type_str(type));
7190 /* replay filters for the VSI */
7191 err = ice_replay_vsi(&pf->hw, vsi->idx);
7193 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7194 err, vsi->idx, ice_vsi_type_str(type));
7198 /* Re-map HW VSI number, using VSI handle that has been
7199 * previously validated in ice_replay_vsi() call above
7201 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7203 /* enable the VSI */
7204 err = ice_ena_vsi(vsi, false);
7206 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7207 err, vsi->idx, ice_vsi_type_str(type));
7211 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7212 ice_vsi_type_str(type));
7213 }
7215 return 0;
7216 }
7219 * ice_update_pf_netdev_link - Update PF netdev link status
7220 * @pf: pointer to the PF instance
7222 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7227 ice_for_each_vsi(pf, i) {
7228 struct ice_vsi *vsi = pf->vsi[i];
7230 if (!vsi || vsi->type != ICE_VSI_PF)
7233 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7234 if (link_up) {
7235 netif_carrier_on(pf->vsi[i]->netdev);
7236 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7237 } else {
7238 netif_carrier_off(pf->vsi[i]->netdev);
7239 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7240 }
7241 }
7242 }
7245 * ice_rebuild - rebuild after reset
7246 * @pf: PF to rebuild
7247 * @reset_type: type of reset
7249 * Do not rebuild VF VSIs in this flow because that is already handled via
7250 * ice_reset_all_vfs(). The requirements for resetting a VF after a
7251 * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want
7252 * to reset/rebuild all the VF VSIs twice.
7254 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7256 struct device *dev = ice_pf_to_dev(pf);
7257 struct ice_hw *hw = &pf->hw;
7261 if (test_bit(ICE_DOWN, pf->state))
7262 goto clear_recovery;
7264 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7266 #define ICE_EMP_RESET_SLEEP_MS 5000
7267 if (reset_type == ICE_RESET_EMPR) {
7268 /* If an EMP reset has occurred, any previously pending flash
7269 * update will have completed. We no longer know whether or
7270 * not the NVM update EMP reset is restricted.
7272 pf->fw_emp_reset_disabled = false;
7274 msleep(ICE_EMP_RESET_SLEEP_MS);
7277 err = ice_init_all_ctrlq(hw);
7279 dev_err(dev, "control queues init failed %d\n", err);
7280 goto err_init_ctrlq;
7283 /* if DDP was previously loaded successfully */
7284 if (!ice_is_safe_mode(pf)) {
7285 /* reload the SW DB of filter tables */
7286 if (reset_type == ICE_RESET_PFR)
7287 ice_fill_blk_tbls(hw);
7289 /* Reload DDP Package after CORER/GLOBR reset */
7290 ice_load_pkg(NULL, pf);
7293 err = ice_clear_pf_cfg(hw);
7295 dev_err(dev, "clear PF configuration failed %d\n", err);
7296 goto err_init_ctrlq;
7299 ice_clear_pxe_mode(hw);
7301 err = ice_init_nvm(hw);
7303 dev_err(dev, "ice_init_nvm failed %d\n", err);
7304 goto err_init_ctrlq;
7307 err = ice_get_caps(hw);
7309 dev_err(dev, "ice_get_caps failed %d\n", err);
7310 goto err_init_ctrlq;
7313 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7315 dev_err(dev, "set_mac_cfg failed %d\n", err);
7316 goto err_init_ctrlq;
7319 dvm = ice_is_dvm_ena(hw);
7321 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7322 if (err)
7323 goto err_init_ctrlq;
7325 err = ice_sched_init_port(hw->port_info);
7326 if (err)
7327 goto err_sched_init_port;
7329 /* start misc vector */
7330 err = ice_req_irq_msix_misc(pf);
7332 dev_err(dev, "misc vector setup failed: %d\n", err);
7333 goto err_sched_init_port;
7336 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7337 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7338 if (!rd32(hw, PFQF_FD_SIZE)) {
7339 u16 unused, guar, b_effort;
7341 guar = hw->func_caps.fd_fltr_guar;
7342 b_effort = hw->func_caps.fd_fltr_best_effort;
7344 /* force guaranteed filter pool for PF */
7345 ice_alloc_fd_guar_item(hw, &unused, guar);
7346 /* force shared filter pool for PF */
7347 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7351 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7352 ice_dcb_rebuild(pf);
7354 /* If the PF previously had enabled PTP, PTP init needs to happen before
7355 * the VSI rebuild. If not, this causes the PTP link status events to
7356 * fail.
7357 */
7358 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7359 ice_ptp_reset(pf);
7361 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7362 ice_gnss_init(pf);
7364 /* rebuild PF VSI */
7365 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7367 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7368 goto err_vsi_rebuild;
7371 /* configure PTP timestamping after VSI rebuild */
7372 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7373 ice_ptp_cfg_timestamp(pf, false);
7375 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7377 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7378 goto err_vsi_rebuild;
7381 if (reset_type == ICE_RESET_PFR) {
7382 err = ice_rebuild_channels(pf);
7384 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7386 goto err_vsi_rebuild;
7390 /* If Flow Director is active */
7391 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7392 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7394 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7395 goto err_vsi_rebuild;
7398 /* replay HW Flow Director recipes */
7400 ice_fdir_replay_flows(hw);
7402 /* replay Flow Director filters */
7403 ice_fdir_replay_fltrs(pf);
7405 ice_rebuild_arfs(pf);
7408 ice_update_pf_netdev_link(pf);
7410 /* tell the firmware we are up */
7411 err = ice_send_version(pf);
7413 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7415 goto err_vsi_rebuild;
7418 ice_replay_post(hw);
7420 /* if we get here, reset flow is successful */
7421 clear_bit(ICE_RESET_FAILED, pf->state);
7423 ice_plug_aux_dev(pf);
7424 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7425 ice_lag_rebuild(pf);
7429 err_sched_init_port:
7430 ice_sched_cleanup_all(hw);
7431 err_init_ctrlq:
7432 ice_shutdown_all_ctrlq(hw);
7433 set_bit(ICE_RESET_FAILED, pf->state);
7435 /* set this bit in PF state to control service task scheduling */
7436 set_bit(ICE_NEEDS_RESTART, pf->state);
7437 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7441 * ice_change_mtu - NDO callback to change the MTU
7442 * @netdev: network interface device structure
7443 * @new_mtu: new value for maximum frame size
7445 * Returns 0 on success, negative on failure
7447 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7449 struct ice_netdev_priv *np = netdev_priv(netdev);
7450 struct ice_vsi *vsi = np->vsi;
7451 struct ice_pf *pf = vsi->back;
7452 struct bpf_prog *prog;
7456 if (new_mtu == (int)netdev->mtu) {
7457 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7461 prog = vsi->xdp_prog;
7462 if (prog && !prog->aux->xdp_has_frags) {
7463 int frame_size = ice_max_xdp_frame_size(vsi);
7465 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7466 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7467 frame_size - ICE_ETH_PKT_HDR_PAD);
7470 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7471 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7472 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7473 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7478 /* if a reset is in progress, wait for some time for it to complete */
7479 do {
7480 if (ice_is_reset_in_progress(pf->state)) {
7481 count++;
7482 usleep_range(1000, 2000);
7483 } else {
7484 break;
7485 }
7487 } while (count < 100);
7489 if (count == 100) {
7490 netdev_err(netdev, "can't change MTU. Device is busy\n");
7491 return -EBUSY;
7492 }
7494 netdev->mtu = (unsigned int)new_mtu;
7495 err = ice_down_up(vsi);
7496 if (err)
7497 return err;
7499 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7500 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7502 return err;
7503 }
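/* Userspace trigger sketch (hypothetical interface name): a command
 * such as
 *	ip link set dev eth0 mtu 3000
 * reaches this handler via dev_set_mtu() and the .ndo_change_mtu hook,
 * after the core has range-checked new_mtu against netdev->min_mtu and
 * netdev->max_mtu.
 */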
7506 * ice_eth_ioctl - Access the hwtstamp interface
7507 * @netdev: network interface device structure
7508 * @ifr: interface request data
7509 * @cmd: ioctl command
7511 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7513 struct ice_netdev_priv *np = netdev_priv(netdev);
7514 struct ice_pf *pf = np->vsi->back;
7516 switch (cmd) {
7517 case SIOCGHWTSTAMP:
7518 return ice_ptp_get_ts_config(pf, ifr);
7519 case SIOCSHWTSTAMP:
7520 return ice_ptp_set_ts_config(pf, ifr);
7521 default:
7522 return -EOPNOTSUPP;
7523 }
7524 }
7527 * ice_aq_str - convert AQ err code to a string
7528 * @aq_err: the AQ error code to convert
7530 const char *ice_aq_str(enum ice_aq_err aq_err)
7531 {
7532 switch (aq_err) {
7533 case ICE_AQ_RC_OK:
7534 return "OK";
7535 case ICE_AQ_RC_EPERM:
7536 return "ICE_AQ_RC_EPERM";
7537 case ICE_AQ_RC_ENOENT:
7538 return "ICE_AQ_RC_ENOENT";
7539 case ICE_AQ_RC_ENOMEM:
7540 return "ICE_AQ_RC_ENOMEM";
7541 case ICE_AQ_RC_EBUSY:
7542 return "ICE_AQ_RC_EBUSY";
7543 case ICE_AQ_RC_EEXIST:
7544 return "ICE_AQ_RC_EEXIST";
7545 case ICE_AQ_RC_EINVAL:
7546 return "ICE_AQ_RC_EINVAL";
7547 case ICE_AQ_RC_ENOSPC:
7548 return "ICE_AQ_RC_ENOSPC";
7549 case ICE_AQ_RC_ENOSYS:
7550 return "ICE_AQ_RC_ENOSYS";
7551 case ICE_AQ_RC_EMODE:
7552 return "ICE_AQ_RC_EMODE";
7553 case ICE_AQ_RC_ENOSEC:
7554 return "ICE_AQ_RC_ENOSEC";
7555 case ICE_AQ_RC_EBADSIG:
7556 return "ICE_AQ_RC_EBADSIG";
7557 case ICE_AQ_RC_ESVN:
7558 return "ICE_AQ_RC_ESVN";
7559 case ICE_AQ_RC_EBADMAN:
7560 return "ICE_AQ_RC_EBADMAN";
7561 case ICE_AQ_RC_EBADBUF:
7562 return "ICE_AQ_RC_EBADBUF";
7565 return "ICE_AQ_RC_UNKNOWN";
7569 * ice_set_rss_lut - Set RSS LUT
7570 * @vsi: Pointer to VSI structure
7571 * @lut: Lookup table
7572 * @lut_size: Lookup table size
7574 * Returns 0 on success, negative on failure
7576 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7578 struct ice_aq_get_set_rss_lut_params params = {};
7579 struct ice_hw *hw = &vsi->back->hw;
7585 params.vsi_handle = vsi->idx;
7586 params.lut_size = lut_size;
7587 params.lut_type = vsi->rss_lut_type;
7590 status = ice_aq_set_rss_lut(hw, &params);
7591 if (status)
7592 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7593 status, ice_aq_str(hw->adminq.sq_last_status));
7595 return status;
7596 }
7599 * ice_set_rss_key - Set RSS key
7600 * @vsi: Pointer to the VSI structure
7601 * @seed: RSS hash seed
7603 * Returns 0 on success, negative on failure
7605 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7607 struct ice_hw *hw = &vsi->back->hw;
7613 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7614 if (status)
7615 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7616 status, ice_aq_str(hw->adminq.sq_last_status));
7618 return status;
7619 }
7622 * ice_get_rss_lut - Get RSS LUT
7623 * @vsi: Pointer to VSI structure
7624 * @lut: Buffer to store the lookup table entries
7625 * @lut_size: Size of buffer to store the lookup table entries
7627 * Returns 0 on success, negative on failure
7629 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7631 struct ice_aq_get_set_rss_lut_params params = {};
7632 struct ice_hw *hw = &vsi->back->hw;
7638 params.vsi_handle = vsi->idx;
7639 params.lut_size = lut_size;
7640 params.lut_type = vsi->rss_lut_type;
7643 status = ice_aq_get_rss_lut(hw, &params);
7644 if (status)
7645 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7646 status, ice_aq_str(hw->adminq.sq_last_status));
7648 return status;
7649 }
7652 * ice_get_rss_key - Get RSS key
7653 * @vsi: Pointer to VSI structure
7654 * @seed: Buffer to store the key in
7656 * Returns 0 on success, negative on failure
7658 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7660 struct ice_hw *hw = &vsi->back->hw;
7666 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7667 if (status)
7668 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7669 status, ice_aq_str(hw->adminq.sq_last_status));
7671 return status;
7672 }
7675 * ice_bridge_getlink - Get the hardware bridge mode
7678 * @seq: RTNL message seq
7679 * @dev: the netdev being configured
7680 * @filter_mask: filter mask passed in
7681 * @nlflags: netlink flags passed in
7683 * Return the bridge mode (VEB/VEPA)
7686 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7687 struct net_device *dev, u32 filter_mask, int nlflags)
7689 struct ice_netdev_priv *np = netdev_priv(dev);
7690 struct ice_vsi *vsi = np->vsi;
7691 struct ice_pf *pf = vsi->back;
7694 bmode = pf->first_sw->bridge_mode;
7696 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7697 filter_mask, NULL);
7698 }
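/* Userspace view sketch (hypothetical interface name):
 *	bridge link show dev eth0
 * reports "hwmode veb" or "hwmode vepa" based on the bridge mode
 * returned from here via ndo_dflt_bridge_getlink().
 */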
7701 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7702 * @vsi: Pointer to VSI structure
7703 * @bmode: Hardware bridge mode (VEB/VEPA)
7705 * Returns 0 on success, negative on failure
7707 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7709 struct ice_aqc_vsi_props *vsi_props;
7710 struct ice_hw *hw = &vsi->back->hw;
7711 struct ice_vsi_ctx *ctxt;
7714 vsi_props = &vsi->info;
7716 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7720 ctxt->info = vsi->info;
7722 if (bmode == BRIDGE_MODE_VEB)
7723 /* change from VEPA to VEB mode */
7724 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7726 /* change from VEB to VEPA mode */
7727 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7728 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7730 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7732 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7733 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7736 /* Update sw flags for book keeping */
7737 vsi_props->sw_flags = ctxt->info.sw_flags;
7745 * ice_bridge_setlink - Set the hardware bridge mode
7746 * @dev: the netdev being configured
7747 * @nlh: RTNL message
7748 * @flags: bridge setlink flags
7749 * @extack: netlink extended ack
7751 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7752 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
7753 * not already set) for all VSIs connected to this switch, and also updates the
7754 * unicast switch filter rules for the corresponding switch of the netdev.
7757 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7758 u16 __always_unused flags,
7759 struct netlink_ext_ack __always_unused *extack)
7761 struct ice_netdev_priv *np = netdev_priv(dev);
7762 struct ice_pf *pf = np->vsi->back;
7763 struct nlattr *attr, *br_spec;
7764 struct ice_hw *hw = &pf->hw;
7765 struct ice_sw *pf_sw;
7766 int rem, v, err = 0;
7768 pf_sw = pf->first_sw;
7769 /* find the attribute in the netlink message */
7770 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7772 nla_for_each_nested(attr, br_spec, rem) {
7775 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7777 mode = nla_get_u16(attr);
7778 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7780 /* Continue if bridge mode is not being flipped */
7781 if (mode == pf_sw->bridge_mode)
7783 /* Iterates through the PF VSI list and update the loopback
7786 ice_for_each_vsi(pf, v) {
7789 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7794 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7795 /* Update the unicast switch filter rules for the corresponding
7796 * switch of the netdev
7798 err = ice_update_sw_rule_bridge_mode(hw);
7800 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7802 ice_aq_str(hw->adminq.sq_last_status));
7803 /* revert hw->evb_veb */
7804 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7808 pf_sw->bridge_mode = mode;
7809 }
7811 return err;
7812 }
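/* Setlink trigger sketch (hypothetical interface name):
 *	bridge link set dev eth0 hwmode vepa
 * delivers the IFLA_BRIDGE_MODE attribute parsed above, switching the
 * uplink between VEB (local loopback allowed) and VEPA (traffic
 * reflected via the adjacent switch).
 */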
7815 * ice_tx_timeout - Respond to a Tx Hang
7816 * @netdev: network interface device structure
7817 * @txqueue: Tx queue
7819 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7821 struct ice_netdev_priv *np = netdev_priv(netdev);
7822 struct ice_tx_ring *tx_ring = NULL;
7823 struct ice_vsi *vsi = np->vsi;
7824 struct ice_pf *pf = vsi->back;
7827 pf->tx_timeout_count++;
7829 /* Check if PFC is enabled for the TC to which the queue belongs.
7830 * If yes, then the Tx timeout is not caused by a hung queue; there is
7831 * no need to reset and rebuild.
7832 */
7833 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7834 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7835 txqueue);
7836 return;
7837 }
7839 /* now that we have an index, find the tx_ring struct */
7840 ice_for_each_txq(vsi, i)
7841 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7842 if (txqueue == vsi->tx_rings[i]->q_index) {
7843 tx_ring = vsi->tx_rings[i];
7844 break;
7845 }
7847 /* Reset recovery level if enough time has elapsed after last timeout.
7848 * Also ensure no new reset action happens before next timeout period.
7850 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7851 pf->tx_timeout_recovery_level = 1;
7852 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7853 netdev->watchdog_timeo)))
7854 return; /* don't do any new action before the next timeout */
7856 if (tx_ring) {
7857 struct ice_hw *hw = &pf->hw;
7860 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7861 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7862 /* Read interrupt register */
7863 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7865 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7866 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7867 head, tx_ring->next_to_use, val);
7868 }
7870 pf->tx_timeout_last_recovery = jiffies;
7871 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7872 pf->tx_timeout_recovery_level, txqueue);
7874 switch (pf->tx_timeout_recovery_level) {
7875 case 1:
7876 set_bit(ICE_PFR_REQ, pf->state);
7877 break;
7878 case 2:
7879 set_bit(ICE_CORER_REQ, pf->state);
7880 break;
7881 case 3:
7882 set_bit(ICE_GLOBR_REQ, pf->state);
7883 break;
7884 default:
7885 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7886 set_bit(ICE_DOWN, pf->state);
7887 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7888 set_bit(ICE_SERVICE_DIS, pf->state);
7892 ice_service_task_schedule(pf);
7893 pf->tx_timeout_recovery_level++;
7897 * ice_setup_tc_cls_flower - flower classifier offloads
7898 * @np: net device to configure
7899 * @filter_dev: device on which filter is added
7900 * @cls_flower: offload data
7903 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7904 struct net_device *filter_dev,
7905 struct flow_cls_offload *cls_flower)
7907 struct ice_vsi *vsi = np->vsi;
7909 if (cls_flower->common.chain_index)
7912 switch (cls_flower->command) {
7913 case FLOW_CLS_REPLACE:
7914 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7915 case FLOW_CLS_DESTROY:
7916 return ice_del_cls_flower(vsi, cls_flower);
7917 default:
7918 return -EINVAL;
7919 }
7920 }
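/* Offload trigger sketch (hypothetical names and addresses): a flower
 * filter such as
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		dst_ip 192.168.0.10 skip_sw hw_tc 1
 * arrives here as FLOW_CLS_REPLACE and is programmed into hardware by
 * ice_add_cls_flower().
 */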
7923 * ice_setup_tc_block_cb - callback handler registered for TC block
7924 * @type: TC SETUP type
7925 * @type_data: TC flower offload data that contains user input
7926 * @cb_priv: netdev private data
7929 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7931 struct ice_netdev_priv *np = cb_priv;
7934 case TC_SETUP_CLSFLOWER:
7935 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7936 type_data);
7937 default:
7938 return -EOPNOTSUPP;
7939 }
7940 }
7943 * ice_validate_mqprio_qopt - Validate TCF input parameters
7944 * @vsi: Pointer to VSI
7945 * @mqprio_qopt: input parameters for mqprio queue configuration
7947 * This function validates MQPRIO params, such as qcount (power of 2 wherever
7948 * needed), and makes sure the user doesn't specify a qcount or BW rate limit
7949 * for TCs beyond "num_tc"
7952 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7953 struct tc_mqprio_qopt_offload *mqprio_qopt)
7955 int non_power_of_2_qcount = 0;
7956 struct ice_pf *pf = vsi->back;
7957 int max_rss_q_cnt = 0;
7958 u64 sum_min_rate = 0;
7963 if (vsi->type != ICE_VSI_PF)
7966 if (mqprio_qopt->qopt.offset[0] != 0 ||
7967 mqprio_qopt->qopt.num_tc < 1 ||
7968 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7971 dev = ice_pf_to_dev(pf);
7972 vsi->ch_rss_size = 0;
7973 num_tc = mqprio_qopt->qopt.num_tc;
7974 speed = ice_get_link_speed_kbps(vsi);
7976 for (i = 0; num_tc; i++) {
7977 int qcount = mqprio_qopt->qopt.count[i];
7978 u64 max_rate, min_rate, rem;
7983 if (is_power_of_2(qcount)) {
7984 if (non_power_of_2_qcount &&
7985 qcount > non_power_of_2_qcount) {
7986 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7987 qcount, non_power_of_2_qcount);
7990 if (qcount > max_rss_q_cnt)
7991 max_rss_q_cnt = qcount;
7993 if (non_power_of_2_qcount &&
7994 qcount != non_power_of_2_qcount) {
7995 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7996 qcount, non_power_of_2_qcount);
7999 if (qcount < max_rss_q_cnt) {
8000 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8001 qcount, max_rss_q_cnt);
8004 max_rss_q_cnt = qcount;
8005 non_power_of_2_qcount = qcount;
8008 /* TC command takes input in K/M/Gbps or K/M/Gbit etc. but
8009 * converts the bandwidth rate limit into Bytes/s when
8010 * passing it down to the driver. So convert input bandwidth
8011 * from Bytes/s to Kbps
8012 */
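/* Worked example (illustrative): a 1 Gbit/s limit arrives from the
 * stack as 125000000 Bytes/s; div_u64() by ICE_BW_KBPS_DIVISOR (125)
 * yields 1000000 Kbps, which is what the range checks below compare
 * against ice_get_link_speed_kbps().
 */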
8013 max_rate = mqprio_qopt->max_rate[i];
8014 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8016 /* min_rate is minimum guaranteed rate and it can't be zero */
8017 min_rate = mqprio_qopt->min_rate[i];
8018 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8019 sum_min_rate += min_rate;
8021 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8022 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8023 min_rate, ICE_MIN_BW_LIMIT);
8027 if (max_rate && max_rate > speed) {
8028 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8029 i, max_rate, speed);
8033 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8035 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8036 i, ICE_MIN_BW_LIMIT);
8040 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8042 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8043 i, ICE_MIN_BW_LIMIT);
8047 /* min_rate can't be more than max_rate, except when max_rate
8048 * is zero (implies max_rate sought is max line rate). In such
8049 * a case min_rate can be more than max.
8051 if (max_rate && min_rate > max_rate) {
8052 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8053 min_rate, max_rate);
8057 if (i >= mqprio_qopt->qopt.num_tc - 1)
8058 break;
8059 if (mqprio_qopt->qopt.offset[i + 1] !=
8060 (mqprio_qopt->qopt.offset[i] + qcount))
8061 return -EINVAL;
8062 }
8063 if (vsi->num_rxq <
8064 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8065 return -EINVAL;
8066 if (vsi->num_txq <
8067 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8068 return -EINVAL;
8070 if (sum_min_rate && sum_min_rate > (u64)speed) {
8071 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8072 sum_min_rate, speed);
8073 return -EINVAL;
8074 }
8076 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8077 vsi->ch_rss_size = max_rss_q_cnt;
8079 return 0;
8080 }
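/* Config sketch exercising this validation (hypothetical interface
 * name and rates):
 *	tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 \
 *		queues 4@0 8@4 hw 1 mode channel \
 *		shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 4Gbit 5Gbit
 * i.e. per-TC queue counts/offsets plus optional min/max rates, which
 * reach this function in the tc_mqprio_qopt_offload structure.
 */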
8083 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8084 * @pf: ptr to PF device
8087 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8089 struct device *dev = ice_pf_to_dev(pf);
8094 if (!(vsi->num_gfltr || vsi->num_bfltr))
8095 return -EINVAL;
8098 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8099 struct ice_fd_hw_prof *prof;
8103 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8104 hw->fdir_prof[flow]->cnt))
8105 continue;
8107 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8108 enum ice_flow_priority prio;
8111 /* add this VSI to FDir profile for this flow */
8112 prio = ICE_FLOW_PRIO_NORMAL;
8113 prof = hw->fdir_prof[flow];
8114 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8115 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8116 prof->vsi_h[0], vsi->idx,
8117 prio, prof->fdir_seg[tun],
8118 &entry_h);
8119 if (status) {
8120 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8121 vsi->idx, flow);
8122 continue;
8123 }
8125 prof->entry_h[prof->cnt][tun] = entry_h;
8128 /* store VSI for filter replay and delete */
8129 prof->vsi_h[prof->cnt] = vsi->idx;
8133 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8138 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8144 * ice_add_channel - add a channel by adding VSI
8145 * @pf: ptr to PF device
8146 * @sw_id: underlying HW switching element ID
8147 * @ch: ptr to channel structure
8149 * Add a channel (VSI) using add_vsi and queue_map
8151 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8153 struct device *dev = ice_pf_to_dev(pf);
8154 struct ice_vsi *vsi;
8156 if (ch->type != ICE_VSI_CHNL) {
8157 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8161 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8162 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8163 dev_err(dev, "create chnl VSI failure\n");
8167 ice_add_vsi_to_fdir(pf, vsi);
8170 ch->vsi_num = vsi->vsi_num;
8171 ch->info.mapping_flags = vsi->info.mapping_flags;
8173 /* set the back pointer of channel for newly created VSI */
8174 vsi->ch = ch;
8176 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8177 sizeof(vsi->info.q_mapping));
8178 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8179 sizeof(vsi->info.tc_mapping));
8181 return 0;
8182 }
8185 * ice_chnl_cfg_res - Configure channel's resources
8186 * @vsi: the VSI being setup
8187 * @ch: ptr to channel structure
8189 * Configure channel specific resources such as rings, vector.
8191 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8195 for (i = 0; i < ch->num_txq; i++) {
8196 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8197 struct ice_ring_container *rc;
8198 struct ice_tx_ring *tx_ring;
8199 struct ice_rx_ring *rx_ring;
8201 tx_ring = vsi->tx_rings[ch->base_q + i];
8202 rx_ring = vsi->rx_rings[ch->base_q + i];
8203 if (!tx_ring || !rx_ring)
8206 /* setup ring being channel enabled */
8210 /* following code block sets up vector specific attributes */
8211 tx_q_vector = tx_ring->q_vector;
8212 rx_q_vector = rx_ring->q_vector;
8213 if (!tx_q_vector && !rx_q_vector)
8217 tx_q_vector->ch = ch;
8218 /* setup Tx and Rx ITR setting if DIM is off */
8219 rc = &tx_q_vector->tx;
8220 if (!ITR_IS_DYNAMIC(rc))
8221 ice_write_itr(rc, rc->itr_setting);
8224 rx_q_vector->ch = ch;
8225 /* setup Tx and Rx ITR setting if DIM is off */
8226 rc = &rx_q_vector->rx;
8227 if (!ITR_IS_DYNAMIC(rc))
8228 ice_write_itr(rc, rc->itr_setting);
8232 /* It is safe to assume that, if the channel has a non-zero num_txq or
8233 * num_rxq, then the GLINT_ITR register would have been written to
8234 * perform an in-context update; hence perform the flush here.
8235 */
8236 if (ch->num_txq || ch->num_rxq)
8237 ice_flush(&vsi->back->hw);
8241 * ice_cfg_chnl_all_res - configure channel resources
8242 * @vsi: ptr to the main VSI
8243 * @ch: ptr to channel structure
8245 * This function configures channel specific resources such as flow-director
8246 * counter index, and other resources such as queues, vectors, ITR settings
8249 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8251 /* configure channel (aka ADQ) resources such as queues, vectors,
8252 * ITR settings for channel specific vectors and anything else
8254 ice_chnl_cfg_res(vsi, ch);
8258 * ice_setup_hw_channel - setup new channel
8259 * @pf: ptr to PF device
8260 * @vsi: the VSI being setup
8261 * @ch: ptr to channel structure
8262 * @sw_id: underlying HW switching element ID
8263 * @type: type of channel to be created (VMDq2/VF)
8265 * Sets up a new channel (VSI) based on the specified type (VMDq2/VF)
8266 * and configures Tx rings accordingly
8269 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8270 struct ice_channel *ch, u16 sw_id, u8 type)
8272 struct device *dev = ice_pf_to_dev(pf);
8275 ch->base_q = vsi->next_base_q;
8278 ret = ice_add_channel(pf, sw_id, ch);
8280 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8284 /* configure/setup ADQ specific resources */
8285 ice_cfg_chnl_all_res(vsi, ch);
8287 /* make sure to update the next_base_q so that subsequent channel's
8288 * (aka ADQ) VSI queue map is correct
8290 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8291 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8298 * ice_setup_channel - setup new channel using uplink element
8299 * @pf: ptr to PF device
8300 * @vsi: the VSI being setup
8301 * @ch: ptr to channel structure
8303 * Sets up a new channel (VSI) based on the specified type (VMDq2/VF)
8304 * and the uplink switching element
8307 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8308 struct ice_channel *ch)
8310 struct device *dev = ice_pf_to_dev(pf);
8314 if (vsi->type != ICE_VSI_PF) {
8315 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8319 sw_id = pf->first_sw->sw_id;
8321 /* create channel (VSI) */
8322 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8324 dev_err(dev, "failed to setup hw_channel\n");
8327 dev_dbg(dev, "successfully created channel()\n");
8329 return ch->ch_vsi ? true : false;
8333 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8334 * @vsi: VSI to be configured
8335 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8336 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8339 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8343 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8344 if (err)
8345 return err;
8347 return ice_set_max_bw_limit(vsi, max_tx_rate);
8348 }
8351 * ice_create_q_channel - function to create channel
8352 * @vsi: VSI to be configured
8353 * @ch: ptr to channel (it contains channel specific params)
8355 * This function creates a channel (VSI) using the num_queues specified by
8356 * the user, and reconfigures RSS if needed.
8358 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8360 struct ice_pf *pf = vsi->back;
8366 dev = ice_pf_to_dev(pf);
8367 if (!ch->num_txq || !ch->num_rxq) {
8368 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8372 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8373 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8374 vsi->cnt_q_avail, ch->num_txq);
8378 if (!ice_setup_channel(pf, vsi, ch)) {
8379 dev_info(dev, "Failed to setup channel\n");
8382 /* configure BW rate limit */
8383 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8386 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8387 ch->min_tx_rate);
8388 if (ret)
8389 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8390 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8391 else
8392 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8393 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8394 }
8396 vsi->cnt_q_avail -= ch->num_txq;
8398 return 0;
8399 }
8402 * ice_rem_all_chnl_fltrs - removes all channel filters
8403 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8405 * Remove all advanced switch filters only if they are channel-specific
8406 * tc-flower based filters
8408 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8410 struct ice_tc_flower_fltr *fltr;
8411 struct hlist_node *node;
8413 /* to remove all channel filters, iterate an ordered list of filters */
8414 hlist_for_each_entry_safe(fltr, node,
8415 &pf->tc_flower_fltr_list,
8417 struct ice_rule_query_data rule;
8420 /* for now process only channel specific filters */
8421 if (!ice_is_chnl_fltr(fltr))
8424 rule.rid = fltr->rid;
8425 rule.rule_id = fltr->rule_id;
8426 rule.vsi_handle = fltr->dest_vsi_handle;
8427 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8429 if (status == -ENOENT)
8430 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8433 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8435 } else if (fltr->dest_vsi) {
8436 /* update advanced switch filter count */
8437 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8438 u32 flags = fltr->flags;
8440 fltr->dest_vsi->num_chnl_fltr--;
8441 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8442 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8443 pf->num_dmac_chnl_fltrs--;
8447 hlist_del(&fltr->tc_flower_node);
8453 * ice_remove_q_channels - Remove queue channels for the TCs
8454 * @vsi: VSI to be configured
8455 * @rem_fltr: delete advanced switch filter or not
8457 * Remove queue channels for the TCs
8459 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8461 struct ice_channel *ch, *ch_tmp;
8462 struct ice_pf *pf = vsi->back;
8465 /* remove all tc-flower based filter if they are channel filters only */
8466 if (rem_fltr)
8467 ice_rem_all_chnl_fltrs(pf);
8469 /* remove ntuple filters since queue configuration is being changed */
8470 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8471 struct ice_hw *hw = &pf->hw;
8473 mutex_lock(&hw->fdir_fltr_lock);
8474 ice_fdir_del_all_fltrs(vsi);
8475 mutex_unlock(&hw->fdir_fltr_lock);
8478 /* perform cleanup for channels if they exist */
8479 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8480 struct ice_vsi *ch_vsi;
8482 list_del(&ch->list);
8483 ch_vsi = ch->ch_vsi;
8489 /* Reset queue contexts */
8490 for (i = 0; i < ch->num_rxq; i++) {
8491 struct ice_tx_ring *tx_ring;
8492 struct ice_rx_ring *rx_ring;
8494 tx_ring = vsi->tx_rings[ch->base_q + i];
8495 rx_ring = vsi->rx_rings[ch->base_q + i];
8498 if (tx_ring->q_vector)
8499 tx_ring->q_vector->ch = NULL;
8503 if (rx_ring->q_vector)
8504 rx_ring->q_vector->ch = NULL;
8508 /* Release FD resources for the channel VSI */
8509 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8511 /* clear the VSI from scheduler tree */
8512 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8514 /* Delete VSI from FW, PF and HW VSI arrays */
8515 ice_vsi_delete(ch->ch_vsi);
8517 /* free the channel */
8518 kfree(ch);
8519 }
8521 /* clear the channel VSI map which is stored in main VSI */
8522 ice_for_each_chnl_tc(i)
8523 vsi->tc_map_vsi[i] = NULL;
8525 /* reset main VSI's all TC information */
8526 vsi->all_enatc = 0;
8527 vsi->all_numtc = 0;
8528 }
8531 * ice_rebuild_channels - rebuild channel
8534 * Recreate channel VSIs and replay filters
8536 static int ice_rebuild_channels(struct ice_pf *pf)
8538 struct device *dev = ice_pf_to_dev(pf);
8539 struct ice_vsi *main_vsi;
8540 bool rem_adv_fltr = true;
8541 struct ice_channel *ch;
8542 struct ice_vsi *vsi;
8546 main_vsi = ice_get_main_vsi(pf);
8550 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8551 main_vsi->old_numtc == 1)
8552 return 0; /* nothing to be done */
8554 /* reconfigure main VSI based on old value of TC and cached values
8555 * for MQPRIO opt
8556 */
8557 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8559 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8560 main_vsi->old_ena_tc, main_vsi->vsi_num);
8564 /* rebuild ADQ VSIs */
8565 ice_for_each_vsi(pf, i) {
8566 enum ice_vsi_type type;
8569 if (!vsi || vsi->type != ICE_VSI_CHNL)
8574 /* rebuild ADQ VSI */
8575 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8577 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8578 ice_vsi_type_str(type), vsi->idx, err);
8582 /* Re-map HW VSI number, using the VSI handle that is validated
8583 * by the ice_replay_vsi() call below
8584 */
8585 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8587 /* replay filters for the VSI */
8588 err = ice_replay_vsi(&pf->hw, vsi->idx);
8590 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8591 ice_vsi_type_str(type), err, vsi->idx);
8592 rem_adv_fltr = false;
8595 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8596 ice_vsi_type_str(type), vsi->idx);
8598 /* store ADQ VSI at correct TC index in main VSI's
8601 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8604 /* ADQ VSI(s) have been rebuilt successfully, so set up the
8605 * channel for the main VSI's Tx and Rx rings
8606 */
8607 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8608 struct ice_vsi *ch_vsi;
8610 ch_vsi = ch->ch_vsi;
8614 /* reconfig channel resources */
8615 ice_cfg_chnl_all_res(main_vsi, ch);
8617 /* replay BW rate limit if it is non-zero */
8618 if (!ch->max_tx_rate && !ch->min_tx_rate)
8621 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8624 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8625 err, ch->max_tx_rate, ch->min_tx_rate,
8628 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8629 ch->max_tx_rate, ch->min_tx_rate,
8633 /* reconfig RSS for main VSI */
8634 if (main_vsi->ch_rss_size)
8635 ice_vsi_cfg_rss_lut_key(main_vsi);
8637 return 0;
8639 cleanup:
8640 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8641 return err;
8642 }
8645 * ice_create_q_channels - Add queue channel for the given TCs
8646 * @vsi: VSI to be configured
8648 * Configures queue channel mapping to the given TCs
8650 static int ice_create_q_channels(struct ice_vsi *vsi)
8652 struct ice_pf *pf = vsi->back;
8653 struct ice_channel *ch;
8656 ice_for_each_chnl_tc(i) {
8657 if (!(vsi->all_enatc & BIT(i)))
8660 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8665 INIT_LIST_HEAD(&ch->list);
8666 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8667 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8668 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8669 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8670 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8672 /* convert to Kbits/s */
8673 if (ch->max_tx_rate)
8674 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8675 ICE_BW_KBPS_DIVISOR);
8676 if (ch->min_tx_rate)
8677 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8678 ICE_BW_KBPS_DIVISOR);
8680 ret = ice_create_q_channel(vsi, ch);
8681 if (ret) {
8682 dev_err(ice_pf_to_dev(pf),
8683 "failed creating channel TC:%d\n", i);
8684 kfree(ch);
8685 goto err_free;
8686 }
8687 list_add_tail(&ch->list, &vsi->ch_list);
8688 vsi->tc_map_vsi[i] = ch->ch_vsi;
8689 dev_dbg(ice_pf_to_dev(pf),
8690 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8691 }
8693 return 0;
8694 err_free:
8695 ice_remove_q_channels(vsi, false);
8697 return ret;
8698 }
8701 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8702 * @netdev: net device to configure
8703 * @type_data: TC offload data
8705 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8707 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8708 struct ice_netdev_priv *np = netdev_priv(netdev);
8709 struct ice_vsi *vsi = np->vsi;
8710 struct ice_pf *pf = vsi->back;
8711 u16 mode, ena_tc_qdisc = 0;
8712 int cur_txq, cur_rxq;
8717 dev = ice_pf_to_dev(pf);
8718 num_tcf = mqprio_qopt->qopt.num_tc;
8719 hw = mqprio_qopt->qopt.hw;
8720 mode = mqprio_qopt->mode;
8722 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8723 vsi->ch_rss_size = 0;
8724 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8728 /* Generate queue region map for number of TCF requested */
8729 for (i = 0; i < num_tcf; i++)
8730 ena_tc_qdisc |= BIT(i);
8733 case TC_MQPRIO_MODE_CHANNEL:
8735 if (pf->hw.port_info->is_custom_tx_enabled) {
8736 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8739 ice_tear_down_devlink_rate_tree(pf);
8741 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8743 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8747 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8748 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8749 /* don't assume state of hw_tc_offload during driver load
8750 * and set the flag for TC flower filter if hw_tc_offload
8751 * already ON
8752 */
8753 if (vsi->netdev->features & NETIF_F_HW_TC)
8754 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8762 /* Requesting same TCF configuration as already enabled */
8763 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8764 mode != TC_MQPRIO_MODE_CHANNEL)
8765 return 0;
8767 /* Pause VSI queues */
8768 ice_dis_vsi(vsi, true);
8770 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8771 ice_remove_q_channels(vsi, true);
8773 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8774 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8775 num_online_cpus());
8776 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8777 num_online_cpus());
8778 } else {
8779 /* logic to rebuild VSI, same as ethtool -L */
8780 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8782 for (i = 0; i < num_tcf; i++) {
8783 if (!(ena_tc_qdisc & BIT(i)))
8786 offset = vsi->mqprio_qopt.qopt.offset[i];
8787 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8788 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8790 vsi->req_txq = offset + qcount_tx;
8791 vsi->req_rxq = offset + qcount_rx;
8793 /* store away original rss_size info, so that it gets reused
8794 * from ice_vsi_rebuild during the tc-qdisc delete stage - to
8795 * determine what the rss_size for the main VSI should be
8796 */
8797 vsi->orig_rss_size = vsi->rss_size;
8798 }
8800 /* save current values of Tx and Rx queues before calling VSI rebuild
8801 * for fallback option
8803 cur_txq = vsi->num_txq;
8804 cur_rxq = vsi->num_rxq;
8806 /* proceed with rebuild main VSI using correct number of queues */
8807 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
8809 /* fallback to current number of queues */
8810 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8811 vsi->req_txq = cur_txq;
8812 vsi->req_rxq = cur_rxq;
8813 clear_bit(ICE_RESET_FAILED, pf->state);
8814 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
8815 dev_err(dev, "Rebuild of main VSI failed again\n");
8820 vsi->all_numtc = num_tcf;
8821 vsi->all_enatc = ena_tc_qdisc;
8822 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8824 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8829 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8830 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8831 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8833 /* set TC0 rate limit if specified */
8834 if (max_tx_rate || min_tx_rate) {
8835 /* convert to Kbits/s */
8837 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8839 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8841 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8843 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8844 max_tx_rate, min_tx_rate, vsi->vsi_num);
8846 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8847 max_tx_rate, min_tx_rate, vsi->vsi_num);
8851 ret = ice_create_q_channels(vsi);
8852 if (ret) {
8853 netdev_err(netdev, "failed configuring queue channels\n");
8854 goto exit;
8855 } else {
8856 netdev_dbg(netdev, "successfully configured channels\n");
8857 }
8860 if (vsi->ch_rss_size)
8861 ice_vsi_cfg_rss_lut_key(vsi);
8863 exit:
8864 /* if error, reset the all_numtc and all_enatc */
8865 if (ret) {
8866 vsi->all_numtc = 0;
8867 vsi->all_enatc = 0;
8868 }
8869 /* resume VSI */
8870 ice_ena_vsi(vsi, true);
8872 return ret;
8873 }
static LIST_HEAD(ice_block_cb_list);

static int
ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
	     void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	bool locked = false;
	int err;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &ice_block_cb_list,
						  ice_setup_tc_block_cb,
						  np, np, true);
	case TC_SETUP_QDISC_MQPRIO:
		if (ice_is_eswitch_mode_switchdev(pf)) {
			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
			return -EOPNOTSUPP;
		}

		if (pf->adev) {
			mutex_lock(&pf->adev_mutex);
			device_lock(&pf->adev->dev);
			locked = true;
			if (pf->adev->dev.driver) {
				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
				err = -EBUSY;
				goto adev_unlock;
			}
		}

		/* setup traffic classifier for receive side */
		mutex_lock(&pf->tc_mutex);
		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
		mutex_unlock(&pf->tc_mutex);

adev_unlock:
		if (locked) {
			device_unlock(&pf->adev->dev);
			mutex_unlock(&pf->adev_mutex);
		}
		return err;
	default:
		return -EOPNOTSUPP;
	}
}
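
/* The functions below implement indirect TC block offload: they let this
 * driver offload flower filters that user space installs on other netdevs
 * (tunnel devices such as VXLAN/GRE, or VLAN devices stacked on the PF
 * netdev) by binding a callback to those devices' flow blocks.
 */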
static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}
static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);

		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
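
/* ice_indr_setup_tc_cb is the entry point registered with the flow offload
 * core (via flow_indr_dev_register(), elsewhere in this driver); it
 * dispatches indirect block requests to ice_indr_setup_tc_block() above.
 */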
static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}
/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be used directly except
 * by ice_open and the reset handling path.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}
/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err) {
			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
				   vsi->vsi_num, link_err);
			ice_vsi_close(vsi);
			return -EIO;
		}
	}

	ice_vsi_close(vsi);

	return 0;
}
/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;
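
	/* The header length checks below reject odd or oversized headers:
	 * the Tx descriptor encodes header lengths in word units, so an odd
	 * byte count cannot be represented (hence the "& 0x1" tests), and
	 * each field has a hardware maximum. Frames that fail these checks
	 * fall back to software checksum and segmentation.
	 */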
	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
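
/* Minimal ndo set used while the driver runs in safe mode, i.e. when the
 * DDP package could not be loaded. Only basic netdev operations are
 * available; advanced features such as TC offload, VF management, flow
 * steering, and the XDP data path are not exposed here.
 */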
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};
static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};