/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
#include <linux/gnss.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/devlink.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_eswitch.h"
#include "ice_vsi_vlan_ops.h"
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
#define ICE_MAX_NUM_DESC 8160
#define ICE_DFLT_MIN_RX_DESC 512
#define ICE_DFLT_NUM_TX_DESC 256
#define ICE_DFLT_NUM_RX_DESC 2048

#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 192
#define ICE_MBXSQ_LEN 64
#define ICE_SBQ_LEN 64
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
#define ICE_RDMA_NUM_AEQ_MSIX 4
#define ICE_MIN_RDMA_MSIX 2
#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
#define ICE_MAX_SCATTER_TXQS 16
#define ICE_MAX_SCATTER_RXQS 16
#define ICE_Q_WAIT_RETRY_LIMIT 10
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
#define ICE_INVAL_Q_INDEX 0xffff

#define ICE_MAX_RXQS_PER_TC 256	/* Used when setting VSI context per TC Rx queues */

#define ICE_CHNL_START_TC 1

#define ICE_MAX_RESET_WAIT 20

#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)

#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
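/* Worked example (constant values quoted here only for illustration, see
 * their definitions elsewhere in the driver): with a maximum AQ-settable
 * frame size of 9728 bytes and ICE_ETH_PKT_HDR_PAD covering
 * ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN = 26 bytes of L2 overhead,
 * ICE_MAX_MTU works out to 9728 - 26 = 9702 bytes of L3 payload.
 */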
#define ICE_MAX_TSO_SIZE 131072

#define ICE_UP_TABLE_TRANSLATE(val, i) \
	(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
	  ICE_AQ_VSI_UP_TABLE_UP##i##_M)
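/* Illustration of the token pasting above: a use such as
 *
 *	ICE_UP_TABLE_TRANSLATE(tc, 3)
 *
 * expands to
 *
 *	(((tc) << ICE_AQ_VSI_UP_TABLE_UP3_S) & ICE_AQ_VSI_UP_TABLE_UP3_M)
 *
 * i.e. it shifts and masks a traffic class value into the slot for user
 * priority 3 of the VSI UP translation table.
 */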
#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT 500
/* User can specify BW in either Kbit/Mbit/Gbit and the OS converts it to
 * bytes per second. Use this divisor to convert that value back into Kbps.
 */
#define ICE_BW_KBPS_DIVISOR 125
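/* Worked example: a user limit of 10 Mbit/s arrives from the stack as
 * 1,250,000 bytes per second; dividing by ICE_BW_KBPS_DIVISOR (125 bytes
 * per Kbit, since 1000 bits = 125 bytes) gives 10,000 Kbps, which is the
 * unit the scheduler expects.
 */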
/* Default recipes have priority 4 and below, hence priority values between 5..7
 * can be used as filter priority for advanced switch filters (advanced switch
 * filters need a new recipe to be created for the specified extraction sequence
 * because the default recipe extraction sequence does not represent custom
 * extraction)
 */
#define ICE_SWITCH_FLTR_PRIO_QUEUE 7
/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
 * (Optional: IP TOS/TTL) + L4 fields + (optional: TCP fields such as
 * SYN/FIN/RST))
 */
#define ICE_SWITCH_FLTR_PRIO_RSVD 6
#define ICE_SWITCH_FLTR_PRIO_VSI 5
#define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI
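/* Illustrative use only (hedged: the structure and field carrying the
 * priority are an assumption here, they are not defined in this header):
 * an advanced rule that steers traffic to a whole VSI would be added with
 *
 *	rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
 *
 * while a rule targeting a single queue uses ICE_SWITCH_FLTR_PRIO_QUEUE,
 * keeping queue-level rules above VSI-level ones and both above the
 * default recipes (priority 4 and below).
 */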
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

#define ice_for_each_xdp_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)

#define ice_for_each_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

#define ice_for_each_alloc_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)

#define ice_for_each_q_vector(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

#define ice_for_each_chnl_tc(i) \
	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)

#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
				     ICE_PROMISC_UCAST_RX | \
				     ICE_PROMISC_VLAN_TX | \
				     ICE_PROMISC_VLAN_RX)

#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
				     ICE_PROMISC_MCAST_RX | \
				     ICE_PROMISC_VLAN_TX | \
				     ICE_PROMISC_VLAN_RX)
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))

DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);
	struct list_head list;
	struct ice_aqc_vsi_props info;
	atomic_t num_sb_fltr;
	struct ice_vsi *ch_vsi;
struct ice_txq_meta {
	u32 q_teid;	/* Tx-scheduler element identifier */
	u16 q_id;	/* Entry in VSI's txq_map bitmap */
	u16 q_handle;	/* Relative index of Tx queue within TC */
	u16 vsi_idx;	/* VSI index that Tx queue belongs to */
	u8 tc;		/* TC number that Tx queue belongs to */
};
	u8 numtc;	/* Total number of enabled TCs */
	u16 ena_tc;	/* Tx map */
	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
	struct mutex *qs_mutex;	/* will be assigned to &pf->avail_q_mutex */
	unsigned long *pf_map;
	unsigned long pf_map_size;
	unsigned int q_count;
	unsigned int scatter_count;
	u16 sw_id;		/* switch ID for this switch */
	u16 bridge_mode;	/* VEB/VEPA/Port Virtualizer */
	ICE_PREPARED_FOR_RESET,		/* set by driver when prepared */
	ICE_RESET_OICR_RECV,		/* set by driver after rcv reset OICR */
	ICE_PFR_REQ,			/* set by driver */
	ICE_CORER_REQ,			/* set by driver */
	ICE_GLOBR_REQ,			/* set by driver */
	ICE_CORER_RECV,			/* set by OICR handler */
	ICE_GLOBR_RECV,			/* set by OICR handler */
	ICE_EMPR_RECV,			/* set by OICR handler */
	ICE_SUSPENDED,			/* set on module remove path */
	ICE_RESET_FAILED,		/* set by reset/rebuild */
	/* When checking whether the PF is in a nominal operating state, only
	 * the bits grouped at the beginning of the list, i.e. those occurring
	 * before ICE_STATE_NOMINAL_CHECK_BITS, are checked. If a new bit must
	 * be considered for the nominal operating state, add it before
	 * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
	 * without appropriate consideration.
	 */
	ICE_STATE_NOMINAL_CHECK_BITS,
	ICE_ADMINQ_EVENT_PENDING,
	ICE_MAILBOXQ_EVENT_PENDING,
	ICE_SIDEBANDQ_EVENT_PENDING,
	ICE_MDD_EVENT_PENDING,
	ICE_VFLR_EVENT_PENDING,
	ICE_FLTR_OVERFLOW_PROMISC,
	ICE_OICR_INTR_DIS,		/* Global OICR interrupt disabled */
	ICE_MDD_VF_PRINT_PENDING,	/* set while an MDD event is being handled */
	ICE_VF_RESETS_DISABLED,		/* disable resets during ice_remove */
	ICE_LINK_DEFAULT_OVERRIDE_PENDING,
	ICE_PHY_INIT_COMPLETE,
	ICE_FD_VF_FLUSH_CTX,		/* set at FD Rx IRQ or timeout */
	ICE_STATE_NBITS			/* must be last */

	ICE_VSI_NEEDS_RESTART,
	ICE_VSI_NETDEV_ALLOCD,
	ICE_VSI_NETDEV_REGISTERED,
	ICE_VSI_UMAC_FLTR_CHANGED,
	ICE_VSI_MMAC_FLTR_CHANGED,
	ICE_VSI_PROMISC_CHANGED,
	ICE_VSI_STATE_NBITS		/* must be last */
struct ice_vsi_stats {
	struct ice_ring_stats **tx_ring_stats;	/* Tx ring stats array */
	struct ice_ring_stats **rx_ring_stats;	/* Rx ring stats array */
};
/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
	struct net_device *netdev;
	struct ice_sw *vsw;		 /* switch this VSI is on */
	struct ice_pf *back;		 /* back pointer to PF */
	struct ice_port_info *port_info; /* back pointer to port_info */
	struct ice_rx_ring **rx_rings;	 /* Rx ring array */
	struct ice_tx_ring **tx_rings;	 /* Tx ring array */
	struct ice_q_vector **q_vectors; /* q_vector array */

	irqreturn_t (*irq_handler)(int irq, void *data);

	DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
	unsigned int current_netdev_flags;
	enum ice_vsi_type type;
	u16 vsi_num;			/* HW (absolute) index of this VSI */
	u16 idx;			/* software index in pf->vsi[] */

	struct ice_vf *vf;		/* VF associated with this VSI */

	u16 rss_table_size;	/* HW RSS table size */
	u16 rss_size;		/* Allocated RSS queues */
	u8 *rss_hkey_user;	/* User configured hash keys */
	u8 *rss_lut_user;	/* User configured lookup table entries */
	u8 rss_lut_type;	/* used to configure Get/Set RSS LUT AQ call */

	/* aRFS members only allocated for the PF VSI */
#define ICE_MAX_ARFS_LIST 1024
#define ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1)
	struct hlist_head *arfs_fltr_list;
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
	atomic_t *arfs_last_fltr_id;

	struct ice_aqc_vsi_props info;	/* VSI properties */

	struct rtnl_link_stats64 net_stats;
	struct rtnl_link_stats64 net_stats_prev;
	struct ice_eth_stats eth_stats;
	struct ice_eth_stats eth_stats_prev;

	struct list_head tmp_sync_list;		/* MAC filters to be synced */
	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */
	u8 current_isup:1;		/* Sync 'link up' logging */
	u8 stat_offsets_loaded:1;
	struct ice_vsi_vlan_ops inner_vlan_ops;
	struct ice_vsi_vlan_ops outer_vlan_ops;
	/* queue information */
	u8 tx_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u8 rx_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u16 *txq_map;			/* index in pf->avail_txqs */
	u16 *rxq_map;			/* index in pf->avail_rxqs */
	u16 alloc_txq;			/* Allocated Tx queues */
	u16 num_txq;			/* Used Tx queues */
	u16 alloc_rxq;			/* Allocated Rx queues */
	u16 num_rxq;			/* Used Rx queues */
	u16 req_txq;			/* User requested Tx queues */
	u16 req_rxq;			/* User requested Rx queues */
	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
	struct ice_tc_cfg tc_cfg;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring **xdp_rings;	/* XDP ring array */
	unsigned long *af_xdp_zc_qps;	/* tracks AF_XDP ZC enabled qps */
	u16 num_xdp_txq;		/* Used XDP queues */
	u8 xdp_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */

	struct net_device **target_netdevs;

	struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */

	/* Channel Specific Fields */
	struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
	u16 next_base_q;	/* next queue to be used for channel setup */
	struct list_head ch_list;
	/* store away RSS size info before configuring ADQ channels so that
	 * it can be used after tc-qdisc delete to restore the RSS settings
	 * to what they were before
	 */
	/* this keeps track of all enabled TCs with and without DCB
	 * and inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
	 * information
	 */
	/* store away TC info, to be used for rebuild logic */
	struct ice_channel *ch;

	/* set up a back reference to the aggregator node this VSI
	 * corresponds to
	 */
	struct ice_agg_node *agg_node;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
struct ice_q_vector {
	u16 v_idx;		/* index in the vsi->q_vector array. */
	u8 num_ring_rx;		/* total number of Rx rings in vector */
	u8 num_ring_tx;		/* total number of Tx rings in vector */
	u8 wb_on_itr:1;		/* if true, WB on ITR is enabled */
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
	 */

	struct napi_struct napi;
	struct ice_ring_container rx;
	struct ice_ring_container tx;

	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;

	struct ice_channel *ch;

	char name[ICE_INT_NAME_STR_LEN];

	u16 total_events;	/* net_dim(): number of interrupts processed */
} ____cacheline_internodealigned_in_smp;
	ICE_FLAG_SRIOV_CAPABLE,
	ICE_FLAG_DCB_CAPABLE,
	ICE_FLAG_PTP_SUPPORTED,		/* PTP is supported by NVM */
	ICE_FLAG_PTP,			/* PTP is enabled by software */
	ICE_FLAG_ADV_FEATURES,
	ICE_FLAG_TC_MQPRIO,		/* support for Multi queue TC */
	ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
	ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
	ICE_FLAG_FW_LLDP_AGENT,
	ICE_FLAG_MOD_POWER_UNSUPPORTED,
	ICE_FLAG_PHY_FW_LOAD_FAILED,
	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
	ICE_FLAG_VF_TRUE_PROMISC_ENA,
	ICE_FLAG_MDD_AUTO_RESET_VF,
	ICE_FLAG_VF_VLAN_PRUNING,
	ICE_FLAG_LINK_LENIENT_MODE_ENA,
	ICE_FLAG_PLUG_AUX_DEV,
	ICE_FLAG_UNPLUG_AUX_DEV,
	ICE_FLAG_MTU_CHANGED,
	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
	ICE_PF_FLAGS_NBITS		/* must be last */
struct ice_switchdev_info {
	struct ice_vsi *control_vsi;
	struct ice_vsi *uplink_vsi;
	bool is_running;
};
struct ice_agg_node {
#define ICE_MAX_VSIS_IN_AGG_NODE 64
	struct pci_dev *pdev;

	struct devlink_region *nvm_region;
	struct devlink_region *sram_region;
	struct devlink_region *devcaps_region;

	/* devlink port data */
	struct devlink_port devlink_port;

	/* OS reserved IRQ details */
	struct msix_entry *msix_entries;
	struct ice_irq_tracker irq_tracker;
	/* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
	 * number of MSIX vectors needed for all SR-IOV VFs from the number of
	 * MSIX vectors allowed on this PF.
	 */
	u16 sriov_base_vector;
	u16 ctrl_vsi_idx;		/* control VSI index in pf->vsi array */

	struct ice_vsi **vsi;		/* VSIs created by the driver */
	struct ice_vsi_stats **vsi_stats;
	struct ice_sw *first_sw;	/* first switch created by firmware */
	u16 eswitch_mode;		/* current mode of eswitch */
	DECLARE_BITMAP(features, ICE_F_MAX);
	DECLARE_BITMAP(state, ICE_STATE_NBITS);
	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
	unsigned long serv_tmr_period;
	unsigned long serv_tmr_prev;
	struct timer_list serv_tmr;
	struct work_struct serv_task;
	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
	struct mutex tc_mutex;		/* lock to protect TC changes */
	struct mutex adev_mutex;	/* lock to protect aux device access */
	struct gnss_serial *gnss_serial;
	struct gnss_device *gnss_dev;
	u16 num_rdma_msix;		/* Total MSIX vectors for RDMA driver */
	u16 rdma_base_vector;

	/* spinlock to protect the AdminQ wait list */
	spinlock_t aq_wait_lock;
	struct hlist_head aq_wait_list;
	wait_queue_head_t aq_wait_queue;
	bool fw_emp_reset_disabled;

	wait_queue_head_t reset_wait_queue;

	u32 hw_csum_rx_error;
	struct msi_map oicr_irq;	/* Other interrupt cause MSIX vector */
	u16 max_pf_txqs;	/* Total Tx queues PF wide */
	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
	u16 num_lan_msix;	/* Total MSIX vectors for base driver */
	u16 num_lan_tx;		/* num LAN Tx queues setup */
	u16 num_lan_rx;		/* num LAN Rx queues setup */
	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */

	u16 corer_count;	/* Core reset count */
	u16 globr_count;	/* Global reset count */
	u16 empr_count;		/* EMP reset count */
	u16 pfr_count;		/* PF reset count */

	u8 wol_ena : 1;		/* software state of WoL */
	u32 wakeup_reason;	/* last wakeup reason */
	struct ice_hw_port_stats stats;
	struct ice_hw_port_stats stats_prev;
	u8 stat_prev_loaded:1;	/* has previous stats been loaded */
	u32 tx_timeout_count;
	unsigned long tx_timeout_last_recovery;
	u32 tx_timeout_recovery_level;
	char int_name[ICE_INT_NAME_STR_LEN];
	struct auxiliary_device *adev;
	/* count of tc_flower filters specific to channel (aka where filter
	 * action is "hw_tc <tc_num>")
	 */
	u16 num_dmac_chnl_fltrs;
	struct hlist_head tc_flower_fltr_list;

	u64 supported_rxdids;

	__le64 nvm_phy_type_lo;	/* NVM PHY type low */
	__le64 nvm_phy_type_hi;	/* NVM PHY type high */
	struct ice_link_default_override_tlv link_dflt_override;
	struct ice_lag *lag;	/* Link Aggregation information */

	struct ice_switchdev_info switchdev;

#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
	struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
#define ICE_VF_AGG_NODE_ID_START 65
#define ICE_MAX_VF_AGG_NODES 32
	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_netdev_priv {
	struct ice_vsi *vsi;
	struct ice_repr *repr;
	/* indirect block callbacks on registered higher level devices
	 * (e.g. tunnel devices)
	 *
	 * tc_indr_block_cb_priv_list is used to look up indirect callback
	 * private data
	 */
	struct list_head tc_indr_block_priv_list;
};
/**
 * ice_vector_ch_enabled
 * @qv: pointer to q_vector, can be NULL
 *
 * This function returns true if the vector is channel enabled, otherwise false
 */
static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
{
	return !!qv->ch; /* Enable it to run with TC */
}
/**
 * ice_irq_dynamic_ena - Enable default interrupt generation settings
 * @hw: pointer to HW struct
 * @vsi: pointer to VSI struct, can be NULL
 * @q_vector: pointer to q_vector, can be NULL
 */
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
		    struct ice_q_vector *q_vector)
{
	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
				((struct ice_pf *)hw->back)->oicr_irq.index;
	int itr = ICE_ITR_NONE;
	u32 val;

	/* clear the PBA here, as this function is meant to clean out all
	 * previous interrupts and enable the interrupt
	 */
	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      (itr << GLINT_DYN_CTL_ITR_INDX_S);

	if (vsi)
		if (test_bit(ICE_VSI_DOWN, vsi->state))
			return;
	wr32(hw, GLINT_DYN_CTL(vector), val);
}
/**
 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
 * @netdev: pointer to the netdev struct
 */
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->back;
}
static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
	return !!READ_ONCE(vsi->xdp_prog);
}

static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
	ring->flags |= ICE_TX_FLAGS_RING_XDP;
}
/**
 * ice_xsk_pool - get XSK buffer pool bound to a ring
 * @ring: Rx ring to use
 *
 * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
 * present, NULL otherwise.
 */
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}
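/* A minimal sketch of the intended use (an assumption, not a definitive
 * call site: this would run from the Rx ring configuration path rather
 * than the hot path):
 *
 *	ring->xsk_pool = ice_xsk_pool(ring);
 *
 * so the data path only needs to test ring->xsk_pool to pick between the
 * zero-copy and regular receive routines.
 */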
/**
 * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Sets XSK buff pool pointer on XDP ring.
 *
 * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
 * queue id. Reason for doing so is that queue vectors might have assigned more
 * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
 * carries a pointer to one of these XDP rings for its own purposes, such as
 * handling XDP_TX action, therefore we can piggyback here on the
 * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
 */
static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
	struct ice_tx_ring *ring;

	ring = vsi->rx_rings[qid]->xdp_ring;
	if (!ring)
		return;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
		ring->xsk_pool = NULL;
		return;
	}

	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}
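/* A minimal sketch of the call pattern described above (assumption: invoked
 * while (re)configuring queues, e.g. when an AF_XDP socket is bound to or
 * unbound from a queue, not from the hot path):
 *
 *	ice_for_each_rxq(vsi, qid)
 *		ice_tx_xsk_pool(vsi, qid);
 *
 * Each iteration resolves the XDP ring via rx_rings[qid]->xdp_ring and
 * either assigns the matching buffer pool or clears it.
 */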
/**
 * ice_get_main_vsi - Get the PF VSI
 *
 * returns pf->vsi[0], which by definition is the PF VSI
 */
static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
{
	if (pf->vsi)
		return pf->vsi[0];

	return NULL;
}
/**
 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
 * @np: private netdev structure
 */
static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
{
	/* In case of port representor return source port VSI. */
	if (np->repr)
		return np->repr->src_vsi;
	else
		return np->vsi;
}
/**
 * ice_get_ctrl_vsi - Get the control VSI
 */
static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
{
	/* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
	if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
		return NULL;

	return pf->vsi[pf->ctrl_vsi_idx];
}
/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
			return pf->vsi[i];
	return NULL;
}
/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise.
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
	return pf->switchdev.is_running;
}
/**
 * ice_set_sriov_cap - enable SRIOV in PF flags
 */
static inline void ice_set_sriov_cap(struct ice_pf *pf)
{
	if (pf->hw.func_caps.common_cap.sr_iov_1_1)
		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
}

/**
 * ice_clear_sriov_cap - disable SRIOV in PF flags
 */
static inline void ice_clear_sriov_cap(struct ice_pf *pf)
{
	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
}
#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
#define ICE_FD_STAT_PF_IDX(base_idx) \
			((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
#define ICE_FD_STAT_CH 1
#define ICE_FD_CH_STAT_IDX(base_idx) \
			(ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
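/* Worked example: with ICE_FD_STAT_CTR_BLOCK_COUNT = 256, a PF whose
 * base_idx is 2 owns the counter block starting at 2 * 256 = 512. Its
 * sideband (ntuple) filters are counted at index 512 (ICE_FD_SB_STAT_IDX)
 * and its ADQ channel filters at index 512 + 1 = 513 (ICE_FD_CH_STAT_IDX).
 */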
/**
 * ice_is_adq_active - any active ADQs
 *
 * This function returns true if there are any ADQs configured (which is
 * determined by looking at VSI type (which should be VSI_PF), numtc, and the
 * TC_MQPRIO flag), otherwise it returns false.
 */
static inline bool ice_is_adq_active(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	/* is ADQ configured */
	if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		return true;

	return false;
}
bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
			     struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun);
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs);
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event);
int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
/**
 * ice_set_rdma_cap - enable RDMA support
 */
static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
	if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
		set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	}
}
/**
 * ice_clear_rdma_cap - disable RDMA support
 */
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
	/* defer the unplug to the service task to avoid the RTNL lock, and
	 * clear the PLUG bit so that pending plugs don't interfere
	 */
	clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}