// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}
/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer. Interpret the user-specified
 * buffer as a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "	extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
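
/* Usage sketch (illustrative only): callers typically allocate a pcaps
 * buffer, query with one of the ICE_AQC_REPORT_* modes and free the buffer
 * again, as ice_init_hw() below does for ICE_AQC_REPORT_TOPO_CAP_MEDIA:
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				     pcaps, NULL);
 *	devm_kfree(ice_hw_to_dev(hw), pcaps);
 */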
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}
/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *config;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PF's. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous configurations.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * device resets.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
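
/* Usage sketch (illustrative only, not part of the driver): to enable FW
 * logging over the AdminQ, a caller would set the enable bit and the
 * per-module "cfg" masks before device init; "module" and "mask" below are
 * placeholders, not real identifiers:
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[module].cfg = mask;
 *	status = ice_cfg_fw_log(hw, true);
 */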
/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}
/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
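
/* Usage sketch (illustrative only): the Rx queue setup path fills the
 * sparse struct and lets ice_write_rxq_ctx() pack and program it; the
 * values below are placeholders, not recommended settings:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> ICE_RLAN_BASE_S;	(128-byte units)
 *	rlan_ctx.qlen = ring_count;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */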
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};
/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
						   (struct ice_aq_desc *)desc,
						   buf, buf_size, cd));
}
/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
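
/* Usage sketch (illustrative only), loosely mirroring how the PTP helpers
 * read a PHY register over the sideband queue; the destination device and
 * address below are placeholders:
 *
 *	struct ice_sbq_msg_input msg = { 0 };
 *
 *	msg.dest_dev = rmn_0;
 *	msg.opcode = 0;			(0 selects a read; non-zero writes)
 *	msg.msg_addr_low = lower_16_bits(addr);
 *	msg.msg_addr_high = upper_16_bits(addr);
 *	err = ice_sbq_rw_reg(hw, &msg);
 *	if (!err)
 *		val = msg.data;		(read data returns in msg.data)
 */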
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the AQ opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}
/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		if (buf_cpy)
			memcpy(buf, buf_cpy, buf_size);

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	kfree(buf_cpy);

	return status;
}
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}
/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}
/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release a common resource using the admin queue commands (0x0009).
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
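
/* Usage sketch (illustrative only): the canonical acquire/act/release
 * pattern, here with the NVM resource, as the NVM helpers use it:
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *				 ICE_NVM_TIMEOUT);
 *	if (!status) {
 *		(access the NVM here)
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */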
/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < flex_array_size(buf, elem, num_entries))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	memcpy(res, buf->elem, sizeof(*buf->elem) * num);

ice_alloc_res_exit:
	kfree(buf);
	return status;
}
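
/* Usage sketch (illustrative only): allocating a single dedicated resource;
 * the resource type here is an arbitrary example, not a recommendation:
 *
 *	u16 res_id;
 *
 *	status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_VSI_LIST_REP, 1,
 *				  false, &res_id);
 *	if (!status)
 *		(use res_id; release it later via ice_free_hw_res())
 */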
/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type);
	memcpy(buf->elem, res, sizeof(*buf->elem) * num);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	kfree(buf);
	return status;
}
/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
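
/* For example, with valid_functions = 0xFF (all eight PFs enabled), passing
 * max = 2048 yields 2048 / 8 = 256 resources per PF.
 */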
/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
			  caps->sr_iov_1_1);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	case ICE_AQC_CAPS_PENDING_NVM_VER:
		caps->nvm_update_pending_nvm = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
		break;
	case ICE_AQC_CAPS_NVM_MGMT:
		caps->nvm_unified_update =
			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
			  caps->nvm_unified_update);
		break;
	case ICE_AQC_CAPS_RDMA:
		caps->rdma = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}
2032 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2033 * @hw: pointer to the HW structure
2034 * @caps: pointer to capabilities structure to fix
2036 * Re-calculate the capabilities that are dependent on the number of physical
2037 * ports; i.e. some features are not supported or function differently on
2038 * devices with more than 4 ports.
2041 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2043 /* This assumes device capabilities are always scanned before function
2044 * capabilities during the initialization flow.
2046 if (hw->dev_caps.num_funcs > 4) {
2047 /* Max 4 TCs per port */
2049 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2052 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2056 /* print message only when processing device capabilities
2057 * during initialization.
2059 if (caps == &hw->dev_caps.common_cap)
2060 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
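/* For example (illustrative): an 8-PF device that reported maxtc = 8 and
 * RDMA support ends up with caps->maxtc clamped to 4 and caps->rdma forced
 * to false, and the dev_info message above is printed once, while the
 * device capabilities are being processed.
 */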
2065 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2066 * @hw: pointer to the HW struct
2067 * @func_p: pointer to function capabilities structure
2068 * @cap: pointer to the capability element to parse
2070 * Extract function capabilities for ICE_AQC_CAPS_VF.
2073 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2074 struct ice_aqc_list_caps_elem *cap)
2076 u32 logical_id = le32_to_cpu(cap->logical_id);
2077 u32 number = le32_to_cpu(cap->number);
2079 func_p->num_allocd_vfs = number;
2080 func_p->vf_base_id = logical_id;
2081 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2082 func_p->num_allocd_vfs);
2083 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2084 func_p->vf_base_id);
2088 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2089 * @hw: pointer to the HW struct
2090 * @func_p: pointer to function capabilities structure
2091 * @cap: pointer to the capability element to parse
2093 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2096 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2097 struct ice_aqc_list_caps_elem *cap)
2099 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2100 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2101 le32_to_cpu(cap->number));
2102 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2103 func_p->guar_num_vsi);
2107 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2108 * @hw: pointer to the HW struct
2109 * @func_p: pointer to function capabilities structure
2110 * @cap: pointer to the capability element to parse
2112 * Extract function capabilities for ICE_AQC_CAPS_1588.
2115 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2116 struct ice_aqc_list_caps_elem *cap)
2118 struct ice_ts_func_info *info = &func_p->ts_func_info;
2119 u32 number = le32_to_cpu(cap->number);
2121 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2122 func_p->common_cap.ieee_1588 = info->ena;
2124 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2125 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2126 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2127 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2129 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2130 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2132 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2133 func_p->common_cap.ieee_1588);
2134 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2135 info->src_tmr_owned);
2136 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2138 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2139 info->tmr_index_owned);
2140 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2141 info->tmr_index_assoc);
2142 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2144 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2149 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2150 * @hw: pointer to the HW struct
2151 * @func_p: pointer to function capabilities structure
2153 * Extract function capabilities for ICE_AQC_CAPS_FD.
2156 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2160 reg_val = rd32(hw, GLQF_FD_SIZE);
2161 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2162 GLQF_FD_SIZE_FD_GSIZE_S;
2163 func_p->fd_fltr_guar =
2164 ice_get_num_per_func(hw, val);
2165 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2166 GLQF_FD_SIZE_FD_BSIZE_S;
2167 func_p->fd_fltr_best_effort = val;
2169 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2170 func_p->fd_fltr_guar);
2171 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2172 func_p->fd_fltr_best_effort);
2176 * ice_parse_func_caps - Parse function capabilities
2177 * @hw: pointer to the HW struct
2178 * @func_p: pointer to function capabilities structure
2179 * @buf: buffer containing the function capability records
2180 * @cap_count: the number of capabilities
2182 * Helper function to parse function (0x000A) capabilities list. For
2183 * capabilities shared between device and function, this relies on
2184 * ice_parse_common_caps.
2186 * Loop through the list of provided capabilities and extract the relevant
2187 * data into the function capabilities structure.
2190 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2191 void *buf, u32 cap_count)
2193 struct ice_aqc_list_caps_elem *cap_resp;
2198 memset(func_p, 0, sizeof(*func_p));
2200 for (i = 0; i < cap_count; i++) {
2201 u16 cap = le16_to_cpu(cap_resp[i].cap);
2204 found = ice_parse_common_caps(hw, &func_p->common_cap,
2205 &cap_resp[i], "func caps");
2208 case ICE_AQC_CAPS_VF:
2209 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2211 case ICE_AQC_CAPS_VSI:
2212 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2214 case ICE_AQC_CAPS_1588:
2215 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2217 case ICE_AQC_CAPS_FD:
2218 ice_parse_fdir_func_caps(hw, func_p);
2221 /* Don't list common capabilities as unknown */
2223 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2229 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2233 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2234 * @hw: pointer to the HW struct
2235 * @dev_p: pointer to device capabilities structure
2236 * @cap: capability element to parse
2238 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2241 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2242 struct ice_aqc_list_caps_elem *cap)
2244 u32 number = le32_to_cpu(cap->number);
2246 dev_p->num_funcs = hweight32(number);
2247 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2252 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2253 * @hw: pointer to the HW struct
2254 * @dev_p: pointer to device capabilities structure
2255 * @cap: capability element to parse
2257 * Parse ICE_AQC_CAPS_VF for device capabilities.
2260 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2261 struct ice_aqc_list_caps_elem *cap)
2263 u32 number = le32_to_cpu(cap->number);
2265 dev_p->num_vfs_exposed = number;
2266 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2267 dev_p->num_vfs_exposed);
2271 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2272 * @hw: pointer to the HW struct
2273 * @dev_p: pointer to device capabilities structure
2274 * @cap: capability element to parse
2276 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2279 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2280 struct ice_aqc_list_caps_elem *cap)
2282 u32 number = le32_to_cpu(cap->number);
2284 dev_p->num_vsi_allocd_to_host = number;
2285 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2286 dev_p->num_vsi_allocd_to_host);
2290 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2291 * @hw: pointer to the HW struct
2292 * @dev_p: pointer to device capabilities structure
2293 * @cap: capability element to parse
2295 * Parse ICE_AQC_CAPS_1588 for device capabilities.
2298 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2299 struct ice_aqc_list_caps_elem *cap)
2301 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2302 u32 logical_id = le32_to_cpu(cap->logical_id);
2303 u32 phys_id = le32_to_cpu(cap->phys_id);
2304 u32 number = le32_to_cpu(cap->number);
2306 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2307 dev_p->common_cap.ieee_1588 = info->ena;
2309 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2310 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2311 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2313 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2314 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2315 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2317 info->ena_ports = logical_id;
2318 info->tmr_own_map = phys_id;
2320 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2321 dev_p->common_cap.ieee_1588);
2322 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2324 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2326 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2328 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2330 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2332 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2334 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2336 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2341 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2342 * @hw: pointer to the HW struct
2343 * @dev_p: pointer to device capabilities structure
2344 * @cap: capability element to parse
2346 * Parse ICE_AQC_CAPS_FD for device capabilities.
2349 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2350 struct ice_aqc_list_caps_elem *cap)
2352 u32 number = le32_to_cpu(cap->number);
2354 dev_p->num_flow_director_fltr = number;
2355 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2356 dev_p->num_flow_director_fltr);
2360 * ice_parse_dev_caps - Parse device capabilities
2361 * @hw: pointer to the HW struct
2362 * @dev_p: pointer to device capabilities structure
2363 * @buf: buffer containing the device capability records
2364 * @cap_count: the number of capabilities
2366 * Helper function to parse the device (0x000B) capabilities list. For
2367 * capabilities shared between device and function, this relies on
2368 * ice_parse_common_caps.
2370 * Loop through the list of provided capabilities and extract the relevant
2371 * data into the device capabilities structure.
2374 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2375 void *buf, u32 cap_count)
2377 struct ice_aqc_list_caps_elem *cap_resp;
2382 memset(dev_p, 0, sizeof(*dev_p));
2384 for (i = 0; i < cap_count; i++) {
2385 u16 cap = le16_to_cpu(cap_resp[i].cap);
2388 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2389 &cap_resp[i], "dev caps");
2392 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2393 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2395 case ICE_AQC_CAPS_VF:
2396 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2398 case ICE_AQC_CAPS_VSI:
2399 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2401 case ICE_AQC_CAPS_1588:
2402 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2404 case ICE_AQC_CAPS_FD:
2405 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2408 /* Don't list common capabilities as unknown */
2410 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2416 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2420 * ice_aq_list_caps - query function/device capabilities
2421 * @hw: pointer to the HW struct
2422 * @buf: a buffer to hold the capabilities
2423 * @buf_size: size of the buffer
2424 * @cap_count: if not NULL, set to the number of capabilities reported
2425 * @opc: capabilities type to discover, device or function
2426 * @cd: pointer to command details structure or NULL
2428 * Get the function (0x000A) or device (0x000B) capabilities description from
2429 * firmware and store it in the buffer.
2431 * If the cap_count pointer is not NULL, then it is set to the number of
2432 * capabilities firmware will report. Note that if the buffer size is too
2433 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2434 * cap_count will still be updated in this case. It is recommended that the
2435 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2436 * firmware could return) to avoid this.
2439 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2440 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2442 struct ice_aqc_list_caps *cmd;
2443 struct ice_aq_desc desc;
2444 enum ice_status status;
2446 cmd = &desc.params.get_cap;
2448 if (opc != ice_aqc_opc_list_func_caps &&
2449 opc != ice_aqc_opc_list_dev_caps)
2450 return ICE_ERR_PARAM;
2452 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2453 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2456 *cap_count = le32_to_cpu(cmd->count);
2462 * ice_discover_dev_caps - Read and extract device capabilities
2463 * @hw: pointer to the hardware structure
2464 * @dev_caps: pointer to device capabilities structure
2466 * Read the device capabilities and extract them into the dev_caps structure
2470 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2472 enum ice_status status;
2476 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2478 return ICE_ERR_NO_MEMORY;
2480 /* Although the driver doesn't know the number of capabilities the
2481 * device will return, we can simply send a 4KB buffer, the maximum
2482 * possible size that firmware can return.
2484 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2486 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2487 ice_aqc_opc_list_dev_caps, NULL);
2489 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2496 * ice_discover_func_caps - Read and extract function capabilities
2497 * @hw: pointer to the hardware structure
2498 * @func_caps: pointer to function capabilities structure
2500 * Read the function capabilities and extract them into the func_caps structure
2503 static enum ice_status
2504 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2506 enum ice_status status;
2510 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2512 return ICE_ERR_NO_MEMORY;
2514 /* Although the driver doesn't know the number of capabilities the
2515 * device will return, we can simply send a 4KB buffer, the maximum
2516 * possible size that firmware can return.
2518 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2520 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2521 ice_aqc_opc_list_func_caps, NULL);
2523 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2530 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2531 * @hw: pointer to the hardware structure
2533 void ice_set_safe_mode_caps(struct ice_hw *hw)
2535 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2536 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2537 struct ice_hw_common_caps cached_caps;
2540 /* cache some func_caps values that should be restored after memset */
2541 cached_caps = func_caps->common_cap;
2543 /* unset func capabilities */
2544 memset(func_caps, 0, sizeof(*func_caps));
2546 #define ICE_RESTORE_FUNC_CAP(name) \
2547 func_caps->common_cap.name = cached_caps.name
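/* e.g. ICE_RESTORE_FUNC_CAP(max_mtu) expands to
 *	func_caps->common_cap.max_mtu = cached_caps.max_mtu;
 */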
2549 /* restore cached values */
2550 ICE_RESTORE_FUNC_CAP(valid_functions);
2551 ICE_RESTORE_FUNC_CAP(txq_first_id);
2552 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2553 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2554 ICE_RESTORE_FUNC_CAP(max_mtu);
2555 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2556 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2557 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2558 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2560 /* one Tx and one Rx queue in safe mode */
2561 func_caps->common_cap.num_rxq = 1;
2562 func_caps->common_cap.num_txq = 1;
2564 /* two MSIX vectors, one for traffic and one for misc causes */
2565 func_caps->common_cap.num_msix_vectors = 2;
2566 func_caps->guar_num_vsi = 1;
2568 /* cache some dev_caps values that should be restored after memset */
2569 cached_caps = dev_caps->common_cap;
2570 num_funcs = dev_caps->num_funcs;
2572 /* unset dev capabilities */
2573 memset(dev_caps, 0, sizeof(*dev_caps));
2575 #define ICE_RESTORE_DEV_CAP(name) \
2576 dev_caps->common_cap.name = cached_caps.name
2578 /* restore cached values */
2579 ICE_RESTORE_DEV_CAP(valid_functions);
2580 ICE_RESTORE_DEV_CAP(txq_first_id);
2581 ICE_RESTORE_DEV_CAP(rxq_first_id);
2582 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2583 ICE_RESTORE_DEV_CAP(max_mtu);
2584 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2585 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2586 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2587 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2588 dev_caps->num_funcs = num_funcs;
2590 /* one Tx and one Rx queue per function in safe mode */
2591 dev_caps->common_cap.num_rxq = num_funcs;
2592 dev_caps->common_cap.num_txq = num_funcs;
2594 /* two MSIX vectors per function */
2595 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2599 * ice_get_caps - get info about the HW
2600 * @hw: pointer to the hardware structure
2602 enum ice_status ice_get_caps(struct ice_hw *hw)
2604 enum ice_status status;
2606 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2610 return ice_discover_func_caps(hw, &hw->func_caps);
2614 * ice_aq_manage_mac_write - manage MAC address write command
2615 * @hw: pointer to the HW struct
2616 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2617 * @flags: flags to control write behavior
2618 * @cd: pointer to command details structure or NULL
2620 * This function is used to write MAC address to the NVM (0x0108).
2623 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2624 struct ice_sq_cd *cd)
2626 struct ice_aqc_manage_mac_write *cmd;
2627 struct ice_aq_desc desc;
2629 cmd = &desc.params.mac_write;
2630 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2633 ether_addr_copy(cmd->mac_addr, mac_addr);
2635 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2639 * ice_aq_clear_pxe_mode
2640 * @hw: pointer to the HW struct
2642 * Tell the firmware that the driver is taking over from PXE (0x0110).
2644 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2646 struct ice_aq_desc desc;
2648 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2649 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2651 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2655 * ice_clear_pxe_mode - clear pxe operations mode
2656 * @hw: pointer to the HW struct
2658 * Make sure all PXE mode settings are cleared, including things
2659 * like descriptor fetch/write-back mode.
2661 void ice_clear_pxe_mode(struct ice_hw *hw)
2663 if (ice_check_sq_alive(hw, &hw->adminq))
2664 ice_aq_clear_pxe_mode(hw);
2668 * ice_get_link_speed_based_on_phy_type - returns link speed
2669 * @phy_type_low: lower part of phy_type
2670 * @phy_type_high: higher part of phy_type
2672 * This helper function will convert an entry in PHY type structure
2673 * [phy_type_low, phy_type_high] to its corresponding link speed.
2674 * Note: In the structure of [phy_type_low, phy_type_high], there should
2675 * be one bit set, as this function will convert one PHY type to its speed.
2677 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2678 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2681 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2683 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2684 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2686 switch (phy_type_low) {
2687 case ICE_PHY_TYPE_LOW_100BASE_TX:
2688 case ICE_PHY_TYPE_LOW_100M_SGMII:
2689 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2691 case ICE_PHY_TYPE_LOW_1000BASE_T:
2692 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2693 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2694 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2695 case ICE_PHY_TYPE_LOW_1G_SGMII:
2696 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2698 case ICE_PHY_TYPE_LOW_2500BASE_T:
2699 case ICE_PHY_TYPE_LOW_2500BASE_X:
2700 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2701 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2703 case ICE_PHY_TYPE_LOW_5GBASE_T:
2704 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2705 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2707 case ICE_PHY_TYPE_LOW_10GBASE_T:
2708 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2709 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2710 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2711 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2712 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2713 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2714 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2716 case ICE_PHY_TYPE_LOW_25GBASE_T:
2717 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2718 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2719 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2720 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2721 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2722 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2723 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2724 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2725 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2726 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2727 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2729 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2730 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2731 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2732 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2733 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2734 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2735 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2737 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2738 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2739 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2740 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2741 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2742 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2743 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2744 case ICE_PHY_TYPE_LOW_50G_AUI2:
2745 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2746 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2747 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2748 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2749 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2750 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2751 case ICE_PHY_TYPE_LOW_50G_AUI1:
2752 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2754 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2755 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2756 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2757 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2758 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2759 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2760 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2761 case ICE_PHY_TYPE_LOW_100G_AUI4:
2762 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2763 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2764 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2765 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2766 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2767 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2770 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2774 switch (phy_type_high) {
2775 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2776 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2777 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2778 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2779 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2780 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2783 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2787 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2788 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2789 return ICE_AQ_LINK_SPEED_UNKNOWN;
2790 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2791 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2792 return ICE_AQ_LINK_SPEED_UNKNOWN;
2793 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2794 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2795 return speed_phy_type_low;
2797 return speed_phy_type_high;
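/* Worked example (illustrative): a caller passing
 * phy_type_low = ICE_PHY_TYPE_LOW_25GBASE_SR and phy_type_high = 0 gets
 * ICE_AQ_LINK_SPEED_25GB back. A value with several bits set in one word,
 * or with bits set in both words, yields ICE_AQ_LINK_SPEED_UNKNOWN, since
 * the switch statements above only match single-bit values.
 */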
2801 * ice_update_phy_type
2802 * @phy_type_low: pointer to the lower part of phy_type
2803 * @phy_type_high: pointer to the higher part of phy_type
2804 * @link_speeds_bitmap: targeted link speeds bitmap
2806 * Note: For the format of link_speeds_bitmap, see
2807 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2808 * link_speeds_bitmap that includes multiple speeds.
2810 * Each entry in the [phy_type_low, phy_type_high] structure
2811 * represents a certain link speed. This helper function will turn on
2812 * bits in the [phy_type_low, phy_type_high] structure based on the
2813 * value of the link_speeds_bitmap input parameter.
2816 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2817 u16 link_speeds_bitmap)
2824 /* We first check with low part of phy_type */
2825 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2826 pt_low = BIT_ULL(index);
2827 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2829 if (link_speeds_bitmap & speed)
2830 *phy_type_low |= BIT_ULL(index);
2833 /* We then check with high part of phy_type */
2834 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2835 pt_high = BIT_ULL(index);
2836 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2838 if (link_speeds_bitmap & speed)
2839 *phy_type_high |= BIT_ULL(index);
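/* Usage sketch (illustrative): requesting all 25G PHY types
 *
 *	u64 low = 0, high = 0;
 *
 *	ice_update_phy_type(&low, &high, ICE_AQ_LINK_SPEED_25GB);
 *
 * sets every 25G bit (the 25GBASE-T/CR/SR/KR variants and 25G AUI) in low
 * and leaves high untouched, since all 25G PHY types live in the lower word.
 */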
2844 * ice_aq_set_phy_cfg
2845 * @hw: pointer to the HW struct
2846 * @pi: port info structure of the interested logical port
2847 * @cfg: structure with PHY configuration data to be set
2848 * @cd: pointer to command details structure or NULL
2850 * Set the various PHY configuration parameters supported on the Port.
2851 * One or more of the Set PHY config parameters may be ignored in an MFP
2852 * mode as the PF may not have the privilege to set some of the PHY Config
2853 * parameters. This status will be indicated by the command response (0x0601).
2856 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2857 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2859 struct ice_aq_desc desc;
2860 enum ice_status status;
2863 return ICE_ERR_PARAM;
2865 /* Ensure that only valid bits of cfg->caps can be turned on. */
2866 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2867 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2870 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2873 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2874 desc.params.set_phy.lport_num = pi->lport;
2875 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2877 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2878 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2879 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2880 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2881 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2882 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2883 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2884 cfg->low_power_ctrl_an);
2885 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2886 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2887 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2890 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2891 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2895 pi->phy.curr_user_phy_cfg = *cfg;
2901 * ice_update_link_info - update status of the HW network link
2902 * @pi: port info structure of the interested logical port
2904 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2906 struct ice_link_status *li;
2907 enum ice_status status;
2910 return ICE_ERR_PARAM;
2912 li = &pi->phy.link_info;
2914 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2918 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2919 struct ice_aqc_get_phy_caps_data *pcaps;
2923 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2926 return ICE_ERR_NO_MEMORY;
2928 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2931 devm_kfree(ice_hw_to_dev(hw), pcaps);
2938 * ice_cache_phy_user_req
2939 * @pi: port information structure
2940 * @cache_data: PHY logging data
2941 * @cache_mode: PHY logging mode
2943 * Log the user request on (FC, FEC, SPEED) for later use.
2946 ice_cache_phy_user_req(struct ice_port_info *pi,
2947 struct ice_phy_cache_mode_data cache_data,
2948 enum ice_phy_cache_mode cache_mode)
2953 switch (cache_mode) {
2955 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2957 case ICE_SPEED_MODE:
2958 pi->phy.curr_user_speed_req =
2959 cache_data.data.curr_user_speed_req;
2962 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2970 * ice_caps_to_fc_mode
2971 * @caps: PHY capabilities
2973 * Convert PHY FC capabilities to ice FC mode
2975 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2977 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2978 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2981 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2982 return ICE_FC_TX_PAUSE;
2984 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2985 return ICE_FC_RX_PAUSE;
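/* Summary of the caps-to-mode mapping (illustrative):
 *
 *	TX pause | RX pause | resulting mode
 *	---------+----------+-----------------
 *	   set   |   set    | ICE_FC_FULL
 *	   set   |  clear   | ICE_FC_TX_PAUSE
 *	  clear  |   set    | ICE_FC_RX_PAUSE
 *	  clear  |  clear   | ICE_FC_NONE
 */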
2991 * ice_caps_to_fec_mode
2992 * @caps: PHY capabilities
2993 * @fec_options: Link FEC options
2995 * Convert PHY FEC capabilities to ice FEC mode
2997 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2999 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3000 return ICE_FEC_AUTO;
3002 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3003 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3004 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3005 ICE_AQC_PHY_FEC_25G_KR_REQ))
3006 return ICE_FEC_BASER;
3008 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3009 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3010 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3013 return ICE_FEC_NONE;
3017 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3018 * @pi: port information structure
3019 * @cfg: PHY configuration data to set FC mode
3020 * @req_mode: FC mode to configure
3023 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3024 enum ice_fc_mode req_mode)
3026 struct ice_phy_cache_mode_data cache_data;
3027 u8 pause_mask = 0x0;
3030 return ICE_ERR_BAD_PTR;
3034 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3035 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3037 case ICE_FC_RX_PAUSE:
3038 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3040 case ICE_FC_TX_PAUSE:
3041 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3047 /* clear the old pause settings */
3048 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3049 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3051 /* set the new capabilities */
3052 cfg->caps |= pause_mask;
3054 /* Cache user FC request */
3055 cache_data.data.curr_user_fc_req = req_mode;
3056 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3063 * @pi: port information structure
3064 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3065 * @ena_auto_link_update: enable automatic link update
3067 * Set the requested flow control mode.
3070 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3072 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3073 struct ice_aqc_get_phy_caps_data *pcaps;
3074 enum ice_status status;
3077 if (!pi || !aq_failures)
3078 return ICE_ERR_BAD_PTR;
3083 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
3085 return ICE_ERR_NO_MEMORY;
3087 /* Get the current PHY config */
3088 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3091 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3095 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3097 /* Configure the set PHY data */
3098 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3102 /* If the capabilities have changed, then set the new config */
3103 if (cfg.caps != pcaps->caps) {
3104 int retry_count, retry_max = 10;
3106 /* Auto restart link so settings take effect */
3107 if (ena_auto_link_update)
3108 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3110 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3112 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3116 /* Update the link info
3117 * It sometimes takes a really long time for link to
3118 * come back from the atomic reset. Thus, we wait a little bit.
3121 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3122 status = ice_update_link_info(pi);
3131 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3135 devm_kfree(ice_hw_to_dev(hw), pcaps);
3140 * ice_phy_caps_equals_cfg
3141 * @phy_caps: PHY capabilities
3142 * @phy_cfg: PHY configuration
3144 * Helper function to determine if the PHY capabilities match the PHY configuration.
3148 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3149 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3151 u8 caps_mask, cfg_mask;
3153 if (!phy_caps || !phy_cfg)
3156 /* These bits are not common between capabilities and configuration.
3157 * Do not use them to determine equality.
3159 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3160 ICE_AQC_GET_PHY_EN_MOD_QUAL);
3161 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3163 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3164 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3165 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3166 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3167 phy_caps->eee_cap != phy_cfg->eee_cap ||
3168 phy_caps->eeer_value != phy_cfg->eeer_value ||
3169 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3176 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3177 * @pi: port information structure
3178 * @caps: PHY ability structure to copy data from
3179 * @cfg: PHY configuration structure to copy data to
3181 * Helper function to copy AQC PHY get ability data to the PHY set configuration data.
3185 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3186 struct ice_aqc_get_phy_caps_data *caps,
3187 struct ice_aqc_set_phy_cfg_data *cfg)
3189 if (!pi || !caps || !cfg)
3192 memset(cfg, 0, sizeof(*cfg));
3193 cfg->phy_type_low = caps->phy_type_low;
3194 cfg->phy_type_high = caps->phy_type_high;
3195 cfg->caps = caps->caps;
3196 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3197 cfg->eee_cap = caps->eee_cap;
3198 cfg->eeer_value = caps->eeer_value;
3199 cfg->link_fec_opt = caps->link_fec_options;
3200 cfg->module_compliance_enforcement =
3201 caps->module_compliance_enforcement;
3205 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3206 * @pi: port information structure
3207 * @cfg: PHY configuration data to set FEC mode
3208 * @fec: FEC mode to configure
3211 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3212 enum ice_fec_mode fec)
3214 struct ice_aqc_get_phy_caps_data *pcaps;
3215 enum ice_status status;
3219 return ICE_ERR_BAD_PTR;
3223 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3225 return ICE_ERR_NO_MEMORY;
3227 status = ice_aq_get_phy_caps(pi, false,
3228 (ice_fw_supports_report_dflt_cfg(hw) ?
3229 ICE_AQC_REPORT_DFLT_CFG :
3230 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3234 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3235 cfg->link_fec_opt = pcaps->link_fec_options;
3239 /* Clear RS bits, and AND BASE-R ability
3240 * bits and OR request bits.
3242 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3243 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3244 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3245 ICE_AQC_PHY_FEC_25G_KR_REQ;
3248 /* Clear BASE-R bits, and AND RS ability
3249 * bits and OR request bits.
3251 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3252 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3253 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3256 /* Clear all FEC option bits. */
3257 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3260 /* AND auto FEC bit, and all caps bits. */
3261 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3262 cfg->link_fec_opt |= pcaps->link_fec_options;
3265 status = ICE_ERR_PARAM;
3269 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3270 !ice_fw_supports_report_dflt_cfg(hw)) {
3271 struct ice_link_default_override_tlv tlv;
3273 if (ice_get_link_default_override(&tlv, pi))
3276 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3277 (tlv.options & ICE_LINK_OVERRIDE_EN))
3278 cfg->link_fec_opt = tlv.fec_options;
3288 * ice_get_link_status - get status of the HW network link
3289 * @pi: port information structure
3290 * @link_up: pointer to bool (true/false = linkup/linkdown)
3292 * Variable link_up is true if link is up, false if link is down.
3293 * The variable link_up is invalid if the returned status is non-zero. As a
3294 * result of this call, link status reporting becomes enabled.
3296 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3298 struct ice_phy_info *phy_info;
3299 enum ice_status status = 0;
3301 if (!pi || !link_up)
3302 return ICE_ERR_PARAM;
3304 phy_info = &pi->phy;
3306 if (phy_info->get_link_info) {
3307 status = ice_update_link_info(pi);
3310 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3314 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3320 * ice_aq_set_link_restart_an
3321 * @pi: pointer to the port information structure
3322 * @ena_link: if true: enable link, if false: disable link
3323 * @cd: pointer to command details structure or NULL
3325 * Sets up the link and restarts the Auto-Negotiation over the link.
3328 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3329 struct ice_sq_cd *cd)
3331 struct ice_aqc_restart_an *cmd;
3332 struct ice_aq_desc desc;
3334 cmd = &desc.params.restart_an;
3336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3338 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3339 cmd->lport_num = pi->lport;
3341 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3343 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3345 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3349 * ice_aq_set_event_mask
3350 * @hw: pointer to the HW struct
3351 * @port_num: port number of the physical function
3352 * @mask: event mask to be set
3353 * @cd: pointer to command details structure or NULL
3355 * Set event mask (0x0613)
3358 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3359 struct ice_sq_cd *cd)
3361 struct ice_aqc_set_event_mask *cmd;
3362 struct ice_aq_desc desc;
3364 cmd = &desc.params.set_event_mask;
3366 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3368 cmd->lport_num = port_num;
3370 cmd->event_mask = cpu_to_le16(mask);
3371 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3375 * ice_aq_set_mac_loopback
3376 * @hw: pointer to the HW struct
3377 * @ena_lpbk: Enable or Disable loopback
3378 * @cd: pointer to command details structure or NULL
3380 * Enable/disable loopback on a given port
3383 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3385 struct ice_aqc_set_mac_lb *cmd;
3386 struct ice_aq_desc desc;
3388 cmd = &desc.params.set_mac_lb;
3390 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3392 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3394 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3398 * ice_aq_set_port_id_led
3399 * @pi: pointer to the port information
3400 * @is_orig_mode: is this LED set to original mode (by the net-list)
3401 * @cd: pointer to command details structure or NULL
3403 * Set LED value for the given port (0x06e9)
3406 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3407 struct ice_sq_cd *cd)
3409 struct ice_aqc_set_port_id_led *cmd;
3410 struct ice_hw *hw = pi->hw;
3411 struct ice_aq_desc desc;
3413 cmd = &desc.params.set_port_id_led;
3415 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3418 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3420 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3422 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3427 * @hw: pointer to the HW struct
3428 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3429 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3430 * @mem_addr: I2C offset. Lower 8 bits are the address; upper 8 bits are zero padding.
3432 * @set_page: set or ignore the page
3433 * @data: pointer to data buffer to be read/written to the I2C device.
3434 * @length: 1-16 for read, 1 for write.
3435 * @write: 0 for read, 1 for write.
3436 * @cd: pointer to command details structure or NULL
3438 * Read/Write SFF EEPROM (0x06EE)
3441 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3442 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3443 bool write, struct ice_sq_cd *cd)
3445 struct ice_aqc_sff_eeprom *cmd;
3446 struct ice_aq_desc desc;
3447 enum ice_status status;
3449 if (!data || (mem_addr & 0xff00))
3450 return ICE_ERR_PARAM;
3452 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3453 cmd = &desc.params.read_write_sff_param;
3454 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3455 cmd->lport_num = (u8)(lport & 0xff);
3456 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3457 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3458 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3460 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3461 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3462 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3463 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3465 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3467 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
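/* Usage sketch (illustrative, error handling omitted): reading the first
 * 16 bytes of a module EEPROM at the conventional 0xA0 bus address on the
 * default logical port:
 *
 *	u8 data[16];
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, data,
 *				   sizeof(data), false, NULL);
 */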
3472 * __ice_aq_get_set_rss_lut
3473 * @hw: pointer to the hardware structure
3474 * @params: RSS LUT parameters
3475 * @set: set true to set the table, false to get the table
3477 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
3479 static enum ice_status
3480 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3482 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3483 struct ice_aqc_get_set_rss_lut *cmd_resp;
3484 struct ice_aq_desc desc;
3485 enum ice_status status;
3489 return ICE_ERR_PARAM;
3491 vsi_handle = params->vsi_handle;
3494 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3495 return ICE_ERR_PARAM;
3497 lut_size = params->lut_size;
3498 lut_type = params->lut_type;
3499 glob_lut_idx = params->global_lut_id;
3500 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3502 cmd_resp = &desc.params.get_set_rss_lut;
3505 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3506 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3508 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3511 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3512 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3513 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3514 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3517 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3518 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3519 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3520 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3521 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3524 status = ICE_ERR_PARAM;
3525 goto ice_aq_get_set_rss_lut_exit;
3528 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3529 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3530 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3533 goto ice_aq_get_set_rss_lut_send;
3534 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3536 goto ice_aq_get_set_rss_lut_send;
3538 goto ice_aq_get_set_rss_lut_send;
3541 /* LUT size is only valid for Global and PF table types */
3543 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3545 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3546 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3547 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3548 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3550 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3551 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3552 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3553 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3554 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3559 status = ICE_ERR_PARAM;
3560 goto ice_aq_get_set_rss_lut_exit;
3563 ice_aq_get_set_rss_lut_send:
3564 cmd_resp->flags = cpu_to_le16(flags);
3565 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3567 ice_aq_get_set_rss_lut_exit:
3572 * ice_aq_get_rss_lut
3573 * @hw: pointer to the hardware structure
3574 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3576 * get the RSS lookup table, PF or VSI type
3579 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3581 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3585 * ice_aq_set_rss_lut
3586 * @hw: pointer to the hardware structure
3587 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3589 * set the RSS lookup table, PF or VSI type
3592 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3594 return __ice_aq_get_set_rss_lut(hw, set_params, true);
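/* Usage sketch (illustrative): programming a 512-entry PF LUT. The field
 * names follow the parameter unpacking in __ice_aq_get_set_rss_lut above;
 * lut is assumed to point at a caller-owned table of lut_size bytes.
 *
 *	struct ice_aq_get_set_rss_lut_params params = {
 *		.vsi_handle = vsi_handle,
 *		.lut = lut,
 *		.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512,
 *		.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *		.global_lut_id = 0,
 *	};
 *
 *	status = ice_aq_set_rss_lut(hw, &params);
 */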
3598 * __ice_aq_get_set_rss_key
3599 * @hw: pointer to the HW struct
3600 * @vsi_id: VSI FW index
3601 * @key: pointer to key info struct
3602 * @set: set true to set the key, false to get the key
3604 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3607 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3608 struct ice_aqc_get_set_rss_keys *key,
3611 struct ice_aqc_get_set_rss_key *cmd_resp;
3612 u16 key_size = sizeof(*key);
3613 struct ice_aq_desc desc;
3615 cmd_resp = &desc.params.get_set_rss_key;
3618 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3619 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3621 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3624 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3625 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3626 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3627 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3629 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3633 * ice_aq_get_rss_key
3634 * @hw: pointer to the HW struct
3635 * @vsi_handle: software VSI handle
3636 * @key: pointer to key info struct
3638 * get the RSS key per VSI
3641 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3642 struct ice_aqc_get_set_rss_keys *key)
3644 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3645 return ICE_ERR_PARAM;
3647 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3652 * ice_aq_set_rss_key
3653 * @hw: pointer to the HW struct
3654 * @vsi_handle: software VSI handle
3655 * @keys: pointer to key info struct
3657 * set the RSS key per VSI
3660 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3661 struct ice_aqc_get_set_rss_keys *keys)
3663 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3664 return ICE_ERR_PARAM;
3666 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3671 * ice_aq_add_lan_txq
3672 * @hw: pointer to the hardware structure
3673 * @num_qgrps: Number of added queue groups
3674 * @qg_list: list of queue groups to be added
3675 * @buf_size: size of buffer for indirect command
3676 * @cd: pointer to command details structure or NULL
3678 * Add Tx LAN queue (0x0C30)
3681 * Prior to calling add Tx LAN queue:
3682 * Initialize the following as part of the Tx queue context:
3683 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3684 * Cache profile and Packet shaper profile.
3686 * After add Tx LAN queue AQ command is completed:
3687 * Interrupts should be associated with specific queues.
3688 * Association of Tx queue to Doorbell queue is not part of the Add LAN Tx queue flow.
3691 static enum ice_status
3692 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3693 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3694 struct ice_sq_cd *cd)
3696 struct ice_aqc_add_tx_qgrp *list;
3697 struct ice_aqc_add_txqs *cmd;
3698 struct ice_aq_desc desc;
3699 u16 i, sum_size = 0;
3701 cmd = &desc.params.add_txqs;
3703 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3706 return ICE_ERR_PARAM;
3708 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3709 return ICE_ERR_PARAM;
3711 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3712 sum_size += struct_size(list, txqs, list->num_txqs);
3713 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3717 if (buf_size != sum_size)
3718 return ICE_ERR_PARAM;
3720 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3722 cmd->num_qgrps = num_qgrps;
3724 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
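/* Illustrative example of the sizing rule enforced above: for a single
 * queue group that carries two Tx queues, the caller would pass
 *
 *	buf_size = struct_size(qg_list, txqs, 2);
 *
 * i.e. the flexible-array size of each group, summed over all groups.
 */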
3728 * ice_aq_dis_lan_txq
3729 * @hw: pointer to the hardware structure
3730 * @num_qgrps: number of groups in the list
3731 * @qg_list: the list of groups to disable
3732 * @buf_size: the total size of the qg_list buffer in bytes
3733 * @rst_src: if called due to reset, specifies the reset source
3734 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3735 * @cd: pointer to command details structure or NULL
3737 * Disable LAN Tx queue (0x0C31)
3739 static enum ice_status
3740 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3741 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3742 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3743 struct ice_sq_cd *cd)
3745 struct ice_aqc_dis_txq_item *item;
3746 struct ice_aqc_dis_txqs *cmd;
3747 struct ice_aq_desc desc;
3748 enum ice_status status;
3751 cmd = &desc.params.dis_txqs;
3752 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3754 /* qg_list can be NULL only in VM/VF reset flow */
3755 if (!qg_list && !rst_src)
3756 return ICE_ERR_PARAM;
3758 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3759 return ICE_ERR_PARAM;
3761 cmd->num_entries = num_qgrps;
3763 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3764 ICE_AQC_Q_DIS_TIMEOUT_M);
3768 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3769 cmd->vmvf_and_timeout |=
3770 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3773 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3774 /* In this case, FW expects vmvf_num to be absolute VF ID */
3775 cmd->vmvf_and_timeout |=
3776 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3777 ICE_AQC_Q_DIS_VMVF_NUM_M);
3784 /* flush pipe on time out */
3785 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3786 /* If no queue group info, we are in a reset flow. Issue the AQ */
3790 /* set RD bit to indicate that command buffer is provided by the driver
3791 * and it needs to be read by the firmware
3793 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3795 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3796 u16 item_size = struct_size(item, q_id, item->num_qs);
3798 /* If the num of queues is even, add 2 bytes of padding */
3799 if ((item->num_qs % 2) == 0)
3804 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3808 return ICE_ERR_PARAM;
3811 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3814 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3815 vmvf_num, hw->adminq.sq_last_status);
3817 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3818 le16_to_cpu(qg_list[0].q_id[0]),
3819 hw->adminq.sq_last_status);
3825 * ice_aq_add_rdma_qsets
3826 * @hw: pointer to the hardware structure
3827 * @num_qset_grps: Number of RDMA Qset groups
3828 * @qset_list: list of Qset groups to be added
3829 * @buf_size: size of buffer for indirect command
3830 * @cd: pointer to command details structure or NULL
3832 * Add Tx RDMA Qsets (0x0C33)
3835 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
3836 struct ice_aqc_add_rdma_qset_data *qset_list,
3837 u16 buf_size, struct ice_sq_cd *cd)
3839 struct ice_aqc_add_rdma_qset_data *list;
3840 struct ice_aqc_add_rdma_qset *cmd;
3841 struct ice_aq_desc desc;
3842 u16 i, sum_size = 0;
3844 cmd = &desc.params.add_rdma_qset;
3846 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
3848 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
3851 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
3852 u16 num_qsets = le16_to_cpu(list->num_qsets);
3854 sum_size += struct_size(list, rdma_qsets, num_qsets);
3855 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
3859 if (buf_size != sum_size)
3862 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3864 cmd->num_qset_grps = num_qset_grps;
3866 return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
3870 /* End of FW Admin Queue command wrappers */
3873 * ice_write_byte - write a byte to a packed context structure
3874 * @src_ctx: the context structure to read from
3875 * @dest_ctx: the context to be written to
3876 * @ce_info: a description of the struct to be filled
3879 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3881 u8 src_byte, dest_byte, mask;
3885 /* copy from the next struct field */
3886 from = src_ctx + ce_info->offset;
3888 /* prepare the bits and mask */
3889 shift_width = ce_info->lsb % 8;
3890 mask = (u8)(BIT(ce_info->width) - 1);
3895 /* shift to correct alignment */
3896 mask <<= shift_width;
3897 src_byte <<= shift_width;
3899 /* get the current bits from the target bit string */
3900 dest = dest_ctx + (ce_info->lsb / 8);
3902 memcpy(&dest_byte, dest, sizeof(dest_byte));
3904 dest_byte &= ~mask; /* get the bits not changing */
3905 dest_byte |= src_byte; /* add in the new bits */
3907 /* put it all back */
3908 memcpy(dest, &dest_byte, sizeof(dest_byte));
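/* Worked example (illustrative): packing a field with lsb = 3 and
 * width = 4 whose source byte holds the value 0x5:
 *
 *	shift_width = 3 % 8;	-> 3
 *	mask = BIT(4) - 1;	-> 0x0f
 *	mask <<= 3;		-> 0x78
 *	src_byte <<= 3;		-> 0x28
 *
 * The read-modify-write above then leaves all destination bits outside
 * 6:3 untouched and sets bits 6:3 to 0101.
 */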
3912 * ice_write_word - write a word to a packed context structure
3913 * @src_ctx: the context structure to read from
3914 * @dest_ctx: the context to be written to
3915 * @ce_info: a description of the struct to be filled
3918 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3925 /* copy from the next struct field */
3926 from = src_ctx + ce_info->offset;
3928 /* prepare the bits and mask */
3929 shift_width = ce_info->lsb % 8;
3930 mask = BIT(ce_info->width) - 1;
3932 /* don't swizzle the bits until after the mask because the mask bits
3933 * will be in a different bit position on big endian machines
3935 src_word = *(u16 *)from;
3938 /* shift to correct alignment */
3939 mask <<= shift_width;
3940 src_word <<= shift_width;
3942 /* get the current bits from the target bit string */
3943 dest = dest_ctx + (ce_info->lsb / 8);
3945 memcpy(&dest_word, dest, sizeof(dest_word));
3947 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
3948 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
3950 /* put it all back */
3951 memcpy(dest, &dest_word, sizeof(dest_word));
/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}

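/* Illustrative sketch (not part of the driver): how callers typically drive
 * ice_set_ctx(). A field-description table maps each member of an unpacked
 * context structure to its bit position in the packed HW image; the real
 * driver builds such tables (e.g. ice_tlan_ctx_info) with the
 * ICE_CTX_STORE() macro. The two-field structure, table, and function
 * names below are hypothetical and only meant to show the shape of the
 * call.
 */
struct ice_example_ctx {
	u8 pf_num;	/* occupies bits 0-2 of the packed image */
	u16 src_vsi;	/* occupies bits 3-12 of the packed image */
};

/* Field				     Width	LSB */
static const struct ice_ctx_ele ice_example_ctx_info[] = {
	ICE_CTX_STORE(ice_example_ctx, pf_num,	3,	0),
	ICE_CTX_STORE(ice_example_ctx, src_vsi,	10,	3),
	{ 0 }
};

static enum ice_status __maybe_unused
ice_example_pack_ctx(struct ice_hw *hw, u8 *dest)
{
	struct ice_example_ctx ctx = { .pf_num = 5, .src_vsi = 42 };

	/* walks ice_example_ctx_info, packing ctx field by field into dest */
	return ice_set_ctx(hw, (u8 *)&ctx, dest, ice_example_ctx_info);
}
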
/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
static struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bits 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 * Bits 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

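/* Illustrative sketch (not part of the driver): enabling a single LAN Tx
 * queue with the function above. The function only accepts one queue group
 * containing one queue, so the buffer is sized for exactly one entry. The
 * helper name and the hw_txq_id plumbing here are hypothetical; in-tree,
 * the caller also packs the Tx queue context into txqs[0].txq_ctx with
 * ice_set_ctx() before issuing the command.
 */
static enum ice_status __maybe_unused
ice_example_ena_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		    u16 q_handle, u16 hw_txq_id)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	u16 buf_len = struct_size(qg_buf, txqs, 1);
	enum ice_status status;

	qg_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!qg_buf)
		return ICE_ERR_NO_MEMORY;

	qg_buf->num_txqs = 1;
	qg_buf->txqs[0].txq_id = cpu_to_le16(hw_txq_id);
	/* ... pack the Tx queue context into qg_buf->txqs[0].txq_ctx ... */

	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
				 buf_len, NULL);
	kfree(qg_buf);
	return status;
}
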
/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
						  max_rdmaqs,
						  ICE_SCHED_NODE_OWNER_RDMA));
}

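/* Illustrative sketch (not part of the driver): a typical call into the
 * function above. The per-TC maximum array is indexed by TC number, and TCs
 * that are disabled in tc_bitmap are skipped by the scheduler configuration.
 * The helper name and the Qset count are hypothetical.
 */
static int __maybe_unused
ice_example_cfg_rdma(struct ice_port_info *pi, u16 vsi_handle)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };

	/* allow up to 16 RDMA Qsets on TC 0 only (tc_bitmap = BIT(0)) */
	max_rdmaqs[0] = 16;

	return ice_cfg_vsi_rdma(pi, vsi_handle, BIT(0), max_rdmaqs);
}
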
/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qsets
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					    &node);
		if (status) {
			ret = ice_status_to_errno(status);
			break;
		}
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

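/* Illustrative sketch (not part of the driver): adding one RDMA Qset with
 * the function above. The Qset ID used here is hypothetical; on success,
 * qset_teid holds the scheduler node TEID that a later
 * ice_dis_vsi_rdma_qset() call needs in order to tear the Qset down.
 */
static int __maybe_unused
ice_example_ena_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
	u16 qset_id = 0;	/* hypothetical Tx Qset ID */
	u32 qset_teid;

	return ice_ena_vsi_rdma_qset(pi, vsi_handle, tc, &qset_id, 1,
				     &qset_teid);
}
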
/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	struct ice_aqc_dis_txq_item *qg_list;
	enum ice_status status = 0;
	struct ice_hw *hw;
	u16 qg_size;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	qg_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(qg_size, GFP_KERNEL);
	if (!qg_list)
		return -ENOMEM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return ice_status_to_errno(status);
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries to the replay_rules list; this
	 * allows rule entries to be added back to filt_rules, which is the
	 * operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

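/* Illustrative sketch (not part of the driver): the replay sequence after a
 * reset. The main VSI must be replayed first, since it triggers
 * ice_replay_pre_init(); the remaining VSIs follow, and ice_replay_post()
 * then drops the now-stale replay rule copies. The vsi_handles array and
 * the helper name are hypothetical stand-ins for the driver's own VSI
 * bookkeeping.
 */
static enum ice_status __maybe_unused
ice_example_replay_all(struct ice_hw *hw, u16 *vsi_handles, int num_vsi)
{
	enum ice_status status;
	int i;

	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
	if (status)
		return status;

	for (i = 0; i < num_vsi; i++) {
		if (vsi_handles[i] == ICE_MAIN_VSI_HANDLE)
			continue;
		status = ice_replay_vsi(hw, vsi_handles[i]);
		if (status)
			return status;
	}

	ice_replay_post(hw);
	return 0;
}
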
/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

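/* Worked example for the 40-bit roll-over handling above (illustrative
 * values): with *prev_stat = 0xFFFFFFFFFC (four counts below the 40-bit
 * limit) and a new reading of 0x2, new_data < *prev_stat, so the counter
 * wrapped and the true delta is (0x2 + BIT_ULL(40)) - 0xFFFFFFFFFC = 6.
 */
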
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_set_driver_param - Set driver parameter to share via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to set
 * @value: the value to set the parameter to
 * @cd: pointer to command details structure or NULL
 *
 * Set the value of one of the software defined parameters. All PFs connected
 * to this device can read the value using ice_aq_get_driver_param.
 *
 * Note that firmware provides no synchronization or locking, and will not
 * save the parameter value during a device reset. It is expected that
 * a single PF will write the parameter value, while all other PFs will only
 * read it.
 */
int
ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
	cmd->param_indx = idx;
	cmd->param_val = cpu_to_le32(value);

	return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
}

/**
 * ice_aq_get_driver_param - Get driver parameter shared via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to get
 * @value: storage to return the shared parameter
 * @cd: pointer to command details structure or NULL
 *
 * Get the value of one of the software defined parameters.
 *
 * Note that firmware provides no synchronization or locking. It is expected
 * that only a single PF will write a given parameter.
 */
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
	cmd->param_indx = idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return ice_status_to_errno(status);

	*value = le32_to_cpu(cmd->param_val);

	return 0;
}

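/* Illustrative sketch (not part of the driver): the intended
 * single-writer/many-reader pattern for the two functions above. The helper
 * name, the value written, and the choice of parameter slot are all
 * hypothetical; since firmware provides no locking, exactly one PF is
 * expected to write a given index while the others only read it.
 */
static int __maybe_unused
ice_example_share_value(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			bool owner)
{
	u32 val;
	int err;

	if (owner)
		/* exactly one PF is expected to write a given parameter */
		return ice_aq_set_driver_param(hw, idx, 0x1234, NULL);

	/* every other PF only reads it */
	err = ice_aq_get_driver_param(hw, idx, &val, NULL);
	if (err)
		return err;

	return val == 0x1234 ? 0 : -EAGAIN;
}
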
/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}

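/* Worked example for the version check above, using hypothetical threshold
 * values MAJ = 1, MIN = 5, PATCH = 2: FW API versions 1.6.0 and 2.0.0 pass
 * (newer minor or major), 1.5.2 and 1.5.7 pass (same major.minor, patch
 * high enough), while 1.5.1 and 1.4.9 fail. The real thresholds are the
 * ICE_FW_API_LINK_OVERRIDE_* constants.
 */
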
/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}

	return false;
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

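/* Illustrative sketch (not part of the driver): callers are expected to
 * gate the filter-control command above on firmware support. The helper
 * name is hypothetical; note that vsi_num must be the absolute HW VSI
 * index, not the software VSI handle.
 */
static enum ice_status __maybe_unused
ice_example_add_lldp_fltr(struct ice_hw *hw, u16 vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return ICE_ERR_NOT_SUPPORTED;

	return ice_lldp_fltr_add_remove(hw, vsi_num, true);
}
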
/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		return true;
	}

	return false;
}