// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */

#include "ice_virtchnl.h"
#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
#include "ice_flex_pipe.h"
#include "ice_dcb_lib.h"

#define FIELD_SELECTOR(proto_hdr_field) \
	BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
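
/* Illustrative note (based on the virtchnl.h encoding): a field enum such as
 * VIRTCHNL_PROTO_HDR_IPV4_SRC carries its parent header type in the high
 * bits and the field's bit position in the low bits, so masking with
 * PROTO_HDR_FIELD_MASK keeps just that position and FIELD_SELECTOR() turns
 * it into a single selector bit within the header's 32-bit field bitmap.
 */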

struct ice_vc_hdr_match_type {
	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};

static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
					ICE_FLOW_SEG_HDR_GTPU_DWN},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
					ICE_FLOW_SEG_HDR_GTPU_UP},
	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
};

struct ice_vc_hash_field_match_type {
	u32 vc_hdr;		/* virtchnl headers
				 * (VIRTCHNL_PROTO_HDR_XXX)
				 */
	u32 vc_hash_field;	/* virtchnl hash fields selector
				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
				 */
	u64 ice_hash_field;	/* ice hash fields
				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
				 */
};

static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		ICE_FLOW_HASH_ETH},
	{VIRTCHNL_PROTO_HDR_ETH,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
	{VIRTCHNL_PROTO_HDR_S_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
	{VIRTCHNL_PROTO_HDR_C_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
	{VIRTCHNL_PROTO_HDR_PPPOE,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
	{VIRTCHNL_PROTO_HDR_L2TPV3,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
};
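
/* Reading the table: a VF that wants IPv4 flows hashed on both addresses
 * sends VIRTCHNL_PROTO_HDR_IPV4 with the SRC and DST selectors set, which
 * the entry above maps to ICE_FLOW_HASH_IPV4 for the flow engine; selector
 * combinations without an exact entry simply do not match.
 */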

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_vc_notify_vf_link_state(vf);
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!ice_has_vfs(pf))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_send_msg_to_vf - Send message to VF
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 */
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct device *dev;
	struct ice_pf *pf;
	int aq_ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);

	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
		dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
			 vf->vf_id, aq_ret,
			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
		return -EIO;
	}

	return 0;
}

/**
 * ice_vc_get_ver_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 */
static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
				     sizeof(struct virtchnl_version_info));
}

/**
 * ice_vc_get_max_frame_size - get max frame size allowed for VF
 * @vf: VF used to determine max frame size
 *
 * Max frame size is determined based on the current port's max frame size and
 * whether a port VLAN is configured on this VF. The VF is not aware whether
 * it's in a port VLAN so the PF needs to account for this in max frame size
 * checks and sending the max frame size to the VF.
 */
static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	u16 max_frame_size;

	max_frame_size = pi->phy.link_info.max_frame_size;

	if (ice_vf_is_port_vlan_ena(vf))
		max_frame_size -= VLAN_HLEN;

	return max_frame_size;
}
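
/* Example: when a port VLAN is active the reported max frame shrinks by
 * VLAN_HLEN (4 bytes), so a maximally-sized VF frame still fits on the wire
 * once the PF inserts the port VLAN tag.
 */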

/**
 * ice_vc_get_vlan_caps
 * @hw: pointer to the hw
 * @vf: pointer to the VF info
 * @vsi: pointer to the VSI
 * @driver_caps: current driver caps
 *
 * Return 0 if there is no VLAN caps supported, or VLAN caps value
 */
static u32
ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
		     u32 driver_caps)
{
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		/* In switchdev setting VLAN from VF isn't supported */
		return 0;

	if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		/* VLAN offloads based on current device configuration */
		return VIRTCHNL_VF_OFFLOAD_VLAN_V2;
	} else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
		/* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
		 * these two conditions, which amounts to guest VLAN filtering
		 * and offloads being based on the inner VLAN or the
		 * inner/single VLAN respectively and don't allow VF to
		 * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
		 */
		if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
			return VIRTCHNL_VF_OFFLOAD_VLAN;
		} else if (!ice_is_dvm_ena(hw) &&
			   !ice_vf_is_port_vlan_ena(vf)) {
			/* configure backward compatible support for VFs that
			 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
			 * configured in SVM, and no port VLAN is configured
			 */
			ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
			return VIRTCHNL_VF_OFFLOAD_VLAN;
		} else if (ice_is_dvm_ena(hw)) {
			/* configure software offloaded VLAN support when DVM
			 * is enabled, but no port VLAN is enabled
			 */
			ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
		}
	}

	return 0;
}

/**
 * ice_vc_get_vf_res_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_hw *hw = &vf->pf->hw;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (ice_check_vf_init(vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = sizeof(struct virtchnl_vf_resource);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
						    vf->driver_caps);

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;

	vfres->num_vsis = 1;
	/* Tx and Rx queue are equal for VF */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = vf->pf->vfs.num_msix_per;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
	vfres->max_mtu = ice_vc_get_max_frame_size(vf);

	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->hw_lan_addr.addr);

	/* match guest capabilities */
	vf->driver_caps = vfres->vf_cap_flags;

	ice_vc_set_caps_allowlist(vf);
	ice_vc_set_working_allowlist(vf);

	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * ice_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 */
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
		ice_reset_vf(vf, 0);
}

/**
 * ice_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI ID
 *
 * check for the valid VSI ID
 */
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_find_vsi(pf, vsi_id);

	return (vsi && (vsi->vf == vf));
}

/**
 * ice_vc_isvalid_q_id
 * @vf: pointer to the VF info
 * @vsi_id: VSI ID
 * @qid: VSI relative queue ID
 *
 * check for the valid queue ID
 */
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
	struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
	/* allocated Tx and Rx queues should be always equal for VF VSI */
	return (vsi && (qid < vsi->alloc_txq));
}

/**
 * ice_vc_isvalid_ring_len
 * @ring_len: length of ring
 *
 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
 * or zero
 */
static bool ice_vc_isvalid_ring_len(u16 ring_len)
{
	return ring_len == 0 ||
	       (ring_len >= ICE_MIN_NUM_DESC &&
		ring_len <= ICE_MAX_NUM_DESC &&
		!(ring_len % ICE_REQ_DESC_MULTIPLE));
}
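
/* Example: a ring_len of 0 means "leave this ring unconfigured"; any nonzero
 * request must land on an ICE_REQ_DESC_MULTIPLE boundary (32 descriptors in
 * this driver's headers) and stay within the min/max descriptor limits.
 */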

/**
 * ice_vc_validate_pattern
 * @vf: pointer to the VF info
 * @proto: virtchnl protocol headers
 *
 * validate the pattern is supported or not.
 *
 * Return: true on success, false on error.
 */
bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
{
	bool is_ipv4 = false;
	bool is_ipv6 = false;
	bool is_udp = false;
	u16 ptype = -1;
	int i = 0;

	while (i < proto->count &&
	       proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
		switch (proto->proto_hdr[i].type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			ptype = ICE_PTYPE_MAC_PAY;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			ptype = ICE_PTYPE_IPV4_PAY;
			is_ipv4 = true;
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ptype = ICE_PTYPE_IPV6_PAY;
			is_ipv6 = true;
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			if (is_ipv4)
				ptype = ICE_PTYPE_IPV4_UDP_PAY;
			else if (is_ipv6)
				ptype = ICE_PTYPE_IPV6_UDP_PAY;
			is_udp = true;
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			if (is_ipv4)
				ptype = ICE_PTYPE_IPV4_TCP_PAY;
			else if (is_ipv6)
				ptype = ICE_PTYPE_IPV6_TCP_PAY;
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			if (is_ipv4)
				ptype = ICE_PTYPE_IPV4_SCTP_PAY;
			else if (is_ipv6)
				ptype = ICE_PTYPE_IPV6_SCTP_PAY;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_GTPU;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_GTPU;
			goto out;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_L2TPV3;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_L2TPV3;
			goto out;
		case VIRTCHNL_PROTO_HDR_ESP:
			if (is_ipv4)
				ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
						ICE_MAC_IPV4_ESP;
			else if (is_ipv6)
				ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
						ICE_MAC_IPV6_ESP;
			goto out;
		case VIRTCHNL_PROTO_HDR_AH:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_AH;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_AH;
			goto out;
		case VIRTCHNL_PROTO_HDR_PFCP:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_PFCP_SESSION;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_PFCP_SESSION;
			goto out;
		default:
			break;
		}
		i++;
	}

out:
	return ice_hw_ptype_ena(&vf->pf->hw, ptype);
}

/**
 * ice_vc_parse_rss_cfg - parses hash fields and headers from
 * a specific virtchnl RSS cfg
 * @hw: pointer to the hardware
 * @rss_cfg: pointer to the virtchnl RSS cfg
 * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
 * to configure
 * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
 *
 * Return true if all the protocol header and hash fields in the RSS cfg could
 * be parsed, else return false
 *
 * This function parses the virtchnl RSS cfg to be the intended
 * hash fields and the intended header for RSS configuration
 */
static bool
ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
		     u32 *addl_hdrs, u64 *hash_flds)
{
	const struct ice_vc_hash_field_match_type *hf_list;
	const struct ice_vc_hdr_match_type *hdr_list;
	int i, hf_list_len, hdr_list_len;

	hf_list = ice_vc_hash_field_list;
	hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
	hdr_list = ice_vc_hdr_list;
	hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);

	for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
		struct virtchnl_proto_hdr *proto_hdr =
				&rss_cfg->proto_hdrs.proto_hdr[i];
		bool hdr_found = false;
		int j;

		/* Find matched ice headers according to virtchnl headers. */
		for (j = 0; j < hdr_list_len; j++) {
			struct ice_vc_hdr_match_type hdr_map = hdr_list[j];

			if (proto_hdr->type == hdr_map.vc_hdr) {
				*addl_hdrs |= hdr_map.ice_hdr;
				hdr_found = true;
			}
		}

		if (!hdr_found)
			return false;

		/* Find matched ice hash fields according to
		 * virtchnl hash fields.
		 */
		for (j = 0; j < hf_list_len; j++) {
			struct ice_vc_hash_field_match_type hf_map = hf_list[j];

			if (proto_hdr->type == hf_map.vc_hdr &&
			    proto_hdr->field_selector == hf_map.vc_hash_field) {
				*hash_flds |= hf_map.ice_hash_field;
				break;
			}
		}
	}

	return true;
}
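
/* Note the exact match on field_selector above: a virtchnl selector
 * combination only produces hash bits when the table carries that precise
 * entry; an unlisted combination contributes nothing to *hash_flds.
 */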

/**
 * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
 * RSS offloads
 * @caps: VF driver negotiated capabilities
 *
 * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
 * else return false
 */
static bool ice_vf_adv_rss_offload_ena(u32 caps)
{
	return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
}

/**
 * ice_vc_handle_rss_cfg
 * @vf: pointer to the VF info
 * @msg: pointer to the message buffer
 * @add: add a RSS config if true, otherwise delete a RSS config
 *
 * This function adds/deletes a RSS config
 */
static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
{
	u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
	struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_hw *hw = &vf->pf->hw;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
		goto error_param;
	}

	if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
		dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
	    rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
	    rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
		struct ice_vsi_ctx *ctx;
		u8 lut_type, hash_type;
		int status;

		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
				ICE_AQ_VSI_Q_OPT_RSS_TPLZ;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx) {
			v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
			goto error_param;
		}

		ctx->info.q_opt_rss = ((lut_type <<
					ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				       ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
				       (hash_type &
					ICE_AQ_VSI_Q_OPT_RSS_HASH_M);

		/* Preserve existing queueing option setting */
		ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
					ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
		ctx->info.q_opt_tc = vsi->info.q_opt_tc;
		ctx->info.q_opt_flags = vsi->info.q_opt_rss;

		ctx->info.valid_sections =
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);

		status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
		if (status) {
			dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
				status, ice_aq_str(hw->adminq.sq_last_status));
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		} else {
			vsi->info.q_opt_rss = ctx->info.q_opt_rss;
		}

		kfree(ctx);
	} else {
		u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
		u64 hash_flds = ICE_HASH_INVALID;

		if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
					  &hash_flds)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		if (add) {
			if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
					    addl_hdrs)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
					vsi->vsi_num, v_ret);
			}
		} else {
			int status;

			status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
						 addl_hdrs);
			/* We just ignore -ENOENT, because if two configurations
			 * share the same profile remove one of them actually
			 * removes both, since the profile is deleted.
			 */
			if (status && status != -ENOENT) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
					vf->vf_id, status);
			}
		}
	}

error_param:
	return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
}

/**
 * ice_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key
 */
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_set_rss_key(vsi, vrk->key))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS LUT
 */
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_cfg_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure VF VSIs promiscuous mode
 */
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	bool rm_promisc, alluni = false, allmulti = false;
	struct virtchnl_promisc_info *info =
		(struct virtchnl_promisc_info *)msg;
	struct ice_vsi_vlan_ops *vlan_ops;
	int mcast_err = 0, ucast_err = 0;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	u8 mcast_m, ucast_m;
	struct device *dev;
	int ret = 0;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	dev = ice_pf_to_dev(pf);
	if (!ice_is_vf_trusted(vf)) {
		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Leave v_ret alone, lie to the VF on purpose. */
		goto error_param;
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;

	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	rm_promisc = !allmulti && !alluni;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	if (rm_promisc)
		ret = vlan_ops->ena_rx_filtering(vsi);
	else
		ret = vlan_ops->dis_rx_filtering(vsi);
	if (ret) {
		dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);

	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
		if (alluni) {
			/* in this case we're turning on promiscuous mode */
			ret = ice_set_dflt_vsi(vsi);
		} else {
			/* in this case we're turning off promiscuous mode */
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		}

		/* in this case we're turning on/off only
		 * allmulticast
		 */
		if (allmulti)
			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
		else
			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);

		if (ret) {
			dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
				vf->vf_id, ret);
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto error_param;
		}
	} else {
		if (alluni)
			ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
		else
			ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);

		if (allmulti)
			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
		else
			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);

		if (ucast_err || mcast_err)
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	}

	if (!mcast_err) {
		if (allmulti &&
		    !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
				 vf->vf_id);
		else if (!allmulti &&
			 test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
					    vf->vf_states))
			dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
				 vf->vf_id);
	} else {
		dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
			vf->vf_id, mcast_err);
	}

	if (!ucast_err) {
		if (alluni &&
		    !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
				 vf->vf_id);
		else if (!alluni &&
			 test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
					    vf->vf_states))
			dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
				 vf->vf_id);
	} else {
		dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
			vf->vf_id, ucast_err);
	}

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				     v_ret, NULL, 0);
}

/**
 * ice_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to get VSI stats
 */
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_eth_stats stats = { 0 };
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	ice_update_eth_stats(vsi);

	stats = vsi->eth_stats;

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
				     (u8 *)&stats, sizeof(stats));
}

/**
 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
 *
 * Return true on successful validation, else false
 */
static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
	if ((!vqs->rx_queues && !vqs->tx_queues) ||
	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
		return false;

	return true;
}
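
/* Example: rx_queues = BIT(0) | BIT(1) selects Rx queues 0 and 1; bitmaps
 * with any bit at or above ICE_MAX_RSS_QS_PER_VF set, or two empty bitmaps,
 * are rejected.
 */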

/**
 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
 * @vsi: VSI of the VF to configure
 * @q_idx: VF queue index used to determine the queue in the PF's space
 */
static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
	struct ice_hw *hw = &vsi->back->hw;
	u32 pfq = vsi->txq_map[q_idx];
	u32 reg;

	reg = rd32(hw, QINT_TQCTL(pfq));

	/* MSI-X index 0 in the VF's space is always for the OICR, which means
	 * this is most likely a poll mode VF driver, so don't enable an
	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
	 */
	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
		return;

	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
}

/**
 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
 * @vsi: VSI of the VF to configure
 * @q_idx: VF queue index used to determine the queue in the PF's space
 */
static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
	struct ice_hw *hw = &vsi->back->hw;
	u32 pfq = vsi->rxq_map[q_idx];
	u32 reg;

	reg = rd32(hw, QINT_RQCTL(pfq));

	/* MSI-X index 0 in the VF's space is always for the OICR, which means
	 * this is most likely a poll mode VF driver, so don't enable an
	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
	 */
	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
		return;

	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
}

/**
 * ice_vc_ena_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 */
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct ice_vsi *vsi;
	unsigned long q_map;
	u16 vf_q_id = 0;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	q_map = vqs->rx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->rxq_ena))
			continue;

		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
				vf_q_id, vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
		set_bit(vf_q_id, vf->rxq_ena);
	}

	q_map = vqs->tx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->txq_ena))
			continue;

		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
		set_bit(vf_q_id, vf->txq_ena);
	}

	/* Set flag to indicate that queues are enabled */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
				     NULL, 0);
}

/**
 * ice_vf_vsi_dis_single_txq - disable a single Tx queue
 * @vf: VF to disable queue for
 * @vsi: VSI for the VF
 * @q_id: VF relative (0-based) queue ID
 *
 * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
 * disabled then clear q_id bit in the enabled queues bitmap and return
 * success. Otherwise return error.
 */
static int
ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
{
	struct ice_txq_meta txq_meta = { 0 };
	struct ice_tx_ring *ring;
	int err;

	if (!test_bit(q_id, vf->txq_ena))
		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
			q_id, vsi->vsi_num);

	ring = vsi->tx_rings[q_id];
	if (!ring)
		return -EINVAL;

	ice_fill_txq_meta(vsi, ring, &txq_meta);

	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
			q_id, vsi->vsi_num);
		return err;
	}

	/* Clear enabled queues flag */
	clear_bit(q_id, vf->txq_ena);

	return 0;
}

/**
 * ice_vc_dis_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific queue(s)
 */
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct ice_vsi *vsi;
	unsigned long q_map;
	u16 vf_q_id;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vqs->tx_queues) {
		q_map = vqs->tx_queues;

		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}
		}
	}

	q_map = vqs->rx_queues;
	/* speed up Rx queue disable by batching them if possible */
	if (q_map &&
	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
		if (ice_vsi_stop_all_rx_rings(vsi)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
				vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	} else if (q_map) {
		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->rxq_ena))
				continue;

			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
						     true)) {
				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->rxq_ena);
		}
	}

	/* Clear enabled queues flag */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
				     NULL, 0);
}

/**
 * ice_cfg_interrupt
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @vector_id: vector ID
 * @map: vector map for mapping vectors to queues
 * @q_vector: structure for interrupt vector
 * configure the IRQ to queue map
 */
static int
ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
		  struct virtchnl_vector_map *map,
		  struct ice_q_vector *q_vector)
{
	u16 vsi_q_id, vsi_q_id_idx;
	unsigned long qmap;

	q_vector->num_ring_rx = 0;
	q_vector->num_ring_tx = 0;

	qmap = map->rxq_map;
	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
		vsi_q_id = vsi_q_id_idx;

		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
			return VIRTCHNL_STATUS_ERR_PARAM;

		q_vector->num_ring_rx++;
		q_vector->rx.itr_idx = map->rxitr_idx;
		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
				      q_vector->rx.itr_idx);
	}

	qmap = map->txq_map;
	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
		vsi_q_id = vsi_q_id_idx;

		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
			return VIRTCHNL_STATUS_ERR_PARAM;

		q_vector->num_ring_tx++;
		q_vector->tx.itr_idx = map->txitr_idx;
		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
				      q_vector->tx.itr_idx);
	}

	return VIRTCHNL_STATUS_SUCCESS;
}

/**
 * ice_vc_cfg_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the IRQ to queue map
 */
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	u16 num_q_vectors_mapped, vsi_id, vector_id;
	struct virtchnl_irq_map_info *irqmap_info;
	struct virtchnl_vector_map *map;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int i;

	irqmap_info = (struct virtchnl_irq_map_info *)msg;
	num_q_vectors_mapped = irqmap_info->num_vectors;

	/* Check to make sure number of VF vectors mapped is not greater than
	 * number of VF vectors originally allocated, and check that
	 * there is actually at least a single VF queue vector mapped
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    pf->vfs.num_msix_per < num_q_vectors_mapped ||
	    !num_q_vectors_mapped) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < num_q_vectors_mapped; i++) {
		struct ice_q_vector *q_vector;

		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* vector_id is always 0-based for each VF, and can never be
		 * larger than or equal to the max allowed interrupts per VF
		 */
		if (!(vector_id < pf->vfs.num_msix_per) ||
		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
		    (!vector_id && (map->rxq_map || map->txq_map))) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* No need to map VF miscellaneous or rogue vector */
		if (!vector_id)
			continue;

		/* Subtract non queue vector from vector_id passed by VF
		 * to get actual number of VSI queue vector array index
		 */
		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
		if (!q_vector) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		v_ret = (enum virtchnl_status_code)
			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
		if (v_ret)
			goto error_param;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
				     NULL, 0);
}
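
/* Vector 0 of a VF is reserved for the OICR/mailbox, which is why the code
 * above offsets by ICE_NONQ_VECS_VF (1 in this driver) before indexing
 * vsi->q_vectors[]: VF vector_id n corresponds to queue vector n - 1.
 */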

/**
 * ice_vc_cfg_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the Rx/Tx queues
 */
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int i = -1, q_idx;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		goto error_param;

	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
		goto error_param;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		goto error_param;

	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
		goto error_param;
	}

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		if (qpi->txq.vsi_id != qci->vsi_id ||
		    qpi->rxq.vsi_id != qci->vsi_id ||
		    qpi->rxq.queue_id != qpi->txq.queue_id ||
		    qpi->txq.headwb_enabled ||
		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
			goto error_param;
		}

		q_idx = qpi->rxq.queue_id;

		/* make sure selected "q_idx" is in valid range of queues
		 * for selected "vsi"
		 */
		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
			goto error_param;
		}

		/* copy Tx queue info from VF into VSI */
		if (qpi->txq.ring_len > 0) {
			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
			vsi->tx_rings[i]->count = qpi->txq.ring_len;

			/* Disable any existing queue first */
			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
				goto error_param;

			/* Configure a queue with the requested settings */
			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
					 vf->vf_id, i);
				goto error_param;
			}
		}

		/* copy Rx queue info from VF into VSI */
		if (qpi->rxq.ring_len > 0) {
			u16 max_frame_size = ice_vc_get_max_frame_size(vf);

			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
			vsi->rx_rings[i]->count = qpi->rxq.ring_len;

			if (qpi->rxq.databuffer_size != 0 &&
			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
			     qpi->rxq.databuffer_size < 1024))
				goto error_param;
			vsi->rx_buf_len = qpi->rxq.databuffer_size;
			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
			if (qpi->rxq.max_pkt_size > max_frame_size ||
			    qpi->rxq.max_pkt_size < 64)
				goto error_param;

			vsi->max_frame = qpi->rxq.max_pkt_size;
			/* add space for the port VLAN since the VF driver is
			 * not expected to account for it in the MTU
			 * calculation
			 */
			if (ice_vf_is_port_vlan_ena(vf))
				vsi->max_frame += VLAN_HLEN;

			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
					 vf->vf_id, i);
				goto error_param;
			}
		}
	}

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
error_param:
	/* disable whatever we can */
	for (; i >= 0; i--) {
		if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
				vf->vf_id, i);
		if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
				vf->vf_id, i);
	}

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				     VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
}

/**
 * ice_can_vf_change_mac
 * @vf: pointer to the VF info
 *
 * Return true if the VF is allowed to change its MAC filters, false otherwise
 */
static bool ice_can_vf_change_mac(struct ice_vf *vf)
{
	/* If the VF MAC address has been set administratively (via the
	 * ndo_set_vf_mac command), then deny permission to the VF to
	 * add/delete unicast MAC addresses, unless the VF is trusted
	 */
	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
		return false;

	return true;
}

/**
 * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
 * @vc_ether_addr: used to extract the type
 */
static u8
ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
{
	return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
}

/**
 * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 */
static bool
ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 type = ice_vc_ether_addr_type(vc_ether_addr);

	return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
}

/**
 * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 *
 * This function should only be called when the MAC address in
 * virtchnl_ether_addr is a valid unicast MAC
 */
static bool
ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
{
	u8 type = ice_vc_ether_addr_type(vc_ether_addr);

	return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
}

/**
 * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
 */
static void
ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 *mac_addr = vc_ether_addr->addr;

	if (!is_valid_ether_addr(mac_addr))
		return;

	/* only allow legacy VF drivers to set the device and hardware MAC if it
	 * is zero and allow new VF drivers to set the hardware MAC if the type
	 * was correctly specified over VIRTCHNL
	 */
	if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
	     is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
	    ice_is_vc_addr_primary(vc_ether_addr)) {
		ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
		ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
	}

	/* hardware and device MACs are already set, but its possible that the
	 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
	 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
	 * away for the legacy VF driver case as it will be updated in the
	 * delete flow for this case
	 */
	if (ice_is_vc_addr_legacy(vc_ether_addr)) {
		ether_addr_copy(vf->legacy_last_added_umac.addr,
				mac_addr);
		vf->legacy_last_added_umac.time_modified = jiffies;
	}
}

/**
 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
 */
static int
ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
		    struct virtchnl_ether_addr *vc_ether_addr)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u8 *mac_addr = vc_ether_addr->addr;
	int ret;

	/* device MAC already added */
	if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
		return 0;

	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
		return -EPERM;
	}

	ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
	if (ret == -EEXIST) {
		dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
			vf->vf_id);
		/* don't return since we might need to update
		 * the primary MAC in ice_vfhw_mac_add() below
		 */
	} else if (ret) {
		dev_err(dev, "Failed to add MAC %pM for VF %d, error %d\n",
			mac_addr, vf->vf_id, ret);
		return ret;
	} else {
		vf->num_mac++;
	}

	ice_vfhw_mac_add(vf, vc_ether_addr);

	return ret;
}

/**
 * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
 * @last_added_umac: structure used to check expiration
 */
static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
{
#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME	msecs_to_jiffies(3000)
	return time_is_before_jiffies(last_added_umac->time_modified +
				      ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
}

/**
 * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to check
 *
 * only update cached hardware MAC for legacy VF drivers on delete
 * because we cannot guarantee order/type of MAC from the VF driver
 */
static void
ice_update_legacy_cached_mac(struct ice_vf *vf,
			     struct virtchnl_ether_addr *vc_ether_addr)
{
	if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
	    ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
		return;

	ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
	ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
}

/**
 * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
 */
static void
ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 *mac_addr = vc_ether_addr->addr;

	if (!is_valid_ether_addr(mac_addr) ||
	    !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
		return;

	/* allow the device MAC to be repopulated in the add flow and don't
	 * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
	 * to be persistent on VM reboot and across driver unload/load, which
	 * won't work if we clear the hardware MAC here
	 */
	eth_zero_addr(vf->dev_lan_addr.addr);

	ice_update_legacy_cached_mac(vf, vc_ether_addr);
}

/**
 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
 */
static int
ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
		    struct virtchnl_ether_addr *vc_ether_addr)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u8 *mac_addr = vc_ether_addr->addr;
	int status;

	if (!ice_can_vf_change_mac(vf) &&
	    ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
		return 0;

	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
	if (status == -ENOENT) {
		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
			vf->vf_id);
		return -ENOENT;
	} else if (status) {
		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
			mac_addr, vf->vf_id, status);
		return -EIO;
	}

	ice_vfhw_mac_del(vf, vc_ether_addr);

	vf->num_mac--;

	return 0;
}

/**
 * ice_vc_handle_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @set: true if MAC filters are being set, false otherwise
 *
 * add guest MAC address filter
 */
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
	int (*ice_vc_cfg_mac)
		(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_ether_addr *virtchnl_ether_addr);
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct ice_pf *pf = vf->pf;
	enum virtchnl_ops vc_op;
	struct ice_vsi *vsi;
	int i;

	if (set) {
		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_add_mac_addr;
	} else {
		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_del_mac_addr;
	}

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	/* If this VF is not privileged, then we can't add more than a
	 * limited number of addresses. Check to make sure that the
	 * additions do not push us over the limit.
	 */
	if (set && !ice_is_vf_trusted(vf) &&
	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	for (i = 0; i < al->num_elements; i++) {
		u8 *mac_addr = al->list[i].addr;
		int result;

		if (is_broadcast_ether_addr(mac_addr) ||
		    is_zero_ether_addr(mac_addr))
			continue;

		result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
		if (result == -EEXIST || result == -ENOENT) {
			continue;
		} else if (result) {
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto handle_mac_exit;
		}
	}

handle_mac_exit:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}

/**
 * ice_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest MAC address filter
 */
static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, true);
}

/**
 * ice_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove guest MAC address filter
 */
static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, false);
}

/**
 * ice_vc_request_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queue pairs via virtchnl message response to VF.
 */
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	u16 req_queues = vfres->num_queue_pairs;
	struct ice_pf *pf = vf->pf;
	u16 max_allowed_vf_queues;
	u16 tx_rx_queue_left;
	struct device *dev;
	u16 cur_queues;

	dev = ice_pf_to_dev(pf);
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	cur_queues = vf->num_vf_qs;
	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
	if (!req_queues) {
		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
			vf->vf_id);
	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
		dev_err(dev, "VF %d tried to request more than %d queues.\n",
			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
	} else if (req_queues > cur_queues &&
		   req_queues - cur_queues > tx_rx_queue_left) {
		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
					       ICE_MAX_RSS_QS_PER_VF);
	} else {
		/* request is successful, then reset VF */
		vf->num_req_qs = req_queues;
		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
		dev_info(dev, "VF %d granted request of %u queues.\n",
			 vf->vf_id, req_queues);
		return 0;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
				     v_ret, (u8 *)vfres, sizeof(*vfres));
}

/**
 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
 * @caps: VF driver negotiated capabilities
 *
 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
 */
static bool ice_vf_vlan_offload_ena(u32 caps)
{
	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
}
2108 * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
2109 * @vf: VF used to determine if VLAN promiscuous config is allowed
2111 static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
2113 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2114 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
2115 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
2122 * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
2123 * @vsi: VF's VSI used to enable VLAN promiscuous mode
2124 * @vlan: VLAN used to enable VLAN promiscuous
2126 * This function should only be called if VLAN promiscuous mode is allowed,
2127 * which can be determined via ice_is_vlan_promisc_allowed().
2129 static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
2131 u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
2134 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
2136 if (status && status != -EEXIST)
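/* Hedged usage sketch for the helper above (hypothetical function and VLAN
 * ID): callers are expected to gate on ice_is_vlan_promisc_allowed() first,
 * as the kernel-doc requires.
 */
static inline int ice_example_ena_vlan100_promisc(struct ice_vf *vf,
						  struct ice_vsi *vsi)
{
	struct ice_vlan vlan = ICE_VLAN(ETH_P_8021Q, 100, 0);

	if (!ice_is_vlan_promisc_allowed(vf))
		return -EPERM;	/* illustrative error choice */

	return ice_vf_ena_vlan_promisc(vsi, &vlan);
}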
2143 * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
2144 * @vsi: VF's VSI used to disable VLAN promiscuous mode for
2145 * @vlan: VLAN used to disable VLAN promiscuous
2147 * This function should only be called if VLAN promiscuous mode is allowed,
2148 * which can be determined via ice_is_vlan_promisc_allowed().
2150 static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
2152 u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
2155 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
2157 if (status && status != -ENOENT)
2164 * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
2165 * @vf: VF to check against
2166 * @vsi: VF's VSI used to determine the number of VLAN filters
2168 * If the VF is trusted then the VF is allowed to add as many VLANs as it
2169 * wants to, so return false.
2171 * When the VF is untrusted, compare the number of non-zero VLANs + 1 to the
2172 * max allowed VLANs for an untrusted VF. Return the result of this comparison.
2174 static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
2176 if (ice_is_vf_trusted(vf))
2179 #define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1
2180 return ((ice_vsi_num_non_zero_vlans(vsi) +
2181 ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
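/* Minimal worked example of the check above: an untrusted VF's budget is
 * ICE_MAX_VLAN_PER_VF total filters, one of which is the implicit VLAN 0
 * filter, so ice_vsi_num_non_zero_vlans() reaching ICE_MAX_VLAN_PER_VF - 1
 * already hits the limit. Hypothetical helper restating the comparison:
 */
static inline bool ice_example_untrusted_vlan_budget_hit(u16 num_non_zero)
{
	return (num_non_zero + ICE_VF_ADDED_VLAN_ZERO_FLTRS) >=
	       ICE_MAX_VLAN_PER_VF;
}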
2185 * ice_vc_process_vlan_msg
2186 * @vf: pointer to the VF info
2187 * @msg: pointer to the msg buffer
2188 * @add_v: Add VLAN if true, otherwise delete VLAN
2190 * Process virtchnl op to add or remove programmed guest VLAN ID
2192 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2194 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2195 struct virtchnl_vlan_filter_list *vfl =
2196 (struct virtchnl_vlan_filter_list *)msg;
2197 struct ice_pf *pf = vf->pf;
2198 bool vlan_promisc = false;
2199 struct ice_vsi *vsi;
2204 dev = ice_pf_to_dev(pf);
2205 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2206 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2210 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2211 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2215 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2216 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2220 for (i = 0; i < vfl->num_elements; i++) {
2221 if (vfl->vlan_id[i] >= VLAN_N_VID) {
2222 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2223 dev_err(dev, "invalid VF VLAN id %d\n",
2229 vsi = ice_get_vf_vsi(vf);
2231 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2235 if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
2236 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
2238 /* There is no need to let the VF know that it is not trusted,
2239 * so we can just return a success message here
2244 /* in DVM a VF can add/delete inner VLAN filters when
2245 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
2247 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
2248 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2252 /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
2253 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
2254 * allow vlan_promisc = true in SVM and if no port VLAN is configured
2256 vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
2257 !ice_is_dvm_ena(&pf->hw) &&
2258 !ice_vf_is_port_vlan_ena(vf);
2261 for (i = 0; i < vfl->num_elements; i++) {
2262 u16 vid = vfl->vlan_id[i];
2263 struct ice_vlan vlan;
2265 if (ice_vf_has_max_vlans(vf, vsi)) {
2266 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
2268 /* There is no need to let the VF know that it is
2269 * not trusted, so we can just return a success
2270 * message here as well.
2275 /* we add VLAN 0 by default for each VF so we can enable
2276 * Tx VLAN anti-spoof without triggering MDD events, so
2277 * we don't need to add it again here
2282 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
2283 status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
2285 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2289 /* Enable VLAN filtering on first non-zero VLAN */
2290 if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
2291 if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
2292 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2293 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed, error %d\n",
2297 } else if (vlan_promisc) {
2298 status = ice_vf_ena_vlan_promisc(vsi, &vlan);
2300 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2301 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID: %d failed, error %d\n",
2307 /* For an untrusted VF, the number of VLAN elements passed
2308 * to the PF for removal might be greater than the number of
2309 * VLAN filters programmed for that VF, so use the actual
2310 * number of VLANs added earlier with the add VLAN opcode.
2311 * This avoids removing a VLAN that doesn't exist, which would
2312 * result in sending an erroneous failure message back to the VF.
2316 num_vf_vlan = vsi->num_vlan;
2317 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2318 u16 vid = vfl->vlan_id[i];
2319 struct ice_vlan vlan;
2321 /* we add VLAN 0 by default for each VF so we can enable
2322 * Tx VLAN anti-spoof without triggering MDD events, so
2323 * we don't want a VIRTCHNL request to remove it
2328 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
2329 status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
2331 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2335 /* Disable VLAN filtering when only VLAN 0 is left */
2336 if (!ice_vsi_has_non_zero_vlans(vsi))
2337 vsi->inner_vlan_ops.dis_rx_filtering(vsi);
2340 ice_vf_dis_vlan_promisc(vsi, &vlan);
2345 /* send the response to the VF */
2347 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2350 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2355 * ice_vc_add_vlan_msg
2356 * @vf: pointer to the VF info
2357 * @msg: pointer to the msg buffer
2359 * Add and program guest VLAN ID
2361 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2363 return ice_vc_process_vlan_msg(vf, msg, true);
2367 * ice_vc_remove_vlan_msg
2368 * @vf: pointer to the VF info
2369 * @msg: pointer to the msg buffer
2371 * remove programmed guest VLAN ID
2373 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2375 return ice_vc_process_vlan_msg(vf, msg, false);
2379 * ice_vc_ena_vlan_stripping
2380 * @vf: pointer to the VF info
2382 * Enable VLAN header stripping for a given VF
2384 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2386 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2387 struct ice_vsi *vsi;
2389 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2390 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2394 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2395 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2399 vsi = ice_get_vf_vsi(vf);
2401 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2405 if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
2406 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2409 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2414 * ice_vc_dis_vlan_stripping
2415 * @vf: pointer to the VF info
2417 * Disable VLAN header stripping for a given VF
2419 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2421 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2422 struct ice_vsi *vsi;
2424 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2425 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2429 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2430 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2434 vsi = ice_get_vf_vsi(vf);
2436 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2440 if (vsi->inner_vlan_ops.dis_stripping(vsi))
2441 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2444 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2449 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
2450 * @vf: VF to enable/disable VLAN stripping for on initialization
2452 * Set the default for VLAN stripping based on whether a port VLAN is configured
2453 * and the current VLAN mode of the device.
2455 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
2457 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
2462 /* don't modify stripping if port VLAN is configured in SVM since the
2463 * port VLAN is based on the inner/single VLAN in SVM
2465 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
2468 if (ice_vf_vlan_offload_ena(vf->driver_caps))
2469 return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
2471 return vsi->inner_vlan_ops.dis_stripping(vsi);
2474 static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
2479 return ICE_MAX_VLAN_PER_VF;
2483 * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
2484 * @vf: VF being checked
2486 * When the device is in double VLAN mode, check whether or not the outer VLAN
2487 * can be used.
2489 static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
2491 if (ice_vf_is_port_vlan_ena(vf))
2498 * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
2499 * @vf: VF that capabilities are being set for
2500 * @caps: VLAN capabilities to populate
2502 * Determine VLAN capabilities support based on whether a port VLAN is
2503 * configured. If a port VLAN is configured then the VF should use the inner
2504 * filtering/offload capabilities since the port VLAN is using the outer VLAN
2508 ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2510 struct virtchnl_vlan_supported_caps *supported_caps;
2512 if (ice_vf_outer_vlan_not_allowed(vf)) {
2513 /* until support for inner VLAN filtering with a configured port
2514 * VLAN is added, only support software offloaded inner
2515 * VLANs when a port VLAN is configured in DVM
2517 supported_caps = &caps->filtering.filtering_support;
2518 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2520 supported_caps = &caps->offloads.stripping_support;
2521 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2522 VIRTCHNL_VLAN_TOGGLE |
2523 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2524 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2526 supported_caps = &caps->offloads.insertion_support;
2527 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2528 VIRTCHNL_VLAN_TOGGLE |
2529 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2530 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2532 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2533 caps->offloads.ethertype_match =
2534 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2536 supported_caps = &caps->filtering.filtering_support;
2537 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2538 supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2539 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2540 VIRTCHNL_VLAN_ETHERTYPE_9100 |
2541 VIRTCHNL_VLAN_ETHERTYPE_AND;
2542 caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2543 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2544 VIRTCHNL_VLAN_ETHERTYPE_9100;
2546 supported_caps = &caps->offloads.stripping_support;
2547 supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2548 VIRTCHNL_VLAN_ETHERTYPE_8100 |
2549 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2550 supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2551 VIRTCHNL_VLAN_ETHERTYPE_8100 |
2552 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2553 VIRTCHNL_VLAN_ETHERTYPE_9100 |
2554 VIRTCHNL_VLAN_ETHERTYPE_XOR |
2555 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
2557 supported_caps = &caps->offloads.insertion_support;
2558 supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2559 VIRTCHNL_VLAN_ETHERTYPE_8100 |
2560 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2561 supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2562 VIRTCHNL_VLAN_ETHERTYPE_8100 |
2563 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2564 VIRTCHNL_VLAN_ETHERTYPE_9100 |
2565 VIRTCHNL_VLAN_ETHERTYPE_XOR |
2566 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
2568 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2570 caps->offloads.ethertype_match =
2571 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2574 caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2578 * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
2579 * @vf: VF that capabilities are being set for
2580 * @caps: VLAN capabilities to populate
2582 * Determine VLAN capabilities support based on whether a port VLAN is
2583 * configured. If a port VLAN is configured then the VF does not have any VLAN
2584 * filtering or offload capabilities since the port VLAN is using the inner VLAN
2585 * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
2586 * VLAN filtering and offload capabilities.
2589 ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2591 struct virtchnl_vlan_supported_caps *supported_caps;
2593 if (ice_vf_is_port_vlan_ena(vf)) {
2594 supported_caps = &caps->filtering.filtering_support;
2595 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2596 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2598 supported_caps = &caps->offloads.stripping_support;
2599 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2600 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2602 supported_caps = &caps->offloads.insertion_support;
2603 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2604 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2606 caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
2607 caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
2608 caps->filtering.max_filters = 0;
2610 supported_caps = &caps->filtering.filtering_support;
2611 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
2612 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2613 caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2615 supported_caps = &caps->offloads.stripping_support;
2616 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2617 VIRTCHNL_VLAN_TOGGLE |
2618 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2619 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2621 supported_caps = &caps->offloads.insertion_support;
2622 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2623 VIRTCHNL_VLAN_TOGGLE |
2624 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2625 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2627 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2628 caps->offloads.ethertype_match =
2629 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2630 caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2635 * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
2636 * @vf: VF to determine VLAN capabilities for
2638 * This will only be called if the VF and PF successfully negotiated
2639 * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2641 * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
2642 * is configured or not.
2644 static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
2646 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2647 struct virtchnl_vlan_caps *caps = NULL;
2650 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2651 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2655 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
2657 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2660 len = sizeof(*caps);
2662 if (ice_is_dvm_ena(&vf->pf->hw))
2663 ice_vc_set_dvm_caps(vf, caps);
2665 ice_vc_set_svm_caps(vf, caps);
2667 /* store negotiated caps to prevent invalid VF messages */
2668 memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
2671 err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
2672 v_ret, (u8 *)caps, len);
2678 * ice_vc_validate_vlan_tpid - validate VLAN TPID
2679 * @filtering_caps: negotiated/supported VLAN filtering capabilities
2680 * @tpid: VLAN TPID used for validation
2682 * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
2683 * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
2685 static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
2687 enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
2691 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
2694 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
2697 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
2701 if (!(filtering_caps & vlan_ethertype))
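/* Hedged usage sketch: a filter carrying TPID 0x88A8 only validates if the
 * negotiated filtering caps include VIRTCHNL_VLAN_ETHERTYPE_88A8. The
 * wrapper below is hypothetical; ETH_P_8021AD (0x88A8) is the standard
 * kernel ethertype constant.
 */
static inline bool ice_example_qinq_tpid_ok(u16 outer_filtering_caps)
{
	return ice_vc_validate_vlan_tpid(outer_filtering_caps, ETH_P_8021AD);
}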
2708 * ice_vc_is_valid_vlan - validate the virtchnl_vlan
2709 * @vc_vlan: virtchnl_vlan to validate
2711 * If the VLAN TCI or the VLAN TPID is 0, then this filter is invalid, so
2712 * return false. Otherwise return true.
2714 static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
2716 if (!vc_vlan->tci || !vc_vlan->tpid)
2723 * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
2724 * @vfc: negotiated/supported VLAN filtering capabilities
2725 * @vfl: VLAN filter list from VF to validate
2727 * Validate all of the filters in the VLAN filter list from the VF. If any of
2728 * the checks fail then return false. Otherwise return true.
2731 ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
2732 struct virtchnl_vlan_filter_list_v2 *vfl)
2736 if (!vfl->num_elements)
2739 for (i = 0; i < vfl->num_elements; i++) {
2740 struct virtchnl_vlan_supported_caps *filtering_support =
2741 &vfc->filtering_support;
2742 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2743 struct virtchnl_vlan *outer = &vlan_fltr->outer;
2744 struct virtchnl_vlan *inner = &vlan_fltr->inner;
2746 if ((ice_vc_is_valid_vlan(outer) &&
2747 filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
2748 (ice_vc_is_valid_vlan(inner) &&
2749 filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
2752 if ((outer->tci_mask &&
2753 !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
2755 !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
2758 if (((outer->tci & VLAN_PRIO_MASK) &&
2759 !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
2760 ((inner->tci & VLAN_PRIO_MASK) &&
2761 !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
2764 if ((ice_vc_is_valid_vlan(outer) &&
2765 !ice_vc_validate_vlan_tpid(filtering_support->outer,
2767 (ice_vc_is_valid_vlan(inner) &&
2768 !ice_vc_validate_vlan_tpid(filtering_support->inner,
2777 * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
2778 * @vc_vlan: struct virtchnl_vlan to transform
2780 static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
2782 struct ice_vlan vlan = { 0 };
2784 vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
2785 vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
2786 vlan.tpid = vc_vlan->tpid;
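/* Worked example of the TCI unpacking above (hypothetical helper): for
 * tci == 0xA005, prio is (0xA005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT == 5
 * and vid is 0xA005 & VLAN_VID_MASK == 5. The masks and shift come from
 * <linux/if_vlan.h>.
 */
static inline void ice_example_unpack_tci(u16 tci, u16 *vid, u8 *prio)
{
	*prio = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;	/* bits 15:13 */
	*vid = tci & VLAN_VID_MASK;				/* bits 11:0 */
}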
2792 * ice_vc_vlan_action - action to perform on the virtchnl_vlan
2793 * @vsi: VF's VSI used to perform the action
2794 * @vlan_action: function to perform the action with (i.e. add/del)
2795 * @vlan: VLAN filter to perform the action with
2798 ice_vc_vlan_action(struct ice_vsi *vsi,
2799 int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
2800 struct ice_vlan *vlan)
2804 err = vlan_action(vsi, vlan);
2812 * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
2813 * @vf: VF used to delete the VLAN(s)
2814 * @vsi: VF's VSI used to delete the VLAN(s)
2815 * @vfl: virtchnl filter list used to delete the filters
2818 ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2819 struct virtchnl_vlan_filter_list_v2 *vfl)
2821 bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2825 for (i = 0; i < vfl->num_elements; i++) {
2826 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2827 struct virtchnl_vlan *vc_vlan;
2829 vc_vlan = &vlan_fltr->outer;
2830 if (ice_vc_is_valid_vlan(vc_vlan)) {
2831 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2833 err = ice_vc_vlan_action(vsi,
2834 vsi->outer_vlan_ops.del_vlan,
2840 ice_vf_dis_vlan_promisc(vsi, &vlan);
2843 vc_vlan = &vlan_fltr->inner;
2844 if (ice_vc_is_valid_vlan(vc_vlan)) {
2845 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2847 err = ice_vc_vlan_action(vsi,
2848 vsi->inner_vlan_ops.del_vlan,
2853 /* no support for VLAN promiscuous on inner VLAN unless
2854 * we are in Single VLAN Mode (SVM)
2856 if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
2857 ice_vf_dis_vlan_promisc(vsi, &vlan);
2865 * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
2866 * @vf: VF the message was received from
2867 * @msg: message received from the VF
2869 static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2871 struct virtchnl_vlan_filter_list_v2 *vfl =
2872 (struct virtchnl_vlan_filter_list_v2 *)msg;
2873 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2874 struct ice_vsi *vsi;
2876 if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
2878 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2882 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2883 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2887 vsi = ice_get_vf_vsi(vf);
2889 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2893 if (ice_vc_del_vlans(vf, vsi, vfl))
2894 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2897 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
2902 * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
2903 * @vf: VF used to add the VLAN(s)
2904 * @vsi: VF's VSI used to add the VLAN(s)
2905 * @vfl: virtchnl filter list used to add the filters
2908 ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2909 struct virtchnl_vlan_filter_list_v2 *vfl)
2911 bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2915 for (i = 0; i < vfl->num_elements; i++) {
2916 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2917 struct virtchnl_vlan *vc_vlan;
2919 vc_vlan = &vlan_fltr->outer;
2920 if (ice_vc_is_valid_vlan(vc_vlan)) {
2921 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2923 err = ice_vc_vlan_action(vsi,
2924 vsi->outer_vlan_ops.add_vlan,
2930 err = ice_vf_ena_vlan_promisc(vsi, &vlan);
2936 vc_vlan = &vlan_fltr->inner;
2937 if (ice_vc_is_valid_vlan(vc_vlan)) {
2938 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2940 err = ice_vc_vlan_action(vsi,
2941 vsi->inner_vlan_ops.add_vlan,
2946 /* no support for VLAN promiscuous on inner VLAN unless
2947 * we are in Single VLAN Mode (SVM)
2949 if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
2950 err = ice_vf_ena_vlan_promisc(vsi, &vlan);
2961 * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
2962 * @vsi: VF VSI used to get number of existing VLAN filters
2963 * @vfc: negotiated/supported VLAN filtering capabilities
2964 * @vfl: VLAN filter list from VF to validate
2966 * Validate all of the filters in the VLAN filter list from the VF during the
2967 * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
2968 * Otherwise return true.
2971 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
2972 struct virtchnl_vlan_filtering_caps *vfc,
2973 struct virtchnl_vlan_filter_list_v2 *vfl)
2975 u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
2978 if (num_requested_filters > vfc->max_filters)
2981 return ice_vc_validate_vlan_filter_list(vfc, vfl);
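/* Example of the capacity math above (numbers hypothetical): a VSI that
 * already has 6 non-zero VLANs receiving a 3-element add request asks for
 * 9 filters, which fails against max_filters == 8 before the per-filter
 * checks run. Sketch of the same comparison:
 */
static inline bool ice_example_vlan_capacity_ok(u16 num_existing_non_zero,
						u16 num_requested,
						u16 max_filters)
{
	return (u16)(num_existing_non_zero + num_requested) <= max_filters;
}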
2985 * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
2986 * @vf: VF the message was received from
2987 * @msg: message received from the VF
2989 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2991 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2992 struct virtchnl_vlan_filter_list_v2 *vfl =
2993 (struct virtchnl_vlan_filter_list_v2 *)msg;
2994 struct ice_vsi *vsi;
2996 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2997 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3001 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
3002 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3006 vsi = ice_get_vf_vsi(vf);
3008 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3012 if (!ice_vc_validate_add_vlan_filter_list(vsi,
3013 &vf->vlan_v2_caps.filtering,
3015 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3019 if (ice_vc_add_vlans(vf, vsi, vfl))
3020 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3023 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
3028 * ice_vc_valid_vlan_setting - validate VLAN setting
3029 * @negotiated_settings: negotiated VLAN settings during VF init
3030 * @ethertype_setting: ethertype(s) requested for the VLAN setting
3033 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
3035 if (ethertype_setting && !(negotiated_settings & ethertype_setting))
3038 /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
3039 * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
3041 if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
3042 hweight32(ethertype_setting) > 1)
3045 /* ability to modify the VLAN setting was not negotiated */
3046 if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
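/* Example of the single-ethertype rule above (hypothetical helper): without
 * VIRTCHNL_VLAN_ETHERTYPE_AND, a setting that requests both ETHERTYPE_8100
 * and ETHERTYPE_88A8 has hweight32() == 2 and is rejected.
 */
static inline bool ice_example_single_ethertype_ok(u32 negotiated,
						   u32 requested)
{
	return (negotiated & VIRTCHNL_VLAN_ETHERTYPE_AND) ||
	       hweight32(requested) <= 1;
}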
3053 * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
3054 * @caps: negotiated VLAN settings during VF init
3055 * @msg: message to validate
3057 * Used to validate any VLAN virtchnl message sent as a
3058 * virtchnl_vlan_setting structure. Validates the message against the
3059 * negotiated/supported caps during VF driver init.
3062 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
3063 struct virtchnl_vlan_setting *msg)
3065 if ((!msg->outer_ethertype_setting &&
3066 !msg->inner_ethertype_setting) ||
3067 (!caps->outer && !caps->inner))
3070 if (msg->outer_ethertype_setting &&
3071 !ice_vc_valid_vlan_setting(caps->outer,
3072 msg->outer_ethertype_setting))
3075 if (msg->inner_ethertype_setting &&
3076 !ice_vc_valid_vlan_setting(caps->inner,
3077 msg->inner_ethertype_setting))
3084 * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
3085 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
3086 * @tpid: VLAN TPID to populate
3088 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
3090 switch (ethertype_setting) {
3091 case VIRTCHNL_VLAN_ETHERTYPE_8100:
3092 *tpid = ETH_P_8021Q;
3094 case VIRTCHNL_VLAN_ETHERTYPE_88A8:
3095 *tpid = ETH_P_8021AD;
3097 case VIRTCHNL_VLAN_ETHERTYPE_9100:
3098 *tpid = ETH_P_QINQ1;
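/* Sketch of the inverse mapping (hypothetical helper, mirroring the switch
 * above): from a VLAN TPID back to the VIRTCHNL_VLAN_ETHERTYPE_* bit.
 */
static inline u32 ice_example_tpid_to_vc_ethertype(u16 tpid)
{
	switch (tpid) {
	case ETH_P_8021Q:
		return VIRTCHNL_VLAN_ETHERTYPE_8100;
	case ETH_P_8021AD:
		return VIRTCHNL_VLAN_ETHERTYPE_88A8;
	case ETH_P_QINQ1:
		return VIRTCHNL_VLAN_ETHERTYPE_9100;
	default:
		return 0;	/* unknown TPID: no ethertype bit */
	}
}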
3109 * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
3110 * @vsi: VF's VSI used to enable the VLAN offload
3111 * @ena_offload: function used to enable the VLAN offload
3112 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
3115 ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
3116 int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
3117 u32 ethertype_setting)
3122 err = ice_vc_get_tpid(ethertype_setting, &tpid);
3126 err = ena_offload(vsi, tpid);
3133 #define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
3134 #define ICE_L2TSEL_BIT_OFFSET 23
3135 enum ice_l2tsel {
3136 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
3137 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
3141 * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
3142 * @vsi: VSI used to update l2tsel on
3143 * @l2tsel: l2tsel setting requested
3145 * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
3146 * This will modify which descriptor field the first offloaded VLAN will be
3147 * stripped into.
3149 static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
3151 struct ice_hw *hw = &vsi->back->hw;
3155 if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
3158 l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
3160 for (i = 0; i < vsi->alloc_rxq; i++) {
3161 u16 pfq = vsi->rxq_map[i];
3162 u32 qrx_context_offset;
3165 qrx_context_offset =
3166 QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
3168 regval = rd32(hw, qrx_context_offset);
3169 regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
3170 regval |= l2tsel_bit;
3171 wr32(hw, qrx_context_offset, regval);
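/* Worked example of the read-modify-write above (hypothetical helper): with
 * ICE_L2TSEL_BIT_OFFSET == 23, selecting L2TAG1 sets bit 23 of the queue
 * context word and selecting L2TAG2_2ND clears it.
 */
static inline u32 ice_example_apply_l2tsel(u32 regval, enum ice_l2tsel l2tsel)
{
	regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
	if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1)
		regval |= BIT(ICE_L2TSEL_BIT_OFFSET);

	return regval;
}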
3176 * ice_vc_ena_vlan_stripping_v2_msg
3177 * @vf: VF the message was received from
3178 * @msg: message received from the VF
3180 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
3182 static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3184 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3185 struct virtchnl_vlan_supported_caps *stripping_support;
3186 struct virtchnl_vlan_setting *strip_msg =
3187 (struct virtchnl_vlan_setting *)msg;
3188 u32 ethertype_setting;
3189 struct ice_vsi *vsi;
3191 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3192 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3196 if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3197 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3201 vsi = ice_get_vf_vsi(vf);
3203 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3207 stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3208 if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3209 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3213 ethertype_setting = strip_msg->outer_ethertype_setting;
3214 if (ethertype_setting) {
3215 if (ice_vc_ena_vlan_offload(vsi,
3216 vsi->outer_vlan_ops.ena_stripping,
3217 ethertype_setting)) {
3218 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3221 enum ice_l2tsel l2tsel =
3222 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
3224 /* PF tells the VF that the outer VLAN tag is always
3225 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3226 * inner is always extracted to
3227 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3228 * support outer stripping so the first tag always ends
3229 * up in L2TAG2_2ND and the second/inner tag, if
3230 * enabled, is extracted in L2TAG1.
3232 ice_vsi_update_l2tsel(vsi, l2tsel);
3236 ethertype_setting = strip_msg->inner_ethertype_setting;
3237 if (ethertype_setting &&
3238 ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
3239 ethertype_setting)) {
3240 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3245 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
3250 * ice_vc_dis_vlan_stripping_v2_msg
3251 * @vf: VF the message was received from
3252 * @msg: message received from the VF
3254 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
3256 static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3258 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3259 struct virtchnl_vlan_supported_caps *stripping_support;
3260 struct virtchnl_vlan_setting *strip_msg =
3261 (struct virtchnl_vlan_setting *)msg;
3262 u32 ethertype_setting;
3263 struct ice_vsi *vsi;
3265 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3266 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3270 if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3271 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3275 vsi = ice_get_vf_vsi(vf);
3277 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3281 stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3282 if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3283 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3287 ethertype_setting = strip_msg->outer_ethertype_setting;
3288 if (ethertype_setting) {
3289 if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
3290 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3293 enum ice_l2tsel l2tsel =
3294 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
3296 /* PF tells the VF that the outer VLAN tag is always
3297 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3298 * inner is always extracted to
3299 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3300 * support inner stripping while outer stripping is
3301 * disabled so that the first and only tag is extracted
3304 ice_vsi_update_l2tsel(vsi, l2tsel);
3308 ethertype_setting = strip_msg->inner_ethertype_setting;
3309 if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
3310 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3315 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
3320 * ice_vc_ena_vlan_insertion_v2_msg
3321 * @vf: VF the message was received from
3322 * @msg: message received from the VF
3324 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
3326 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3328 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3329 struct virtchnl_vlan_supported_caps *insertion_support;
3330 struct virtchnl_vlan_setting *insertion_msg =
3331 (struct virtchnl_vlan_setting *)msg;
3332 u32 ethertype_setting;
3333 struct ice_vsi *vsi;
3335 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3336 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3340 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3341 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3345 vsi = ice_get_vf_vsi(vf);
3347 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3351 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3352 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3353 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3357 ethertype_setting = insertion_msg->outer_ethertype_setting;
3358 if (ethertype_setting &&
3359 ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
3360 ethertype_setting)) {
3361 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3365 ethertype_setting = insertion_msg->inner_ethertype_setting;
3366 if (ethertype_setting &&
3367 ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
3368 ethertype_setting)) {
3369 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3374 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
3379 * ice_vc_dis_vlan_insertion_v2_msg
3380 * @vf: VF the message was received from
3381 * @msg: message received from the VF
3385 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
3385 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3387 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3388 struct virtchnl_vlan_supported_caps *insertion_support;
3389 struct virtchnl_vlan_setting *insertion_msg =
3390 (struct virtchnl_vlan_setting *)msg;
3391 u32 ethertype_setting;
3392 struct ice_vsi *vsi;
3394 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3395 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3399 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3400 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3404 vsi = ice_get_vf_vsi(vf);
3406 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3410 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3411 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3412 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3416 ethertype_setting = insertion_msg->outer_ethertype_setting;
3417 if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
3418 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3422 ethertype_setting = insertion_msg->inner_ethertype_setting;
3423 if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
3424 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3429 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
3433 static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
3434 .get_ver_msg = ice_vc_get_ver_msg,
3435 .get_vf_res_msg = ice_vc_get_vf_res_msg,
3436 .reset_vf = ice_vc_reset_vf_msg,
3437 .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
3438 .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
3439 .cfg_qs_msg = ice_vc_cfg_qs_msg,
3440 .ena_qs_msg = ice_vc_ena_qs_msg,
3441 .dis_qs_msg = ice_vc_dis_qs_msg,
3442 .request_qs_msg = ice_vc_request_qs_msg,
3443 .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3444 .config_rss_key = ice_vc_config_rss_key,
3445 .config_rss_lut = ice_vc_config_rss_lut,
3446 .get_stats_msg = ice_vc_get_stats_msg,
3447 .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
3448 .add_vlan_msg = ice_vc_add_vlan_msg,
3449 .remove_vlan_msg = ice_vc_remove_vlan_msg,
3450 .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3451 .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3452 .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3453 .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3454 .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3455 .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3456 .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3457 .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3458 .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3459 .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3460 .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3461 .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3465 * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
3466 * @vf: the VF to switch ops
3468 void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
3470 vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
3474 * ice_vc_repr_add_mac
3475 * @vf: pointer to VF
3476 * @msg: virtchannel message
3478 * When port representors are created, we do not add a MAC rule
3479 * to firmware; we store it so that the PF can report the same
3480 * MAC as the VF.
3482 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
3484 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3485 struct virtchnl_ether_addr_list *al =
3486 (struct virtchnl_ether_addr_list *)msg;
3487 struct ice_vsi *vsi;
3491 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3492 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3493 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3494 goto handle_mac_exit;
3499 vsi = ice_get_vf_vsi(vf);
3501 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3502 goto handle_mac_exit;
3505 for (i = 0; i < al->num_elements; i++) {
3506 u8 *mac_addr = al->list[i].addr;
3509 if (!is_unicast_ether_addr(mac_addr) ||
3510 ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
3513 if (vf->pf_set_mac) {
3514 dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
3515 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
3516 goto handle_mac_exit;
3519 result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
3521 dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d, error %d\n",
3522 mac_addr, vf->vf_id, result);
3523 goto handle_mac_exit;
3526 ice_vfhw_mac_add(vf, &al->list[i]);
3532 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3537 * ice_vc_repr_del_mac - response with success for deleting MAC
3538 * @vf: pointer to VF
3539 * @msg: virtchannel message
3541 * Respond with success so the normal VF flow is not broken.
3542 * For legacy VF drivers, try to update the cached MAC address.
3545 ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
3547 struct virtchnl_ether_addr_list *al =
3548 (struct virtchnl_ether_addr_list *)msg;
3550 ice_update_legacy_cached_mac(vf, &al->list[0]);
3552 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
3553 VIRTCHNL_STATUS_SUCCESS, NULL, 0);
3557 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
3559 dev_dbg(ice_pf_to_dev(vf->pf),
3560 "Can't config promiscuous mode in switchdev mode for VF %d\n",
3562 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3563 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3567 static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
3568 .get_ver_msg = ice_vc_get_ver_msg,
3569 .get_vf_res_msg = ice_vc_get_vf_res_msg,
3570 .reset_vf = ice_vc_reset_vf_msg,
3571 .add_mac_addr_msg = ice_vc_repr_add_mac,
3572 .del_mac_addr_msg = ice_vc_repr_del_mac,
3573 .cfg_qs_msg = ice_vc_cfg_qs_msg,
3574 .ena_qs_msg = ice_vc_ena_qs_msg,
3575 .dis_qs_msg = ice_vc_dis_qs_msg,
3576 .request_qs_msg = ice_vc_request_qs_msg,
3577 .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3578 .config_rss_key = ice_vc_config_rss_key,
3579 .config_rss_lut = ice_vc_config_rss_lut,
3580 .get_stats_msg = ice_vc_get_stats_msg,
3581 .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
3582 .add_vlan_msg = ice_vc_add_vlan_msg,
3583 .remove_vlan_msg = ice_vc_remove_vlan_msg,
3584 .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3585 .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3586 .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3587 .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3588 .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3589 .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3590 .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3591 .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3592 .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3593 .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3594 .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3595 .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3599 * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
3600 * @vf: the VF to switch ops
3602 void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
3604 vf->virtchnl_ops = &ice_virtchnl_repr_ops;
3608 * ice_vc_process_vf_msg - Process request from VF
3609 * @pf: pointer to the PF structure
3610 * @event: pointer to the AQ event
3612 * Called from the common asq/arq handler to
3613 * process a request from a VF.
3615 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3617 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3618 s16 vf_id = le16_to_cpu(event->desc.retval);
3619 const struct ice_virtchnl_ops *ops;
3620 u16 msglen = event->msg_len;
3621 u8 *msg = event->msg_buf;
3622 struct ice_vf *vf = NULL;
3626 dev = ice_pf_to_dev(pf);
3628 vf = ice_get_vf_by_id(pf, vf_id);
3630 dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
3631 vf_id, v_opcode, msglen);
3635 mutex_lock(&vf->cfg_lock);
3637 /* Check if VF is disabled. */
3638 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3643 ops = vf->virtchnl_ops;
3645 /* Perform basic checks on the msg */
3646 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3648 if (err == VIRTCHNL_STATUS_ERR_PARAM)
3656 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3658 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3659 vf_id, v_opcode, msglen, err);
3663 if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
3664 ice_vc_send_msg_to_vf(vf, v_opcode,
3665 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
3671 case VIRTCHNL_OP_VERSION:
3672 err = ops->get_ver_msg(vf, msg);
3674 case VIRTCHNL_OP_GET_VF_RESOURCES:
3675 err = ops->get_vf_res_msg(vf, msg);
3676 if (ice_vf_init_vlan_stripping(vf))
3677 dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
3679 ice_vc_notify_vf_link_state(vf);
3681 case VIRTCHNL_OP_RESET_VF:
3684 case VIRTCHNL_OP_ADD_ETH_ADDR:
3685 err = ops->add_mac_addr_msg(vf, msg);
3687 case VIRTCHNL_OP_DEL_ETH_ADDR:
3688 err = ops->del_mac_addr_msg(vf, msg);
3690 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3691 err = ops->cfg_qs_msg(vf, msg);
3693 case VIRTCHNL_OP_ENABLE_QUEUES:
3694 err = ops->ena_qs_msg(vf, msg);
3695 ice_vc_notify_vf_link_state(vf);
3697 case VIRTCHNL_OP_DISABLE_QUEUES:
3698 err = ops->dis_qs_msg(vf, msg);
3700 case VIRTCHNL_OP_REQUEST_QUEUES:
3701 err = ops->request_qs_msg(vf, msg);
3703 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3704 err = ops->cfg_irq_map_msg(vf, msg);
3706 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3707 err = ops->config_rss_key(vf, msg);
3709 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3710 err = ops->config_rss_lut(vf, msg);
3712 case VIRTCHNL_OP_GET_STATS:
3713 err = ops->get_stats_msg(vf, msg);
3715 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3716 err = ops->cfg_promiscuous_mode_msg(vf, msg);
3718 case VIRTCHNL_OP_ADD_VLAN:
3719 err = ops->add_vlan_msg(vf, msg);
3721 case VIRTCHNL_OP_DEL_VLAN:
3722 err = ops->remove_vlan_msg(vf, msg);
3724 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3725 err = ops->ena_vlan_stripping(vf);
3727 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3728 err = ops->dis_vlan_stripping(vf);
3730 case VIRTCHNL_OP_ADD_FDIR_FILTER:
3731 err = ops->add_fdir_fltr_msg(vf, msg);
3733 case VIRTCHNL_OP_DEL_FDIR_FILTER:
3734 err = ops->del_fdir_fltr_msg(vf, msg);
3736 case VIRTCHNL_OP_ADD_RSS_CFG:
3737 err = ops->handle_rss_cfg_msg(vf, msg, true);
3739 case VIRTCHNL_OP_DEL_RSS_CFG:
3740 err = ops->handle_rss_cfg_msg(vf, msg, false);
3742 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
3743 err = ops->get_offload_vlan_v2_caps(vf);
3745 case VIRTCHNL_OP_ADD_VLAN_V2:
3746 err = ops->add_vlan_v2_msg(vf, msg);
3748 case VIRTCHNL_OP_DEL_VLAN_V2:
3749 err = ops->remove_vlan_v2_msg(vf, msg);
3751 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
3752 err = ops->ena_vlan_stripping_v2_msg(vf, msg);
3754 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
3755 err = ops->dis_vlan_stripping_v2_msg(vf, msg);
3757 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
3758 err = ops->ena_vlan_insertion_v2_msg(vf, msg);
3760 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
3761 err = ops->dis_vlan_insertion_v2_msg(vf, msg);
3763 case VIRTCHNL_OP_UNKNOWN:
3765 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3767 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3768 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3773 /* Error return values from the helpers matter less here,
3774 * as the PF is busy with pending work; just log the failure.
3776 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3777 vf_id, v_opcode, err);
3781 mutex_unlock(&vf->cfg_lock);
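/* Hedged sketch of the AQ event decoding at the top of this handler
 * (hypothetical helper): the descriptor's cookie_high carries the virtchnl
 * opcode and its retval field carries the source VF ID.
 */
static inline void ice_example_decode_vc_event(struct ice_rq_event_info *event,
					       u32 *v_opcode, s16 *vf_id)
{
	*v_opcode = le32_to_cpu(event->desc.cookie_high);
	*vf_id = le16_to_cpu(event->desc.retval);
}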