// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

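/* Worked example (illustrative, not part of the driver): with
 * func_caps.vf_base_id = 64, the VF with PF-relative id 3 is addressed
 * on the admin queue as abs_vf_id = 3 + 64 = 67. The broadcast above is
 * just this per-VF send repeated for every VF in INIT or ACTIVE state.
 */
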
/**
 * i40e_vc_link_speed2mbps
 * converts i40e_aq_link_speed to integer value of Mbps
 * @link_speed: the speed to convert
 *
 * return the speed as direct value of Mbps.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}

/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set a link state on a single vf
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF info
 * @notify_vf: notify vf about reset or not
 * Reset VF handler.
 **/
static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If PF is in VFs releasing state reset VF is impossible,
		 * so leave it.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u16 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

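/* Worked example (illustrative, not part of the driver): with the
 * default contiguous layout, queue_mapping[0] holds the VSI's first PF
 * queue and the VF-relative id is a plain offset, e.g. a base of 64
 * plus vsi_queue_id 3 yields PF queue 67. Only the noncontiguous
 * layout consults the per-queue entry queue_mapping[vsi_queue_id].
 */
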
/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (can be 1 to 16)
		 * as its own, they may actually belong to different VSIs (up
		 * to 4). We need to find which queues belong to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

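/* Worked example (illustrative): an ADq VF with four TCs of 4 queue
 * pairs each asks about its queue 9. The loop subtracts ch[0].num_qps
 * (4) and ch[1].num_qps (4), then matches 1 < ch[2].num_qps, so the
 * lookup runs with ch[2].vsi_id and a VSI-relative queue id of 1.
 */
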
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

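/* Bit-layout sketch (illustrative): linklistmap interleaves the two
 * supported queue types per VSI queue, bit 2 * qid for Rx and
 * bit 2 * qid + 1 for Tx. For rxq_map = 0x3 and txq_map = 0x1 the map
 * is 0b0111 (Rx0, Tx0, Rx1), which is exactly the order in which the
 * while loop above chains the queues into the hardware linked list.
 */
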
/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
					      qvlist_info->num_vectors - 1),
				  GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			      (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			      (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

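/* Worked example (illustrative): the VPINT_LNKLSTN registers form
 * per-VF blocks of (num_msix_vectors_vf - 1) entries, vector 0 living
 * in VPINT_LNKLST0 instead. With msix_vf = 5, vf_id = 2 and v_idx = 3,
 * the slot used above is (5 - 1) * 2 + (3 - 1) = 10.
 */
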
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

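/* Sketch (illustrative): QTX_CTL ties the queue to a PCI function by
 * packing the queue-owner type (VF), the PF index and the absolute VF
 * id (vf_id + vf_base_id) into their shifted, masked fields, e.g.
 * PF 0 owning absolute VF 67 lands 67 in the VFVM index field.
 */
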
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if port VLAN is configured increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq and don't apply the mac filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 qid, vsi_id;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

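/* Mapping sketch (illustrative): VPLAN_QTABLE is indexed by the VF's
 * flat queue number across all TCs. A 2-TC ADq VF with 4 + 4 queue
 * pairs programs entries 0..7, each holding the PF-absolute queue id
 * that i40e_vc_get_pf_queue_id() returned for that TC's VSI.
 */
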
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * the PF.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * __i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	u16 num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	int num_vlans;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return num_vlans;
}

/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed in caller.
 *
 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
				    s16 **vlan_list)
{
	struct i40e_mac_filter *f;
	int i = 0;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
	if (!(*vlan_list))
		goto err;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
			continue;
		(*vlan_list)[i++] = f->vlan;
	}
err:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
}

/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *                for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *                  for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static i40e_status
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
		     bool unicast_enable, s16 *vl, u16 num_vlans)
{
	i40e_status aq_ret, aq_tmp = 0;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* No VLAN to set promisc on, set on VSI */
	if (!num_vlans || !vl) {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
							       multi_enable,
							       NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
							     unicast_enable,
							     NULL, true);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}

		return aq_ret;
	}

	for (i = 0; i < num_vlans; i++) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
							    multi_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
							    unicast_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}
	}

	if (aq_tmp)
		aq_ret = aq_tmp;

	return aq_ret;
}

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	i40e_status aq_ret = I40E_SUCCESS;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u16 num_vlans;
	s16 *vl;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (vf->port_vlan_id) {
		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
					      alluni, &vf->port_vlan_id, 1);
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

		if (!vl)
			return I40E_ERR_NO_MEMORY;

		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
					      vl, num_vlans);
		kfree(vl);
		return aq_ret;
	}

	/* no VLANs to set on, set on VSI */
	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
				      NULL, 0);
	return aq_ret;
}

/**
 * i40e_sync_vfr_reset
 * @hw: pointer to hw struct
 * @vf_id: VF identifier
 *
 * Before triggering a hardware reset, we need to know that no other process
 * has reserved the hardware for any reset operations. This check is done by
 * examining the status of the RSTAT1 register used to signal the reset.
 **/
static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
{
	u32 reg;
	int i;

	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
			   I40E_VFINT_ICR0_ADMINQ_MASK;
		if (reg)
			return 0;

		usleep_range(100, 200);
	}

	return -EAGAIN;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	bool vf_active;
	u32 radq;

	/* warn the VF */
	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* Sync VFR reset before trigger next one */
		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
			    I40E_VFINT_ICR0_ADMINQ_MASK;
		if (vf_active && !radq)
			/* waiting for finish reset by virtual driver */
			if (i40e_sync_vfr_reset(hw, vf->vf_id))
				dev_info(&pf->pdev->dev,
					 "Reset VF %d never finished\n",
					 vf->vf_id);

		/* Reset VF using VPGEN_VFRTRIG reg. It is also setting
		 * in progress state in rstat1 register.
		 */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is in reset, resets successfully, or resets
 * are disabled and false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF to fail
		 * the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	set_bit(__I40E_VFS_RELEASING, pf->state);
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
	clear_bit(__I40E_VFS_RELEASING, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_sync_vf_state
 * @vf: pointer to the VF info
 * @state: VF state
 *
 * Called from a VF message to synchronize the service with a potential
 * VF reset state
 **/
static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
{
	int i;

	/* When handling some messages, it needs VF state to be set.
	 * It is possible that this flag is cleared during VF reset,
	 * so there is a need to wait until the end of the reset to
	 * handle the request message correctly.
	 */
	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
		if (test_bit(state, &vf->vf_states))
			return true;
		usleep_range(10000, 20000);
	}

	return test_bit(state, &vf->vf_states);
}

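/* Timing note (illustrative; the exact value of I40E_VF_STATE_WAIT_COUNT
 * is assumed to be 20 here): the 10-20 ms sleep per iteration lets a
 * message handler ride out roughly 200-400 ms of VF reset before the
 * request is failed.
 */
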
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* The first element in the array belongs to the primary VF VSI and
	 * we shouldn't delete it. We should, however, delete the rest of the
	 * VSIs created.
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_max_frame_size
 * @vf: pointer to the VF
 *
 * Max frame size is determined based on the current port's max frame size and
 * whether a port VLAN is configured on this VF. The VF is not aware whether
 * it's in a port VLAN so the PF needs to account for this in max frame size
 * checks and sending the max frame size to the VF.
 **/
static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
{
	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;

	if (vf->port_vlan_id)
		max_frame_size -= VLAN_HLEN;

	return max_frame_size;
}

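/* Worked example (illustrative): with a port max_frame_size of 9728
 * and a port VLAN configured, the VF is told 9728 - VLAN_HLEN = 9724,
 * so the 4-byte tag the PF inserts can never push a frame past the
 * wire limit the VF cannot see.
 */
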
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	size_t len = 0;
	int ret;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = struct_size(vfres, vsi_res, num_vsis);
	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
			eth_zero_addr(vf->default_lan_addr.addr);
		}
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	bool alluni = false;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);

		/* Lie to the VF on purpose, because this is an error we can
		 * ignore. Unprivileged VF is not a virtual channel error.
		 */
		aq_ret = 0;
		goto err_out;
	}

	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}

	/* Multicast promiscuous handling */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
						 alluni);
	if (aq_ret)
		goto err_out;

	if (allmulti) {
		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set multicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset multicast promiscuous mode\n",
			 vf->vf_id);

	if (alluni) {
		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set unicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset unicast promiscuous mode\n",
			 vf->vf_id);

err_out:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

2274 * i40e_vc_config_queues_msg
2275 * @vf: pointer to the VF info
2276 * @msg: pointer to the msg buffer
2278 * called from the VF to configure the rx/tx
2281 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2283 struct virtchnl_vsi_queue_config_info *qci =
2284 (struct virtchnl_vsi_queue_config_info *)msg;
2285 struct virtchnl_queue_pair_info *qpi;
2286 u16 vsi_id, vsi_queue_id = 0;
2287 struct i40e_pf *pf = vf->pf;
2288 i40e_status aq_ret = 0;
2289 int i, j = 0, idx = 0;
2290 struct i40e_vsi *vsi;
2291 u16 num_qps_all = 0;
2293 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2294 aq_ret = I40E_ERR_PARAM;
2298 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2299 aq_ret = I40E_ERR_PARAM;
2303 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2304 aq_ret = I40E_ERR_PARAM;
2308 if (vf->adq_enabled) {
2309 for (i = 0; i < vf->num_tc; i++)
2310 num_qps_all += vf->ch[i].num_qps;
2311 if (num_qps_all != qci->num_queue_pairs) {
2312 aq_ret = I40E_ERR_PARAM;
2317 vsi_id = qci->vsi_id;
2319 for (i = 0; i < qci->num_queue_pairs; i++) {
2320 qpi = &qci->qpair[i];
2322 if (!vf->adq_enabled) {
2323 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2324 qpi->txq.queue_id)) {
2325 aq_ret = I40E_ERR_PARAM;
2329 vsi_queue_id = qpi->txq.queue_id;
2331 if (qpi->txq.vsi_id != qci->vsi_id ||
2332 qpi->rxq.vsi_id != qci->vsi_id ||
2333 qpi->rxq.queue_id != vsi_queue_id) {
2334 aq_ret = I40E_ERR_PARAM;
2339 if (vf->adq_enabled) {
2340 if (idx >= ARRAY_SIZE(vf->ch)) {
2341 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2344 vsi_id = vf->ch[idx].vsi_id;
2347 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2348 &qpi->rxq) ||
2349 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2350 &qpi->txq)) {
2351 aq_ret = I40E_ERR_PARAM;
2355 /* For ADq there can be up to 4 VSIs with max 4 queues each.
2356 * The VF does not know about these additional VSIs; all it
2357 * cares about is its own queues. The PF configures these queues
2358 * to the appropriate VSIs based on the TC mapping.
2360 if (vf->adq_enabled) {
2361 if (idx >= ARRAY_SIZE(vf->ch)) {
2362 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2365 if (j == (vf->ch[idx].num_qps - 1)) {
2366 idx++;
2367 j = 0; /* resetting the queue count */
2368 vsi_queue_id = 0;
2369 } else {
2370 j++;
2371 vsi_queue_id++;
2372 }
2375 /* set vsi num_queue_pairs in use to num configured by VF */
2376 if (!vf->adq_enabled) {
2377 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2378 qci->num_queue_pairs;
2380 for (i = 0; i < vf->num_tc; i++) {
2381 vsi = pf->vsi[vf->ch[i].vsi_idx];
2382 vsi->num_queue_pairs = vf->ch[i].num_qps;
2384 if (i40e_update_adq_vsi_queues(vsi, i)) {
2385 aq_ret = I40E_ERR_CONFIG;
2392 /* send the response to the VF */
2393 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2394 aq_ret);
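/* Worked example of the ADq path above: with num_tc = 2 and
 * vf->ch[] = { { .num_qps = 4 }, { .num_qps = 4 } }, queue pairs 0-3 of
 * the VF's flat queue map are configured on channel 0's VSI and pairs
 * 4-7 on channel 1's VSI; 'j' walks the queues within a channel and
 * 'idx' advances to the next channel VSI after its last queue is seen.
 */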
2398 * i40e_validate_queue_map - check queue map is valid
2399 * @vf: the VF structure pointer
2400 * @vsi_id: VSI id
2401 * @queuemap: Tx or Rx queue map
2403 * check if Tx or Rx queue map is valid
2405 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2406 unsigned long queuemap)
2408 u16 vsi_queue_id, queue_id;
2410 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2411 if (vf->adq_enabled) {
2412 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2413 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2414 } else {
2415 queue_id = vsi_queue_id;
2416 }
2418 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2419 return -EINVAL;
2422 return 0;
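/* Example of the ADq remap above, assuming I40E_MAX_VF_VSI and
 * I40E_DEFAULT_QUEUES_PER_VF are both 4: a flat vsi_queue_id of 9
 * resolves to channel 9 / 4 = 2 (i.e. vf->ch[2].vsi_id) with local
 * queue_id 9 % 4 = 1.
 */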
2426 * i40e_vc_config_irq_map_msg
2427 * @vf: pointer to the VF info
2428 * @msg: pointer to the msg buffer
2430 * called from the VF to configure the irq to
2431 * queue map
2433 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2435 struct virtchnl_irq_map_info *irqmap_info =
2436 (struct virtchnl_irq_map_info *)msg;
2437 struct virtchnl_vector_map *map;
2438 u16 vsi_id;
2439 i40e_status aq_ret = 0;
2440 int i;
2442 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2443 aq_ret = I40E_ERR_PARAM;
2447 if (irqmap_info->num_vectors >
2448 vf->pf->hw.func_caps.num_msix_vectors_vf) {
2449 aq_ret = I40E_ERR_PARAM;
2453 for (i = 0; i < irqmap_info->num_vectors; i++) {
2454 map = &irqmap_info->vecmap[i];
2455 /* validate msg params */
2456 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2457 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2458 aq_ret = I40E_ERR_PARAM;
2461 vsi_id = map->vsi_id;
2463 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2464 aq_ret = I40E_ERR_PARAM;
2468 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2469 aq_ret = I40E_ERR_PARAM;
2473 i40e_config_irq_link_list(vf, vsi_id, map);
2476 /* send the response to the VF */
2477 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2478 aq_ret);
2482 * i40e_ctrl_vf_tx_rings
2483 * @vsi: the SRIOV VSI being configured
2484 * @q_map: bit map of the queues to be enabled
2485 * @enable: start or stop the queue
2487 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2488 bool enable)
2490 struct i40e_pf *pf = vsi->back;
2491 int ret = 0;
2492 u16 q_id;
2494 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2495 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2496 vsi->base_queue + q_id,
2497 false /*is xdp*/, enable);
2498 if (ret)
2499 break;
2502 return ret;
2505 * i40e_ctrl_vf_rx_rings
2506 * @vsi: the SRIOV VSI being configured
2507 * @q_map: bit map of the queues to be enabled
2508 * @enable: start or stop the queue
2510 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2511 bool enable)
2513 struct i40e_pf *pf = vsi->back;
2514 int ret = 0;
2515 u16 q_id;
2517 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2518 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2519 enable);
2520 if (ret)
2521 break;
2524 return ret;
2527 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2528 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2530 * Returns true if validation was successful, else false.
2532 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2534 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2535 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2536 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2537 return false;
2539 return true;
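/* Example: for a VF with 8 queue pairs, vqs->tx_queues = 0x00FF selects
 * queues 0-7. A request is rejected either when both bitmaps are empty
 * (a no-op request) or when any bit at or above BIT(I40E_MAX_VF_QUEUES)
 * is set, i.e. a queue the VF cannot own.
 */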
2543 * i40e_vc_enable_queues_msg
2544 * @vf: pointer to the VF info
2545 * @msg: pointer to the msg buffer
2547 * called from the VF to enable all or specific queue(s)
2549 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2551 struct virtchnl_queue_select *vqs =
2552 (struct virtchnl_queue_select *)msg;
2553 struct i40e_pf *pf = vf->pf;
2554 i40e_status aq_ret = 0;
2555 int i;
2557 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2558 aq_ret = I40E_ERR_PARAM;
2562 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2563 aq_ret = I40E_ERR_PARAM;
2567 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2568 aq_ret = I40E_ERR_PARAM;
2572 /* Use the queue bit map sent by the VF */
2573 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2574 true)) {
2575 aq_ret = I40E_ERR_TIMEOUT;
2578 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2579 true)) {
2580 aq_ret = I40E_ERR_TIMEOUT;
2584 /* need to start the rings for additional ADq VSI's as well */
2585 if (vf->adq_enabled) {
2586 /* zero belongs to LAN VSI */
2587 for (i = 1; i < vf->num_tc; i++) {
2588 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2589 aq_ret = I40E_ERR_TIMEOUT;
2594 /* send the response to the VF */
2595 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2596 aq_ret);
2600 * i40e_vc_disable_queues_msg
2601 * @vf: pointer to the VF info
2602 * @msg: pointer to the msg buffer
2604 * called from the VF to disable all or specific
2605 * queue(s)
2607 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2609 struct virtchnl_queue_select *vqs =
2610 (struct virtchnl_queue_select *)msg;
2611 struct i40e_pf *pf = vf->pf;
2612 i40e_status aq_ret = 0;
2614 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2615 aq_ret = I40E_ERR_PARAM;
2619 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2620 aq_ret = I40E_ERR_PARAM;
2624 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2625 aq_ret = I40E_ERR_PARAM;
2629 /* Use the queue bit map sent by the VF */
2630 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2631 false)) {
2632 aq_ret = I40E_ERR_TIMEOUT;
2635 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2636 false)) {
2637 aq_ret = I40E_ERR_TIMEOUT;
2641 /* send the response to the VF */
2642 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2643 aq_ret);
2647 * i40e_check_enough_queue - find big enough queue number
2648 * @vf: pointer to the VF info
2649 * @needed: the number of items needed
2651 * Returns the base item index of the queue, or negative for error
2653 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2655 unsigned int i, cur_queues, more, pool_size;
2656 struct i40e_lump_tracking *pile;
2657 struct i40e_pf *pf = vf->pf;
2658 struct i40e_vsi *vsi;
2660 vsi = pf->vsi[vf->lan_vsi_idx];
2661 cur_queues = vsi->alloc_queue_pairs;
2663 /* if current allocated queues are enough for need */
2664 if (cur_queues >= needed)
2665 return vsi->base_queue;
2667 pile = pf->qp_pile;
2668 if (cur_queues > 0) {
2669 /* if the allocated queues are not zero
2670 * just check if there are enough queues for more
2671 * behind the allocated queues.
2673 more = needed - cur_queues;
2674 for (i = vsi->base_queue + cur_queues;
2675 i < pile->num_entries; i++) {
2676 if (pile->list[i] & I40E_PILE_VALID_BIT)
2677 break;
2679 if (more-- == 1)
2680 /* there is enough */
2681 return vsi->base_queue;
2686 for (i = 0; i < pile->num_entries; i++) {
2687 if (pile->list[i] & I40E_PILE_VALID_BIT) {
2688 pool_size = 0;
2689 continue;
2690 }
2691 if (needed <= ++pool_size)
2692 /* there is enough */
2693 return i;
2696 return -ENOMEM;
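/* Example of the scan above: given a pile map of V V . . V . . . (V =
 * I40E_PILE_VALID_BIT set, '.' = free) and needed = 3, pool_size resets
 * at each V and only reaches 3 on the final run of free entries, so the
 * function returns an index inside that run.
 */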
2700 * i40e_vc_request_queues_msg
2701 * @vf: pointer to the VF info
2702 * @msg: pointer to the msg buffer
2704 * VFs get a default number of queues but can use this message to request a
2705 * different number. If the request is successful, PF will reset the VF and
2706 * return 0. If unsuccessful, PF will send message informing VF of number of
2707 * available queues and return result of sending VF a message.
2709 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2711 struct virtchnl_vf_res_request *vfres =
2712 (struct virtchnl_vf_res_request *)msg;
2713 u16 req_pairs = vfres->num_queue_pairs;
2714 u8 cur_pairs = vf->num_queue_pairs;
2715 struct i40e_pf *pf = vf->pf;
2717 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2718 return -EINVAL;
2720 if (req_pairs > I40E_MAX_VF_QUEUES) {
2721 dev_err(&pf->pdev->dev,
2722 "VF %d tried to request more than %d queues.\n",
2724 I40E_MAX_VF_QUEUES);
2725 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2726 } else if (req_pairs - cur_pairs > pf->queues_left) {
2727 dev_warn(&pf->pdev->dev,
2728 "VF %d requested %d more queues, but only %d left.\n",
2730 req_pairs - cur_pairs,
2732 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2733 } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2734 dev_warn(&pf->pdev->dev,
2735 "VF %d requested %d more queues, but there is not enough for it.\n",
2737 req_pairs - cur_pairs);
2738 vfres->num_queue_pairs = cur_pairs;
2739 } else {
2740 /* successful request */
2741 vf->num_req_queues = req_pairs;
2742 i40e_vc_reset_vf(vf, true);
2743 return 0;
2744 }
2746 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2747 (u8 *)vfres, sizeof(*vfres));
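/* Illustrative sketch (not part of this driver): the VF side of this
 * exchange sends VIRTCHNL_OP_REQUEST_QUEUES carrying a
 * virtchnl_vf_res_request such as { .num_queue_pairs = 8 }, then either
 * observes a VF reset on success or reads the adjusted num_queue_pairs
 * echoed back by the reply built here on failure.
 */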
2751 * i40e_vc_get_stats_msg
2752 * @vf: pointer to the VF info
2753 * @msg: pointer to the msg buffer
2755 * called from the VF to get vsi stats
2757 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2759 struct virtchnl_queue_select *vqs =
2760 (struct virtchnl_queue_select *)msg;
2761 struct i40e_pf *pf = vf->pf;
2762 struct i40e_eth_stats stats;
2763 i40e_status aq_ret = 0;
2764 struct i40e_vsi *vsi;
2766 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2768 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2769 aq_ret = I40E_ERR_PARAM;
2773 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2774 aq_ret = I40E_ERR_PARAM;
2778 vsi = pf->vsi[vf->lan_vsi_idx];
2779 if (!vsi) {
2780 aq_ret = I40E_ERR_PARAM;
2783 i40e_update_eth_stats(vsi);
2784 stats = vsi->eth_stats;
2787 /* send the response back to the VF */
2788 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2789 (u8 *)&stats, sizeof(stats));
2792 #define I40E_MAX_MACVLAN_PER_HW 3072
2793 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
2794 (num_ports))
2795 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2796 * program: 16 for multicast, 1 for its MAC, 1 for broadcast
2797 */
2798 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2799 #define I40E_VC_MAX_VLAN_PER_VF 16
2801 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \
2802 ({ typeof(vf_num) vf_num_ = (vf_num); \
2803 typeof(num_ports) num_ports_ = (num_ports); \
2804 ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \
2805 I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \
2806 I40E_VC_MAX_MAC_ADDR_PER_VF; })
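/* Worked example of the trusted-VF budget above: with
 * I40E_MAX_MACVLAN_PER_HW = 3072, num_ports = 2 and 8 allocated VFs,
 * each trusted VF may use ((3072 / 2 - 8 * 18) / 8) + 18 = 174 + 18 =
 * 192 MAC/VLAN filters.
 */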
2808 * i40e_check_vf_permission
2809 * @vf: pointer to the VF info
2810 * @al: MAC address list from virtchnl
2812 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2813 * if any address in the list is not valid. Checks the following conditions:
2815 * 1) broadcast and zero addresses are never valid
2816 * 2) unicast addresses are not allowed if the VMM has administratively set
2817 * the VF MAC address, unless the VF is marked as privileged.
2818 * 3) There is enough space to add all the addresses.
2820 * Note that to guarantee consistency, it is expected this function be called
2821 * while holding the mac_filter_hash_lock, as otherwise the current number of
2822 * addresses might not be accurate.
2824 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2825 struct virtchnl_ether_addr_list *al)
2827 struct i40e_pf *pf = vf->pf;
2828 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2829 struct i40e_hw *hw = &pf->hw;
2830 int mac2add_cnt = 0;
2831 int i;
2833 for (i = 0; i < al->num_elements; i++) {
2834 struct i40e_mac_filter *f;
2835 u8 *addr = al->list[i].addr;
2837 if (is_broadcast_ether_addr(addr) ||
2838 is_zero_ether_addr(addr)) {
2839 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2840 addr);
2841 return I40E_ERR_INVALID_MAC_ADDR;
2844 /* If the host VMM administrator has set the VF MAC address
2845 * administratively via the ndo_set_vf_mac command then deny
2846 * permission to the VF to add or delete unicast MAC addresses.
2847 * Unless the VF is privileged and then it can do whatever.
2848 * The VF may request to set the MAC address filter already
2849 * assigned to it so do not return an error in that case.
2851 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2852 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2853 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2854 dev_err(&pf->pdev->dev,
2855 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2859 /* count filters that really will be added */
2860 f = i40e_find_mac(vsi, addr);
2861 if (!f)
2862 mac2add_cnt++;
2865 /* If this VF is not privileged, then we can't add more than a limited
2866 * number of addresses. Check to make sure that the additions do not
2867 * push us over the limit.
2869 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2870 if ((i40e_count_filters(vsi) + mac2add_cnt) >
2871 I40E_VC_MAX_MAC_ADDR_PER_VF) {
2872 dev_err(&pf->pdev->dev,
2873 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2876 /* If this VF is trusted, it can use more resources than untrusted.
2877 * However to ensure that every trusted VF has appropriate number of
2878 * resources, divide the whole pool of resources per port and then
2879 * across all VFs.
2880 */
2881 } else {
2882 if ((i40e_count_filters(vsi) + mac2add_cnt) >
2883 I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2884 hw->num_ports)) {
2885 dev_err(&pf->pdev->dev,
2886 "Cannot add more MAC addresses, trusted VF exhausted its resources\n");
2887 return -EPERM;
2890 return 0;
2894 * i40e_vc_add_mac_addr_msg
2895 * @vf: pointer to the VF info
2896 * @msg: pointer to the msg buffer
2898 * add guest mac address filter
2900 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2902 struct virtchnl_ether_addr_list *al =
2903 (struct virtchnl_ether_addr_list *)msg;
2904 struct i40e_pf *pf = vf->pf;
2905 struct i40e_vsi *vsi = NULL;
2906 i40e_status ret = 0;
2907 int i;
2909 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
2910 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2911 ret = I40E_ERR_PARAM;
2915 vsi = pf->vsi[vf->lan_vsi_idx];
2917 /* Lock once, because all function inside for loop accesses VSI's
2918 * MAC filter list which needs to be protected using same lock.
2920 spin_lock_bh(&vsi->mac_filter_hash_lock);
2922 ret = i40e_check_vf_permission(vf, al);
2924 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2928 /* add new addresses to the list */
2929 for (i = 0; i < al->num_elements; i++) {
2930 struct i40e_mac_filter *f;
2932 f = i40e_find_mac(vsi, al->list[i].addr);
2933 if (!f) {
2934 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2936 if (!f) {
2937 dev_err(&pf->pdev->dev,
2938 "Unable to add MAC filter %pM for VF %d\n",
2939 al->list[i].addr, vf->vf_id);
2940 ret = I40E_ERR_PARAM;
2941 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2944 if (is_valid_ether_addr(al->list[i].addr) &&
2945 is_zero_ether_addr(vf->default_lan_addr.addr))
2946 ether_addr_copy(vf->default_lan_addr.addr,
2947 al->list[i].addr);
2950 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2952 /* program the updated filter list */
2953 ret = i40e_sync_vsi_filters(vsi);
2954 if (ret)
2955 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2956 vf->vf_id, ret);
2959 /* send the response to the VF */
2960 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2961 ret, NULL, 0);
2966 * @vf: pointer to the VF info
2967 * @msg: pointer to the msg buffer
2969 * remove guest mac address filter
2971 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2973 struct virtchnl_ether_addr_list *al =
2974 (struct virtchnl_ether_addr_list *)msg;
2975 bool was_unimac_deleted = false;
2976 struct i40e_pf *pf = vf->pf;
2977 struct i40e_vsi *vsi = NULL;
2978 i40e_status ret = 0;
2979 int i;
2981 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
2982 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2983 ret = I40E_ERR_PARAM;
2987 for (i = 0; i < al->num_elements; i++) {
2988 if (is_broadcast_ether_addr(al->list[i].addr) ||
2989 is_zero_ether_addr(al->list[i].addr)) {
2990 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2991 al->list[i].addr, vf->vf_id);
2992 ret = I40E_ERR_INVALID_MAC_ADDR;
2995 if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
2996 was_unimac_deleted = true;
2998 vsi = pf->vsi[vf->lan_vsi_idx];
3000 spin_lock_bh(&vsi->mac_filter_hash_lock);
3001 /* delete addresses from the list */
3002 for (i = 0; i < al->num_elements; i++)
3003 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3004 ret = I40E_ERR_INVALID_MAC_ADDR;
3005 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3009 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3011 /* program the updated filter list */
3012 ret = i40e_sync_vsi_filters(vsi);
3013 if (ret)
3014 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3015 vf->vf_id, ret);
3017 if (vf->trusted && was_unimac_deleted) {
3018 struct i40e_mac_filter *f;
3019 struct hlist_node *h;
3020 u8 *macaddr = NULL;
3021 int bkt;
3023 /* set last unicast mac address as default */
3024 spin_lock_bh(&vsi->mac_filter_hash_lock);
3025 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3026 if (is_valid_ether_addr(f->macaddr))
3027 macaddr = f->macaddr;
3029 if (macaddr)
3030 ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3031 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3034 /* send the response to the VF */
3035 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3039 * i40e_vc_add_vlan_msg
3040 * @vf: pointer to the VF info
3041 * @msg: pointer to the msg buffer
3043 * program guest vlan id
3045 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3047 struct virtchnl_vlan_filter_list *vfl =
3048 (struct virtchnl_vlan_filter_list *)msg;
3049 struct i40e_pf *pf = vf->pf;
3050 struct i40e_vsi *vsi = NULL;
3051 i40e_status aq_ret = 0;
3052 u16 i;
3054 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3055 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3056 dev_err(&pf->pdev->dev,
3057 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
3060 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3061 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3062 aq_ret = I40E_ERR_PARAM;
3066 for (i = 0; i < vfl->num_elements; i++) {
3067 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3068 aq_ret = I40E_ERR_PARAM;
3069 dev_err(&pf->pdev->dev,
3070 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3074 vsi = pf->vsi[vf->lan_vsi_idx];
3075 if (vsi->info.pvid) {
3076 aq_ret = I40E_ERR_PARAM;
3080 i40e_vlan_stripping_enable(vsi);
3081 for (i = 0; i < vfl->num_elements; i++) {
3082 /* add new VLAN filter */
3083 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
3085 if (!ret)
3086 vf->num_vlan++;
3087 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3088 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3089 true,
3090 vfl->vlan_id[i],
3091 NULL);
3092 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3093 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3094 true,
3095 vfl->vlan_id[i],
3096 NULL);
3098 if (ret)
3099 dev_err(&pf->pdev->dev,
3100 "Unable to add VLAN filter %d for VF %d, error %d\n",
3101 vfl->vlan_id[i], vf->vf_id, ret);
3105 /* send the response to the VF */
3106 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3110 * i40e_vc_remove_vlan_msg
3111 * @vf: pointer to the VF info
3112 * @msg: pointer to the msg buffer
3114 * remove programmed guest vlan id
3116 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3118 struct virtchnl_vlan_filter_list *vfl =
3119 (struct virtchnl_vlan_filter_list *)msg;
3120 struct i40e_pf *pf = vf->pf;
3121 struct i40e_vsi *vsi = NULL;
3122 i40e_status aq_ret = 0;
3123 u16 i;
3125 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3126 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3127 aq_ret = I40E_ERR_PARAM;
3131 for (i = 0; i < vfl->num_elements; i++) {
3132 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3133 aq_ret = I40E_ERR_PARAM;
3138 vsi = pf->vsi[vf->lan_vsi_idx];
3139 if (vsi->info.pvid) {
3140 if (vfl->num_elements > 1 || vfl->vlan_id[0])
3141 aq_ret = I40E_ERR_PARAM;
3145 for (i = 0; i < vfl->num_elements; i++) {
3146 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3147 vf->num_vlan--;
3149 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3150 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3151 false,
3152 vfl->vlan_id[i],
3153 NULL);
3154 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3155 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3156 false,
3157 vfl->vlan_id[i],
3158 NULL);
3162 /* send the response to the VF */
3163 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3167 * i40e_vc_iwarp_msg
3168 * @vf: pointer to the VF info
3169 * @msg: pointer to the msg buffer
3170 * @msglen: msg length
3172 * called from the VF for the iwarp msgs
3174 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3176 struct i40e_pf *pf = vf->pf;
3177 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3178 i40e_status aq_ret = 0;
3180 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3181 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3182 aq_ret = I40E_ERR_PARAM;
3186 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3187 msg, msglen);
3190 /* send the response to the VF */
3191 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
3192 aq_ret);
3196 * i40e_vc_iwarp_qvmap_msg
3197 * @vf: pointer to the VF info
3198 * @msg: pointer to the msg buffer
3199 * @config: config qvmap or release it
3201 * called from the VF for the iwarp msgs
3203 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3205 struct virtchnl_iwarp_qvlist_info *qvlist_info =
3206 (struct virtchnl_iwarp_qvlist_info *)msg;
3207 i40e_status aq_ret = 0;
3209 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3210 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3211 aq_ret = I40E_ERR_PARAM;
3215 if (config) {
3216 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
3217 aq_ret = I40E_ERR_PARAM;
3218 } else {
3219 i40e_release_iwarp_qvlist(vf);
3220 }
3223 /* send the response to the VF */
3224 return i40e_vc_send_resp_to_vf(vf,
3225 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
3226 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
3227 aq_ret);
3231 * i40e_vc_config_rss_key
3232 * @vf: pointer to the VF info
3233 * @msg: pointer to the msg buffer
3235 * Configure the VF's RSS key
3237 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3239 struct virtchnl_rss_key *vrk =
3240 (struct virtchnl_rss_key *)msg;
3241 struct i40e_pf *pf = vf->pf;
3242 struct i40e_vsi *vsi = NULL;
3243 i40e_status aq_ret = 0;
3245 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3246 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3247 vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3248 aq_ret = I40E_ERR_PARAM;
3252 vsi = pf->vsi[vf->lan_vsi_idx];
3253 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3255 /* send the response to the VF */
3256 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3257 aq_ret);
3261 * i40e_vc_config_rss_lut
3262 * @vf: pointer to the VF info
3263 * @msg: pointer to the msg buffer
3265 * Configure the VF's RSS LUT
3267 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3269 struct virtchnl_rss_lut *vrl =
3270 (struct virtchnl_rss_lut *)msg;
3271 struct i40e_pf *pf = vf->pf;
3272 struct i40e_vsi *vsi = NULL;
3273 i40e_status aq_ret = 0;
3274 u16 i;
3276 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3277 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3278 vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3279 aq_ret = I40E_ERR_PARAM;
3283 for (i = 0; i < vrl->lut_entries; i++)
3284 if (vrl->lut[i] >= vf->num_queue_pairs) {
3285 aq_ret = I40E_ERR_PARAM;
3289 vsi = pf->vsi[vf->lan_vsi_idx];
3290 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3291 /* send the response to the VF */
3293 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3294 aq_ret);
3298 * i40e_vc_get_rss_hena
3299 * @vf: pointer to the VF info
3300 * @msg: pointer to the msg buffer
3302 * Return the RSS HENA bits allowed by the hardware
3304 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3306 struct virtchnl_rss_hena *vrh = NULL;
3307 struct i40e_pf *pf = vf->pf;
3308 i40e_status aq_ret = 0;
3309 int len = 0;
3311 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3312 aq_ret = I40E_ERR_PARAM;
3315 len = sizeof(struct virtchnl_rss_hena);
3317 vrh = kzalloc(len, GFP_KERNEL);
3318 if (!vrh) {
3319 aq_ret = I40E_ERR_NO_MEMORY;
3323 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3325 /* send the response back to the VF */
3326 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3327 aq_ret, (u8 *)vrh, len);
3328 kfree(vrh);
3329 return aq_ret;
3333 * i40e_vc_set_rss_hena
3334 * @vf: pointer to the VF info
3335 * @msg: pointer to the msg buffer
3337 * Set the RSS HENA bits for the VF
3339 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3341 struct virtchnl_rss_hena *vrh =
3342 (struct virtchnl_rss_hena *)msg;
3343 struct i40e_pf *pf = vf->pf;
3344 struct i40e_hw *hw = &pf->hw;
3345 i40e_status aq_ret = 0;
3347 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3348 aq_ret = I40E_ERR_PARAM;
3351 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3352 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3353 (u32)(vrh->hena >> 32));
3355 /* send the response to the VF */
3357 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
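/* The 64-bit HENA (hash enable) mask is split across two 32-bit
 * registers per VF: I40E_VFQF_HENA1(0, vf_id) holds the low word and
 * I40E_VFQF_HENA1(1, vf_id) the high word, hence the pair of
 * i40e_write_rx_ctl() calls above.
 */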
3361 * i40e_vc_enable_vlan_stripping
3362 * @vf: pointer to the VF info
3363 * @msg: pointer to the msg buffer
3365 * Enable vlan header stripping for the VF
3367 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3369 i40e_status aq_ret = 0;
3370 struct i40e_vsi *vsi;
3372 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3373 aq_ret = I40E_ERR_PARAM;
3377 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3378 i40e_vlan_stripping_enable(vsi);
3380 /* send the response to the VF */
3382 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3383 aq_ret);
3387 * i40e_vc_disable_vlan_stripping
3388 * @vf: pointer to the VF info
3389 * @msg: pointer to the msg buffer
3391 * Disable vlan header stripping for the VF
3393 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3395 i40e_status aq_ret = 0;
3396 struct i40e_vsi *vsi;
3398 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3399 aq_ret = I40E_ERR_PARAM;
3403 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3404 i40e_vlan_stripping_disable(vsi);
3406 /* send the response to the VF */
3408 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3409 aq_ret);
3413 * i40e_validate_cloud_filter
3414 * @vf: pointer to VF structure
3415 * @tc_filter: pointer to filter requested
3417 * This function validates cloud filter programmed as TC filter for ADq
3419 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3420 struct virtchnl_filter *tc_filter)
3422 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3423 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3424 struct i40e_pf *pf = vf->pf;
3425 struct i40e_vsi *vsi = NULL;
3426 struct i40e_mac_filter *f;
3427 struct hlist_node *h;
3428 bool found = false;
3429 int bkt;
3431 if (!tc_filter->action) {
3432 dev_info(&pf->pdev->dev,
3433 "VF %d: Currently ADq doesn't support Drop Action\n",
3438 /* action_meta is TC number here to which the filter is applied */
3439 if (!tc_filter->action_meta ||
3440 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3441 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3442 vf->vf_id, tc_filter->action_meta);
3446 /* Check filter if it's programmed for advanced mode or basic mode.
3447 * There are two ADq modes (for VF only),
3448 * 1. Basic mode: intended to allow as many filter options as possible
3449 * to be added to a VF in Non-trusted mode. Main goal is
3450 * to add filters to its own MAC and VLAN id.
3451 * 2. Advanced mode: is for allowing filters to be applied other than
3452 * its own MAC or VLAN. This mode requires the VF to be
3453 * trusted.
3454 */
3455 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3456 vsi = pf->vsi[vf->lan_vsi_idx];
3457 f = i40e_find_mac(vsi, data.dst_mac);
3459 if (!f) {
3460 dev_info(&pf->pdev->dev,
3461 "Destination MAC %pM doesn't belong to VF %d\n",
3462 data.dst_mac, vf->vf_id);
3467 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3468 hlist) {
3469 if (f->vlan == ntohs(data.vlan_id)) {
3470 found = true;
3471 break;
3472 }
3473 }
3474 if (!found) {
3475 dev_info(&pf->pdev->dev,
3476 "VF %d doesn't have any VLAN id %u\n",
3477 vf->vf_id, ntohs(data.vlan_id));
3482 /* Check if VF is trusted */
3483 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3484 dev_err(&pf->pdev->dev,
3485 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3487 return I40E_ERR_CONFIG;
3491 if (mask.dst_mac[0] & data.dst_mac[0]) {
3492 if (is_broadcast_ether_addr(data.dst_mac) ||
3493 is_zero_ether_addr(data.dst_mac)) {
3494 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3495 vf->vf_id, data.dst_mac);
3500 if (mask.src_mac[0] & data.src_mac[0]) {
3501 if (is_broadcast_ether_addr(data.src_mac) ||
3502 is_zero_ether_addr(data.src_mac)) {
3503 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3504 vf->vf_id, data.src_mac);
3509 if (mask.dst_port & data.dst_port) {
3510 if (!data.dst_port) {
3511 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3512 vf->vf_id);
3517 if (mask.src_port & data.src_port) {
3518 if (!data.src_port) {
3519 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3520 vf->vf_id);
3525 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3526 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3527 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3528 vf->vf_id);
3532 if (mask.vlan_id & data.vlan_id) {
3533 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3534 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3535 vf->vf_id);
3540 return I40E_SUCCESS;
3542 return I40E_ERR_CONFIG;
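/* In short: a "basic mode" filter (destination MAC only, no IP match) is
 * allowed for any VF as long as the MAC/VLAN being matched already
 * belongs to that VF, while anything beyond that is "advanced mode" and
 * requires the VF to be trusted.
 */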
3546 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3547 * @vf: pointer to the VF info
3548 * @seid: seid of the vsi it is searching for
3550 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3552 struct i40e_pf *pf = vf->pf;
3553 struct i40e_vsi *vsi = NULL;
3556 for (i = 0; i < vf->num_tc ; i++) {
3557 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3558 if (vsi && vsi->seid == seid)
3559 return vsi;
3561 return NULL;
3565 * i40e_del_all_cloud_filters
3566 * @vf: pointer to the VF info
3568 * This function deletes all cloud filters
3570 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3572 struct i40e_cloud_filter *cfilter = NULL;
3573 struct i40e_pf *pf = vf->pf;
3574 struct i40e_vsi *vsi = NULL;
3575 struct hlist_node *node;
3578 hlist_for_each_entry_safe(cfilter, node,
3579 &vf->cloud_filter_list, cloud_node) {
3580 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3582 if (!vsi) {
3583 dev_err(&pf->pdev->dev, "VF %d: no VSI found matching seid %u, can't delete cloud filter\n",
3584 vf->vf_id, cfilter->seid);
3588 if (cfilter->dst_port)
3589 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3590 false);
3591 else
3592 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3593 if (ret)
3594 dev_err(&pf->pdev->dev,
3595 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3596 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3597 i40e_aq_str(&pf->hw,
3598 pf->hw.aq.asq_last_status));
3600 hlist_del(&cfilter->cloud_node);
3601 kfree(cfilter);
3602 vf->num_cloud_filters--;
3607 * i40e_vc_del_cloud_filter
3608 * @vf: pointer to the VF info
3609 * @msg: pointer to the msg buffer
3611 * This function deletes a cloud filter programmed as TC filter for ADq
3613 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3615 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3616 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3617 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3618 struct i40e_cloud_filter cfilter, *cf = NULL;
3619 struct i40e_pf *pf = vf->pf;
3620 struct i40e_vsi *vsi = NULL;
3621 struct hlist_node *node;
3622 i40e_status aq_ret = 0;
3623 int i, ret;
3625 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3626 aq_ret = I40E_ERR_PARAM;
3630 if (!vf->adq_enabled) {
3631 dev_info(&pf->pdev->dev,
3632 "VF %d: ADq not enabled, can't apply cloud filter\n",
3634 aq_ret = I40E_ERR_PARAM;
3638 if (i40e_validate_cloud_filter(vf, vcf)) {
3639 dev_info(&pf->pdev->dev,
3640 "VF %d: Invalid input, can't apply cloud filter\n",
3642 aq_ret = I40E_ERR_PARAM;
3646 memset(&cfilter, 0, sizeof(cfilter));
3647 /* parse destination mac address */
3648 for (i = 0; i < ETH_ALEN; i++)
3649 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3651 /* parse source mac address */
3652 for (i = 0; i < ETH_ALEN; i++)
3653 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3655 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3656 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3657 cfilter.src_port = mask.src_port & tcf.src_port;
3659 switch (vcf->flow_type) {
3660 case VIRTCHNL_TCP_V4_FLOW:
3661 cfilter.n_proto = ETH_P_IP;
3662 if (mask.dst_ip[0] & tcf.dst_ip[0])
3663 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3664 ARRAY_SIZE(tcf.dst_ip));
3665 else if (mask.src_ip[0] & tcf.src_ip[0])
3666 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3667 ARRAY_SIZE(tcf.dst_ip));
3668 break;
3669 case VIRTCHNL_TCP_V6_FLOW:
3670 cfilter.n_proto = ETH_P_IPV6;
3671 if (mask.dst_ip[3] & tcf.dst_ip[3])
3672 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3673 sizeof(cfilter.ip.v6.dst_ip6));
3674 if (mask.src_ip[3] & tcf.src_ip[3])
3675 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3676 sizeof(cfilter.ip.v6.src_ip6));
3677 break;
3678 default:
3679 /* TC filter can be configured based on different combinations
3680 * and in this case IP is not a part of filter config
3682 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3683 vf->vf_id);
3686 /* get the vsi to which the tc belongs to */
3687 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3688 cfilter.seid = vsi->seid;
3689 cfilter.flags = vcf->field_flags;
3691 /* Deleting TC filter */
3692 if (tcf.dst_port)
3693 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3694 else
3695 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3696 if (ret)
3697 dev_err(&pf->pdev->dev,
3698 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3699 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3700 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3704 hlist_for_each_entry_safe(cf, node,
3705 &vf->cloud_filter_list, cloud_node) {
3706 if (cf->seid != cfilter.seid)
3707 continue;
3708 if (mask.dst_port)
3709 if (cfilter.dst_port != cf->dst_port)
3710 continue;
3711 if (mask.dst_mac[0])
3712 if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3713 continue;
3714 /* for ipv4 data to be valid, only first byte of mask is set */
3715 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3716 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3717 ARRAY_SIZE(tcf.dst_ip)))
3718 continue;
3719 /* for ipv6, mask is set for all sixteen bytes (4 words) */
3720 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3721 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3722 sizeof(cfilter.ip.v6.dst_ip6)))
3723 continue;
3724 if (mask.vlan_id)
3725 if (cfilter.vlan_id != cf->vlan_id)
3726 continue;
3728 hlist_del(&cf->cloud_node);
3729 kfree(cf);
3730 vf->num_cloud_filters--;
3734 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3735 aq_ret);
3739 * i40e_vc_add_cloud_filter
3740 * @vf: pointer to the VF info
3741 * @msg: pointer to the msg buffer
3743 * This function adds a cloud filter programmed as TC filter for ADq
3745 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3747 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3748 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3749 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3750 struct i40e_cloud_filter *cfilter = NULL;
3751 struct i40e_pf *pf = vf->pf;
3752 struct i40e_vsi *vsi = NULL;
3753 i40e_status aq_ret = 0;
3754 int i, ret;
3756 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3757 aq_ret = I40E_ERR_PARAM;
3761 if (!vf->adq_enabled) {
3762 dev_info(&pf->pdev->dev,
3763 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3765 aq_ret = I40E_ERR_PARAM;
3769 if (i40e_validate_cloud_filter(vf, vcf)) {
3770 dev_info(&pf->pdev->dev,
3771 "VF %d: Invalid input/s, can't apply cloud filter\n",
3773 aq_ret = I40E_ERR_PARAM;
3777 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3778 if (!cfilter)
3779 return -ENOMEM;
3781 /* parse destination mac address */
3782 for (i = 0; i < ETH_ALEN; i++)
3783 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3785 /* parse source mac address */
3786 for (i = 0; i < ETH_ALEN; i++)
3787 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3789 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3790 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3791 cfilter->src_port = mask.src_port & tcf.src_port;
3793 switch (vcf->flow_type) {
3794 case VIRTCHNL_TCP_V4_FLOW:
3795 cfilter->n_proto = ETH_P_IP;
3796 if (mask.dst_ip[0] & tcf.dst_ip[0])
3797 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3798 ARRAY_SIZE(tcf.dst_ip));
3799 else if (mask.src_ip[0] & tcf.src_ip[0])
3800 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3801 ARRAY_SIZE(tcf.dst_ip));
3802 break;
3803 case VIRTCHNL_TCP_V6_FLOW:
3804 cfilter->n_proto = ETH_P_IPV6;
3805 if (mask.dst_ip[3] & tcf.dst_ip[3])
3806 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3807 sizeof(cfilter->ip.v6.dst_ip6));
3808 if (mask.src_ip[3] & tcf.src_ip[3])
3809 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3810 sizeof(cfilter->ip.v6.src_ip6));
3811 break;
3812 default:
3813 /* TC filter can be configured based on different combinations
3814 * and in this case IP is not a part of filter config
3816 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3817 vf->vf_id);
3820 /* get the VSI to which the TC belongs to */
3821 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3822 cfilter->seid = vsi->seid;
3823 cfilter->flags = vcf->field_flags;
3825 /* Adding cloud filter programmed as TC filter */
3826 if (tcf.dst_port)
3827 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3828 else
3829 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3830 if (ret)
3831 dev_err(&pf->pdev->dev,
3832 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3833 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3834 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3838 INIT_HLIST_NODE(&cfilter->cloud_node);
3839 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3840 /* release the pointer passing it to the collection */
3841 cfilter = NULL;
3842 vf->num_cloud_filters++;
3846 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3847 aq_ret);
3851 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3852 * @vf: pointer to the VF info
3853 * @msg: pointer to the msg buffer
3855 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3857 struct virtchnl_tc_info *tci =
3858 (struct virtchnl_tc_info *)msg;
3859 struct i40e_pf *pf = vf->pf;
3860 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3861 int i, adq_request_qps = 0;
3862 i40e_status aq_ret = 0;
3863 u64 speed = 0;
3865 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3866 aq_ret = I40E_ERR_PARAM;
3870 /* ADq cannot be applied if spoof check is ON */
3871 if (vf->spoofchk) {
3872 dev_err(&pf->pdev->dev,
3873 "Spoof check is ON, turn it OFF to enable ADq\n");
3874 aq_ret = I40E_ERR_PARAM;
3878 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3879 dev_err(&pf->pdev->dev,
3880 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3882 aq_ret = I40E_ERR_PARAM;
3886 /* max number of traffic classes for VF currently capped at 4 */
3887 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3888 dev_err(&pf->pdev->dev,
3889 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3890 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3891 aq_ret = I40E_ERR_PARAM;
3895 /* validate queues for each TC */
3896 for (i = 0; i < tci->num_tc; i++)
3897 if (!tci->list[i].count ||
3898 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3899 dev_err(&pf->pdev->dev,
3900 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3901 vf->vf_id, i, tci->list[i].count,
3902 I40E_DEFAULT_QUEUES_PER_VF);
3903 aq_ret = I40E_ERR_PARAM;
3907 /* need Max VF queues but already have default number of queues */
3908 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3910 if (pf->queues_left < adq_request_qps) {
3911 dev_err(&pf->pdev->dev,
3912 "No queues left to allocate to VF %d\n",
3914 aq_ret = I40E_ERR_PARAM;
3917 /* we need to allocate max VF queues to enable ADq so as to
3918 * make sure ADq enabled VF always gets back queues when it
3919 * goes through a reset.
3921 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3924 /* get link speed in Mbps to validate rate limit */
3925 speed = i40e_vc_link_speed2mbps(ls->link_speed);
3926 if (speed == SPEED_UNKNOWN) {
3927 dev_err(&pf->pdev->dev,
3928 "Cannot detect link speed\n");
3929 aq_ret = I40E_ERR_PARAM;
3933 /* parse data from the queue channel info */
3934 vf->num_tc = tci->num_tc;
3935 for (i = 0; i < vf->num_tc; i++) {
3936 if (tci->list[i].max_tx_rate) {
3937 if (tci->list[i].max_tx_rate > speed) {
3938 dev_err(&pf->pdev->dev,
3939 "Invalid max tx rate %llu specified for VF %d.",
3940 tci->list[i].max_tx_rate,
3942 aq_ret = I40E_ERR_PARAM;
3945 vf->ch[i].max_tx_rate =
3946 tci->list[i].max_tx_rate;
3949 vf->ch[i].num_qps = tci->list[i].count;
3952 /* set this flag only after making sure all inputs are sane */
3953 vf->adq_enabled = true;
3955 /* reset the VF in order to allocate resources */
3956 i40e_vc_reset_vf(vf, true);
3958 return I40E_SUCCESS;
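/* Note that the success path deliberately does not reply from here: the
 * VF reset issued above re-allocates resources, and the VF learns its
 * new TC/queue layout from the subsequent VIRTCHNL_OP_GET_VF_RESOURCES
 * exchange. Only the error path answers with
 * VIRTCHNL_OP_ENABLE_CHANNELS below.
 */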
3960 /* send the response to the VF */
3962 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3963 aq_ret);
3967 * i40e_vc_del_qch_msg
3968 * @vf: pointer to the VF info
3969 * @msg: pointer to the msg buffer
3971 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3973 struct i40e_pf *pf = vf->pf;
3974 i40e_status aq_ret = 0;
3976 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3977 aq_ret = I40E_ERR_PARAM;
3981 if (vf->adq_enabled) {
3982 i40e_del_all_cloud_filters(vf);
3984 vf->adq_enabled = false;
3986 dev_info(&pf->pdev->dev,
3987 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3990 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3991 vf->vf_id);
3992 aq_ret = I40E_ERR_PARAM;
3995 /* reset the VF in order to allocate resources */
3996 i40e_vc_reset_vf(vf, true);
3998 return I40E_SUCCESS;
4001 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4002 aq_ret);
4006 * i40e_vc_process_vf_msg
4007 * @pf: pointer to the PF structure
4008 * @vf_id: source VF id
4009 * @v_opcode: operation code
4010 * @v_retval: unused return value code
4011 * @msg: pointer to the msg buffer
4012 * @msglen: msg length
4014 * called from the common aeq/arq handler to
4015 * process request from VF
4017 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4018 u32 __always_unused v_retval, u8 *msg, u16 msglen)
4020 struct i40e_hw *hw = &pf->hw;
4021 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4022 struct i40e_vf *vf;
4023 int ret;
4025 pf->vf_aq_requests++;
4026 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4027 return -EINVAL;
4028 vf = &(pf->vf[local_vf_id]);
4030 /* Check if VF is disabled. */
4031 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4032 return I40E_ERR_PARAM;
4034 /* perform basic checks on the msg */
4035 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4036 if (ret) {
4038 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
4039 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4040 local_vf_id, v_opcode, msglen);
4041 switch (ret) {
4042 case VIRTCHNL_STATUS_ERR_PARAM:
4043 return -EPERM;
4044 default:
4045 return -EINVAL;
4046 }
4047 }
4049 switch (v_opcode) {
4050 case VIRTCHNL_OP_VERSION:
4051 ret = i40e_vc_get_version_msg(vf, msg);
4053 case VIRTCHNL_OP_GET_VF_RESOURCES:
4054 ret = i40e_vc_get_vf_resources_msg(vf, msg);
4055 i40e_vc_notify_vf_link_state(vf);
4057 case VIRTCHNL_OP_RESET_VF:
4058 i40e_vc_reset_vf(vf, false);
4059 ret = 0;
4060 break;
4061 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4062 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4064 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4065 ret = i40e_vc_config_queues_msg(vf, msg);
4067 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4068 ret = i40e_vc_config_irq_map_msg(vf, msg);
4070 case VIRTCHNL_OP_ENABLE_QUEUES:
4071 ret = i40e_vc_enable_queues_msg(vf, msg);
4072 i40e_vc_notify_vf_link_state(vf);
4074 case VIRTCHNL_OP_DISABLE_QUEUES:
4075 ret = i40e_vc_disable_queues_msg(vf, msg);
4077 case VIRTCHNL_OP_ADD_ETH_ADDR:
4078 ret = i40e_vc_add_mac_addr_msg(vf, msg);
4080 case VIRTCHNL_OP_DEL_ETH_ADDR:
4081 ret = i40e_vc_del_mac_addr_msg(vf, msg);
4083 case VIRTCHNL_OP_ADD_VLAN:
4084 ret = i40e_vc_add_vlan_msg(vf, msg);
4086 case VIRTCHNL_OP_DEL_VLAN:
4087 ret = i40e_vc_remove_vlan_msg(vf, msg);
4089 case VIRTCHNL_OP_GET_STATS:
4090 ret = i40e_vc_get_stats_msg(vf, msg);
4092 case VIRTCHNL_OP_IWARP:
4093 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
4095 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
4096 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
4098 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
4099 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
4101 case VIRTCHNL_OP_CONFIG_RSS_KEY:
4102 ret = i40e_vc_config_rss_key(vf, msg);
4104 case VIRTCHNL_OP_CONFIG_RSS_LUT:
4105 ret = i40e_vc_config_rss_lut(vf, msg);
4107 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4108 ret = i40e_vc_get_rss_hena(vf, msg);
4110 case VIRTCHNL_OP_SET_RSS_HENA:
4111 ret = i40e_vc_set_rss_hena(vf, msg);
4113 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4114 ret = i40e_vc_enable_vlan_stripping(vf, msg);
4116 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4117 ret = i40e_vc_disable_vlan_stripping(vf, msg);
4119 case VIRTCHNL_OP_REQUEST_QUEUES:
4120 ret = i40e_vc_request_queues_msg(vf, msg);
4122 case VIRTCHNL_OP_ENABLE_CHANNELS:
4123 ret = i40e_vc_add_qch_msg(vf, msg);
4125 case VIRTCHNL_OP_DISABLE_CHANNELS:
4126 ret = i40e_vc_del_qch_msg(vf, msg);
4128 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4129 ret = i40e_vc_add_cloud_filter(vf, msg);
4131 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4132 ret = i40e_vc_del_cloud_filter(vf, msg);
4134 case VIRTCHNL_OP_UNKNOWN:
4136 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4137 v_opcode, local_vf_id);
4138 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4139 I40E_ERR_NOT_IMPLEMENTED);
4140 break;
4141 }
4143 return ret;
4144 }
4147 * i40e_vc_process_vflr_event
4148 * @pf: pointer to the PF structure
4150 * called from the VFLR irq handler to
4151 * free up VF resources and state variables
4153 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4155 struct i40e_hw *hw = &pf->hw;
4156 u32 reg, reg_idx, bit_idx;
4157 struct i40e_vf *vf;
4158 int vf_id;
4160 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4161 return 0;
4163 /* Re-enable the VFLR interrupt cause here, before looking for which
4164 * VF got reset. Otherwise, if another VF gets a reset while the
4165 * first one is being processed, that interrupt will be lost, and
4166 * that VF will be stuck in reset forever.
4168 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4169 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4170 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4173 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4174 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4175 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4176 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
4177 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
4178 vf = &pf->vf[vf_id];
4179 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4180 if (reg & BIT(bit_idx))
4181 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4182 i40e_reset_vf(vf, true);
4183 }
4185 return 0;
4186 }
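/* Example of the index math above: with func_caps.vf_base_id = 64 and
 * vf_id = 3, this VF's VFLR status bit is bit (64 + 3) % 32 = 3 of
 * register I40E_GLGEN_VFLRSTAT((64 + 3) / 32) = I40E_GLGEN_VFLRSTAT(2).
 */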
4189 * i40e_validate_vf
4190 * @pf: the physical function
4191 * @vf_id: VF identifier
4193 * Check that the VF is enabled and the VSI exists.
4195 * Returns 0 on success, negative on failure
4197 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4199 struct i40e_vsi *vsi;
4200 struct i40e_vf *vf;
4201 int ret = 0;
4203 if (vf_id >= pf->num_alloc_vfs) {
4204 dev_err(&pf->pdev->dev,
4205 "Invalid VF Identifier %d\n", vf_id);
4209 vf = &pf->vf[vf_id];
4210 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4211 if (!vsi)
4212 ret = -EINVAL;
4214 return ret;
4218 * i40e_ndo_set_vf_mac
4219 * @netdev: network interface device structure
4220 * @vf_id: VF identifier
4221 * @mac: MAC address to set
4223 * program VF mac address
4225 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4227 struct i40e_netdev_priv *np = netdev_priv(netdev);
4228 struct i40e_vsi *vsi = np->vsi;
4229 struct i40e_pf *pf = vsi->back;
4230 struct i40e_mac_filter *f;
4231 struct i40e_vf *vf;
4232 int ret = 0;
4233 struct hlist_node *h;
4234 int bkt;
4235 u8 i;
4237 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4238 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4239 return -EAGAIN;
4242 /* validate the request */
4243 ret = i40e_validate_vf(pf, vf_id);
4247 vf = &pf->vf[vf_id];
4249 /* When the VF is resetting wait until it is done.
4250 * It can take up to 200 milliseconds,
4251 * but wait for up to 300 milliseconds to be safe.
4252 * Acquire the VSI pointer only after the VF has been
4253 * properly initialized.
4255 for (i = 0; i < 15; i++) {
4256 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4257 break;
4258 msleep(20);
4260 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4261 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4262 vf_id);
4263 ret = -EAGAIN;
4266 vsi = pf->vsi[vf->lan_vsi_idx];
4268 if (is_multicast_ether_addr(mac)) {
4269 dev_err(&pf->pdev->dev,
4270 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4275 /* Lock once because below invoked function add/del_filter requires
4276 * mac_filter_hash_lock to be held
4277 */
4278 spin_lock_bh(&vsi->mac_filter_hash_lock);
4280 /* delete the temporary mac address */
4281 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4282 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4284 /* Delete all the filters for this VSI - we're going to kill it
4285 * anyway.
4286 */
4287 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4288 __i40e_del_filter(vsi, f);
4290 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4292 /* program mac filter */
4293 if (i40e_sync_vsi_filters(vsi)) {
4294 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4295 ret = -EIO;
4298 ether_addr_copy(vf->default_lan_addr.addr, mac);
4300 if (is_zero_ether_addr(mac)) {
4301 vf->pf_set_mac = false;
4302 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4304 vf->pf_set_mac = true;
4305 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4306 mac, vf_id);
4309 /* Force the VF interface down so it has to bring up with new MAC
4310 * address
4311 */
4312 i40e_vc_reset_vf(vf, true);
4313 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4316 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4317 return ret;
4321 * i40e_ndo_set_vf_port_vlan
4322 * @netdev: network interface device structure
4323 * @vf_id: VF identifier
4324 * @vlan_id: VLAN ID to set
4325 * @qos: priority setting
4326 * @vlan_proto: vlan protocol
4328 * program VF vlan id and/or qos
4330 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4331 u16 vlan_id, u8 qos, __be16 vlan_proto)
4333 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4334 struct i40e_netdev_priv *np = netdev_priv(netdev);
4335 bool allmulti = false, alluni = false;
4336 struct i40e_pf *pf = np->vsi->back;
4337 struct i40e_vsi *vsi;
4338 struct i40e_vf *vf;
4339 int ret;
4341 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4342 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4343 return -EAGAIN;
4346 /* validate the request */
4347 ret = i40e_validate_vf(pf, vf_id);
4351 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4352 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4357 if (vlan_proto != htons(ETH_P_8021Q)) {
4358 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4359 ret = -EPROTONOSUPPORT;
4363 vf = &pf->vf[vf_id];
4364 vsi = pf->vsi[vf->lan_vsi_idx];
4365 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4366 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4367 vf_id);
4368 ret = -EAGAIN;
4372 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4373 /* duplicate request, so just return success */
4376 i40e_vlan_stripping_enable(vsi);
4377 i40e_vc_reset_vf(vf, true);
4378 /* During reset the VF got a new VSI, so refresh a pointer. */
4379 vsi = pf->vsi[vf->lan_vsi_idx];
4380 /* Locked once because multiple functions below iterate list */
4381 spin_lock_bh(&vsi->mac_filter_hash_lock);
4383 /* Check for condition where there was already a port VLAN ID
4384 * filter set and now it is being deleted by setting it to zero.
4385 * Additionally check for the condition where there was a port
4386 * VLAN but now there is a new and different port VLAN being set.
4387 * Before deleting all the old VLAN filters we must add new ones
4388 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4389 * MAC addresses deleted.
4391 if ((!(vlan_id || qos) ||
4392 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4393 vsi->info.pvid) {
4394 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4396 dev_info(&vsi->back->pdev->dev,
4397 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4398 vsi->back->hw.aq.asq_last_status);
4399 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4404 if (vsi->info.pvid) {
4405 /* remove all filters on the old VLAN */
4406 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4407 VLAN_VID_MASK));
4410 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4412 /* disable promisc modes in case they were enabled */
4413 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4414 allmulti, alluni);
4415 if (ret) {
4416 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4420 if (vlan_id || qos)
4421 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4422 else
4423 i40e_vsi_remove_pvid(vsi);
4424 spin_lock_bh(&vsi->mac_filter_hash_lock);
4426 if (vlan_id) {
4427 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4428 vlan_id, qos, vf_id);
4430 /* add new VLAN filter for each MAC */
4431 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4432 if (ret) {
4433 dev_info(&vsi->back->pdev->dev,
4434 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4435 vsi->back->hw.aq.asq_last_status);
4436 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4440 /* remove the previously added non-VLAN MAC filters */
4441 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4444 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4446 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4447 alluni = true;
4449 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4450 allmulti = true;
4452 /* Schedule the worker thread to take care of applying changes */
4453 i40e_service_event_schedule(vsi->back);
4455 if (ret) {
4456 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4460 /* The Port VLAN needs to be saved across resets the same as the
4461 * default LAN MAC address.
4463 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4465 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4466 if (ret)
4467 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4474 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4475 return ret;
4479 * i40e_ndo_set_vf_bw
4480 * @netdev: network interface device structure
4481 * @vf_id: VF identifier
4482 * @min_tx_rate: Minimum Tx rate
4483 * @max_tx_rate: Maximum Tx rate
4485 * configure VF Tx rate
4487 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4490 struct i40e_netdev_priv *np = netdev_priv(netdev);
4491 struct i40e_pf *pf = np->vsi->back;
4492 struct i40e_vsi *vsi;
4493 struct i40e_vf *vf;
4494 int ret;
4496 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4497 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4498 return -EAGAIN;
4501 /* validate the request */
4502 ret = i40e_validate_vf(pf, vf_id);
4506 if (min_tx_rate) {
4507 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4508 min_tx_rate, vf_id);
4509 ret = -EINVAL;
4513 vf = &pf->vf[vf_id];
4514 vsi = pf->vsi[vf->lan_vsi_idx];
4515 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4516 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4517 vf_id);
4518 ret = -EAGAIN;
4522 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4526 vf->tx_rate = max_tx_rate;
4528 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4529 return ret;
4533 * i40e_ndo_get_vf_config
4534 * @netdev: network interface device structure
4535 * @vf_id: VF identifier
4536 * @ivi: VF configuration structure
4538 * return VF configuration
4540 int i40e_ndo_get_vf_config(struct net_device *netdev,
4541 int vf_id, struct ifla_vf_info *ivi)
4543 struct i40e_netdev_priv *np = netdev_priv(netdev);
4544 struct i40e_vsi *vsi = np->vsi;
4545 struct i40e_pf *pf = vsi->back;
4546 struct i40e_vf *vf;
4547 int ret;
4549 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4550 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4551 return -EAGAIN;
4554 /* validate the request */
4555 ret = i40e_validate_vf(pf, vf_id);
4559 vf = &pf->vf[vf_id];
4560 /* first vsi is always the LAN vsi */
4561 vsi = pf->vsi[vf->lan_vsi_idx];
4562 if (!vsi) {
4563 ret = -ENOENT;
4565 }
4567 ivi->vf = vf_id;
4569 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4571 ivi->max_tx_rate = vf->tx_rate;
4572 ivi->min_tx_rate = 0;
4573 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4574 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4575 I40E_VLAN_PRIORITY_SHIFT;
4576 if (vf->link_forced == false)
4577 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4578 else if (vf->link_up == true)
4579 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4581 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4582 ivi->spoofchk = vf->spoofchk;
4583 ivi->trusted = vf->trusted;
4584 ret = 0;
4587 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4588 return ret;
4592 * i40e_ndo_set_vf_link_state
4593 * @netdev: network interface device structure
4594 * @vf_id: VF identifier
4595 * @link: required link state
4597 * Set the link state of a specified VF, regardless of physical link state
4599 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4601 struct i40e_netdev_priv *np = netdev_priv(netdev);
4602 struct i40e_pf *pf = np->vsi->back;
4603 struct i40e_link_status *ls = &pf->hw.phy.link_info;
4604 struct virtchnl_pf_event pfe;
4605 struct i40e_hw *hw = &pf->hw;
4606 struct i40e_vf *vf;
4607 int abs_vf_id;
4608 int ret = 0;
4610 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4611 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4615 /* validate the request */
4616 if (vf_id >= pf->num_alloc_vfs) {
4617 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4622 vf = &pf->vf[vf_id];
4623 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4625 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4626 pfe.severity = PF_EVENT_SEVERITY_INFO;
4629 case IFLA_VF_LINK_STATE_AUTO:
4630 vf->link_forced = false;
4631 i40e_set_vf_link_state(vf, &pfe, ls);
4633 case IFLA_VF_LINK_STATE_ENABLE:
4634 vf->link_forced = true;
4636 i40e_set_vf_link_state(vf, &pfe, ls);
4638 case IFLA_VF_LINK_STATE_DISABLE:
4639 vf->link_forced = true;
4640 vf->link_up = false;
4641 i40e_set_vf_link_state(vf, &pfe, ls);
4647 /* Notify the VF of its new link state */
4648 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4649 0, (u8 *)&pfe, sizeof(pfe), NULL);
4652 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
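
/* Usage sketch: as the ndo_set_vf_link_state handler, the three cases
 * above correspond to
 *
 *	ip link set <pf-netdev> vf 0 state auto     (follow physical link)
 *	ip link set <pf-netdev> vf 0 state enable   (force link up)
 *	ip link set <pf-netdev> vf 0 state disable  (force link down)
 *
 * and the VF learns the result through the VIRTCHNL_OP_EVENT message sent
 * above.
 */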
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &(pf->vf[vf_id]);
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
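
/* Note on the disable path: ctxt.info was zeroed by the memset above, so
 * when @enable is false the security section is sent with both check
 * flags cleared, and valid_sections tells the firmware to apply that
 * cleared section. From user space this maps to, e.g.,
 *
 *	ip link set <pf-netdev> vf 0 spoofchk off
 */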
/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;

	/* request PF to sync mac/vlan filters for the VF */
	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
	pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer Trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}

out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
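
/* Usage sketch: as the ndo_set_vf_trust handler this is reached via
 *
 *	ip link set <pf-netdev> vf 0 trust on
 *
 * Trust gates privileged VF requests such as promiscuous mode and ADQ
 * cloud filters, which is why revoking it resets the VF and, when ADQ is
 * enabled, deletes all of the VF's cloud filters.
 */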
/**
 * i40e_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-127)
 * @vf_stats: pointer to the OS memory to be initialized
 **/
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
		      struct ifla_vf_stats *vf_stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_eth_stats *stats;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;

	/* validate the request */
	if (i40e_validate_vf(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
		return -EBUSY;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;

	i40e_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	/* packet totals are accumulated from the per-cast-type counters */
	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
			       stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
			       stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

	return 0;
}
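
/* Wiring sketch (illustrative; the driver's actual table lives in
 * i40e_main.c): the SR-IOV handlers in this file plug into
 * net_device_ops roughly as
 *
 *	static const struct net_device_ops i40e_netdev_ops = {
 *		...
 *		.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
 *		.ndo_get_vf_config	= i40e_ndo_get_vf_config,
 *		.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
 *		.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
 *		.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
 *		.ndo_get_vf_stats	= i40e_get_vf_stats,
 *	};
 */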