// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 int v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}
/**
 * i40e_vc_link_speed2mbps
 * converts i40e_aq_link_speed to an integer value of Mbps
 * @link_speed: the speed to convert
 *
 * Return the speed as a direct value in Mbps.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}
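/* Illustrative sketch, not part of the original driver: how the helper
 * above maps an AQ link speed to the Mbps value reported on the
 * ADV_LINK_SPEED event path. The wrapper below is hypothetical; the
 * SPEED_* constants come from linux/ethtool.h.
 */
static inline u32 i40e_example_adv_link_speed(bool link_up)
{
	/* a 25G link that is up is reported as 25000 Mbps, a down link as 0 */
	return link_up ? i40e_vc_link_speed2mbps(I40E_LINK_SPEED_25GB) : 0;
}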
/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set link state on a single VF
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}
/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}
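/* Illustrative sketch, not in the original driver: pushing a fresh link
 * status to one VF after an administrative link override. The wrapper name
 * is hypothetical; vf->link_forced and vf->link_up are the real fields
 * consumed by i40e_set_vf_link_state() above.
 */
static inline void i40e_example_force_vf_link(struct i40e_vf *vf, bool up)
{
	vf->link_forced = true;		/* ls->link_info is then ignored */
	vf->link_up = up;		/* state reported to the VF */
	i40e_vc_notify_vf_link_state(vf);
}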
/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}
/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
#ifdef CONFIG_PCI_IOV
void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	u16 pos;

	/* Continue only if this is a PF */
	if (!pdev->is_physfn)
		return;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vf_dev = NULL;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
				pci_restore_msi_state(vf_dev);
		}
	}
}
#endif /* CONFIG_PCI_IOV */
/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF info
 * @notify_vf: notify vf about reset or not
 * Reset VF handler.
 **/
static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * trigger the reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If PF is in VFs releasing state, resetting a VF is
		 * impossible, so leave it.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for a valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @qid: vsi relative queue id
 *
 * check for a valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for a valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
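/* Illustrative sketch, not in the original driver: the guard-clause pattern
 * the virtchnl message handlers below build out of the three validators
 * above. The combined helper is hypothetical.
 */
static inline bool i40e_example_params_valid(struct i40e_vf *vf, u16 vsi_id,
					     u16 qid, u32 vector_id)
{
	return i40e_vc_isvalid_vsi_id(vf, vsi_id) &&
	       i40e_vc_isvalid_queue_id(vf, vsi_id, qid) &&
	       i40e_vc_isvalid_vector_id(vf, vector_id);
}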
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u16 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (1 to 16) as its
		 * own, they may actually belong to different VSIs (up to 4).
		 * We need to find which queue belongs to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id, which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}
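/* Worked example, not in the original driver: the ADq rebasing above with
 * two TCs of 4 queue pairs each - VF-relative queue 5 is past TC0, so it is
 * rebased to queue 1 of TC1's VSI. The helper is hypothetical.
 */
static inline u16 i40e_example_adq_queue_math(void)
{
	u16 queue_id = 5;	/* VF-relative queue number */
	u16 num_qps_tc0 = 4;	/* queue pairs owned by TC0's VSI */

	/* 5 >= 4, so the queue lives past TC0; rebase it into the next TC */
	if (queue_id >= num_qps_tc0)
		queue_id -= num_qps_tc0;

	return queue_id;	/* 1: queue 1 of TC1's VSI */
}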
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the VF is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
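/* Illustrative sketch, not in the original driver: the linklistmap built
 * above interleaves Rx/Tx per queue, so the bit index is
 * (vsi_queue_id * I40E_VIRTCHNL_SUPPORTED_QTYPES) + qtype, with qtype 0 for
 * Rx and 1 for Tx. Decoding a set bit is the inverse; the helper name is
 * hypothetical.
 */
static inline void i40e_example_decode_linklist_bit(u16 next_q,
						    u16 *vsi_queue_id,
						    u16 *qtype)
{
	*vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	*qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
}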
/**
 * i40e_release_rdma_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i = 0;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_rdma_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}
/**
 * i40e_config_rdma_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int
i40e_config_rdma_qvlist(struct i40e_vf *vf,
			struct virtchnl_rdma_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_rdma_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	size_t size;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
				    qvlist_info->num_vectors);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

	return 0;

error_context:
	return ret;
}
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if port VLAN is configured increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	return 0;

error_param:
	return ret;
}
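/* Worked example, not in the original driver: the Rx context limits
 * enforced above - header buffers are capped at 2KB - 64 bytes and data
 * buffers at 16KB - 128 bytes, both later programmed in shifted (chunked)
 * units. The helper name is hypothetical.
 */
static inline bool i40e_example_rx_sizes_ok(u32 hdr_size, u32 data_size)
{
	return hdr_size <= (2 * 1024) - 64 &&
	       data_size <= (16 * 1024) - 128;
}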
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store VSI index and id for ADq; don't apply the MAC filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}
/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}
/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes the
 * VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}
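/* Illustrative sketch, not in the original driver: each VSILAN_QTABLE
 * register written by i40e_map_pf_queues_to_vsi() packs two PF queue ids,
 * the even queue in bits 0..15 and the odd queue in bits 16..31, with
 * 0x07FF07FF marking both halves as end-of-list. The helper name is
 * hypothetical.
 */
static inline u32 i40e_example_pack_qtable(u16 even_qid, u16 odd_qid)
{
	return (u32)even_qid | ((u32)odd_qid << 16);
}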
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again; only clear the values in the
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * the available pool.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
/**
 * __i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	u16 num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	int num_vlans;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return num_vlans;
}

/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed in caller.
 *
 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
				    s16 **vlan_list)
{
	struct i40e_mac_filter *f;
	int i = 0;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
	if (!(*vlan_list))
		goto err;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
			continue;
		(*vlan_list)[i++] = f->vlan;
	}
err:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
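/* Illustrative sketch, not in the original driver: the caller contract for
 * i40e_get_vlan_list_sync() - the VLAN array is allocated by the helper and
 * must be freed by the caller, as the promiscuous-mode path below does. The
 * wrapper name is hypothetical.
 */
static inline int i40e_example_count_vlans(struct i40e_vsi *vsi)
{
	u16 num_vlans;
	s16 *vl;

	i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
	if (!vl)
		return -ENOMEM;

	kfree(vl);		/* caller owns the list */
	return num_vlans;
}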
/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *		  for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *		    for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static int
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
		     bool unicast_enable, s16 *vl, u16 num_vlans)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int aq_ret, aq_tmp = 0;
	int i;

	/* No VLAN to set promisc on, set on VSI */
	if (!num_vlans || !vl) {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
							       multi_enable,
							       NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
							     unicast_enable,
							     NULL, true);

		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}

		return aq_ret;
	}

	for (i = 0; i < num_vlans; i++) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
							    multi_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
							    unicast_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}
	}

	if (aq_tmp)
		aq_ret = aq_tmp;

	return aq_ret;
}
/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
					   u16 vsi_id,
					   bool allmulti,
					   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int aq_ret = 0;
	u16 num_vlans;
	s16 *vl;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return -EINVAL;

	if (vf->port_vlan_id) {
		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
					      alluni, &vf->port_vlan_id, 1);
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

		if (!vl)
			return -ENOMEM;

		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
					      vl, num_vlans);
		kfree(vl);
		return aq_ret;
	}

	/* no VLANs to set on, set on VSI */
	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
				      NULL, 0);
	return aq_ret;
}
/**
 * i40e_sync_vfr_reset
 * @hw: pointer to hw struct
 * @vf_id: VF identifier
 *
 * Before triggering a hardware reset, we need to know that no other process
 * has reserved the hardware for any reset operations. This check is done by
 * examining the status of the RSTAT1 register used to signal the reset.
 **/
static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
{
	u32 reg;
	int i;

	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
			   I40E_VFINT_ICR0_ADMINQ_MASK;
		if (reg)
			return 0;

		usleep_range(100, 200);
	}

	return -EAGAIN;
}
/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	bool vf_active;
	u32 radq;

	/* warn the VF */
	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* Sync VFR reset before triggering the next one */
		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
			    I40E_VFINT_ICR0_ADMINQ_MASK;
		if (vf_active && !radq)
			/* wait for the VF driver to finish its reset */
			if (i40e_sync_vfr_reset(hw, vf->vf_id))
				dev_info(&pf->pdev->dev,
					 "Reset VF %d never finished\n",
					 vf->vf_id);

		/* Reset the VF using the VPGEN_VFRTRIG reg, which also sets
		 * the reset-in-progress state in the RSTAT1 register.
		 */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}
/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is in reset, resets successfully, or resets
 * are disabled and false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* Bail out if VFs are disabled. */
	if (test_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	/* If VF is being reset already we don't need to continue. */
	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
		return true;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);

	return true;
}
/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u32 reg;
	int i, v;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		vf = &pf->vf[v];
		/* If VF is being reset no need to trigger reset again */
		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			i40e_trigger_vf_reset(&pf->vf[v], flr);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
					break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
			continue;

		i40e_cleanup_reset_vf(&pf->vf[v]);
	}

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	set_bit(__I40E_VFS_RELEASING, pf->state);
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
	clear_bit(__I40E_VFS_RELEASING, pf->state);
}
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}
#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	int aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   int retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
/**
 * i40e_sync_vf_state
 * @vf: pointer to the VF info
 * @state: VF state
 *
 * Called from a VF message to synchronize the service with a potential
 * VF reset state
 **/
static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
{
	int i;

	/* Some messages need the VF state to be set before they can be
	 * handled. It is possible that the state flag is cleared during a
	 * VF reset, so wait until the end of the reset to handle the
	 * request message correctly.
	 */
	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
		if (test_bit(state, &vf->vf_states))
			return true;
		usleep_range(10000, 20000);
	}

	return test_bit(state, &vf->vf_states);
}
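/* Illustrative sketch, not in the original driver: the prologue every
 * message handler below uses - bail out (and eventually NAK the VF) when
 * the expected state never shows up after a reset. The wrapper shell is
 * hypothetical.
 */
static inline int i40e_example_handler_prologue(struct i40e_vf *vf)
{
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
		return -EINVAL;	/* handlers return this through virtchnl */
	return 0;
}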
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      0, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}
/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}
/**
 * i40e_vc_get_max_frame_size
 * @vf: pointer to the VF
 *
 * Max frame size is determined based on the current port's max frame size and
 * whether a port VLAN is configured on this VF. The VF is not aware whether
 * it's in a port VLAN so the PF needs to account for this in max frame size
 * checks and sending the max frame size to the VF.
 **/
static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
{
	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;

	if (vf->port_vlan_id)
		max_frame_size -= VLAN_HLEN;

	return max_frame_size;
}
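/* Worked example, not in the original driver: with a port max frame of
 * 9728 bytes and a port VLAN configured, the VF is told 9724, reserving
 * VLAN_HLEN (4) bytes for the tag the VF cannot see. The helper name is
 * hypothetical; VLAN_HLEN comes from linux/if_vlan.h.
 */
static inline u16 i40e_example_vf_max_frame(u16 port_max_frame,
					    bool has_port_vlan)
{
	return has_port_vlan ? port_max_frame - VLAN_HLEN : port_max_frame;
}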
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int aq_ret = 0;
	size_t len = 0;
	int ret;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
		aq_ret = -EINVAL;
		goto err;
	}

	len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = -ENOMEM;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
		set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = -EINVAL;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
			eth_zero_addr(vf->default_lan_addr.addr);
		}
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	bool allmulti = false;
	bool alluni = false;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err_out;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);

		/* Lie to the VF on purpose, because this is an error we can
		 * ignore. Unprivileged VF is not a virtual channel error.
		 */
		aq_ret = 0;
		goto err_out;
	}

	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	/* Multicast promiscuous handling */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
						 alluni);
	if (aq_ret)
		goto err_out;

	if (allmulti) {
		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set multicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset multicast promiscuous mode\n",
			 vf->vf_id);

	if (alluni) {
		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set unicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset unicast promiscuous mode\n",
			 vf->vf_id);

err_out:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
2325 * i40e_vc_config_queues_msg
2326 * @vf: pointer to the VF info
2327 * @msg: pointer to the msg buffer
2329 * called from the VF to configure the rx/tx
2332 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2334 struct virtchnl_vsi_queue_config_info *qci =
2335 (struct virtchnl_vsi_queue_config_info *)msg;
2336 struct virtchnl_queue_pair_info *qpi;
2337 u16 vsi_id, vsi_queue_id = 0;
2338 struct i40e_pf *pf = vf->pf;
2339 int i, j = 0, idx = 0;
2340 struct i40e_vsi *vsi;
2341 u16 num_qps_all = 0;
2344 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2349 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2354 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2359 if (vf->adq_enabled) {
2360 for (i = 0; i < vf->num_tc; i++)
2361 num_qps_all += vf->ch[i].num_qps;
2362 if (num_qps_all != qci->num_queue_pairs) {
2368 vsi_id = qci->vsi_id;
2370 for (i = 0; i < qci->num_queue_pairs; i++) {
2371 qpi = &qci->qpair[i];
2373 if (!vf->adq_enabled) {
2374 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2375 qpi->txq.queue_id)) {
2380 vsi_queue_id = qpi->txq.queue_id;
2382 if (qpi->txq.vsi_id != qci->vsi_id ||
2383 qpi->rxq.vsi_id != qci->vsi_id ||
2384 qpi->rxq.queue_id != vsi_queue_id) {
2390 if (vf->adq_enabled) {
2391 if (idx >= ARRAY_SIZE(vf->ch)) {
2395 vsi_id = vf->ch[idx].vsi_id;
2398 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2400 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2406 /* For ADq there can be up to 4 VSIs with max 4 queues each.
2407 * The VF does not know about these additional VSIs; all it
2408 * cares about is its own queues. The PF maps these queues to
2409 * the appropriate VSIs based on the TC mapping.
2411 if (vf->adq_enabled) {
2412 if (idx >= ARRAY_SIZE(vf->ch)) {
2416 if (j == (vf->ch[idx].num_qps - 1)) {
2418 j = 0; /* resetting the queue count */
2426 /* set vsi num_queue_pairs in use to num configured by VF */
2427 if (!vf->adq_enabled) {
2428 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2429 qci->num_queue_pairs;
2431 for (i = 0; i < vf->num_tc; i++) {
2432 vsi = pf->vsi[vf->ch[i].vsi_idx];
2433 vsi->num_queue_pairs = vf->ch[i].num_qps;
2435 if (i40e_update_adq_vsi_queues(vsi, i)) {
2443 /* send the response to the VF */
2444 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2449 * i40e_validate_queue_map - check queue map is valid
2450 * @vf: the VF structure pointer
2452 * @queuemap: Tx or Rx queue map
2454 * check if Tx or Rx queue map is valid
2456 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2457 unsigned long queuemap)
2459 u16 vsi_queue_id, queue_id;
2461 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2462 if (vf->adq_enabled) {
2463 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2464 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2466 queue_id = vsi_queue_id;
2469 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
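/* Worked example of the ADq mapping above, assuming the driver's usual
 * values I40E_MAX_VF_VSI == 4 and I40E_DEFAULT_QUEUES_PER_VF == 4:
 * a VF-relative vsi_queue_id of 9 selects traffic-class channel
 * 9 / 4 == 2 and queue 9 % 4 == 1 within that channel's VSI.
 */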
2477 * i40e_vc_config_irq_map_msg
2478 * @vf: pointer to the VF info
2479 * @msg: pointer to the msg buffer
2481 * called from the VF to configure the irq to queue map
2484 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2486 struct virtchnl_irq_map_info *irqmap_info =
2487 (struct virtchnl_irq_map_info *)msg;
2488 struct virtchnl_vector_map *map;
2493 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2498 if (irqmap_info->num_vectors >
2499 vf->pf->hw.func_caps.num_msix_vectors_vf) {
2504 for (i = 0; i < irqmap_info->num_vectors; i++) {
2505 map = &irqmap_info->vecmap[i];
2506 /* validate msg params */
2507 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2508 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2512 vsi_id = map->vsi_id;
2514 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2519 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2524 i40e_config_irq_link_list(vf, vsi_id, map);
2527 /* send the response to the VF */
2528 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2533 * i40e_ctrl_vf_tx_rings
2534 * @vsi: the SRIOV VSI being configured
2535 * @q_map: bit map of the queues to start or stop
2536 * @enable: true to start the queues, false to stop them
2538 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2541 struct i40e_pf *pf = vsi->back;
2545 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2546 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2547 vsi->base_queue + q_id,
2548 false /*is xdp*/, enable);
2556 * i40e_ctrl_vf_rx_rings
2557 * @vsi: the SRIOV VSI being configured
2558 * @q_map: bit map of the queues to start or stop
2559 * @enable: true to start the queues, false to stop them
2561 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2564 struct i40e_pf *pf = vsi->back;
2568 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2569 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2578 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
2579 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2581 * Returns true if validation was successful, else false.
2583 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2585 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2586 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2587 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
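/* Worked example: with I40E_MAX_VF_QUEUES == 16 the valid bit range is
 * [0..15]. rx_queues == 0x0003 (queues 0-1) passes; rx_queues ==
 * tx_queues == 0 fails (nothing selected); any bitmap >= BIT(16),
 * e.g. 0x10000, fails because it names a queue the VF cannot own.
 */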
2594 * i40e_vc_enable_queues_msg
2595 * @vf: pointer to the VF info
2596 * @msg: pointer to the msg buffer
2598 * called from the VF to enable all or specific queue(s)
2600 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2602 struct virtchnl_queue_select *vqs =
2603 (struct virtchnl_queue_select *)msg;
2604 struct i40e_pf *pf = vf->pf;
2608 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2613 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2618 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2623 /* Use the queue bit map sent by the VF */
2624 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2629 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2635 /* need to start the rings for additional ADq VSIs as well */
2636 if (vf->adq_enabled) {
2637 /* zero belongs to LAN VSI */
2638 for (i = 1; i < vf->num_tc; i++) {
2639 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2645 /* send the response to the VF */
2646 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2651 * i40e_vc_disable_queues_msg
2652 * @vf: pointer to the VF info
2653 * @msg: pointer to the msg buffer
2655 * called from the VF to disable all or specific queue(s)
2658 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2660 struct virtchnl_queue_select *vqs =
2661 (struct virtchnl_queue_select *)msg;
2662 struct i40e_pf *pf = vf->pf;
2665 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2670 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2675 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2680 /* Use the queue bit map sent by the VF */
2681 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2686 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2692 /* send the response to the VF */
2693 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2698 * i40e_check_enough_queue - check whether enough queues are available
2699 * @vf: pointer to the VF info
2700 * @needed: the number of items needed
2702 * Returns the base item index of the queue, or negative for error
2704 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2706 unsigned int i, cur_queues, more, pool_size;
2707 struct i40e_lump_tracking *pile;
2708 struct i40e_pf *pf = vf->pf;
2709 struct i40e_vsi *vsi;
2711 vsi = pf->vsi[vf->lan_vsi_idx];
2712 cur_queues = vsi->alloc_queue_pairs;
2714 /* if the currently allocated queues already cover the request */
2715 if (cur_queues >= needed)
2716 return vsi->base_queue;
2719 if (cur_queues > 0) {
2720 /* if some queues are already allocated, just check whether
2721 * enough free queues follow immediately behind the allocated
2722 * block.
2724 more = needed - cur_queues;
2725 for (i = vsi->base_queue + cur_queues;
2726 i < pile->num_entries; i++) {
2727 if (pile->list[i] & I40E_PILE_VALID_BIT)
2731 /* there is enough */
2732 return vsi->base_queue;
2737 for (i = 0; i < pile->num_entries; i++) {
2738 if (pile->list[i] & I40E_PILE_VALID_BIT) {
2742 if (needed <= ++pool_size)
2743 /* there is enough */
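/* Worked example: with base_queue == 32 and cur_queues == 4, a request
 * for 8 queues first scans pile entries 36..39; if none of them has
 * I40E_PILE_VALID_BIT set (i.e. they are free), the VSI can grow in
 * place. Otherwise the second loop looks for any run of 8 contiguous
 * free entries elsewhere in the pile.
 */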
2751 * i40e_vc_request_queues_msg
2752 * @vf: pointer to the VF info
2753 * @msg: pointer to the msg buffer
2755 * VFs get a default number of queues but can use this message to request a
2756 * different number. If the request is successful, the PF will reset the VF
2757 * and return 0. If unsuccessful, the PF will send a message informing the VF
2758 * of the number of available queues and return the result of sending it.
2760 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2762 struct virtchnl_vf_res_request *vfres =
2763 (struct virtchnl_vf_res_request *)msg;
2764 u16 req_pairs = vfres->num_queue_pairs;
2765 u8 cur_pairs = vf->num_queue_pairs;
2766 struct i40e_pf *pf = vf->pf;
2768 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2771 if (req_pairs > I40E_MAX_VF_QUEUES) {
2772 dev_err(&pf->pdev->dev,
2773 "VF %d tried to request more than %d queues.\n",
2775 I40E_MAX_VF_QUEUES);
2776 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2777 } else if (req_pairs - cur_pairs > pf->queues_left) {
2778 dev_warn(&pf->pdev->dev,
2779 "VF %d requested %d more queues, but only %d left.\n",
2781 req_pairs - cur_pairs,
2783 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2784 } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2785 dev_warn(&pf->pdev->dev,
2786 "VF %d requested %d more queues, but there is not enough for it.\n",
2788 req_pairs - cur_pairs);
2789 vfres->num_queue_pairs = cur_pairs;
2791 /* successful request */
2792 vf->num_req_queues = req_pairs;
2793 i40e_vc_reset_vf(vf, true);
2797 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2798 (u8 *)vfres, sizeof(*vfres));
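/* Illustration only (not part of this driver): a minimal sketch of the
 * VF side of this exchange. vf_send_to_pf() is a hypothetical transport
 * helper standing in for the VF driver's admin-queue send routine.
 */
#if 0
static int vf_request_queues_example(void)
{
	struct virtchnl_vf_res_request req = {
		.num_queue_pairs = 8,	/* ask the PF for 8 queue pairs */
	};

	/* On success the PF resets the VF; on failure the PF echoes back
	 * the number of queue pairs it can actually provide.
	 */
	return vf_send_to_pf(VIRTCHNL_OP_REQUEST_QUEUES,
			     (u8 *)&req, sizeof(req));
}
#endif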
2802 * i40e_vc_get_stats_msg
2803 * @vf: pointer to the VF info
2804 * @msg: pointer to the msg buffer
2806 * called from the VF to get vsi stats
2808 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2810 struct virtchnl_queue_select *vqs =
2811 (struct virtchnl_queue_select *)msg;
2812 struct i40e_pf *pf = vf->pf;
2813 struct i40e_eth_stats stats;
2815 struct i40e_vsi *vsi;
2817 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2819 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2824 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2829 vsi = pf->vsi[vf->lan_vsi_idx];
2834 i40e_update_eth_stats(vsi);
2835 stats = vsi->eth_stats;
2838 /* send the response back to the VF */
2839 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2840 (u8 *)&stats, sizeof(stats));
2843 #define I40E_MAX_MACVLAN_PER_HW 3072
2844 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
2846 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2847 * program: 16 for multicast, 1 for its own MAC, 1 for broadcast
2849 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2850 #define I40E_VC_MAX_VLAN_PER_VF 16
2852 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \
2853 ({ typeof(vf_num) vf_num_ = (vf_num); \
2854 typeof(num_ports) num_ports_ = (num_ports); \
2855 ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \
2856 I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \
2857 I40E_VC_MAX_MAC_ADDR_PER_VF; })
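/* Worked example for the macro above: with num_ports == 2 the per-PF
 * pool is 3072 / 2 == 1536 filters. With vf_num == 16 VFs, each
 * untrusted VF keeps its guaranteed 18 (16 + 1 + 1) filters, and a
 * trusted VF may use ((1536 - 16 * 18) / 16) + 18 == 96 filters.
 */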
2859 * i40e_check_vf_permission
2860 * @vf: pointer to the VF info
2861 * @al: MAC address list from virtchnl
2863 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2864 * if any address in the list is not valid. Checks the following conditions:
2866 * 1) broadcast and zero addresses are never valid
2867 * 2) unicast addresses are not allowed if the VMM has administratively set
2868 * the VF MAC address, unless the VF is marked as privileged.
2869 * 3) There is enough space to add all the addresses.
2871 * Note that to guarantee consistency, it is expected this function be called
2872 * while holding the mac_filter_hash_lock, as otherwise the current number of
2873 * addresses might not be accurate.
2875 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2876 struct virtchnl_ether_addr_list *al)
2878 struct i40e_pf *pf = vf->pf;
2879 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2880 struct i40e_hw *hw = &pf->hw;
2881 int mac2add_cnt = 0;
2884 for (i = 0; i < al->num_elements; i++) {
2885 struct i40e_mac_filter *f;
2886 u8 *addr = al->list[i].addr;
2888 if (is_broadcast_ether_addr(addr) ||
2889 is_zero_ether_addr(addr)) {
2890 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2895 /* If the host VMM administrator has set the VF MAC address
2896 * administratively via the ndo_set_vf_mac command then deny
2897 * permission to the VF to add or delete unicast MAC addresses.
2898 * The only exception is a privileged VF, which may change it freely.
2899 * The VF may request to set the MAC address filter already
2900 * assigned to it, so do not return an error in that case.
2902 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2903 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2904 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2905 dev_err(&pf->pdev->dev,
2906 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2910 /* count filters that will actually be added */
2911 f = i40e_find_mac(vsi, addr);
2916 /* If this VF is not privileged, then we can't add more than a limited
2917 * number of addresses. Check to make sure that the additions do not
2918 * push us over the limit.
2920 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2921 if ((i40e_count_filters(vsi) + mac2add_cnt) >
2922 I40E_VC_MAX_MAC_ADDR_PER_VF) {
2923 dev_err(&pf->pdev->dev,
2924 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2927 /* If this VF is trusted, it can use more resources than untrusted.
2928 * However to ensure that every trusted VF has appropriate number of
2929 * resources, divide the whole pool of resources per port and then
2930 * across all VFs.
2933 if ((i40e_count_filters(vsi) + mac2add_cnt) >
2934 I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2936 dev_err(&pf->pdev->dev,
2937 "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
2945 * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
2946 * @vc_ether_addr: used to extract the type
2949 i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
2951 return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
2955 * i40e_is_vc_addr_legacy
2956 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2958 * check if the MAC address is from an older VF
2961 i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
2963 return i40e_vc_ether_addr_type(vc_ether_addr) ==
2964 VIRTCHNL_ETHER_ADDR_LEGACY;
2968 * i40e_is_vc_addr_primary
2969 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2971 * check if the MAC address is the VF's primary MAC
2972 * This function should only be called when the MAC address in
2973 * virtchnl_ether_addr is a valid unicast MAC
2976 i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
2978 return i40e_vc_ether_addr_type(vc_ether_addr) ==
2979 VIRTCHNL_ETHER_ADDR_PRIMARY;
2983 * i40e_update_vf_mac_addr
2985 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
2987 * update the VF's cached hardware MAC if allowed
2990 i40e_update_vf_mac_addr(struct i40e_vf *vf,
2991 struct virtchnl_ether_addr *vc_ether_addr)
2993 u8 *mac_addr = vc_ether_addr->addr;
2995 if (!is_valid_ether_addr(mac_addr))
2998 /* If the request to add a MAC filter is a primary request, update the
2999 * default MAC address with the requested one. If it is a legacy request,
3000 * check if the current default is empty and, if so, update it.
3002 if (i40e_is_vc_addr_primary(vc_ether_addr)) {
3003 ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3004 } else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
3005 if (is_zero_ether_addr(vf->default_lan_addr.addr))
3006 ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3011 * i40e_vc_add_mac_addr_msg
3012 * @vf: pointer to the VF info
3013 * @msg: pointer to the msg buffer
3015 * add guest mac address filter
3017 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3019 struct virtchnl_ether_addr_list *al =
3020 (struct virtchnl_ether_addr_list *)msg;
3021 struct i40e_pf *pf = vf->pf;
3022 struct i40e_vsi *vsi = NULL;
3026 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3027 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3032 vsi = pf->vsi[vf->lan_vsi_idx];
3034 /* Lock once, because every function inside the for loop accesses the
3035 * VSI's MAC filter list, which must be protected by the same lock.
3037 spin_lock_bh(&vsi->mac_filter_hash_lock);
3039 ret = i40e_check_vf_permission(vf, al);
3041 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3045 /* add new addresses to the list */
3046 for (i = 0; i < al->num_elements; i++) {
3047 struct i40e_mac_filter *f;
3049 f = i40e_find_mac(vsi, al->list[i].addr);
3051 f = i40e_add_mac_filter(vsi, al->list[i].addr);
3054 dev_err(&pf->pdev->dev,
3055 "Unable to add MAC filter %pM for VF %d\n",
3056 al->list[i].addr, vf->vf_id);
3058 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3062 i40e_update_vf_mac_addr(vf, &al->list[i]);
3064 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3066 /* program the updated filter list */
3067 ret = i40e_sync_vsi_filters(vsi);
3069 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3073 /* send the response to the VF */
3074 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
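/* Illustration only (not part of this driver): how a VF-side sender
 * might size the variable-length address list for this opcode, assuming
 * the legacy virtchnl definition that declares a one-element list[1]
 * tail (hence the "- 1" below).
 */
#if 0
static u16 vf_ether_addr_list_size(u16 num_elements)
{
	return sizeof(struct virtchnl_ether_addr_list) +
	       (num_elements - 1) * sizeof(struct virtchnl_ether_addr);
}
#endif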
3079 * i40e_vc_del_mac_addr_msg
3080 * @vf: pointer to the VF info
3081 * @msg: pointer to the msg buffer
3083 * remove guest mac address filter
3085 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3087 struct virtchnl_ether_addr_list *al =
3088 (struct virtchnl_ether_addr_list *)msg;
3089 bool was_unimac_deleted = false;
3090 struct i40e_pf *pf = vf->pf;
3091 struct i40e_vsi *vsi = NULL;
3095 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3096 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3101 for (i = 0; i < al->num_elements; i++) {
3102 if (is_broadcast_ether_addr(al->list[i].addr) ||
3103 is_zero_ether_addr(al->list[i].addr)) {
3104 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
3105 al->list[i].addr, vf->vf_id);
3109 if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
3110 was_unimac_deleted = true;
3112 vsi = pf->vsi[vf->lan_vsi_idx];
3114 spin_lock_bh(&vsi->mac_filter_hash_lock);
3115 /* delete addresses from the list */
3116 for (i = 0; i < al->num_elements; i++)
3117 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3119 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3123 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3125 if (was_unimac_deleted)
3126 eth_zero_addr(vf->default_lan_addr.addr);
3128 /* program the updated filter list */
3129 ret = i40e_sync_vsi_filters(vsi);
3131 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3134 if (vf->trusted && was_unimac_deleted) {
3135 struct i40e_mac_filter *f;
3136 struct hlist_node *h;
3140 /* set the last valid unicast mac address as default */
3141 spin_lock_bh(&vsi->mac_filter_hash_lock);
3142 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3143 if (is_valid_ether_addr(f->macaddr))
3144 macaddr = f->macaddr;
3147 ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3148 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3151 /* send the response to the VF */
3152 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3156 * i40e_vc_add_vlan_msg
3157 * @vf: pointer to the VF info
3158 * @msg: pointer to the msg buffer
3160 * program guest vlan id
3162 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3164 struct virtchnl_vlan_filter_list *vfl =
3165 (struct virtchnl_vlan_filter_list *)msg;
3166 struct i40e_pf *pf = vf->pf;
3167 struct i40e_vsi *vsi = NULL;
3171 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3172 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3173 dev_err(&pf->pdev->dev,
3174 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
3177 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3178 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3183 for (i = 0; i < vfl->num_elements; i++) {
3184 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3186 dev_err(&pf->pdev->dev,
3187 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3191 vsi = pf->vsi[vf->lan_vsi_idx];
3192 if (vsi->info.pvid) {
3197 i40e_vlan_stripping_enable(vsi);
3198 for (i = 0; i < vfl->num_elements; i++) {
3199 /* add new VLAN filter */
3200 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
3204 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3205 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3209 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3210 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3216 dev_err(&pf->pdev->dev,
3217 "Unable to add VLAN filter %d for VF %d, error %d\n",
3218 vfl->vlan_id[i], vf->vf_id, ret);
3222 /* send the response to the VF */
3223 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3227 * i40e_vc_remove_vlan_msg
3228 * @vf: pointer to the VF info
3229 * @msg: pointer to the msg buffer
3231 * remove programmed guest vlan id
3233 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3235 struct virtchnl_vlan_filter_list *vfl =
3236 (struct virtchnl_vlan_filter_list *)msg;
3237 struct i40e_pf *pf = vf->pf;
3238 struct i40e_vsi *vsi = NULL;
3242 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3243 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3248 for (i = 0; i < vfl->num_elements; i++) {
3249 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3255 vsi = pf->vsi[vf->lan_vsi_idx];
3256 if (vsi->info.pvid) {
3257 if (vfl->num_elements > 1 || vfl->vlan_id[0])
3262 for (i = 0; i < vfl->num_elements; i++) {
3263 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3266 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3267 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3271 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3272 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3279 /* send the response to the VF */
3280 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3284 * i40e_vc_rdma_msg
3285 * @vf: pointer to the VF info
3286 * @msg: pointer to the msg buffer
3287 * @msglen: msg length
3289 * called from the VF for RDMA (iWARP) messages
3291 static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3293 struct i40e_pf *pf = vf->pf;
3294 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3297 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3298 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3303 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3307 /* send the response to the VF */
3308 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
3313 * i40e_vc_rdma_qvmap_msg
3314 * @vf: pointer to the VF info
3315 * @msg: pointer to the msg buffer
3316 * @config: config qvmap or release it
3318 * called from the VF for RDMA (iWARP) messages
3320 static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3322 struct virtchnl_rdma_qvlist_info *qvlist_info =
3323 (struct virtchnl_rdma_qvlist_info *)msg;
3326 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3327 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3333 if (i40e_config_rdma_qvlist(vf, qvlist_info))
3336 i40e_release_rdma_qvlist(vf);
3340 /* send the response to the VF */
3341 return i40e_vc_send_resp_to_vf(vf,
3342 config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
3343 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
3348 * i40e_vc_config_rss_key
3349 * @vf: pointer to the VF info
3350 * @msg: pointer to the msg buffer
3352 * Configure the VF's RSS key
3354 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3356 struct virtchnl_rss_key *vrk =
3357 (struct virtchnl_rss_key *)msg;
3358 struct i40e_pf *pf = vf->pf;
3359 struct i40e_vsi *vsi = NULL;
3362 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3363 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3364 vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3369 vsi = pf->vsi[vf->lan_vsi_idx];
3370 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3372 /* send the response to the VF */
3373 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3378 * i40e_vc_config_rss_lut
3379 * @vf: pointer to the VF info
3380 * @msg: pointer to the msg buffer
3382 * Configure the VF's RSS LUT
3384 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3386 struct virtchnl_rss_lut *vrl =
3387 (struct virtchnl_rss_lut *)msg;
3388 struct i40e_pf *pf = vf->pf;
3389 struct i40e_vsi *vsi = NULL;
3393 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3394 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3395 vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3400 for (i = 0; i < vrl->lut_entries; i++)
3401 if (vrl->lut[i] >= vf->num_queue_pairs) {
3406 vsi = pf->vsi[vf->lan_vsi_idx];
3407 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3408 /* send the response to the VF */
3410 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
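/* Worked example: the LUT has I40E_VF_HLUT_ARRAY_SIZE entries and each
 * entry selects one of the VF's queues, so every lut[i] must be below
 * vf->num_queue_pairs. With 4 queue pairs, a LUT filled with the
 * repeating pattern 0, 1, 2, 3 spreads the hash buckets evenly across
 * all four queues.
 */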
3415 * i40e_vc_get_rss_hena
3416 * @vf: pointer to the VF info
3417 * @msg: pointer to the msg buffer
3419 * Return the RSS HENA bits allowed by the hardware
3421 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3423 struct virtchnl_rss_hena *vrh = NULL;
3424 struct i40e_pf *pf = vf->pf;
3428 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3432 len = sizeof(struct virtchnl_rss_hena);
3434 vrh = kzalloc(len, GFP_KERNEL);
3440 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3442 /* send the response back to the VF */
3443 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3444 aq_ret, (u8 *)vrh, len);
3450 * i40e_vc_set_rss_hena
3451 * @vf: pointer to the VF info
3452 * @msg: pointer to the msg buffer
3454 * Set the RSS HENA bits for the VF
3456 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3458 struct virtchnl_rss_hena *vrh =
3459 (struct virtchnl_rss_hena *)msg;
3460 struct i40e_pf *pf = vf->pf;
3461 struct i40e_hw *hw = &pf->hw;
3464 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3468 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3469 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3470 (u32)(vrh->hena >> 32));
3472 /* send the response to the VF */
3474 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3478 * i40e_vc_enable_vlan_stripping
3479 * @vf: pointer to the VF info
3480 * @msg: pointer to the msg buffer
3482 * Enable vlan header stripping for the VF
3484 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3486 struct i40e_vsi *vsi;
3489 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3494 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3495 i40e_vlan_stripping_enable(vsi);
3497 /* send the response to the VF */
3499 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3504 * i40e_vc_disable_vlan_stripping
3505 * @vf: pointer to the VF info
3506 * @msg: pointer to the msg buffer
3508 * Disable vlan header stripping for the VF
3510 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3512 struct i40e_vsi *vsi;
3515 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3520 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3521 i40e_vlan_stripping_disable(vsi);
3523 /* send the response to the VF */
3525 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3530 * i40e_validate_cloud_filter
3531 * @vf: pointer to VF structure
3532 * @tc_filter: pointer to filter requested
3534 * This function validates cloud filter programmed as TC filter for ADq
3536 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3537 struct virtchnl_filter *tc_filter)
3539 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3540 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3541 struct i40e_pf *pf = vf->pf;
3542 struct i40e_vsi *vsi = NULL;
3543 struct i40e_mac_filter *f;
3544 struct hlist_node *h;
3548 if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
3549 dev_info(&pf->pdev->dev,
3550 "VF %d: ADQ doesn't support this action (%d)\n",
3551 vf->vf_id, tc_filter->action);
3555 /* action_meta is the TC number to which the filter is applied */
3556 if (!tc_filter->action_meta ||
3557 tc_filter->action_meta > vf->num_tc) {
3558 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3559 vf->vf_id, tc_filter->action_meta);
3563 /* Check whether the filter is programmed for advanced or basic mode.
3564 * There are two ADq modes (for VF only):
3565 * 1. Basic mode: intended to allow as many filter options as possible
3566 * to be added to a VF in Non-trusted mode. Main goal is
3567 * to add filters to its own MAC and VLAN id.
3568 * 2. Advanced mode: allows filters to be applied to traffic other than
3569 * its own MAC or VLAN. This mode requires the VF to be trusted.
3572 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3573 vsi = pf->vsi[vf->lan_vsi_idx];
3574 f = i40e_find_mac(vsi, data.dst_mac);
3577 dev_info(&pf->pdev->dev,
3578 "Destination MAC %pM doesn't belong to VF %d\n",
3579 data.dst_mac, vf->vf_id);
3584 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3586 if (f->vlan == ntohs(data.vlan_id)) {
3592 dev_info(&pf->pdev->dev,
3593 "VF %d doesn't have any VLAN id %u\n",
3594 vf->vf_id, ntohs(data.vlan_id));
3599 /* Check if VF is trusted */
3600 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3601 dev_err(&pf->pdev->dev,
3602 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3608 if (mask.dst_mac[0] & data.dst_mac[0]) {
3609 if (is_broadcast_ether_addr(data.dst_mac) ||
3610 is_zero_ether_addr(data.dst_mac)) {
3611 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3612 vf->vf_id, data.dst_mac);
3617 if (mask.src_mac[0] & data.src_mac[0]) {
3618 if (is_broadcast_ether_addr(data.src_mac) ||
3619 is_zero_ether_addr(data.src_mac)) {
3620 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3621 vf->vf_id, data.src_mac);
3626 if (mask.dst_port & data.dst_port) {
3627 if (!data.dst_port) {
3628 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3634 if (mask.src_port & data.src_port) {
3635 if (!data.src_port) {
3636 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3642 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3643 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3644 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3649 if (mask.vlan_id & data.vlan_id) {
3650 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3651 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3663 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3664 * @vf: pointer to the VF info
3665 * @seid: seid of the vsi it is searching for
3667 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3669 struct i40e_pf *pf = vf->pf;
3670 struct i40e_vsi *vsi = NULL;
3673 for (i = 0; i < vf->num_tc ; i++) {
3674 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3675 if (vsi && vsi->seid == seid)
3682 * i40e_del_all_cloud_filters
3683 * @vf: pointer to the VF info
3685 * This function deletes all cloud filters configured for the given VF
3687 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3689 struct i40e_cloud_filter *cfilter = NULL;
3690 struct i40e_pf *pf = vf->pf;
3691 struct i40e_vsi *vsi = NULL;
3692 struct hlist_node *node;
3695 hlist_for_each_entry_safe(cfilter, node,
3696 &vf->cloud_filter_list, cloud_node) {
3697 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3700 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3701 vf->vf_id, cfilter->seid);
3705 if (cfilter->dst_port)
3706 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3709 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3711 dev_err(&pf->pdev->dev,
3712 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3713 vf->vf_id, ERR_PTR(ret),
3714 i40e_aq_str(&pf->hw,
3715 pf->hw.aq.asq_last_status));
3717 hlist_del(&cfilter->cloud_node);
3719 vf->num_cloud_filters--;
3724 * i40e_vc_del_cloud_filter
3725 * @vf: pointer to the VF info
3726 * @msg: pointer to the msg buffer
3728 * This function deletes a cloud filter programmed as TC filter for ADq
3730 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3732 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3733 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3734 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3735 struct i40e_cloud_filter cfilter, *cf = NULL;
3736 struct i40e_pf *pf = vf->pf;
3737 struct i40e_vsi *vsi = NULL;
3738 struct hlist_node *node;
3742 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3747 if (!vf->adq_enabled) {
3748 dev_info(&pf->pdev->dev,
3749 "VF %d: ADq not enabled, can't apply cloud filter\n",
3755 if (i40e_validate_cloud_filter(vf, vcf)) {
3756 dev_info(&pf->pdev->dev,
3757 "VF %d: Invalid input, can't apply cloud filter\n",
3763 memset(&cfilter, 0, sizeof(cfilter));
3764 /* parse destination mac address */
3765 for (i = 0; i < ETH_ALEN; i++)
3766 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3768 /* parse source mac address */
3769 for (i = 0; i < ETH_ALEN; i++)
3770 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3772 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3773 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3774 cfilter.src_port = mask.src_port & tcf.src_port;
3776 switch (vcf->flow_type) {
3777 case VIRTCHNL_TCP_V4_FLOW:
3778 cfilter.n_proto = ETH_P_IP;
3779 if (mask.dst_ip[0] & tcf.dst_ip[0])
3780 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3781 ARRAY_SIZE(tcf.dst_ip));
3782 else if (mask.src_ip[0] & tcf.src_ip[0])
3783 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3784 ARRAY_SIZE(tcf.dst_ip));
3786 case VIRTCHNL_TCP_V6_FLOW:
3787 cfilter.n_proto = ETH_P_IPV6;
3788 if (mask.dst_ip[3] & tcf.dst_ip[3])
3789 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3790 sizeof(cfilter.ip.v6.dst_ip6));
3791 if (mask.src_ip[3] & tcf.src_ip[3])
3792 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3793 sizeof(cfilter.ip.v6.src_ip6));
3796 /* A TC filter can be configured from different field combinations;
3797 * in this case IP is not part of the filter config.
3799 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3803 /* get the VSI to which the TC belongs */
3804 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3805 cfilter.seid = vsi->seid;
3806 cfilter.flags = vcf->field_flags;
3808 /* Deleting TC filter */
3810 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3812 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3814 dev_err(&pf->pdev->dev,
3815 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3816 vf->vf_id, ERR_PTR(ret),
3817 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3821 hlist_for_each_entry_safe(cf, node,
3822 &vf->cloud_filter_list, cloud_node) {
3823 if (cf->seid != cfilter.seid)
3826 if (cfilter.dst_port != cf->dst_port)
3828 if (mask.dst_mac[0])
3829 if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3831 /* for ipv4 data to be valid, only the first u32 of the mask is set */
3832 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3833 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3834 ARRAY_SIZE(tcf.dst_ip)))
3836 /* for ipv6, mask is set for all sixteen bytes (4 words) */
3837 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3838 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3839 sizeof(cfilter.ip.v6.dst_ip6)))
3842 if (cfilter.vlan_id != cf->vlan_id)
3845 hlist_del(&cf->cloud_node);
3847 vf->num_cloud_filters--;
3851 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3856 * i40e_vc_add_cloud_filter
3857 * @vf: pointer to the VF info
3858 * @msg: pointer to the msg buffer
3860 * This function adds a cloud filter programmed as TC filter for ADq
3862 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3864 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3865 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3866 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3867 struct i40e_cloud_filter *cfilter = NULL;
3868 struct i40e_pf *pf = vf->pf;
3869 struct i40e_vsi *vsi = NULL;
3873 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3878 if (!vf->adq_enabled) {
3879 dev_info(&pf->pdev->dev,
3880 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3886 if (i40e_validate_cloud_filter(vf, vcf)) {
3887 dev_info(&pf->pdev->dev,
3888 "VF %d: Invalid input/s, can't apply cloud filter\n",
3894 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3900 /* parse destination mac address */
3901 for (i = 0; i < ETH_ALEN; i++)
3902 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3904 /* parse source mac address */
3905 for (i = 0; i < ETH_ALEN; i++)
3906 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3908 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3909 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3910 cfilter->src_port = mask.src_port & tcf.src_port;
3912 switch (vcf->flow_type) {
3913 case VIRTCHNL_TCP_V4_FLOW:
3914 cfilter->n_proto = ETH_P_IP;
3915 if (mask.dst_ip[0] & tcf.dst_ip[0])
3916 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3917 ARRAY_SIZE(tcf.dst_ip));
3918 else if (mask.src_ip[0] & tcf.src_ip[0])
3919 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3920 ARRAY_SIZE(tcf.dst_ip));
3922 case VIRTCHNL_TCP_V6_FLOW:
3923 cfilter->n_proto = ETH_P_IPV6;
3924 if (mask.dst_ip[3] & tcf.dst_ip[3])
3925 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3926 sizeof(cfilter->ip.v6.dst_ip6));
3927 if (mask.src_ip[3] & tcf.src_ip[3])
3928 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3929 sizeof(cfilter->ip.v6.src_ip6));
3932 /* A TC filter can be configured from different field combinations;
3933 * in this case IP is not part of the filter config.
3935 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3939 /* get the VSI to which the TC belongs */
3940 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3941 cfilter->seid = vsi->seid;
3942 cfilter->flags = vcf->field_flags;
3944 /* Adding cloud filter programmed as TC filter */
3946 aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3948 aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3950 dev_err(&pf->pdev->dev,
3951 "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
3952 vf->vf_id, ERR_PTR(aq_ret),
3953 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3957 INIT_HLIST_NODE(&cfilter->cloud_node);
3958 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3959 /* the filter is now owned by the list; drop our local reference */
3961 vf->num_cloud_filters++;
3965 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3970 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3971 * @vf: pointer to the VF info
3972 * @msg: pointer to the msg buffer
3974 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3976 struct virtchnl_tc_info *tci =
3977 (struct virtchnl_tc_info *)msg;
3978 struct i40e_pf *pf = vf->pf;
3979 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3980 int i, adq_request_qps = 0;
3984 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3989 /* ADq cannot be applied if spoof check is ON */
3991 dev_err(&pf->pdev->dev,
3992 "Spoof check is ON, turn it OFF to enable ADq\n");
3997 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3998 dev_err(&pf->pdev->dev,
3999 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
4005 /* max number of traffic classes for VF currently capped at 4 */
4006 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
4007 dev_err(&pf->pdev->dev,
4008 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
4009 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
4014 /* validate queues for each TC */
4015 for (i = 0; i < tci->num_tc; i++)
4016 if (!tci->list[i].count ||
4017 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
4018 dev_err(&pf->pdev->dev,
4019 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
4020 vf->vf_id, i, tci->list[i].count,
4021 I40E_DEFAULT_QUEUES_PER_VF);
4026 /* need Max VF queues but already have default number of queues */
4027 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
4029 if (pf->queues_left < adq_request_qps) {
4030 dev_err(&pf->pdev->dev,
4031 "No queues left to allocate to VF %d\n",
4036 /* we need to allocate max VF queues to enable ADq so as to
4037 * make sure an ADq-enabled VF always gets its queues back when
4038 * it goes through a reset.
4040 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
4043 /* get link speed in Mbps to validate the rate limit */
4044 speed = i40e_vc_link_speed2mbps(ls->link_speed);
4045 if (speed == SPEED_UNKNOWN) {
4046 dev_err(&pf->pdev->dev,
4047 "Cannot detect link speed\n");
4052 /* parse data from the queue channel info */
4053 vf->num_tc = tci->num_tc;
4054 for (i = 0; i < vf->num_tc; i++) {
4055 if (tci->list[i].max_tx_rate) {
4056 if (tci->list[i].max_tx_rate > speed) {
4057 dev_err(&pf->pdev->dev,
4058 "Invalid max tx rate %llu specified for VF %d.",
4059 tci->list[i].max_tx_rate,
4064 vf->ch[i].max_tx_rate =
4065 tci->list[i].max_tx_rate;
4068 vf->ch[i].num_qps = tci->list[i].count;
4071 /* set this flag only after making sure all inputs are sane */
4072 vf->adq_enabled = true;
4074 /* reset the VF in order to allocate resources */
4075 i40e_vc_reset_vf(vf, true);
4079 /* send the response to the VF */
4081 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
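/* Illustration only (not part of this driver): a minimal sketch of the
 * virtchnl_tc_info payload a VF might send for this opcode, reusing the
 * hypothetical vf_send_to_pf() helper from the earlier sketch. The
 * layout assumes the legacy one-element list[1] tail of the struct.
 */
#if 0
static int vf_enable_two_channels_example(void)
{
	u8 buf[sizeof(struct virtchnl_tc_info) +
	       sizeof(struct virtchnl_channel_info)] = {};
	struct virtchnl_tc_info *tci = (struct virtchnl_tc_info *)buf;

	tci->num_tc = 2;
	tci->list[0].count = 4;		/* TC 0: 4 queues, no rate cap */
	tci->list[0].offset = 0;
	tci->list[0].max_tx_rate = 0;
	tci->list[1].count = 4;		/* TC 1: 4 queues */
	tci->list[1].offset = 4;
	tci->list[1].max_tx_rate = 1000; /* validated against link Mbps */

	return vf_send_to_pf(VIRTCHNL_OP_ENABLE_CHANNELS, buf, sizeof(buf));
}
#endif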
4086 * i40e_vc_del_qch_msg
4087 * @vf: pointer to the VF info
4088 * @msg: pointer to the msg buffer
4090 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
4092 struct i40e_pf *pf = vf->pf;
4095 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4100 if (vf->adq_enabled) {
4101 i40e_del_all_cloud_filters(vf);
4103 vf->adq_enabled = false;
4105 dev_info(&pf->pdev->dev,
4106 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
4109 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4114 /* reset the VF in order to allocate resources */
4115 i40e_vc_reset_vf(vf, true);
4120 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4125 * i40e_vc_process_vf_msg
4126 * @pf: pointer to the PF structure
4127 * @vf_id: source VF id
4128 * @v_opcode: operation code
4129 * @v_retval: unused return value code
4130 * @msg: pointer to the msg buffer
4131 * @msglen: msg length
4133 * called from the common aeq/arq handler to
4134 * process request from VF
4136 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4137 u32 __always_unused v_retval, u8 *msg, u16 msglen)
4139 struct i40e_hw *hw = &pf->hw;
4140 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4144 pf->vf_aq_requests++;
4145 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4147 vf = &(pf->vf[local_vf_id]);
4149 /* Check if VF is disabled. */
4150 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4153 /* perform basic checks on the msg */
4154 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4157 i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
4158 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4159 local_vf_id, v_opcode, msglen);
4164 case VIRTCHNL_OP_VERSION:
4165 ret = i40e_vc_get_version_msg(vf, msg);
4167 case VIRTCHNL_OP_GET_VF_RESOURCES:
4168 ret = i40e_vc_get_vf_resources_msg(vf, msg);
4169 i40e_vc_notify_vf_link_state(vf);
4171 case VIRTCHNL_OP_RESET_VF:
4172 i40e_vc_reset_vf(vf, false);
4175 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4176 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4178 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4179 ret = i40e_vc_config_queues_msg(vf, msg);
4181 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4182 ret = i40e_vc_config_irq_map_msg(vf, msg);
4184 case VIRTCHNL_OP_ENABLE_QUEUES:
4185 ret = i40e_vc_enable_queues_msg(vf, msg);
4186 i40e_vc_notify_vf_link_state(vf);
4188 case VIRTCHNL_OP_DISABLE_QUEUES:
4189 ret = i40e_vc_disable_queues_msg(vf, msg);
4191 case VIRTCHNL_OP_ADD_ETH_ADDR:
4192 ret = i40e_vc_add_mac_addr_msg(vf, msg);
4194 case VIRTCHNL_OP_DEL_ETH_ADDR:
4195 ret = i40e_vc_del_mac_addr_msg(vf, msg);
4197 case VIRTCHNL_OP_ADD_VLAN:
4198 ret = i40e_vc_add_vlan_msg(vf, msg);
4200 case VIRTCHNL_OP_DEL_VLAN:
4201 ret = i40e_vc_remove_vlan_msg(vf, msg);
4203 case VIRTCHNL_OP_GET_STATS:
4204 ret = i40e_vc_get_stats_msg(vf, msg);
4206 case VIRTCHNL_OP_RDMA:
4207 ret = i40e_vc_rdma_msg(vf, msg, msglen);
4209 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
4210 ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
4212 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
4213 ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
4215 case VIRTCHNL_OP_CONFIG_RSS_KEY:
4216 ret = i40e_vc_config_rss_key(vf, msg);
4218 case VIRTCHNL_OP_CONFIG_RSS_LUT:
4219 ret = i40e_vc_config_rss_lut(vf, msg);
4221 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4222 ret = i40e_vc_get_rss_hena(vf, msg);
4224 case VIRTCHNL_OP_SET_RSS_HENA:
4225 ret = i40e_vc_set_rss_hena(vf, msg);
4227 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4228 ret = i40e_vc_enable_vlan_stripping(vf, msg);
4230 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4231 ret = i40e_vc_disable_vlan_stripping(vf, msg);
4233 case VIRTCHNL_OP_REQUEST_QUEUES:
4234 ret = i40e_vc_request_queues_msg(vf, msg);
4236 case VIRTCHNL_OP_ENABLE_CHANNELS:
4237 ret = i40e_vc_add_qch_msg(vf, msg);
4239 case VIRTCHNL_OP_DISABLE_CHANNELS:
4240 ret = i40e_vc_del_qch_msg(vf, msg);
4242 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4243 ret = i40e_vc_add_cloud_filter(vf, msg);
4245 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4246 ret = i40e_vc_del_cloud_filter(vf, msg);
4248 case VIRTCHNL_OP_UNKNOWN:
4250 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4251 v_opcode, local_vf_id);
4252 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4261 * i40e_vc_process_vflr_event
4262 * @pf: pointer to the PF structure
4264 * called from the VFLR irq handler to
4265 * free up VF resources and state variables
4267 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4269 struct i40e_hw *hw = &pf->hw;
4270 u32 reg, reg_idx, bit_idx;
4274 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4277 /* Re-enable the VFLR interrupt cause here, before looking for which
4278 * VF got reset. Otherwise, if another VF gets a reset while the
4279 * first one is being processed, that interrupt will be lost, and
4280 * that VF will be stuck in reset forever.
4282 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4283 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4284 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4287 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4288 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4289 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4290 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
4291 /* read the GLGEN_VFLRSTAT register to find out which VFs got an FLR */
4292 vf = &pf->vf[vf_id];
4293 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4294 if (reg & BIT(bit_idx))
4295 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4296 i40e_reset_vf(vf, true);
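/* Worked example of the register math above: with vf_base_id == 64 and
 * vf_id == 5, the absolute VF id is 69, so the FLR status bit lives in
 * GLGEN_VFLRSTAT(69 / 32 == 2) at bit position 69 % 32 == 5.
 */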
4303 * i40e_validate_vf
4304 * @pf: the physical function
4305 * @vf_id: VF identifier
4307 * Check that the VF is enabled and the VSI exists.
4309 * Returns 0 on success, negative on failure
4311 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4313 struct i40e_vsi *vsi;
4317 if (vf_id >= pf->num_alloc_vfs) {
4318 dev_err(&pf->pdev->dev,
4319 "Invalid VF Identifier %d\n", vf_id);
4323 vf = &pf->vf[vf_id];
4324 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4332 * i40e_check_vf_init_timeout
4333 * @vf: the virtual function
4335 * Check that the VF's initialization has completed and, if not,
4336 * wait up to 300 ms for it to finish.
4338 * Returns true when VF is initialized, false on timeout
4340 static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
4344 /* When the VF is resetting wait until it is done.
4345 * It can take up to 200 milliseconds, but wait for
4346 * up to 300 milliseconds to be safe.
4348 for (i = 0; i < 15; i++) {
4349 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4354 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4355 dev_err(&vf->pf->pdev->dev,
4356 "VF %d still in reset. Try again.\n", vf->vf_id);
4364 * i40e_ndo_set_vf_mac
4365 * @netdev: network interface device structure
4366 * @vf_id: VF identifier
4367 * @mac: mac address
4369 * program VF mac address
4371 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4373 struct i40e_netdev_priv *np = netdev_priv(netdev);
4374 struct i40e_vsi *vsi = np->vsi;
4375 struct i40e_pf *pf = vsi->back;
4376 struct i40e_mac_filter *f;
4379 struct hlist_node *h;
4382 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4383 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4387 /* validate the request */
4388 ret = i40e_validate_vf(pf, vf_id);
4392 vf = &pf->vf[vf_id];
4393 if (!i40e_check_vf_init_timeout(vf)) {
4397 vsi = pf->vsi[vf->lan_vsi_idx];
4399 if (is_multicast_ether_addr(mac)) {
4400 dev_err(&pf->pdev->dev,
4401 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4406 /* Lock once because below invoked function add/del_filter requires
4407 * mac_filter_hash_lock to be held
4409 spin_lock_bh(&vsi->mac_filter_hash_lock);
4411 /* delete the temporary mac address */
4412 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4413 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4415 /* Delete all the filters for this VSI - we're going to kill it
4416 * anyway.
4418 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4419 __i40e_del_filter(vsi, f);
4421 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4423 /* program mac filter */
4424 if (i40e_sync_vsi_filters(vsi)) {
4425 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4429 ether_addr_copy(vf->default_lan_addr.addr, mac);
4431 if (is_zero_ether_addr(mac)) {
4432 vf->pf_set_mac = false;
4433 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4435 vf->pf_set_mac = true;
4436 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4440 /* Force the VF interface down so it has to bring up with new MAC
4441 * address
4443 i40e_vc_reset_vf(vf, true);
4444 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4447 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4452 * i40e_ndo_set_vf_port_vlan
4453 * @netdev: network interface device structure
4454 * @vf_id: VF identifier
4455 * @vlan_id: VLAN ID
4456 * @qos: priority setting
4457 * @vlan_proto: vlan protocol
4459 * program VF vlan id and/or qos
4461 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4462 u16 vlan_id, u8 qos, __be16 vlan_proto)
4464 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
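/* e.g. vlan_id == 100 (0x064) with qos == 5: assuming
 * I40E_VLAN_PRIORITY_SHIFT == 13 (priority lives in PVID bits 15:13,
 * matching I40E_PRIORITY_MASK 0xe000), vlanprio == 0x064 | (5 << 13)
 * == 0xa064.
 */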
4465 struct i40e_netdev_priv *np = netdev_priv(netdev);
4466 bool allmulti = false, alluni = false;
4467 struct i40e_pf *pf = np->vsi->back;
4468 struct i40e_vsi *vsi;
4472 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4473 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4477 /* validate the request */
4478 ret = i40e_validate_vf(pf, vf_id);
4482 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4483 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4488 if (vlan_proto != htons(ETH_P_8021Q)) {
4489 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4490 ret = -EPROTONOSUPPORT;
4494 vf = &pf->vf[vf_id];
4495 if (!i40e_check_vf_init_timeout(vf)) {
4499 vsi = pf->vsi[vf->lan_vsi_idx];
4501 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4502 /* duplicate request, so just return success */
4505 i40e_vlan_stripping_enable(vsi);
4507 /* Locked once because multiple functions below iterate list */
4508 spin_lock_bh(&vsi->mac_filter_hash_lock);
4510 /* Check for condition where there was already a port VLAN ID
4511 * filter set and now it is being deleted by setting it to zero.
4512 * Additionally check for the condition where there was a port
4513 * VLAN but now there is a new and different port VLAN being set.
4514 * Before deleting all the old VLAN filters we must add new ones
4515 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4516 * MAC addresses deleted.
4518 if ((!(vlan_id || qos) ||
4519 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4521 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4523 dev_info(&vsi->back->pdev->dev,
4524 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4525 vsi->back->hw.aq.asq_last_status);
4526 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4531 if (vsi->info.pvid) {
4532 /* remove all filters on the old VLAN */
4533 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4537 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4539 /* disable promisc modes in case they were enabled */
4540 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4543 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4548 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4550 i40e_vsi_remove_pvid(vsi);
4551 spin_lock_bh(&vsi->mac_filter_hash_lock);
4554 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4555 vlan_id, qos, vf_id);
4557 /* add new VLAN filter for each MAC */
4558 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4560 dev_info(&vsi->back->pdev->dev,
4561 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4562 vsi->back->hw.aq.asq_last_status);
4563 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4567 /* remove the previously added non-VLAN MAC filters */
4568 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4571 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4573 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4576 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4579 /* Schedule the worker thread to take care of applying changes */
4580 i40e_service_event_schedule(vsi->back);
4583 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4587 /* The Port VLAN needs to be saved across resets the same as the
4588 * default LAN MAC address.
4590 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4592 i40e_vc_reset_vf(vf, true);
4593 /* During reset the VF got a new VSI, so refresh the pointer. */
4594 vsi = pf->vsi[vf->lan_vsi_idx];
4596 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4598 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4605 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4610 * i40e_ndo_set_vf_bw
4611 * @netdev: network interface device structure
4612 * @vf_id: VF identifier
4613 * @min_tx_rate: Minimum Tx rate
4614 * @max_tx_rate: Maximum Tx rate
4616 * configure VF Tx rate
4618 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4621 struct i40e_netdev_priv *np = netdev_priv(netdev);
4622 struct i40e_pf *pf = np->vsi->back;
4623 struct i40e_vsi *vsi;
4627 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4628 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4632 /* validate the request */
4633 ret = i40e_validate_vf(pf, vf_id);
4638 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4639 min_tx_rate, vf_id);
4644 vf = &pf->vf[vf_id];
4645 if (!i40e_check_vf_init_timeout(vf)) {
4649 vsi = pf->vsi[vf->lan_vsi_idx];
4651 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4655 vf->tx_rate = max_tx_rate;
4657 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4662 * i40e_ndo_get_vf_config
4663 * @netdev: network interface device structure
4664 * @vf_id: VF identifier
4665 * @ivi: VF configuration structure
4667 * return VF configuration
4669 int i40e_ndo_get_vf_config(struct net_device *netdev,
4670 int vf_id, struct ifla_vf_info *ivi)
4672 struct i40e_netdev_priv *np = netdev_priv(netdev);
4673 struct i40e_vsi *vsi = np->vsi;
4674 struct i40e_pf *pf = vsi->back;
4678 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4679 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4683 /* validate the request */
4684 ret = i40e_validate_vf(pf, vf_id);
4688 vf = &pf->vf[vf_id];
4689 /* first vsi is always the LAN vsi */
4690 vsi = pf->vsi[vf->lan_vsi_idx];
4698 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4700 ivi->max_tx_rate = vf->tx_rate;
4701 ivi->min_tx_rate = 0;
4702 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4703 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4704 I40E_VLAN_PRIORITY_SHIFT;
4705 if (!vf->link_forced)
4706 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4707 else if (vf->link_up)
4708 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4710 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4711 ivi->spoofchk = vf->spoofchk;
4712 ivi->trusted = vf->trusted;
4716 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
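
/* The three IFLA_VF_LINK_STATE_* cases above map directly onto the
 * iproute2 interface, e.g.:
 *	ip link set dev <pf-netdev> vf 0 state auto|enable|disable
 * "auto" tracks the physical link through i40e_set_vf_link_state();
 * "enable" and "disable" set vf->link_forced so a fixed link state is
 * reported to the VF regardless of the PHY.
 */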
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
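
/* Note on the update above: valid_sections flags only the SECURITY
 * section, and ctxt was zeroed with memset(), so when spoof checking is
 * being disabled sec_flags stays 0 and the single admin-queue VSI-params
 * call clears both the MAC and VLAN anti-spoof bits at once.
 */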
/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;

	/* request PF to sync mac/vlan filters for the VF */
	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
	pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled) {
		if (!vf->trusted) {
			dev_info(&pf->pdev->dev,
				 "VF %u no longer Trusted, deleting all cloud filters\n",
				 vf_id);
			i40e_del_all_cloud_filters(vf);
		}
	}

out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
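
/* Trust is toggled from the host with, e.g.:
 *	ip link set dev <pf-netdev> vf 0 trust on
 * Changing trust resets the VF (i40e_vc_reset_vf() above) because
 * privileges such as promiscuous mode and, with ADq, cloud filters are
 * only honored for trusted VFs.
 */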
/**
 * i40e_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-127)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
		      struct ifla_vf_stats *vf_stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_eth_stats *stats;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;

	/* validate the request */
	if (i40e_validate_vf(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
		return -EBUSY;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;

	i40e_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes   = stats->rx_bytes;
	vf_stats->tx_bytes   = stats->tx_bytes;
	vf_stats->broadcast  = stats->rx_broadcast;
	vf_stats->multicast  = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

	return 0;
}
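
/* These counters surface through the rtnetlink IFLA_VF_STATS attributes,
 * e.g. in the per-VF lines of "ip -s link show dev <pf-netdev>". The
 * rx/tx packet totals are summed from the VSI's unicast, broadcast and
 * multicast counters, so they are only as fresh as the
 * i40e_update_eth_stats() call above.
 */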