1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
4 #include <net/xdp_sock_drv.h>
7 #include "ice_dcb_lib.h"
9 static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
11 rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
12 return !!rx_ring->xdp_buf;
15 static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
17 rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
18 return !!rx_ring->rx_buf;
22 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
23 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
25 * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
27 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
29 unsigned int offset, i;
31 mutex_lock(qs_cfg->qs_mutex);
32 offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
33 0, qs_cfg->q_count, 0);
34 if (offset >= qs_cfg->pf_map_size) {
35 mutex_unlock(qs_cfg->qs_mutex);
39 bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
40 for (i = 0; i < qs_cfg->q_count; i++)
41 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
42 mutex_unlock(qs_cfg->qs_mutex);
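/* Illustrative example of the contiguous assignment above (values are
 * hypothetical, not taken from real hardware): with qs_cfg->q_count = 4 and
 * the first free run in pf_map starting at bit 12, the loop produces
 *
 *	vsi_map[vsi_map_offset + 0] = 12
 *	vsi_map[vsi_map_offset + 1] = 13
 *	vsi_map[vsi_map_offset + 2] = 14
 *	vsi_map[vsi_map_offset + 3] = 15
 *
 * i.e. relative VSI queue i maps to absolute PF queue (offset + i).
 */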
48 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
49 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
51 * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
53 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
55 unsigned int i, index = 0;
57 mutex_lock(qs_cfg->qs_mutex);
58 for (i = 0; i < qs_cfg->q_count; i++) {
59 index = find_next_zero_bit(qs_cfg->pf_map,
60 qs_cfg->pf_map_size, index);
61 if (index >= qs_cfg->pf_map_size)
63 set_bit(index, qs_cfg->pf_map);
64 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
66 mutex_unlock(qs_cfg->qs_mutex);
70 for (index = 0; index < i; index++) {
71 clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
72 qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
74 mutex_unlock(qs_cfg->qs_mutex);
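/* Illustrative sketch of the scatter path above (hypothetical values): with
 * qs_cfg->q_count = 3 and free bits 2, 5 and 9 in pf_map, the loop produces
 *
 *	vsi_map[vsi_map_offset + 0] = 2
 *	vsi_map[vsi_map_offset + 1] = 5
 *	vsi_map[vsi_map_offset + 2] = 9
 *
 * If the bitmap runs out part way through, the unwind loop above clears the
 * bits already taken and zeroes the vsi_map entries before -ENOMEM is
 * returned.
 */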
80 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
81 * @pf: the PF being configured
83 * @ena: enable or disable state of the queue
85 * This routine will wait for the given Rx queue of the PF to reach the
86 * enabled or disabled state.
87 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
88 * multiple retries; otherwise returns 0 on success.
90 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
94 for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
95 if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
96 QRX_CTRL_QENA_STAT_M))
106 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
107 * @vsi: the VSI being configured
108 * @v_idx: index of the vector in the VSI struct
110 * We allocate one q_vector and set the default ITR values associated
111 * with this q_vector. If allocation fails we return -ENOMEM.
113 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
115 struct ice_pf *pf = vsi->back;
116 struct ice_q_vector *q_vector;
118 /* allocate q_vector */
119 q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
125 q_vector->v_idx = v_idx;
126 q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
127 q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
128 q_vector->tx.itr_mode = ITR_DYNAMIC;
129 q_vector->rx.itr_mode = ITR_DYNAMIC;
130 q_vector->tx.type = ICE_TX_CONTAINER;
131 q_vector->rx.type = ICE_RX_CONTAINER;
133 if (vsi->type == ICE_VSI_VF)
135 /* only set affinity_mask if the CPU is online */
136 if (cpu_online(v_idx))
137 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
139 /* This will not be called in the driver load path because the netdev
140 * will not be created yet. All other cases will register the NAPI
141 * handler here (i.e. resume, reset/rebuild, etc.)
144 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
148 /* tie q_vector and VSI together */
149 vsi->q_vectors[v_idx] = q_vector;
155 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
156 * @vsi: VSI having the memory freed
157 * @v_idx: index of the vector to be freed
159 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
161 struct ice_q_vector *q_vector;
162 struct ice_pf *pf = vsi->back;
163 struct ice_tx_ring *tx_ring;
164 struct ice_rx_ring *rx_ring;
167 dev = ice_pf_to_dev(pf);
168 if (!vsi->q_vectors[v_idx]) {
169 dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
172 q_vector = vsi->q_vectors[v_idx];
174 ice_for_each_tx_ring(tx_ring, q_vector->tx)
175 tx_ring->q_vector = NULL;
176 ice_for_each_rx_ring(rx_ring, q_vector->rx)
177 rx_ring->q_vector = NULL;
179 /* only VSI with an associated netdev is set up with NAPI */
181 netif_napi_del(&q_vector->napi);
183 devm_kfree(dev, q_vector);
184 vsi->q_vectors[v_idx] = NULL;
188 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
189 * @hw: board specific structure
191 static void ice_cfg_itr_gran(struct ice_hw *hw)
193 u32 regval = rd32(hw, GLINT_CTL);
195 /* no need to update global register if ITR gran is already set */
196 if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
197 (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
198 GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
199 (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
200 GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
201 (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
202 GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
203 (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
204 GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
207 regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
208 GLINT_CTL_ITR_GRAN_200_M) |
209 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
210 GLINT_CTL_ITR_GRAN_100_M) |
211 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
212 GLINT_CTL_ITR_GRAN_50_M) |
213 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
214 GLINT_CTL_ITR_GRAN_25_M);
215 wr32(hw, GLINT_CTL, regval);
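/* Worked example, assuming ICE_ITR_GRAN_US is 2 (per the kernel-doc above):
 * each of the four GLINT_CTL_ITR_GRAN_* fields is written with the value 2,
 * e.g. (2 << GLINT_CTL_ITR_GRAN_200_S) & GLINT_CTL_ITR_GRAN_200_M, so every
 * ITR timer on the device ticks in 2 usec units.
 */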
219 * ice_calc_txq_handle - calculate the queue handle
220 * @vsi: VSI that ring belongs to
221 * @ring: ring to get the absolute queue index
222 * @tc: traffic class number
224 static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
226 WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
229 return ring->q_index - ring->ch->base_q;
231 /* The idea here is to subtract the queue offset of the TC that the
232 * ring belongs to from the ring's absolute queue index, which yields
233 * the queue's index within that TC.
235 return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
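/* Illustrative example (hypothetical numbers): if TC 2 starts at
 * tc_cfg.tc_info[2].qoffset = 8 and the ring's absolute index is
 * ring->q_index = 10, the handle returned above is 10 - 8 = 2, i.e. the
 * third queue within TC 2. Rings attached to a channel use the channel's
 * base_q the same way.
 */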
239 * ice_eswitch_calc_txq_handle - calculate a unique queue handle for a switchdev Tx ring
240 * @ring: pointer to ring which unique index is needed
242 * To work correctly with many netdevs, ring->q_index of Tx rings on a
243 * switchdev VSI can repeat. Hardware ring setup requires a unique q_index.
244 * Calculate it here by finding the index of this ring in vsi->tx_rings.
246 * Return ICE_INVAL_Q_INDEX when the index wasn't found. This should never
247 * happen, because the VSI is taken from ring->vsi, so the ring must be present in it.
249 static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
251 struct ice_vsi *vsi = ring->vsi;
254 ice_for_each_txq(vsi, i) {
255 if (vsi->tx_rings[i] == ring)
259 return ICE_INVAL_Q_INDEX;
263 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
264 * @ring: The Tx ring to configure
266 * This enables/disables XPS for a given Tx descriptor ring
267 * based on the TCs enabled for the VSI that ring belongs to.
269 static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
271 if (!ring->q_vector || !ring->netdev)
274 /* We only initialize XPS once, so as not to overwrite user settings */
275 if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
278 netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
283 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
284 * @ring: The Tx ring to configure
285 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
286 * @pf_q: queue index in the PF space
288 * Configure the Tx descriptor ring in TLAN context.
291 ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
293 struct ice_vsi *vsi = ring->vsi;
294 struct ice_hw *hw = &vsi->back->hw;
296 tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
298 tlan_ctx->port_num = vsi->port_info->lport;
300 /* Transmit Queue Length */
301 tlan_ctx->qlen = ring->count;
303 ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
306 tlan_ctx->pf_num = hw->pf_id;
308 /* queue belongs to a specific VSI type
309 * VF / VM index should be programmed per vmvf_type setting:
310 * for vmvf_type = VF, it is VF number between 0-256
311 * for vmvf_type = VM, it is VM number between 0-767
312 * for PF or EMP this field should be set to zero
319 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
321 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
324 /* Firmware expects vmvf_num to be absolute VF ID */
325 tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
326 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
328 case ICE_VSI_SWITCHDEV_CTRL:
329 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
335 /* make sure the context is associated with the right VSI */
337 tlan_ctx->src_vsi = ring->ch->vsi_num;
339 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
341 /* Restrict Tx timestamps to the PF VSI */
344 tlan_ctx->tsyn_ena = 1;
350 tlan_ctx->tso_ena = ICE_TX_LEGACY;
351 tlan_ctx->tso_qnum = pf_q;
353 /* Legacy or Advanced Host Interface:
354 * 0: Advanced Host Interface
355 * 1: Legacy Host Interface
357 tlan_ctx->legacy_int = ICE_TX_LEGACY;
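/* Illustrative notes on the context written above (example numbers are
 * hypothetical):
 *  - base is the ring's DMA address shifted by ICE_TLAN_CTX_BASE_S, i.e.
 *    expressed in 128 byte units assuming a 7-bit shift.
 *  - vmvf_num must be the absolute VF ID; e.g. with func_caps.vf_base_id = 64
 *    and vsi->vf_id = 3 the queue is programmed with vmvf_num = 67.
 */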
361 * ice_rx_offset - Return expected offset into page to access data
362 * @rx_ring: Ring we are requesting offset of
364 * Returns the offset value for ring into the data buffer.
366 static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
368 if (ice_ring_uses_build_skb(rx_ring))
370 else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
371 return XDP_PACKET_HEADROOM;
377 * ice_setup_rx_ctx - Configure a receive ring context
378 * @ring: The Rx ring to configure
380 * Configure the Rx descriptor ring in RLAN context.
382 static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
384 int chain_len = ICE_MAX_CHAINED_RX_BUFS;
385 struct ice_vsi *vsi = ring->vsi;
386 u32 rxdid = ICE_RXDID_FLEX_NIC;
387 struct ice_rlan_ctx rlan_ctx;
394 /* the Rx queue number in the global space of 2K Rx queues */
395 pf_q = vsi->rxq_map[ring->q_index];
397 /* clear the context structure first */
398 memset(&rlan_ctx, 0, sizeof(rlan_ctx));
400 /* Receive Queue Base Address.
401 * Indicates the starting address of the descriptor queue, defined in 128 byte units.
404 rlan_ctx.base = ring->dma >> 7;
406 rlan_ctx.qlen = ring->count;
408 /* Receive Packet Data Buffer Size.
409 * The Packet Data Buffer Size is defined in 128 byte units.
411 rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
413 /* use 32 byte descriptors */
416 /* Strip the Ethernet CRC bytes before the packet is posted to host
419 rlan_ctx.crcstrip = 1;
421 /* L2TSEL flag defines the reported L2 Tags in the receive descriptor
422 * and it needs to remain 1 for non-DVM capable configurations to not
423 * break backward compatibility for VF drivers. Setting this field to 0
424 * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
425 * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
426 * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
429 if (ice_is_dvm_ena(hw))
430 if (vsi->type == ICE_VSI_VF &&
431 ice_vf_is_port_vlan_ena(&vsi->back->vf[vsi->vf_id]))
438 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
439 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
440 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
442 /* This controls whether VLAN is stripped from inner headers
443 * The VLAN in the inner L2 header is stripped to the receive
444 * descriptor if enabled by this flag.
448 /* For AF_XDP ZC, we disallow packets spanning
449 * multiple buffers, thus letting us skip that
450 * handling in the fast path.
454 /* Max packet size for this queue - must not be set to a larger value
457 rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
458 chain_len * ring->rx_buf_len);
460 /* Rx queue threshold in units of 64 */
461 rlan_ctx.lrxqthresh = 1;
463 /* Enable Flexible Descriptors in the queue context which
464 * allows this driver to select a specific receive descriptor format
465 * increasing context priority to pick up profile ID; default is 0x01;
466 * setting it to 0x03 ensures the profile is programmed even if the previous context is of the same priority.
469 if (vsi->type != ICE_VSI_VF)
470 ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
472 ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
475 /* Absolute queue number out of 2K needs to be passed */
476 err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
478 dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
483 if (vsi->type == ICE_VSI_VF)
486 /* configure Rx buffer alignment */
487 if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
488 ice_clear_ring_build_skb_ena(ring);
490 ice_set_ring_build_skb_ena(ring);
492 ring->rx_offset = ice_rx_offset(ring);
494 /* init queue specific tail register */
495 ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
496 writel(0, ring->tail);
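/* Worked example for the context above (hypothetical values): with
 * rx_buf_len = 2048 the dbuf field becomes 2048 >> ICE_RLAN_CTX_DBUF_S = 16
 * (128 byte units, assuming a 7-bit shift), and with max_frame = 9728 the
 * rxmax field is clamped to min(9728, chain_len * 2048).
 */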
502 * ice_vsi_cfg_rxq - Configure an Rx queue
503 * @ring: the ring being configured
505 * Return 0 on success and a negative value on error.
507 int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
509 struct device *dev = ice_pf_to_dev(ring->vsi->back);
510 u16 num_bufs = ICE_DESC_UNUSED(ring);
513 ring->rx_buf_len = ring->vsi->rx_buf_len;
515 if (ring->vsi->type == ICE_VSI_PF) {
516 if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
517 /* coverity[check_return] */
518 xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
519 ring->q_index, ring->q_vector->napi.napi_id);
522 ring->xsk_pool = ice_xsk_pool(ring);
523 if (ring->xsk_pool) {
524 if (!ice_alloc_rx_buf_zc(ring))
526 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
529 xsk_pool_get_rx_frame_size(ring->xsk_pool);
530 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
531 MEM_TYPE_XSK_BUFF_POOL,
535 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
537 dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
540 if (!ice_alloc_rx_buf(ring))
542 if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
543 /* coverity[check_return] */
544 xdp_rxq_info_reg(&ring->xdp_rxq,
546 ring->q_index, ring->q_vector->napi.napi_id);
548 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
549 MEM_TYPE_PAGE_SHARED,
556 err = ice_setup_rx_ctx(ring);
558 dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
563 if (ring->xsk_pool) {
566 if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
567 dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
568 num_bufs, ring->q_index);
569 dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
574 ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
576 u16 pf_q = ring->vsi->rxq_map[ring->q_index];
578 dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
579 ring->q_index, pf_q);
585 ice_alloc_rx_bufs(ring, num_bufs);
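/* To summarize the setup above: an AF_XDP socket pool registers the ring's
 * xdp_rxq with MEM_TYPE_XSK_BUFF_POOL and fills it from the pool, while the
 * regular path registers MEM_TYPE_PAGE_SHARED and fills it with pages via
 * ice_alloc_rx_bufs(). Either way the ring context itself is programmed by
 * ice_setup_rx_ctx() before buffers are posted.
 */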
591 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
592 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
594 * This function first tries to find contiguous space. If it is not successful,
595 * it tries with the scatter approach.
597 * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
599 int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
603 ret = __ice_vsi_get_qs_contig(qs_cfg);
605 /* contig failed, so try with scatter approach */
606 qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
607 qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
608 qs_cfg->scatter_count);
609 ret = __ice_vsi_get_qs_sc(qs_cfg);
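/* Minimal usage sketch of a caller (illustrative; fields outside this file
 * are taken from the caller's context, cf. ice_vsi_get_qs() in ice_lib.c):
 *
 *	struct ice_qs_cfg tx_qs_cfg = {
 *		.qs_mutex = &pf->avail_q_mutex,
 *		.pf_map = pf->avail_txqs,
 *		.pf_map_size = pf->max_pf_txqs,
 *		.q_count = vsi->alloc_txq,
 *		.scatter_count = ICE_MAX_SCATTER_TXQS,
 *		.vsi_map = vsi->txq_map,
 *		.vsi_map_offset = 0,
 *		.mapping_mode = ICE_VSI_MAP_CONTIG,
 *	};
 *
 *	ret = __ice_vsi_get_qs(&tx_qs_cfg);
 */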
615 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
616 * @vsi: the VSI being configured
617 * @ena: start or stop the Rx ring
618 * @rxq_idx: 0-based Rx queue index for the VSI passed in
619 * @wait: wait or don't wait for configuration to finish in hardware
621 * Return 0 on success and negative on error.
624 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
626 int pf_q = vsi->rxq_map[rxq_idx];
627 struct ice_pf *pf = vsi->back;
628 struct ice_hw *hw = &pf->hw;
631 rx_reg = rd32(hw, QRX_CTRL(pf_q));
633 /* Skip if the queue is already in the requested state */
634 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
637 /* turn on/off the queue */
639 rx_reg |= QRX_CTRL_QENA_REQ_M;
641 rx_reg &= ~QRX_CTRL_QENA_REQ_M;
642 wr32(hw, QRX_CTRL(pf_q), rx_reg);
648 return ice_pf_rxq_wait(pf, pf_q, ena);
652 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
653 * @vsi: the VSI being configured
654 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
655 * @rxq_idx: 0-based Rx queue index for the VSI passed in
657 * This routine will wait for the given Rx queue of the VSI to reach the
658 * enabled or disabled state. Returns -ETIMEDOUT if the queue fails to reach
659 * the requested state after multiple retries; otherwise returns 0 on success.
662 int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
664 int pf_q = vsi->rxq_map[rxq_idx];
665 struct ice_pf *pf = vsi->back;
667 return ice_pf_rxq_wait(pf, pf_q, ena);
671 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
672 * @vsi: the VSI being configured
674 * We allocate one q_vector per queue interrupt. If allocation fails we return -ENOMEM.
677 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
679 struct device *dev = ice_pf_to_dev(vsi->back);
683 if (vsi->q_vectors[0]) {
684 dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
688 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
689 err = ice_vsi_alloc_q_vector(vsi, v_idx);
698 ice_free_q_vector(vsi, v_idx);
700 dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
701 vsi->num_q_vectors, vsi->vsi_num, err);
702 vsi->num_q_vectors = 0;
707 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
708 * @vsi: the VSI being configured
710 * This function maps descriptor rings to the queue-specific vectors allotted
711 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
712 * and Rx rings to the vector as "efficiently" as possible.
714 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
716 int q_vectors = vsi->num_q_vectors;
717 u16 tx_rings_rem, rx_rings_rem;
720 /* initially assign the remaining ring counts to the VSI's queue counts */
721 tx_rings_rem = vsi->num_txq;
722 rx_rings_rem = vsi->num_rxq;
724 for (v_id = 0; v_id < q_vectors; v_id++) {
725 struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
726 u8 tx_rings_per_v, rx_rings_per_v;
729 /* Tx rings mapping to vector */
730 tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
732 q_vector->num_ring_tx = tx_rings_per_v;
733 q_vector->tx.tx_ring = NULL;
734 q_vector->tx.itr_idx = ICE_TX_ITR;
735 q_base = vsi->num_txq - tx_rings_rem;
737 for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
738 struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
740 tx_ring->q_vector = q_vector;
741 tx_ring->next = q_vector->tx.tx_ring;
742 q_vector->tx.tx_ring = tx_ring;
744 tx_rings_rem -= tx_rings_per_v;
746 /* Rx rings mapping to vector */
747 rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
749 q_vector->num_ring_rx = rx_rings_per_v;
750 q_vector->rx.rx_ring = NULL;
751 q_vector->rx.itr_idx = ICE_RX_ITR;
752 q_base = vsi->num_rxq - rx_rings_rem;
754 for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
755 struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
757 rx_ring->q_vector = q_vector;
758 rx_ring->next = q_vector->rx.rx_ring;
759 q_vector->rx.rx_ring = rx_ring;
761 rx_rings_rem -= rx_rings_per_v;
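/* Worked example of the distribution above (hypothetical counts): with
 * num_txq = num_rxq = 8 and num_q_vectors = 3, DIV_ROUND_UP spreads the
 * rings as 3, 3 and 2 per vector; each ring is linked into the vector's
 * singly linked tx.tx_ring / rx.rx_ring list as it is assigned.
 */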
766 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
767 * @vsi: the VSI having memory freed
769 void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
773 ice_for_each_q_vector(vsi, v_idx)
774 ice_free_q_vector(vsi, v_idx);
778 * ice_vsi_cfg_txq - Configure single Tx queue
779 * @vsi: the VSI that queue belongs to
780 * @ring: Tx ring to be configured
781 * @qg_buf: queue group buffer
784 ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
785 struct ice_aqc_add_tx_qgrp *qg_buf)
787 u8 buf_len = struct_size(qg_buf, txqs, 1);
788 struct ice_tlan_ctx tlan_ctx = { 0 };
789 struct ice_aqc_add_txqs_perq *txq;
790 struct ice_channel *ch = ring->ch;
791 struct ice_pf *pf = vsi->back;
792 struct ice_hw *hw = &pf->hw;
798 ice_cfg_xps_tx_ring(ring);
800 pf_q = ring->reg_idx;
801 ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
802 /* copy context contents into the qg_buf */
803 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
804 ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
807 /* init queue specific tail reg. It is referred to as the
808 * transmit comm scheduler queue doorbell.
810 ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
812 if (IS_ENABLED(CONFIG_DCB))
817 /* Add unique software queue handle of the Tx queue per
818 * TC into the VSI Tx ring
820 if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
821 ring->q_handle = ice_eswitch_calc_txq_handle(ring);
823 if (ring->q_handle == ICE_INVAL_Q_INDEX)
826 ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
830 status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
831 ring->q_handle, 1, qg_buf, buf_len,
834 status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
835 ring->q_handle, 1, qg_buf, buf_len,
838 dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
843 /* Add Tx Queue TEID into the VSI Tx ring from the
844 * response. This will complete configuring and
845 * enabling the queue.
847 txq = &qg_buf->txqs[0];
848 if (pf_q == le16_to_cpu(txq->txq_id))
849 ring->txq_teid = le32_to_cpu(txq->q_teid);
855 * ice_cfg_itr - configure the initial interrupt throttle values
856 * @hw: pointer to the HW structure
857 * @q_vector: interrupt vector that's being configured
859 * Configure interrupt throttling values for the ring containers that are
860 * associated with the interrupt vector passed in.
862 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
864 ice_cfg_itr_gran(hw);
866 if (q_vector->num_ring_rx)
867 ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);
869 if (q_vector->num_ring_tx)
870 ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);
872 ice_write_intrl(q_vector, q_vector->intrl);
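/* Note: ice_write_itr() programs the per-container (Rx/Tx) throttling
 * interval, while ice_write_intrl() programs the per-vector interrupt rate
 * limit; ice_cfg_itr_gran() is called first so the ITR timers use the
 * expected granularity.
 */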
876 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
877 * @vsi: the VSI being configured
878 * @txq: Tx queue being mapped to MSI-X vector
879 * @msix_idx: MSI-X vector index within the function
880 * @itr_idx: ITR index of the interrupt cause
882 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
883 * within the function space.
886 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
888 struct ice_pf *pf = vsi->back;
889 struct ice_hw *hw = &pf->hw;
892 itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
894 val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
895 ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
897 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
898 if (ice_is_xdp_ena_vsi(vsi)) {
899 u32 xdp_txq = txq + vsi->num_xdp_txq;
901 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
908 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
909 * @vsi: the VSI being configured
910 * @rxq: Rx queue being mapped to MSI-X vector
911 * @msix_idx: MSI-X vector index within the function
912 * @itr_idx: ITR index of the interrupt cause
914 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
915 * within the function space.
918 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
920 struct ice_pf *pf = vsi->back;
921 struct ice_hw *hw = &pf->hw;
924 itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
926 val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
927 ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
929 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
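/* Worked example of the register value built above (hypothetical indices):
 * for msix_idx = 5 and itr_idx = ICE_RX_ITR, the queue's QINT_RQCTL is
 * written with
 *
 *	QINT_RQCTL_CAUSE_ENA_M |
 *	(ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) |
 *	(5 << QINT_RQCTL_MSIX_INDX_S)
 *
 * which routes the queue's interrupt cause to MSI-X vector 5 using that
 * vector's Rx ITR. ice_cfg_txq_interrupt() builds QINT_TQCTL the same way.
 */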
935 * ice_trigger_sw_intr - trigger a software interrupt
936 * @hw: pointer to the HW structure
937 * @q_vector: interrupt vector to trigger the software interrupt for
939 void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
941 wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
942 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
943 GLINT_DYN_CTL_SWINT_TRIG_M |
944 GLINT_DYN_CTL_INTENA_M);
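/* The write above sets GLINT_DYN_CTL_SWINT_TRIG_M together with
 * GLINT_DYN_CTL_INTENA_M and selects ICE_ITR_NONE as the ITR index, so a
 * software interrupt fires on the vector without updating any of its ITR
 * timers.
 */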
948 * ice_vsi_stop_tx_ring - Disable single Tx ring
949 * @vsi: the VSI being configured
950 * @rst_src: reset source
951 * @rel_vmvf_num: Relative ID of VF/VM
952 * @ring: Tx ring to be stopped
953 * @txq_meta: Meta data of Tx ring to be stopped
956 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
957 u16 rel_vmvf_num, struct ice_tx_ring *ring,
958 struct ice_txq_meta *txq_meta)
960 struct ice_pf *pf = vsi->back;
961 struct ice_q_vector *q_vector;
962 struct ice_hw *hw = &pf->hw;
966 /* clear cause_ena bit for disabled queues */
967 val = rd32(hw, QINT_TQCTL(ring->reg_idx));
968 val &= ~QINT_TQCTL_CAUSE_ENA_M;
969 wr32(hw, QINT_TQCTL(ring->reg_idx), val);
971 /* software is expected to wait for 100 ns */
974 /* trigger a software interrupt for the vector
975 * associated with the queue to schedule the NAPI handler
977 q_vector = ring->q_vector;
979 ice_trigger_sw_intr(hw, q_vector);
981 status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
982 txq_meta->tc, 1, &txq_meta->q_handle,
983 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
986 /* if the disable queue command was exercised during an
987 * active reset flow, -EBUSY is returned.
988 * This is not an error as the reset operation disables
989 * queues at the hardware level anyway.
991 if (status == -EBUSY) {
992 dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
993 } else if (status == -ENOENT) {
994 dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
996 dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
1005 * ice_fill_txq_meta - Prepare the Tx queue's meta data
1006 * @vsi: VSI that ring belongs to
1007 * @ring: ring that txq_meta will be based on
1008 * @txq_meta: a helper struct that wraps Tx queue's information
1010 * Set up a helper struct that will contain all the necessary fields that
1011 * are needed for stopping the Tx queue
1014 ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
1015 struct ice_txq_meta *txq_meta)
1017 struct ice_channel *ch = ring->ch;
1020 if (IS_ENABLED(CONFIG_DCB))
1025 txq_meta->q_id = ring->reg_idx;
1026 txq_meta->q_teid = ring->txq_teid;
1027 txq_meta->q_handle = ring->q_handle;
1029 txq_meta->vsi_idx = ch->ch_vsi->idx;
1032 txq_meta->vsi_idx = vsi->idx;
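/* Minimal usage sketch (illustrative): callers typically pair this helper
 * with ice_vsi_stop_tx_ring(), e.g. assuming no reset is in progress
 * (ICE_NO_RESET):
 *
 *	struct ice_txq_meta txq_meta = { };
 *
 *	ice_fill_txq_meta(vsi, ring, &txq_meta);
 *	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, ring, &txq_meta);
 *
 * as done per queue when stopping all of a VSI's Tx rings.
 */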