// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 */

#include <linux/module.h>

#include "rvu_struct.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u16 pcifunc);
static const char *nix_get_ctx_name(int ctype);
enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};
/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128

struct mce {
	struct hlist_node	node;
	u16			pcifunc;
};
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first NIX block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}
	return 0;
}
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}
int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}
int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;

	return idx;
}
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}
int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
{
	if (hw->cap.nix_multiple_dwrr_mtu)
		return NIX_AF_DWRR_MTUX(smq_link_type);

	if (smq_link_type == SMQ_LINK_TYPE_SDP)
		return NIX_AF_DWRR_SDP_MTU;

	/* Here it's same reg for RPM and LBK */
	return NIX_AF_DWRR_RPM_MTU;
}
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}
}

u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	if (bytes == 9728)
		return 4;

	if (bytes == 10240)
		return 5;

	return ilog2(bytes);
}
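/* A worked round trip of the two helpers above, using assumed values
 * rather than anything taken from the hardware manual: an encoded
 * dwrr_mtu of 10 expands to BIT_ULL(10) = 1024 bytes, and 1024 bytes
 * encodes back to 10; only the reserved encodings 4 and 5 break the
 * power-of-2 pattern, expanding to 9728 and 10240 bytes respectively.
 */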
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be torn down after
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after SW_SYNC operation. To
	 * ensure operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packet on lbk link 1
		 * (which corresponds to LBK1), the same packet will be
		 * received on NIX1 over lbk link 0. If NIX1 sends packet on
		 * lbk link 0 (which corresponds to LBK2), the packet will be
		 * received on NIX0 lbk link 1.
		 * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;

		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}
	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	sdp_chan_cnt = cfg & 0xFFF;
	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 16)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > 255)
			return -EINVAL;

		bpid = sdp_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}
int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
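		/* Field layout assumed from the masks used above: bits [8:0]
		 * of NIX_AF_RX_CHANX_CFG carry the backpressure ID and
		 * bit 16 acts as the BP enable, so this read-modify-write
		 * swaps in the new BPID and turns backpressure on per channel.
		 */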
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
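	/* A worked reading of the mask above (TCP flag bit positions taken
	 * from the standard header layout): 0xFFF2 clears bits 0, 2 and 3,
	 * i.e. FIN, RST and PSH, in first/middle segments while leaving
	 * SYN (bit 1), ACK, URG and the remaining bits untouched.
	 */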
	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int err, grp, num_indices;
	u64 val;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
	      ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);

	if (tag_lsb_as_adder)
		val |= BIT_ULL(5);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}
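/* Example for nixlf_rss_ctx_init() above with assumed values: rss_sz = 256
 * and rss_grps = 4 program group N with offset N * 256 and a size field of
 * ilog2(256) - 1 = 7, so each group addresses its own 256-entry slice of
 * the indirection table.
 */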
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;
	int ret;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
		    result->compcode == NIX_AQ_COMP_LOCKERR ||
		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
			if (ret)
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}
		return -EBUSY;
	}
	return 0;
}
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}
	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}
	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;
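	/* Layout of the shared result buffer implied by the offsets above:
	 * bytes [0, 128) hold the nix_aq_res_s completion result, bytes
	 * [128, 256) the context to be written, and bytes from 256 onward
	 * the byte-mask selecting which context fields an AQ WRITE may
	 * modify.
	 */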
	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}
	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			      ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			      ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			      ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       sizeof(struct nix_bandprof_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}
static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
				 struct nix_aq_enq_req *req, u8 ctype)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	int rc, word;

	if (req->ctype != NIX_AQ_CTYPE_CQ)
		return 0;

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
				 req->hdr.pcifunc, ctype, req->qidx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
			__func__, nix_get_ctx_name(ctype), req->qidx,
			req->hdr.pcifunc);
		return rc;
	}

	/* Make copy of original context & mask which are required
	 * for resubmission
	 */
	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));

	/* exclude fields which HW can update */
	aq_req.cq_mask.cq_err       = 0;
	aq_req.cq_mask.wrptr        = 0;
	aq_req.cq_mask.tail         = 0;
	aq_req.cq_mask.head         = 0;
	aq_req.cq_mask.avg_level    = 0;
	aq_req.cq_mask.update_time  = 0;
	aq_req.cq_mask.substream    = 0;

	/* Context mask (cq_mask) holds mask value of fields which
	 * are changed in AQ WRITE operation.
	 * for example cq.drop = 0xa;
	 *	       cq_mask.drop = 0xff;
	 * Below logic performs '&' between cq and cq_mask so that non
	 * updated fields are masked out for request and response
	 * comparison
	 */
	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
	     word++) {
		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
	}

	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;

	return 0;
}
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int err, retries = 5;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

retry:
	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);

	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
	 * As a work around perform CQ context read after each AQ write. If AQ
	 * read shows AQ write is not updated perform AQ write again.
	 */
	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
			if (retries--)
				goto retry;
			else
				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
		}
	}

	return err;
}
static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif

/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				   (struct nix_aq_enq_rsp *)rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask,
				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
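	/* Decoding the constant above: 0x8100, the 802.1Q TPID, sits in
	 * bits [31:16] for VLAN1 and 0x88A8, the 802.1AD TPID, in bits
	 * [15:0] for VLAN0, matching the comment's TPID assignment.
	 */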
	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		intf = NIX_INTF_TYPE_SDP;

	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}
/* Handle shaper update specially for few revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
			    int lvl, u64 reg, u64 regval)
{
	u64 regbase, oldval, sw_xoff = 0;
	u64 dbgval, md_debug0 = 0;
	unsigned long poll_tmo;
	bool rate_reg = 0;
	u32 schq;

	regbase = reg & 0xFFFF;
	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

	/* Check for rate register */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);

		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
		break;
	case NIX_TXSCH_LVL_TL2:
		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
			    regbase == NIX_AF_TL2X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL3:
		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
			    regbase == NIX_AF_TL3X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL4:
		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
			    regbase == NIX_AF_TL4X_PIR(0));
		break;
	case NIX_TXSCH_LVL_MDQ:
		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
			    regbase == NIX_AF_MDQX_PIR(0));
		break;
	default:
		return false;
	}

	if (!rate_reg)
		return false;

	/* Nothing special to do when state is not toggled */
	oldval = rvu_read64(rvu, blkaddr, reg);
	if ((oldval & 0x1) == (regval & 0x1)) {
		rvu_write64(rvu, blkaddr, reg, regval);
		return true;
	}

	/* PIR/CIR disable */
	if (!(regval & 0x1)) {
		rvu_write64(rvu, blkaddr, sw_xoff, 1);
		rvu_write64(rvu, blkaddr, reg, 0);
		udelay(4);
		rvu_write64(rvu, blkaddr, sw_xoff, 0);
		return true;
	}

	/* PIR/CIR enable */
	rvu_write64(rvu, blkaddr, sw_xoff, 1);
	if (md_debug0) {
		poll_tmo = jiffies + usecs_to_jiffies(10000);
		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
		do {
			if (time_after(jiffies, poll_tmo)) {
				dev_err(rvu->dev,
					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
					nixlf, schq, lvl);
				goto exit;
			}
			usleep_range(1, 5);
			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
	}
	rvu_write64(rvu, blkaddr, reg, regval);
exit:
	rvu_write64(rvu, blkaddr, sw_xoff, 0);
	return true;
}
static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
				  int lvl, int schq)
{
	u64 tlx_parent = 0, tlx_schedule = 0;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL2:
		tlx_parent   = NIX_AF_TL2X_PARENT(schq);
		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		tlx_parent   = NIX_AF_TL3X_PARENT(schq);
		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		tlx_parent   = NIX_AF_TL4X_PARENT(schq);
		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		/* no need to reset SMQ_CFG as HW clears this CSR
		 * on SMQ flush
		 */
		tlx_parent   = NIX_AF_MDQX_PARENT(schq);
		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
		break;
	default:
		return;
	}

	if (tlx_parent)
		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);

	if (tlx_schedule)
		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
}
/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int nixlf, int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		cir_reg = NIX_AF_MDQX_CIR(schq);
		pir_reg = NIX_AF_MDQX_PIR(schq);
		break;
	}

	/* Shaper state toggle needs wait/poll */
	if (hw->cap.nix_shaper_toggle_wait) {
		if (cir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, cir_reg, 0);
		if (pir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, pir_reg, 0);
		return;
	}

	if (cir_reg) {
		cfg = rvu_read64(rvu, blkaddr, cir_reg);
		rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
	}

	if (pir_reg) {
		cfg = rvu_read64(rvu, blkaddr, pir_reg);
		rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
	}
}
static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link_level;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	if (lvl != link_level)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
			      int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 reg;

	/* Skip this if shaping is not supported */
	if (!hw->cap.nix_shaping)
		return;

	/* Clear level specific SW_XOFF */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		reg = NIX_AF_TL1X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL2:
		reg = NIX_AF_TL2X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		reg = NIX_AF_TL3X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		reg = NIX_AF_TL4X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		reg = NIX_AF_MDQX_SW_XOFF(schq);
		break;
	default:
		return;
	}

	rvu_write64(rvu, blkaddr, reg, 0x0);
}
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) { /* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}
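/* The resulting link numbering from nix_get_tx_link() above (a sketch
 * inferred from the branches, not from the hardware manual): CGX/RPM
 * links occupy 0 .. hw->cgx_links - 1, LBK links start at hw->cgx_links,
 * and the SDP link sits at hw->cgx_links + hw->lbk_links.
 */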
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}
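/* Example for nix_get_txschq_range() above with assumed capability
 * values: nix_txsch_per_cgx_lmac = 16 and link = 2 give a CGX range of
 * [32, 48); the SDP range begins only after all CGX and LBK ranges,
 * mirroring the link numbering sketched earlier.
 */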
static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}
static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on transmit link to which PF_FUNC is mapped to.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate contiguous queue index requests first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int nixlf;
	u16 schq;

	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (rc)
		return rc;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
		if (rc)
			goto err;
	}

	/* Allocate requested Tx scheduler queues */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		pfvf_map = txsch->pfvf_map;

		if (!req->schq[lvl] && !req->schq_contig[lvl])
			continue;

		rsp->schq[lvl] = req->schq[lvl];
		rsp->schq_contig[lvl] = req->schq_contig[lvl];

		link = nix_get_tx_link(rvu, pcifunc);

		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
			start = link;
			end = link;
		} else if (hw->cap.nix_fixed_txschq_mapping) {
			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		} else {
			start = 0;
			end = txsch->schq.max;
		}

		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);

		/* Reset queue config */
		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
			schq = rsp->schq_contig_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
		}

		for (idx = 0; idx < req->schq[lvl]; idx++) {
			schq = rsp->schq_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
		}
	}

	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	goto exit;
err:
	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}
static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
				   struct nix_smq_flush_ctx *smq_flush_ctx)
{
	struct nix_smq_tree_ctx *smq_tree_ctx;
	u64 parent_off, regval;
	u16 schq;
	int lvl;

	smq_flush_ctx->smq = smq;

	schq = smq;
	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
		if (lvl == NIX_TXSCH_LVL_TL1) {
			smq_flush_ctx->tl1_schq = schq;
			smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
			smq_tree_ctx->pir_off = 0;
			smq_tree_ctx->pir_val = 0;
			parent_off = 0;
		} else if (lvl == NIX_TXSCH_LVL_TL2) {
			smq_flush_ctx->tl2_schq = schq;
			smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
			parent_off = NIX_AF_TL2X_PARENT(schq);
		} else if (lvl == NIX_TXSCH_LVL_TL3) {
			smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
			parent_off = NIX_AF_TL3X_PARENT(schq);
		} else if (lvl == NIX_TXSCH_LVL_TL4) {
			smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
			parent_off = NIX_AF_TL4X_PARENT(schq);
		} else if (lvl == NIX_TXSCH_LVL_MDQ) {
			smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
			parent_off = NIX_AF_MDQX_PARENT(schq);
		}
		/* save cir/pir register values */
		smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
		if (smq_tree_ctx->pir_off)
			smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);

		/* get parent txsch node */
		if (parent_off) {
			regval = rvu_read64(rvu, blkaddr, parent_off);
			schq = (regval >> 16) & 0x1FF;
		}
	}
}
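/* While walking up the tree above, the parent schq is assumed to live in
 * bits [24:16] of each level's PARENT register, which is why the read is
 * masked with 0x1FF (nine bits, i.e. up to 512 queues per level).
 */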
static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
{
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u64 regoff;
	int tl2;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	/* loop through all TL2s with matching PF_FUNC */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
	for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
		/* skip the smq(flush) TL2 */
		if (tl2 == smq_flush_ctx->tl2_schq)
			continue;
		/* skip unused TL2s */
		if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
			continue;
		/* skip if PF_FUNC doesn't match */
		if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
		    (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
				    ~RVU_PFVF_FUNC_MASK)))
			continue;
		/* enable/disable XOFF */
		regoff = NIX_AF_TL2X_SW_XOFF(tl2);
		if (enable)
			rvu_write64(rvu, blkaddr, regoff, 0x1);
		else
			rvu_write64(rvu, blkaddr, regoff, 0x0);
	}
}
static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
{
	u64 cir_off, pir_off, cir_val, pir_val;
	struct nix_smq_tree_ctx *smq_tree_ctx;
	int lvl;

	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
		cir_off = smq_tree_ctx->cir_off;
		cir_val = smq_tree_ctx->cir_val;
		pir_off = smq_tree_ctx->pir_off;
		pir_val = smq_tree_ctx->pir_val;

		if (enable) {
			rvu_write64(rvu, blkaddr, cir_off, cir_val);
			if (lvl != NIX_TXSCH_LVL_TL1)
				rvu_write64(rvu, blkaddr, pir_off, pir_val);
		} else {
			rvu_write64(rvu, blkaddr, cir_off, 0x0);
			if (lvl != NIX_TXSCH_LVL_TL1)
				rvu_write64(rvu, blkaddr, pir_off, 0x0);
		}
	}
}
2229 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2230 int smq, u16 pcifunc, int nixlf)
2232 struct nix_smq_flush_ctx *smq_flush_ctx;
2233 int pf = rvu_get_pf(pcifunc);
2234 u8 cgx_id = 0, lmac_id = 0;
2235 int err, restore_tx_en = 0;
2238 if (!is_rvu_otx2(rvu)) {
2239 /* Skip SMQ flush if pkt count is zero */
2240 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2245 /* enable cgx tx if disabled */
2246 if (is_pf_cgxmapped(rvu, pf)) {
2247 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2248 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2252 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2253 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
2256 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2257 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2258 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2260 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2261 /* Do SMQ flush and set enqueue xoff */
2262 cfg |= BIT_ULL(50) | BIT_ULL(49);
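	/* Bit usage inferred from the poll further below: BIT(49) kicks
	 * off the flush and is polled until it clears, while BIT(50)
	 * XOFFs any further enqueues into this SMQ for the duration of
	 * the flush.
	 */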
2263 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
	/* Disable backpressure from the physical link,
	 * otherwise the SMQ flush may stall.
	 */
2268 rvu_cgx_enadis_rx_bp(rvu, pf, false);
2270 /* Wait for flush to complete */
2271 err = rvu_poll_reg(rvu, blkaddr,
2272 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2275 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2278 /* clear XOFF on TL2s */
2279 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2280 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2281 kfree(smq_flush_ctx);
2283 rvu_cgx_enadis_rx_bp(rvu, pf, true);
2284 /* restore cgx tx state */
2286 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2290 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2292 int blkaddr, nixlf, lvl, schq, err;
2293 struct rvu_hwinfo *hw = rvu->hw;
2294 struct nix_txsch *txsch;
2295 struct nix_hw *nix_hw;
2298 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2300 return NIX_AF_ERR_AF_LF_INVALID;
2302 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2304 return NIX_AF_ERR_INVALID_NIXBLK;
2306 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2308 return NIX_AF_ERR_AF_LF_INVALID;
	/* Disable TL2/3 queue links and all XOFFs before SMQ flush */
2311 mutex_lock(&rvu->rsrc_lock);
2312 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2313 txsch = &nix_hw->txsch[lvl];
2315 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2318 for (schq = 0; schq < txsch->schq.max; schq++) {
2319 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2321 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2322 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2323 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2326 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2327 nix_get_tx_link(rvu, pcifunc));
2329 /* On PF cleanup, clear cfg done flag as
2330 * PF would have changed default config.
2332 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2333 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2334 schq = nix_get_tx_link(rvu, pcifunc);
2335 /* Do not clear pcifunc in txsch->pfvf_map[schq] because
2336 * VF might be using this TL1 queue
2338 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2339 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2343 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2344 for (schq = 0; schq < txsch->schq.max; schq++) {
2345 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2347 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2350 /* Now free scheduler queues to free pool */
2351 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above the aggregation level are shared across a PF
		 * and its VFs, hence skip freeing them.
		 */
2355 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2358 txsch = &nix_hw->txsch[lvl];
2359 for (schq = 0; schq < txsch->schq.max; schq++) {
2360 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2362 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2363 rvu_free_rsrc(&txsch->schq, schq);
2364 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2367 mutex_unlock(&rvu->rsrc_lock);
2369 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
2370 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
2371 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
2373 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2378 static int nix_txschq_free_one(struct rvu *rvu,
2379 struct nix_txsch_free_req *req)
2381 struct rvu_hwinfo *hw = rvu->hw;
2382 u16 pcifunc = req->hdr.pcifunc;
2383 int lvl, schq, nixlf, blkaddr;
2384 struct nix_txsch *txsch;
2385 struct nix_hw *nix_hw;
2389 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2391 return NIX_AF_ERR_AF_LF_INVALID;
2393 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2395 return NIX_AF_ERR_INVALID_NIXBLK;
2397 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2399 return NIX_AF_ERR_AF_LF_INVALID;
2401 lvl = req->schq_lvl;
2403 txsch = &nix_hw->txsch[lvl];
2405 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2408 pfvf_map = txsch->pfvf_map;
2409 mutex_lock(&rvu->rsrc_lock);
2411 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2412 rc = NIX_AF_ERR_TLX_INVALID;
	/* Clear SW_XOFF of this resource only.
	 * For the SMQ level, all XOFFs along the path
	 * need to be cleared by the user.
	 */
2420 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2422 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2423 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
	/* Flush if it is an SMQ. The onus of disabling
	 * TL2/3 queue links before the SMQ flush is on the user.
	 */
2428 if (lvl == NIX_TXSCH_LVL_SMQ &&
2429 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2430 rc = NIX_AF_SMQ_FLUSH_FAILED;
2434 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2436 /* Free the resource */
2437 rvu_free_rsrc(&txsch->schq, schq);
2438 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2439 mutex_unlock(&rvu->rsrc_lock);
2442 mutex_unlock(&rvu->rsrc_lock);
2446 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2447 struct nix_txsch_free_req *req,
2448 struct msg_rsp *rsp)
2450 if (req->flags & TXSCHQ_FREE_ALL)
2451 return nix_txschq_free(rvu, req->hdr.pcifunc);
2453 return nix_txschq_free_one(rvu, req);
2456 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2457 int lvl, u64 reg, u64 regval)
2459 u64 regbase = reg & 0xFFFF;
2462 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2465 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2466 /* Check if this schq belongs to this PF/VF or not */
2467 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2470 parent = (regval >> 16) & 0x1FF;
2471 /* Validate MDQ's TL4 parent */
2472 if (regbase == NIX_AF_MDQX_PARENT(0) &&
2473 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2476 /* Validate TL4's TL3 parent */
2477 if (regbase == NIX_AF_TL4X_PARENT(0) &&
2478 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2481 /* Validate TL3's TL2 parent */
2482 if (regbase == NIX_AF_TL3X_PARENT(0) &&
2483 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2486 /* Validate TL2's TL1 parent */
2487 if (regbase == NIX_AF_TL2X_PARENT(0) &&
2488 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2494 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2498 if (hw->cap.nix_shaping)
	/* If shaping and coloring are not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
2504 regbase = reg & 0xFFFF;
2507 case NIX_TXSCH_LVL_TL1:
2508 if (regbase == NIX_AF_TL1X_CIR(0))
2511 case NIX_TXSCH_LVL_TL2:
2512 if (regbase == NIX_AF_TL2X_CIR(0) ||
2513 regbase == NIX_AF_TL2X_PIR(0))
2516 case NIX_TXSCH_LVL_TL3:
2517 if (regbase == NIX_AF_TL3X_CIR(0) ||
2518 regbase == NIX_AF_TL3X_PIR(0))
2521 case NIX_TXSCH_LVL_TL4:
2522 if (regbase == NIX_AF_TL4X_CIR(0) ||
2523 regbase == NIX_AF_TL4X_PIR(0))
2526 case NIX_TXSCH_LVL_MDQ:
2527 if (regbase == NIX_AF_MDQX_CIR(0) ||
2528 regbase == NIX_AF_MDQX_PIR(0))
2535 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2536 u16 pcifunc, int blkaddr)
2541 schq = nix_get_tx_link(rvu, pcifunc);
2542 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2543 /* Skip if PF has already done the config */
2544 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2546 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2547 (TXSCH_TL1_DFLT_RR_PRIO << 1));
	/* On OcteonTx2 the config was in bytes; on newer silicons
	 * it's changed to a weight.
	 */
2552 if (!rvu->hw->cap.nix_common_dwrr_mtu)
2553 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2554 TXSCH_TL1_DFLT_RR_QTM);
2556 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2557 CN10K_MAX_DWRR_WEIGHT);
2559 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2560 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
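/* Worked example for the SCHEDULE writes above: on OcteonTx2 the value
 * is a byte quantum directly, while on CN10K it is a weight and HW
 * computes 'base DWRR MTU * weight' (see rvu_nix_setup_capabilities()).
 * With the default 8192-byte base MTU, a queue with twice another
 * queue's weight is served twice the bytes per DWRR round; the numbers
 * are illustrative only.
 */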
2563 /* Register offset - [15:0]
2564 * Scheduler Queue number - [25:16]
2566 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
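/* Decoding example (illustrative, assuming TXSCHQ_IDX_SHIFT is 16 per
 * the layout above): for a request 'reg' targeting, say, TL3 schq 5,
 * 'reg & 0xFFFF' recovers the register offset while
 * TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT) recovers the queue number 5.
 */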
2568 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2569 int blkaddr, struct nix_txschq_config *req,
2570 struct nix_txschq_config *rsp)
2572 u16 pcifunc = req->hdr.pcifunc;
2576 for (idx = 0; idx < req->num_regs; idx++) {
2577 reg = req->reg[idx];
2578 reg &= NIX_TX_SCHQ_MASK;
2579 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2580 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2581 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2582 return NIX_AF_INVAL_TXSCHQ_CFG;
2583 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2585 rsp->lvl = req->lvl;
2586 rsp->num_regs = req->num_regs;
2590 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
2591 struct nix_txsch *txsch, bool enable)
2593 struct rvu_hwinfo *hw = rvu->hw;
2594 int lbk_link_start, lbk_links;
2595 u8 pf = rvu_get_pf(pcifunc);
2599 if (!is_pf_cgxmapped(rvu, pf))
2602 cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
2603 lbk_link_start = hw->cgx_links;
2605 for (schq = 0; schq < txsch->schq.max; schq++) {
2606 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
		/* Enable all LBK links with channel 63 by default so that
		 * packets can be sent to LBK with an NPC TX MCAM rule.
		 */
2611 lbk_links = hw->lbk_links;
2613 rvu_write64(rvu, blkaddr,
2614 NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2620 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2621 struct nix_txschq_config *req,
2622 struct nix_txschq_config *rsp)
2624 u64 reg, val, regval, schq_regbase, val_mask;
2625 struct rvu_hwinfo *hw = rvu->hw;
2626 u16 pcifunc = req->hdr.pcifunc;
2627 struct nix_txsch *txsch;
2628 struct nix_hw *nix_hw;
2629 int blkaddr, idx, err;
2633 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2634 req->num_regs > MAX_REGS_PER_MBOX_MSG)
2635 return NIX_AF_INVAL_TXSCHQ_CFG;
2637 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2641 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2643 return NIX_AF_ERR_INVALID_NIXBLK;
2646 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2648 txsch = &nix_hw->txsch[req->lvl];
2649 pfvf_map = txsch->pfvf_map;
2651 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2652 pcifunc & RVU_PFVF_FUNC_MASK) {
2653 mutex_lock(&rvu->rsrc_lock);
2654 if (req->lvl == NIX_TXSCH_LVL_TL1)
2655 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2656 mutex_unlock(&rvu->rsrc_lock);
2660 for (idx = 0; idx < req->num_regs; idx++) {
2661 reg = req->reg[idx];
2662 reg &= NIX_TX_SCHQ_MASK;
2663 regval = req->regval[idx];
2664 schq_regbase = reg & 0xFFFF;
2665 val_mask = req->regval_mask[idx];
2667 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2668 txsch->lvl, reg, regval))
2669 return NIX_AF_INVAL_TXSCHQ_CFG;
2671 /* Check if shaping and coloring is supported */
2672 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2675 val = rvu_read64(rvu, blkaddr, reg);
2676 regval = (val & val_mask) | (regval & ~val_mask);
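		/* i.e. a read-modify-write: mask bits set to 1 keep the
		 * current HW value and mask bits set to 0 take the
		 * requested value. For example, a val_mask of
		 * ~GENMASK_ULL(15, 0) would update only bits [15:0] of
		 * the register (illustrative).
		 */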
2678 /* Handle shaping state toggle specially */
2679 if (hw->cap.nix_shaper_toggle_wait &&
2680 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2681 req->lvl, reg, regval))
2684 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2685 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2686 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2688 regval &= ~(0x7FULL << 24);
2689 regval |= ((u64)nixlf << 24);
2692 /* Clear 'BP_ENA' config, if it's not allowed */
2693 if (!hw->cap.nix_tx_link_bp) {
2694 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2695 (schq_regbase & 0xFF00) ==
2696 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2697 regval &= ~BIT_ULL(13);
2700 /* Mark config as done for TL1 by PF */
2701 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2702 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2703 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2704 mutex_lock(&rvu->rsrc_lock);
2705 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2706 NIX_TXSCHQ_CFG_DONE);
2707 mutex_unlock(&rvu->rsrc_lock);
		/* SMQ flush is special, hence split the register write such
		 * that the flush is done first and the rest of the bits are
		 * written later.
		 */
2713 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2714 (regval & BIT_ULL(49))) {
2715 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2716 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2717 regval &= ~BIT_ULL(49);
2719 rvu_write64(rvu, blkaddr, reg, regval);
2725 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2726 struct nix_vtag_config *req)
2728 u64 regval = req->vtag_size;
2730 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2731 req->vtag_size > VTAGSIZE_T8)
	/* RX VTAG Type 7 is reserved for VF VLAN */
2735 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2736 return NIX_AF_ERR_RX_VTAG_INUSE;
2738 if (req->rx.capture_vtag)
2739 regval |= BIT_ULL(5);
2740 if (req->rx.strip_vtag)
2741 regval |= BIT_ULL(4);
2743 rvu_write64(rvu, blkaddr,
2744 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2748 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2749 u16 pcifunc, int index)
2751 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2752 struct nix_txvlan *vlan;
2755 return NIX_AF_ERR_INVALID_NIXBLK;
2757 vlan = &nix_hw->txvlan;
2758 if (vlan->entry2pfvf_map[index] != pcifunc)
2759 return NIX_AF_ERR_PARAM;
2761 rvu_write64(rvu, blkaddr,
2762 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2763 rvu_write64(rvu, blkaddr,
2764 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2766 vlan->entry2pfvf_map[index] = 0;
2767 rvu_free_rsrc(&vlan->rsrc, index);
2772 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2774 struct nix_txvlan *vlan;
2775 struct nix_hw *nix_hw;
2778 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2782 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2786 vlan = &nix_hw->txvlan;
2788 mutex_lock(&vlan->rsrc_lock);
2789 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2790 for (index = 0; index < vlan->rsrc.max; index++) {
2791 if (vlan->entry2pfvf_map[index] == pcifunc)
2792 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2794 mutex_unlock(&vlan->rsrc_lock);
2797 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2800 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2801 struct nix_txvlan *vlan;
2806 return NIX_AF_ERR_INVALID_NIXBLK;
2808 vlan = &nix_hw->txvlan;
2810 mutex_lock(&vlan->rsrc_lock);
2812 index = rvu_alloc_rsrc(&vlan->rsrc);
2814 mutex_unlock(&vlan->rsrc_lock);
2818 mutex_unlock(&vlan->rsrc_lock);
2820 regval = size ? vtag : vtag << 32;
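	/* Assumption from the VTAGSIZE encoding: a 4-byte vtag (size == 0,
	 * i.e. VTAGSIZE_T4) is placed in the upper 32 bits of DEF_DATA,
	 * while an 8-byte vtag is written as-is.
	 */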
2822 rvu_write64(rvu, blkaddr,
2823 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2824 rvu_write64(rvu, blkaddr,
2825 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2830 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2831 struct nix_vtag_config *req)
2833 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2834 u16 pcifunc = req->hdr.pcifunc;
2835 int idx0 = req->tx.vtag0_idx;
2836 int idx1 = req->tx.vtag1_idx;
2837 struct nix_txvlan *vlan;
2841 return NIX_AF_ERR_INVALID_NIXBLK;
2843 vlan = &nix_hw->txvlan;
2844 if (req->tx.free_vtag0 && req->tx.free_vtag1)
2845 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2846 vlan->entry2pfvf_map[idx1] != pcifunc)
2847 return NIX_AF_ERR_PARAM;
2849 mutex_lock(&vlan->rsrc_lock);
2851 if (req->tx.free_vtag0) {
2852 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2857 if (req->tx.free_vtag1)
2858 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2861 mutex_unlock(&vlan->rsrc_lock);
2865 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2866 struct nix_vtag_config *req,
2867 struct nix_vtag_config_rsp *rsp)
2869 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2870 struct nix_txvlan *vlan;
2871 u16 pcifunc = req->hdr.pcifunc;
2874 return NIX_AF_ERR_INVALID_NIXBLK;
2876 vlan = &nix_hw->txvlan;
2877 if (req->tx.cfg_vtag0) {
2879 nix_tx_vtag_alloc(rvu, blkaddr,
2880 req->tx.vtag0, req->vtag_size);
2882 if (rsp->vtag0_idx < 0)
2883 return NIX_AF_ERR_TX_VTAG_NOSPC;
2885 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2888 if (req->tx.cfg_vtag1) {
2890 nix_tx_vtag_alloc(rvu, blkaddr,
2891 req->tx.vtag1, req->vtag_size);
2893 if (rsp->vtag1_idx < 0)
2896 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2902 if (req->tx.cfg_vtag0)
2903 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2905 return NIX_AF_ERR_TX_VTAG_NOSPC;
2908 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2909 struct nix_vtag_config *req,
2910 struct nix_vtag_config_rsp *rsp)
2912 u16 pcifunc = req->hdr.pcifunc;
2913 int blkaddr, nixlf, err;
2915 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2919 if (req->cfg_type) {
2920 /* rx vtag configuration */
2921 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2923 return NIX_AF_ERR_PARAM;
2925 /* tx vtag configuration */
2926 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2927 (req->tx.free_vtag0 || req->tx.free_vtag1))
2928 return NIX_AF_ERR_PARAM;
2930 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2931 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2933 if (req->tx.free_vtag0 || req->tx.free_vtag1)
2934 return nix_tx_vtag_decfg(rvu, blkaddr, req);
2940 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2941 int mce, u8 op, u16 pcifunc, int next, bool eol)
2943 struct nix_aq_enq_req aq_req;
2946 aq_req.hdr.pcifunc = 0;
2947 aq_req.ctype = NIX_AQ_CTYPE_MCE;
2951 /* Use RSS with RSS index 0 */
2953 aq_req.mce.index = 0;
2954 aq_req.mce.eol = eol;
2955 aq_req.mce.pf_func = pcifunc;
2956 aq_req.mce.next = next;
2958 /* All fields valid */
2959 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2961 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2963 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2964 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2970 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2971 u16 pcifunc, bool add)
2973 struct mce *mce, *tail = NULL;
2974 bool delete = false;
2976 /* Scan through the current list */
2977 hlist_for_each_entry(mce, &mce_list->head, node) {
2978 /* If already exists, then delete */
2979 if (mce->pcifunc == pcifunc && !add) {
2982 } else if (mce->pcifunc == pcifunc && add) {
2983 /* entry already exists */
2990 hlist_del(&mce->node);
2999 /* Add a new one to the list, at the tail */
3000 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3003 mce->pcifunc = pcifunc;
3005 hlist_add_head(&mce->node, &mce_list->head);
3007 hlist_add_behind(&mce->node, &tail->node);
3012 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
3013 struct nix_mce_list *mce_list,
3014 int mce_idx, int mcam_index, bool add)
3016 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
3017 struct npc_mcam *mcam = &rvu->hw->mcam;
3018 struct nix_mcast *mcast;
3019 struct nix_hw *nix_hw;
3025 /* Get this PF/VF func's MCE index */
3026 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
3028 if (idx > (mce_idx + mce_list->max)) {
3030 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
3031 __func__, idx, mce_list->max,
3032 pcifunc >> RVU_PFVF_PF_SHIFT);
3036 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
3040 mcast = &nix_hw->mcast;
3041 mutex_lock(&mcast->mce_lock);
3043 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
3047 /* Disable MCAM entry in NPC */
3048 if (!mce_list->count) {
3049 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3050 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
3054 /* Dump the updated list to HW */
3056 last_idx = idx + mce_list->count - 1;
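	/* e.g. a 3-entry list starting at idx 10 is written at indices
	 * 10, 11 and 12; only the last entry sees next_idx (13) >
	 * last_idx (12) and so gets EOL set.
	 */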
3057 hlist_for_each_entry(mce, &mce_list->head, node) {
		/* EOL should be set in the last MCE */
		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
					mce->pcifunc, next_idx,
					next_idx > last_idx);
3072 mutex_unlock(&mcast->mce_lock);
3076 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
3077 struct nix_mce_list **mce_list, int *mce_idx)
3079 struct rvu_hwinfo *hw = rvu->hw;
3080 struct rvu_pfvf *pfvf;
3082 if (!hw->cap.nix_rx_multicast ||
3083 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
3089 /* Get this PF/VF func's MCE index */
3090 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
3092 if (type == NIXLF_BCAST_ENTRY) {
3093 *mce_list = &pfvf->bcast_mce_list;
3094 *mce_idx = pfvf->bcast_mce_idx;
3095 } else if (type == NIXLF_ALLMULTI_ENTRY) {
3096 *mce_list = &pfvf->mcast_mce_list;
3097 *mce_idx = pfvf->mcast_mce_idx;
3098 } else if (type == NIXLF_PROMISC_ENTRY) {
3099 *mce_list = &pfvf->promisc_mce_list;
3100 *mce_idx = pfvf->promisc_mce_idx;
3107 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
3110 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
3111 struct npc_mcam *mcam = &rvu->hw->mcam;
3112 struct rvu_hwinfo *hw = rvu->hw;
3113 struct nix_mce_list *mce_list;
3116 /* skip multicast pkt replication for AF's VFs & SDP links */
3117 if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
3120 if (!hw->cap.nix_rx_multicast)
3123 pf = rvu_get_pf(pcifunc);
3124 if (!is_pf_cgxmapped(rvu, pf))
3127 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3131 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3135 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
3137 mcam_index = npc_get_nixlf_mcam_index(mcam,
3138 pcifunc & ~RVU_PFVF_FUNC_MASK,
3140 err = nix_update_mce_list(rvu, pcifunc, mce_list,
3141 mce_idx, mcam_index, add);
3145 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
3147 struct nix_mcast *mcast = &nix_hw->mcast;
3148 int err, pf, numvfs, idx;
3149 struct rvu_pfvf *pfvf;
3153 /* Skip PF0 (i.e AF) */
3154 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
3155 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3156 /* If PF is not enabled, nothing to do */
3157 if (!((cfg >> 20) & 0x01))
3159 /* Get numVFs attached to this PF */
3160 numvfs = (cfg >> 12) & 0xFF;
3162 pfvf = &rvu->pf[pf];
3164 /* This NIX0/1 block mapped to PF ? */
3165 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
3168 /* save start idx of broadcast mce list */
3169 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3170 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
3172 /* save start idx of multicast mce list */
3173 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3174 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
3176 /* save the start idx of promisc mce list */
3177 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3178 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
3180 for (idx = 0; idx < (numvfs + 1); idx++) {
3181 /* idx-0 is for PF, followed by VFs */
3182 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
			/* Add dummy entries now, so that we don't have to check
			 * for whether AQ_OP should be INIT/WRITE later on.
			 * Will be updated when a NIXLF is attached/detached to
			 * these PF/VFs respectively.
			 */
3189 err = nix_blk_setup_mce(rvu, nix_hw,
3190 pfvf->bcast_mce_idx + idx,
3196 /* add dummy entries to multicast mce list */
3197 err = nix_blk_setup_mce(rvu, nix_hw,
3198 pfvf->mcast_mce_idx + idx,
3204 /* add dummy entries to promisc mce list */
3205 err = nix_blk_setup_mce(rvu, nix_hw,
3206 pfvf->promisc_mce_idx + idx,
3216 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3218 struct nix_mcast *mcast = &nix_hw->mcast;
3219 struct rvu_hwinfo *hw = rvu->hw;
3222 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3223 size = (1ULL << size);
3225 /* Alloc memory for multicast/mirror replication entries */
3226 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3227 (256UL << MC_TBL_SIZE), size);
3231 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3232 (u64)mcast->mce_ctx->iova);
	/* Set max list length equal to max number of VFs per PF + PF itself */
3235 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3236 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3238 /* Alloc memory for multicast replication buffers */
3239 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
3240 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3241 (8UL << MC_BUF_CNT), size);
3245 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3246 (u64)mcast->mcast_buf->iova);
3248 /* Alloc pkind for NIX internal RX multicast/mirror replay */
3249 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3251 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3252 BIT_ULL(63) | (mcast->replay_pkind << 24) |
3253 BIT_ULL(20) | MC_BUF_CNT);
3255 mutex_init(&mcast->mce_lock);
3257 return nix_setup_mce_tables(rvu, nix_hw);
3260 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3262 struct nix_txvlan *vlan = &nix_hw->txvlan;
	/* Allocate resource bitmap for tx vtag def registers */
3266 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3267 err = rvu_alloc_bitmap(&vlan->rsrc);
3271 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3272 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3273 sizeof(u16), GFP_KERNEL);
3274 if (!vlan->entry2pfvf_map)
3277 mutex_init(&vlan->rsrc_lock);
3281 kfree(vlan->rsrc.bmap);
3285 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3287 struct nix_txsch *txsch;
	/* Get the scheduler queue count of each type and alloc a
	 * bitmap for each, for alloc/free/attach operations.
	 */
3294 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3295 txsch = &nix_hw->txsch[lvl];
3298 case NIX_TXSCH_LVL_SMQ:
3299 reg = NIX_AF_MDQ_CONST;
3301 case NIX_TXSCH_LVL_TL4:
3302 reg = NIX_AF_TL4_CONST;
3304 case NIX_TXSCH_LVL_TL3:
3305 reg = NIX_AF_TL3_CONST;
3307 case NIX_TXSCH_LVL_TL2:
3308 reg = NIX_AF_TL2_CONST;
3310 case NIX_TXSCH_LVL_TL1:
3311 reg = NIX_AF_TL1_CONST;
3314 cfg = rvu_read64(rvu, blkaddr, reg);
3315 txsch->schq.max = cfg & 0xFFFF;
3316 err = rvu_alloc_bitmap(&txsch->schq);
3320 /* Allocate memory for scheduler queues to
3321 * PF/VF pcifunc mapping info.
3323 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3324 sizeof(u32), GFP_KERNEL);
3325 if (!txsch->pfvf_map)
3327 for (schq = 0; schq < txsch->schq.max; schq++)
3328 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	/* Set up a default value of 8192 as the DWRR MTU */
3332 if (rvu->hw->cap.nix_common_dwrr_mtu ||
3333 rvu->hw->cap.nix_multiple_dwrr_mtu) {
3334 rvu_write64(rvu, blkaddr,
3335 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
3336 convert_bytes_to_dwrr_mtu(8192));
3337 rvu_write64(rvu, blkaddr,
3338 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
3339 convert_bytes_to_dwrr_mtu(8192));
3340 rvu_write64(rvu, blkaddr,
3341 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
3342 convert_bytes_to_dwrr_mtu(8192));
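/* Note on the defaults above: the same 8192-byte base MTU is programmed
 * for all three SMQ link types (RPM, LBK, SDP), so every DWRR weight is
 * interpreted against an identical quantum base until reconfigured; the
 * register encoding itself is left to convert_bytes_to_dwrr_mtu().
 */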
3348 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3349 int blkaddr, u32 cfg)
3353 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3354 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3357 if (fmt_idx >= nix_hw->mark_format.total)
3360 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3361 nix_hw->mark_format.cfg[fmt_idx] = cfg;
3362 nix_hw->mark_format.in_use++;
3366 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3370 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
3371 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
3372 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
3373 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
3374 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
3375 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
3376 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
3377 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
3378 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3383 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3384 nix_hw->mark_format.total = (u8)total;
3385 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3387 if (!nix_hw->mark_format.cfg)
3389 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3390 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3392 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3399 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3401 /* CN10K supports LBK FIFO size 72 KB */
3402 if (rvu->hw->lbk_bufsize == 0x12000)
3403 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
3405 *max_mtu = NIC_HW_MAX_FRS;
3408 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3410 int fifo_size = rvu_cgx_get_fifolen(rvu);
	/* RPM supports a FIFO len of 128 KB and RPM2 supports double the
	 * FIFO len to accommodate 8 LMACs.
	 */
3415 if (fifo_size == 0x20000 || fifo_size == 0x40000)
3416 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3418 *max_mtu = NIC_HW_MAX_FRS;
3421 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3422 struct nix_hw_info *rsp)
3424 u16 pcifunc = req->hdr.pcifunc;
3428 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3430 return NIX_AF_ERR_AF_LF_INVALID;
3432 if (is_afvf(pcifunc))
3433 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3435 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3437 rsp->min_mtu = NIC_HW_MIN_FRS;
3439 if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3440 !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3441 /* Return '1' on OTx2 */
3442 rsp->rpm_dwrr_mtu = 1;
3443 rsp->sdp_dwrr_mtu = 1;
3444 rsp->lbk_dwrr_mtu = 1;
3448 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3449 dwrr_mtu = rvu_read64(rvu, blkaddr,
3450 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3451 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3453 dwrr_mtu = rvu_read64(rvu, blkaddr,
3454 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3455 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3457 dwrr_mtu = rvu_read64(rvu, blkaddr,
3458 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3459 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3464 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3465 struct msg_rsp *rsp)
3467 u16 pcifunc = req->hdr.pcifunc;
3468 int i, nixlf, blkaddr, err;
3471 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3475 /* Get stats count supported by HW */
3476 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3478 /* Reset tx stats */
3479 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3480 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3482 /* Reset rx stats */
3483 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3484 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3489 /* Returns the ALG index to be set into NPC_RX_ACTION */
3490 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
	/* Scan over existing algo entries to find a match */
3495 for (i = 0; i < nix_hw->flowkey.in_use; i++)
3496 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3502 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3504 int idx, nr_field, key_off, field_marker, keyoff_marker;
3505 int max_key_off, max_bit_pos, group_member;
3506 struct nix_rx_flowkey_alg *field;
3507 struct nix_rx_flowkey_alg tmp;
3508 u32 key_type, valid_key;
3510 int l4_key_offset = 0;
3515 #define FIELDS_PER_ALG 5
3516 #define MAX_KEY_OFF 40
3517 /* Clear all fields */
	memset(alg, 0, sizeof(u64) * FIELDS_PER_ALG);
	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into the above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient to support RSS.
	 *
	 * If a different definition or combination is needed then the NPC
	 * MCAM has to be programmed to filter such pkts and its action
	 * should point to this definition to calculate the flowtag or hash.
	 *
	 * The `for` loop goes over _all_ protocol fields and the following
	 * variables depict the state machine's forward progress logic.
3531 * keyoff_marker - Enabled when hash byte length needs to be accounted
3532 * in field->key_offset update.
3533 * field_marker - Enabled when a new field needs to be selected.
3534 * group_member - Enabled when protocol is part of a group.
3537 /* Last 4 bits (31:28) are reserved to specify SRC, DST
3538 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
3539 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
3540 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
3542 l3_l4_src_dst = flow_cfg;
3543 /* Reset these 4 bits, so that these won't be part of key */
3544 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
3546 keyoff_marker = 0; max_key_off = 0; group_member = 0;
3547 nr_field = 0; key_off = 0; field_marker = 1;
3548 field = &tmp; max_bit_pos = fls(flow_cfg);
3550 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
3551 key_off < MAX_KEY_OFF; idx++) {
3552 key_type = BIT(idx);
3553 valid_key = flow_cfg & key_type;
3554 /* Found a field marker, reset the field values */
3556 memset(&tmp, 0, sizeof(tmp));
3558 field_marker = true;
3559 keyoff_marker = true;
3561 case NIX_FLOW_KEY_TYPE_PORT:
3562 field->sel_chan = true;
3563 /* This should be set to 1, when SEL_CHAN is set */
3566 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
3567 field->lid = NPC_LID_LC;
3568 field->hdr_offset = 9; /* offset */
3569 field->bytesm1 = 0; /* 1 byte */
3570 field->ltype_match = NPC_LT_LC_IP;
3571 field->ltype_mask = 0xF;
3573 case NIX_FLOW_KEY_TYPE_IPV4:
3574 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
3575 field->lid = NPC_LID_LC;
3576 field->ltype_match = NPC_LT_LC_IP;
3577 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
3578 field->lid = NPC_LID_LG;
3579 field->ltype_match = NPC_LT_LG_TU_IP;
3581 field->hdr_offset = 12; /* SIP offset */
3582 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
3585 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3586 field->bytesm1 = 3; /* SIP, 4 bytes */
3588 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3589 /* Both SIP + DIP */
3590 if (field->bytesm1 == 3) {
3591 field->bytesm1 = 7; /* SIP + DIP, 8B */
3594 field->hdr_offset = 16; /* DIP off */
3595 field->bytesm1 = 3; /* DIP, 4 bytes */
3599 field->ltype_mask = 0xF; /* Match only IPv4 */
3600 keyoff_marker = false;
3602 case NIX_FLOW_KEY_TYPE_IPV6:
3603 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
3604 field->lid = NPC_LID_LC;
3605 field->ltype_match = NPC_LT_LC_IP6;
3606 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
3607 field->lid = NPC_LID_LG;
3608 field->ltype_match = NPC_LT_LG_TU_IP6;
3610 field->hdr_offset = 8; /* SIP offset */
3611 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
3614 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3615 field->bytesm1 = 15; /* SIP, 16 bytes */
3617 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3618 /* Both SIP + DIP */
3619 if (field->bytesm1 == 15) {
3620 /* SIP + DIP, 32 bytes */
3621 field->bytesm1 = 31;
3624 field->hdr_offset = 24; /* DIP off */
3625 field->bytesm1 = 15; /* DIP,16 bytes */
3628 field->ltype_mask = 0xF; /* Match only IPv6 */
3630 case NIX_FLOW_KEY_TYPE_TCP:
3631 case NIX_FLOW_KEY_TYPE_UDP:
3632 case NIX_FLOW_KEY_TYPE_SCTP:
3633 case NIX_FLOW_KEY_TYPE_INNR_TCP:
3634 case NIX_FLOW_KEY_TYPE_INNR_UDP:
3635 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
3636 field->lid = NPC_LID_LD;
3637 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
3638 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
3639 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
3640 field->lid = NPC_LID_LH;
3641 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
3643 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
3644 field->bytesm1 = 1; /* SRC, 2 bytes */
3646 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
3647 /* Both SRC + DST */
3648 if (field->bytesm1 == 1) {
3649 /* SRC + DST, 4 bytes */
3653 field->hdr_offset = 2; /* DST off */
3654 field->bytesm1 = 1; /* DST, 2 bytes */
			/* Enum values for NPC_LT_LD_* and NPC_LT_LH_TU_* are
			 * the same (see the BUILD_BUG_ONs below), so no need
			 * to change ltype_match, just change the lid for
			 * inner protocols.
			 */
3662 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
3663 (int)NPC_LT_LH_TU_TCP);
3664 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
3665 (int)NPC_LT_LH_TU_UDP);
3666 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
3667 (int)NPC_LT_LH_TU_SCTP);
3669 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
3670 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
3672 field->ltype_match |= NPC_LT_LD_TCP;
3673 group_member = true;
3674 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
3675 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
3677 field->ltype_match |= NPC_LT_LD_UDP;
3678 group_member = true;
3679 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3680 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
3682 field->ltype_match |= NPC_LT_LD_SCTP;
3683 group_member = true;
3685 field->ltype_mask = ~field->ltype_match;
3686 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3687 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
				/* Handle the case where any of the group items
				 * is enabled in the group but not the final one.
				 */
3693 group_member = false;
3696 field_marker = false;
3697 keyoff_marker = false;
			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
			 * remember the TCP key offset within the 40-byte hash
			 * key.
			 */
3703 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
3704 l4_key_offset = key_off;
3706 case NIX_FLOW_KEY_TYPE_NVGRE:
3707 field->lid = NPC_LID_LD;
3708 field->hdr_offset = 4; /* VSID offset */
3710 field->ltype_match = NPC_LT_LD_NVGRE;
3711 field->ltype_mask = 0xF;
3713 case NIX_FLOW_KEY_TYPE_VXLAN:
3714 case NIX_FLOW_KEY_TYPE_GENEVE:
3715 field->lid = NPC_LID_LE;
3717 field->hdr_offset = 4;
3718 field->ltype_mask = 0xF;
3719 field_marker = false;
3720 keyoff_marker = false;
3722 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
3723 field->ltype_match |= NPC_LT_LE_VXLAN;
3724 group_member = true;
3727 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
3728 field->ltype_match |= NPC_LT_LE_GENEVE;
3729 group_member = true;
3732 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
3734 field->ltype_mask = ~field->ltype_match;
3735 field_marker = true;
3736 keyoff_marker = true;
3738 group_member = false;
3742 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
3743 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
3744 field->lid = NPC_LID_LA;
3745 field->ltype_match = NPC_LT_LA_ETHER;
3746 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
3747 field->lid = NPC_LID_LF;
3748 field->ltype_match = NPC_LT_LF_TU_ETHER;
3750 field->hdr_offset = 0;
3751 field->bytesm1 = 5; /* DMAC 6 Byte */
3752 field->ltype_mask = 0xF;
3754 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
3755 field->lid = NPC_LID_LC;
3756 field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 byte ext hdr */
3758 field->ltype_match = NPC_LT_LC_IP6_EXT;
3759 field->ltype_mask = 0xF;
3761 case NIX_FLOW_KEY_TYPE_GTPU:
3762 field->lid = NPC_LID_LE;
3763 field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TID */
3765 field->ltype_match = NPC_LT_LE_GTPU;
3766 field->ltype_mask = 0xF;
3768 case NIX_FLOW_KEY_TYPE_VLAN:
3769 field->lid = NPC_LID_LB;
3770 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
3771 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3772 field->ltype_match = NPC_LT_LB_CTAG;
3773 field->ltype_mask = 0xF;
3774 field->fn_mask = 1; /* Mask out the first nibble */
3776 case NIX_FLOW_KEY_TYPE_AH:
3777 case NIX_FLOW_KEY_TYPE_ESP:
3778 field->hdr_offset = 0;
3779 field->bytesm1 = 7; /* SPI + sequence number */
3780 field->ltype_mask = 0xF;
3781 field->lid = NPC_LID_LE;
3782 field->ltype_match = NPC_LT_LE_ESP;
3783 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3784 field->lid = NPC_LID_LD;
3785 field->ltype_match = NPC_LT_LD_AH;
3786 field->hdr_offset = 4;
3787 keyoff_marker = false;
3793 /* Found a valid flow key type */
3795 /* Use the key offset of TCP/UDP/SCTP fields
3796 * for ESP/AH fields.
3798 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3799 key_type == NIX_FLOW_KEY_TYPE_AH)
3800 key_off = l4_key_offset;
3801 field->key_offset = key_off;
3802 memcpy(&alg[nr_field], field, sizeof(*field));
3803 max_key_off = max(max_key_off, field->bytesm1 + 1);
3805 /* Found a field marker, get the next field */
3810 /* Found a keyoff marker, update the new key_off */
3811 if (keyoff_marker) {
3812 key_off += max_key_off;
3816 /* Processed all the flow key types */
3817 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3820 return NIX_AF_ERR_RSS_NOSPC_FIELD;
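/* Layout sketch for the default IPV4|IPV6|TCP algorithm reserved in
 * nix_rx_flowkey_alg_cfg() below: IPv4 SIP+DIP (8 bytes) and IPv6
 * SIP+DIP (32 bytes) overlay at the same key offset (keyoff_marker is
 * left false for IPv4, since a packet is either v4 or v6), with the
 * TCP Sport+Dport bytes following at the next offset. Illustrative
 * only; the exact offsets come from the state machine above.
 */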
3823 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3825 u64 field[FIELDS_PER_ALG];
3829 hw = get_nix_hw(rvu->hw, blkaddr);
3831 return NIX_AF_ERR_INVALID_NIXBLK;
	/* No room to add a new flow hash algorithm */
3834 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3835 return NIX_AF_ERR_RSS_NOSPC_ALGO;
3837 /* Generate algo fields for the given flow_cfg */
3838 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3842 /* Update ALGX_FIELDX register with generated fields */
3843 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3844 rvu_write64(rvu, blkaddr,
3845 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
	/* Store the flow_cfg for further lookup */
3849 rc = hw->flowkey.in_use;
3850 hw->flowkey.flowkey[rc] = flow_cfg;
3851 hw->flowkey.in_use++;
3856 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3857 struct nix_rss_flowkey_cfg *req,
3858 struct nix_rss_flowkey_cfg_rsp *rsp)
3860 u16 pcifunc = req->hdr.pcifunc;
3861 int alg_idx, nixlf, blkaddr;
3862 struct nix_hw *nix_hw;
3865 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3869 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3871 return NIX_AF_ERR_INVALID_NIXBLK;
3873 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get an algo index from the existing list, reserve a new one */
3876 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3881 rsp->alg_idx = alg_idx;
3882 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3883 alg_idx, req->mcam_index);
3887 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3889 u32 flowkey_cfg, minkey_cfg;
3892 /* Disable all flow key algx fieldx */
3893 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3894 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3895 rvu_write64(rvu, blkaddr,
3896 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3900 /* IPv4/IPv6 SIP/DIPs */
3901 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3902 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3906 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3907 minkey_cfg = flowkey_cfg;
3908 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3909 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3913 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3914 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3915 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3919 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3920 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3921 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3925 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3926 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3927 NIX_FLOW_KEY_TYPE_UDP;
3928 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3932 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3933 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3934 NIX_FLOW_KEY_TYPE_SCTP;
3935 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3939 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3940 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3941 NIX_FLOW_KEY_TYPE_SCTP;
3942 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3946 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3947 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3948 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3949 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3956 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3957 struct nix_set_mac_addr *req,
3958 struct msg_rsp *rsp)
3960 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3961 u16 pcifunc = req->hdr.pcifunc;
3962 int blkaddr, nixlf, err;
3963 struct rvu_pfvf *pfvf;
3965 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3969 pfvf = rvu_get_pfvf(rvu, pcifunc);
3971 /* untrusted VF can't overwrite admin(PF) changes */
3972 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3973 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3975 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3979 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3981 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3982 pfvf->rx_chan_base, req->mac_addr);
3984 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3985 ether_addr_copy(pfvf->default_mac, req->mac_addr);
3987 rvu_switch_update_rules(rvu, pcifunc);
3992 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3993 struct msg_req *req,
3994 struct nix_get_mac_addr_rsp *rsp)
3996 u16 pcifunc = req->hdr.pcifunc;
3997 struct rvu_pfvf *pfvf;
3999 if (!is_nixlf_attached(rvu, pcifunc))
4000 return NIX_AF_ERR_AF_LF_INVALID;
4002 pfvf = rvu_get_pfvf(rvu, pcifunc);
4004 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
4009 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
4010 struct msg_rsp *rsp)
4012 bool allmulti, promisc, nix_rx_multicast;
4013 u16 pcifunc = req->hdr.pcifunc;
4014 struct rvu_pfvf *pfvf;
4017 pfvf = rvu_get_pfvf(rvu, pcifunc);
4018 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
4019 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
4020 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
4022 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
4024 if (is_vf(pcifunc) && !nix_rx_multicast &&
4025 (promisc || allmulti)) {
4026 dev_warn_ratelimited(rvu->dev,
4027 "VF promisc/multicast not supported\n");
4031 /* untrusted VF can't configure promisc/allmulti */
4032 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4033 (promisc || allmulti))
4036 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4040 if (nix_rx_multicast) {
4041 /* add/del this PF_FUNC to/from mcast pkt replication list */
4042 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
4046 "Failed to update pcifunc 0x%x to multicast list\n",
4051 /* add/del this PF_FUNC to/from promisc pkt replication list */
4052 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
4056 "Failed to update pcifunc 0x%x to promisc list\n",
4062 /* install/uninstall allmulti entry */
4064 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
4065 pfvf->rx_chan_base);
4067 if (!nix_rx_multicast)
4068 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
4071 /* install/uninstall promisc entry */
4073 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
4077 if (!nix_rx_multicast)
4078 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
4083 static void nix_find_link_frs(struct rvu *rvu,
4084 struct nix_frs_cfg *req, u16 pcifunc)
4086 int pf = rvu_get_pf(pcifunc);
4087 struct rvu_pfvf *pfvf;
4092 /* Update with requester's min/max lengths */
4093 pfvf = rvu_get_pfvf(rvu, pcifunc);
4094 pfvf->maxlen = req->maxlen;
4095 if (req->update_minlen)
4096 pfvf->minlen = req->minlen;
4098 maxlen = req->maxlen;
4099 minlen = req->update_minlen ? req->minlen : 0;
4101 /* Get this PF's numVFs and starting hwvf */
4102 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
4104 /* For each VF, compare requested max/minlen */
4105 for (vf = 0; vf < numvfs; vf++) {
4106 pfvf = &rvu->hwvf[hwvf + vf];
4107 if (pfvf->maxlen > maxlen)
4108 maxlen = pfvf->maxlen;
4109 if (req->update_minlen &&
4110 pfvf->minlen && pfvf->minlen < minlen)
4111 minlen = pfvf->minlen;
4114 /* Compare requested max/minlen with PF's max/minlen */
4115 pfvf = &rvu->pf[pf];
4116 if (pfvf->maxlen > maxlen)
4117 maxlen = pfvf->maxlen;
4118 if (req->update_minlen &&
4119 pfvf->minlen && pfvf->minlen < minlen)
4120 minlen = pfvf->minlen;
	/* Update the request with the max/min of the PF and its VFs */
4123 req->maxlen = maxlen;
4124 if (req->update_minlen)
4125 req->minlen = minlen;
4129 nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
4130 u16 pcifunc, u64 tx_credits)
4132 struct rvu_hwinfo *hw = rvu->hw;
4133 int pf = rvu_get_pf(pcifunc);
4134 u8 cgx_id = 0, lmac_id = 0;
4135 unsigned long poll_tmo;
4136 bool restore_tx_en = 0;
4137 struct nix_hw *nix_hw;
4138 u64 cfg, sw_xoff = 0;
4143 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4145 return NIX_AF_ERR_INVALID_NIXBLK;
4147 if (tx_credits == nix_hw->tx_credits[link])
	/* Enable cgx tx if disabled, so that credits come back */
4151 if (is_pf_cgxmapped(rvu, pf)) {
4152 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
4153 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
4157 mutex_lock(&rvu->rsrc_lock);
4158 /* Disable new traffic to link */
4159 if (hw->cap.nix_shaping) {
4160 schq = nix_get_tx_link(rvu, pcifunc);
4161 sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
4162 rvu_write64(rvu, blkaddr,
4163 NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
4166 rc = NIX_AF_ERR_LINK_CREDITS;
4167 poll_tmo = jiffies + usecs_to_jiffies(200000);
4168 /* Wait for credits to return */
4170 if (time_after(jiffies, poll_tmo))
4172 usleep_range(100, 200);
4174 cfg = rvu_read64(rvu, blkaddr,
4175 NIX_AF_TX_LINKX_NORM_CREDIT(link));
4176 credits = (cfg >> 12) & 0xFFFFFULL;
4177 } while (credits != nix_hw->tx_credits[link]);
4179 cfg &= ~(0xFFFFFULL << 12);
4180 cfg |= (tx_credits << 12);
4181 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4184 nix_hw->tx_credits[link] = tx_credits;
4187 /* Enable traffic back */
4188 if (hw->cap.nix_shaping && !sw_xoff)
4189 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
4191 /* Restore state of cgx tx */
4193 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
4195 mutex_unlock(&rvu->rsrc_lock);
4199 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
4200 struct msg_rsp *rsp)
4202 struct rvu_hwinfo *hw = rvu->hw;
4203 u16 pcifunc = req->hdr.pcifunc;
4204 int pf = rvu_get_pf(pcifunc);
4205 int blkaddr, schq, link = -1;
4206 struct nix_txsch *txsch;
4207 u64 cfg, lmac_fifo_len;
4208 struct nix_hw *nix_hw;
4209 struct rvu_pfvf *pfvf;
4210 u8 cgx = 0, lmac = 0;
4213 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4215 return NIX_AF_ERR_AF_LF_INVALID;
4217 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4219 return NIX_AF_ERR_INVALID_NIXBLK;
4221 if (is_afvf(pcifunc))
4222 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
4224 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
4226 if (!req->sdp_link && req->maxlen > max_mtu)
4227 return NIX_AF_ERR_FRS_INVALID;
4229 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
4230 return NIX_AF_ERR_FRS_INVALID;
	/* Check if the requester wants to update SMQs */
4233 if (!req->update_smq)
	/* Update min/maxlen in each of the SMQs attached to this PF/VF */
4237 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
4238 mutex_lock(&rvu->rsrc_lock);
4239 for (schq = 0; schq < txsch->schq.max; schq++) {
4240 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
4242 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
4243 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
4244 if (req->update_minlen)
4245 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
4246 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
4248 mutex_unlock(&rvu->rsrc_lock);
4251 /* Check if config is for SDP link */
4252 if (req->sdp_link) {
4254 return NIX_AF_ERR_RX_LINK_INVALID;
4255 link = hw->cgx_links + hw->lbk_links;
4259 /* Check if the request is from CGX mapped RVU PF */
4260 if (is_pf_cgxmapped(rvu, pf)) {
4261 /* Get CGX and LMAC to which this PF is mapped and find link */
4262 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
4263 link = (cgx * hw->lmac_per_cgx) + lmac;
4264 } else if (pf == 0) {
4265 /* For VFs of PF0 ingress is LBK port, so config LBK link */
4266 pfvf = rvu_get_pfvf(rvu, pcifunc);
4267 link = hw->cgx_links + pfvf->lbkid;
4271 return NIX_AF_ERR_RX_LINK_INVALID;
4275 nix_find_link_frs(rvu, req, pcifunc);
4277 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4278 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4279 if (req->update_minlen)
4280 cfg = (cfg & ~0xFFFFULL) | req->minlen;
4281 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4283 if (req->sdp_link || pf == 0)
4286 /* Update transmit credits for CGX links */
4287 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
4288 if (!lmac_fifo_len) {
4290 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4291 __func__, cgx, lmac);
4294 return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
4295 (lmac_fifo_len - req->maxlen) / 16);
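/* Credit arithmetic used above: NORM_CREDIT appears to count 16-byte
 * units (hence the divide by 16), so a link gets
 * (FIFO length - max frame size) / 16 credits, always leaving room for
 * one max-sized frame in flight. E.g. a 64 KB FIFO with a 16 KB maxlen
 * yields (65536 - 16384) / 16 = 3072 credits (illustrative numbers).
 */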
4298 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4299 struct msg_rsp *rsp)
4301 int nixlf, blkaddr, err;
4304 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4308 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4309 /* Set the interface configuration */
4310 if (req->len_verify & BIT(0))
4313 cfg &= ~BIT_ULL(41);
4315 if (req->len_verify & BIT(1))
4318 cfg &= ~BIT_ULL(40);
4320 if (req->len_verify & NIX_RX_DROP_RE)
4323 cfg &= ~BIT_ULL(32);
4325 if (req->csum_verify & BIT(0))
4328 cfg &= ~BIT_ULL(37);
4330 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4335 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4337 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
4340 static void nix_link_config(struct rvu *rvu, int blkaddr,
4341 struct nix_hw *nix_hw)
4343 struct rvu_hwinfo *hw = rvu->hw;
4344 int cgx, lmac_cnt, slink, link;
4345 u16 lbk_max_frs, lmac_max_frs;
4346 unsigned long lmac_bmap;
4347 u64 tx_credits, cfg;
4351 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4352 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4354 /* Set default min/max packet lengths allowed on NIX Rx links.
	 * With the HW reset minlen value of 60 bytes, HW would treat ARP
	 * pkts as undersize and report them to SW as error pkts; hence
	 * set it to 40 bytes.
	 */
4360 for (link = 0; link < hw->cgx_links; link++) {
4361 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4362 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
4366 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4367 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4369 if (hw->sdp_links) {
4370 link = hw->cgx_links + hw->lbk_links;
4371 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4372 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
4375 /* Set credits for Tx links assuming max packet length allowed.
4376 * This will be reconfigured based on MTU set for PF/VF.
4378 for (cgx = 0; cgx < hw->cgx; cgx++) {
4379 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4380 /* Skip when cgx is not available or lmac cnt is zero */
4383 slink = cgx * hw->lmac_per_cgx;
4385 /* Get LMAC id's from bitmap */
4386 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4387 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4388 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4389 if (!lmac_fifo_len) {
4391 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4392 __func__, cgx, iter);
4395 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
4396 /* Enable credits and set credit pkt count to max allowed */
4397 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
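			/* Field decode for the value above, from the
			 * shifts used here and in
			 * nix_config_link_credits(): credits in bits
			 * [31:12], credit pkt count in bits [10:2]
			 * (0x1FF = max) and bit 1 as the credit enable.
			 */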
4399 link = iter + slink;
4400 nix_hw->tx_credits[link] = tx_credits;
4401 rvu_write64(rvu, blkaddr,
4402 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4406 /* Set Tx credits for LBK link */
4407 slink = hw->cgx_links;
4408 for (link = slink; link < (slink + hw->lbk_links); link++) {
4409 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4410 nix_hw->tx_credits[link] = tx_credits;
4411 /* Enable credits and set credit pkt count to max allowed */
4412 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4413 rvu_write64(rvu, blkaddr,
4414 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
		/* Skip when cgx port is not available */
		if (!rvu_cgx_pdata(idx, rvu) ||
		    (status & (BIT_ULL(16 + idx))))
			continue;
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
		err = -EBUSY;
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
		err = -EBUSY;
	}

	/* Clear 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);

	return err;
}
static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	int err;
	u64 cfg;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(34);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(34);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of SQB aka SQEs */
	cfg |= 0x04ULL;
#endif
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}
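/* Sizing note for the rvu_aq_alloc() call above (illustrative): each AQ
 * result slot must hold the result structure, an optional context copy at
 * RES + 128 and an optional write mask at RES + 256. Assuming, for
 * example's sake, a 128 byte struct nix_aq_res_s, the per-entry size is
 * ALIGN(128, 128) + 256 = 384 bytes, and Q_COUNT(AQ_SIZE) such entries
 * are allocated.
 */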
static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 hw_const;

	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* On OcteonTx2 the DWRR quantum is directly configured into each of
	 * the transmit scheduler queues, and PF/VF drivers were free to
	 * configure any value up to 2^24.
	 * On CN10K, HW is modified: the quantum configuration at scheduler
	 * queues is in terms of weight, and SW needs to set up a base DWRR MTU
	 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
	 * 'DWRR MTU * weight' to get the quantum.
	 *
	 * Check if HW uses a common MTU for all DWRR quantum configs.
	 * On OcteonTx2 this register field is '0'.
	 */
	if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
		hw->cap.nix_common_dwrr_mtu = true;

	if (hw_const & BIT_ULL(61))
		hw->cap.nix_multiple_dwrr_mtu = true;
}
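/* Worked example (illustrative, assumed values): on CN10K, with a base
 * DWRR MTU of 1500 bytes programmed via NIX_AF_DWRR_RPM_MTU and a
 * scheduler queue weight of 16, HW computes the quantum as
 * 1500 * 16 = 24000 bytes per DWRR round. On OcteonTx2 the 24000 would
 * instead be written directly into the scheduler queue as the quantum,
 * anywhere up to 2^24.
 */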
static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
	const struct npc_lt_def_cfg *ltdefs;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr = nix_hw->blkaddr;
	struct rvu_block *block;
	int err;
	u64 cfg;

	block = &hw->block[blkaddr];

	if (is_rvu_96xx_B0(rvu)) {
		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
		 * internal state when conditional clocks are turned off.
		 * Hence enable them.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);

		/* Set chan/link to backpressure TL3 instead of TL2 */
		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);

		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
		 * This sticky mode is known to cause SQ stalls when multiple
		 * SQs are mapped to the same SMQ and transmitting pkts at a time.
		 */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
		cfg &= ~BIT_ULL(15);
		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
	}

	ltdefs = rvu->kpu.lt_def;
	/* Calibrate X2P bus to check if CGX/LBK links are fine */
	err = nix_calibrate_x2p(rvu, blkaddr);
	if (err)
		return err;

	/* Setup capabilities of the NIX block */
	rvu_nix_setup_capabilities(rvu, blkaddr);

	/* Initialize admin queue */
	err = nix_aq_init(rvu, block);
	if (err)
		return err;

	/* Restore CINT timer delay to HW reset values */
	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);

	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
	cfg |= 1ULL;
	if (!is_rvu_otx2(rvu))
		cfg |= NIX_PTP_1STEP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);

	if (!is_rvu_otx2(rvu))
		rvu_nix_block_cn10k_init(rvu, nix_hw);

	if (is_block_implemented(hw, blkaddr)) {
		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_txvlan(rvu, nix_hw);
		if (err)
			return err;

		/* Configure segmentation offload formats */
		nix_setup_lso(rvu, nix_hw, blkaddr);

		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
		 * This helps HW protocol checker to identify headers
		 * and validate length and checksums.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
			    ltdefs->rx_ol2.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
			    ltdefs->rx_oip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
			    ltdefs->rx_iip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
			    ltdefs->rx_oip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
			    ltdefs->rx_iip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
			    ltdefs->rx_otcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
			    ltdefs->rx_itcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
			    ltdefs->rx_oudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
			    ltdefs->rx_iudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
			    ltdefs->rx_osctp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
			    ltdefs->rx_isctp.ltype_mask);

		if (!is_rvu_otx2(rvu)) {
			/* Enable APAD calculation for other protocols
			 * matching APAD0 and APAD1 lt def registers.
			 */
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
				    (ltdefs->rx_apad0.valid << 11) |
				    (ltdefs->rx_apad0.lid << 8) |
				    (ltdefs->rx_apad0.ltype_match << 4) |
				    ltdefs->rx_apad0.ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
				    (ltdefs->rx_apad1.valid << 11) |
				    (ltdefs->rx_apad1.lid << 8) |
				    (ltdefs->rx_apad1.ltype_match << 4) |
				    ltdefs->rx_apad1.ltype_mask);

			/* Receive ethertype definition register defines layer
			 * information in NPC_RESULT_S to identify the Ethertype
			 * location in the L2 header. Used for Ethertype overwriting
			 * in inline IPsec flow.
			 */
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
				    (ltdefs->rx_et[0].offset << 12) |
				    (ltdefs->rx_et[0].valid << 11) |
				    (ltdefs->rx_et[0].lid << 8) |
				    (ltdefs->rx_et[0].ltype_match << 4) |
				    ltdefs->rx_et[0].ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
				    (ltdefs->rx_et[1].offset << 12) |
				    (ltdefs->rx_et[1].valid << 11) |
				    (ltdefs->rx_et[1].lid << 8) |
				    (ltdefs->rx_et[1].ltype_match << 4) |
				    ltdefs->rx_et[1].ltype_mask);
		}

		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
		if (err)
			return err;

		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
					     sizeof(u64), GFP_KERNEL);
		if (!nix_hw->tx_credits)
			return -ENOMEM;

		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
		nix_link_config(rvu, blkaddr, nix_hw);

		/* Enable Channel backpressure */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
	}

	return 0;
}
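/* Encoding note for the NIX_AF_RX_DEF_* writes above (illustrative): each
 * layer definition is packed as (lid << 8) | (ltype_match << 4) |
 * ltype_mask. With a full 0xF mask the protocol checker accepts only an
 * exact LTYPE match for that layer; a zero mask with a zero match value
 * would accept any layer type. The actual lid/ltype values come from the
 * KPU profile's lt_def table, not from constants in this file.
 */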
int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_hw *nix_hw;
	int blkaddr = 0, err;
	int i = 0;

	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
	if (!hw->nix)
		return -ENOMEM;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		nix_hw = &hw->nix[i];
		nix_hw->rvu = rvu;
		nix_hw->blkaddr = blkaddr;
		err = rvu_nix_block_init(rvu, nix_hw);
		if (err)
			return err;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
		i++;
	}

	return 0;
}
static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
				  struct rvu_block *block)
{
	struct nix_txsch *txsch;
	struct nix_mcast *mcast;
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int lvl;

	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		kfree(nix_hw->tx_credits);

		nix_ipolicer_freemem(rvu, nix_hw);

		vlan = &nix_hw->txvlan;
		kfree(vlan->rsrc.bmap);
		mutex_destroy(&vlan->rsrc_lock);

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
	}
}
void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &hw->block[blkaddr];
		rvu_nix_block_freemem(rvu, blkaddr, block);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_enable_flows(rvu, pcifunc);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	set_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_switch_update_rules(rvu, pcifunc);

	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
#define RX_SA_BASE	GENMASK_ULL(52, 7)

void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	u64 sa_base;
	void *cgxd;
	int err;

	ctx_req.hdr.pcifunc = pcifunc;

	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
	nix_interface_deinit(rvu, pcifunc, nixlf);
	nix_rx_sync(rvu, blkaddr);
	nix_txschq_free(rvu, pcifunc);

	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_cgx_start_stop_io(rvu, pcifunc, false);

	if (pfvf->sq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "SQ ctx disable failed\n");
	}

	if (pfvf->rq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "RQ ctx disable failed\n");
	}

	if (pfvf->cq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "CQ ctx disable failed\n");
	}

	/* Reset HW config done for Switch headers */
	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);

	/* Disable CGX and NPC config done for PTP */
	if (pfvf->hw_rx_tstamp_en) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		cgxd = rvu_cgx_pdata(cgx_id, rvu);
		mac_ops = get_mac_ops(cgxd);
		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
		/* Undo NPC config done for PTP */
		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
			dev_err(rvu->dev, "NPC config for PTP failed\n");
		pfvf->hw_rx_tstamp_en = false;
	}

	/* Reset priority flow control config */
	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);

	/* Reset 802.3x flow control config */
	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);

	nix_ctx_free(rvu, pfvf);

	nix_free_all_bandprof(rvu, pcifunc);

	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
	if (FIELD_GET(RX_SA_BASE, sa_base)) {
		err = rvu_cpt_ctx_flush(rvu, pcifunc);
		if (err)
			dev_err(rvu->dev,
				"CPT ctx flush failed with error: %d\n", err);
	}
}
#define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)

static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, pf;
	int nixlf;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));

	if (enable)
		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	return 0;
}
int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
					struct nix_lso_format_cfg *req,
					struct nix_lso_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, idx, f;
	u64 reg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* Find existing matching LSO format, if any */
	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
			reg = rvu_read64(rvu, blkaddr,
					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
			if (req->fields[f] != (reg & req->field_mask))
				break;
		}

		if (f == NIX_LSO_FIELD_MAX)
			break;
	}

	if (idx < nix_hw->lso.in_use) {
		/* Match found */
		rsp->lso_format_idx = idx;
		return 0;
	}

	if (nix_hw->lso.in_use == nix_hw->lso.total)
		return NIX_AF_ERR_LSO_CFG_FAIL;

	rsp->lso_format_idx = nix_hw->lso.in_use++;

	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
			    req->fields[f]);

	return 0;
}
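/* Usage note (illustrative): the matching loop above lets PF/VF drivers
 * share LSO format slots. If a second driver requests a format whose
 * fields are identical (under field_mask) to one already programmed, it
 * is handed the existing index instead of consuming another of the
 * nix_hw->lso.total hardware format entries.
 */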
#define IPSEC_GEN_CFG_EGRP	GENMASK_ULL(50, 48)
#define IPSEC_GEN_CFG_OPCODE	GENMASK_ULL(47, 32)
#define IPSEC_GEN_CFG_PARAM1	GENMASK_ULL(31, 16)
#define IPSEC_GEN_CFG_PARAM2	GENMASK_ULL(15, 0)

#define CPT_INST_QSEL_BLOCK	GENMASK_ULL(28, 24)
#define CPT_INST_QSEL_PF_FUNC	GENMASK_ULL(23, 8)
#define CPT_INST_QSEL_SLOT	GENMASK_ULL(7, 0)

#define CPT_INST_CREDIT_TH	GENMASK_ULL(53, 32)
#define CPT_INST_CREDIT_BPID	GENMASK_ULL(30, 22)
#define CPT_INST_CREDIT_CNT	GENMASK_ULL(21, 0)
static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
				 int blkaddr)
{
	u8 cpt_idx, cpt_blkaddr;
	u64 val;

	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	if (req->enable) {
		val = 0;
		/* Enable context prefetching */
		if (!is_rvu_otx2(rvu))
			val |= BIT_ULL(51);

		/* Set OPCODE and EGRP */
		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);

		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);

		/* Set CPT queue for inline IPSec */
		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
				  req->inst_qsel.cpt_pf_func);

		if (!is_rvu_otx2(rvu)) {
			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
						       BLKADDR_CPT1;
			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
		}

		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
			    val);

		/* Set CPT credit */
		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
		if ((val & 0x3FFFFF) != 0x3FFFFF)
			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
				    0x3FFFFF - val);

		val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
		val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
		val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
	} else {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
			    0x0);
		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
		if ((val & 0x3FFFFF) != 0x3FFFFF)
			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
				    0x3FFFFF - val);
	}
}
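/* Credit restore note (interpretation, not from this file): the low 22
 * bits of NIX_AF_RX_CPTX_CREDIT form a running credit counter whose idle
 * value is 0x3FFFFF. Writing (0x3FFFFF - val) when the counter is not at
 * its idle value appears intended to top it back up before applying (or
 * after clearing) the cpt_credit/bpid/credit_th configuration; the exact
 * adjustment semantics of this register are defined by the HW manual.
 */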
int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
					  struct nix_inline_ipsec_cfg *req,
					  struct msg_rsp *rsp)
{
	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
		return 0;

	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);

	return 0;
}

int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
					       struct msg_req *req,
					       struct nix_inline_ipsec_cfg *rsp)
{
	u64 val;

	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
		return 0;

	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);

	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
	rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
	rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
	rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);

	return 0;
}
int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
					     struct nix_inline_ipsec_lf_cfg *req,
					     struct msg_rsp *rsp)
{
	int lf, blkaddr, err;
	u64 val;

	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
		return 0;

	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
	if (err)
		return err;

	if (req->enable) {
		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
		val = (u64)req->ipsec_cfg0.tt << 44 |
		      (u64)req->ipsec_cfg0.tag_const << 20 |
		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
		      req->ipsec_cfg0.lenm1_max;

		if (blkaddr == BLKADDR_NIX1)
			val |= BIT_ULL(46);

		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);

		/* Set SA_IDX_W and SA_IDX_MAX */
		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
		      req->ipsec_cfg1.sa_idx_max;
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);

		/* Set SA base address */
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
			    req->sa_base_addr);
	} else {
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
			    0x0);
	}

	return 0;
}
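/* Bit-packing example for CFG0 above (illustrative, assumed field
 * values): with tt = 1, tag_const = 0xABC, sa_pow2_size = 4 and
 * lenm1_max = 16383, the register value is
 *
 *	(1ULL << 44) | (0xABCULL << 20) | (4ULL << 16) | 16383
 *
 * On NIX1 an additional NIX1-specific flag, BIT_ULL(46), is OR'ed in as
 * done above.
 */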
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);

	/* Overwrite the VF mac address with default_mac */
	if (from_vf)
		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}
/* NIX ingress policers or bandwidth profiles APIs */
static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
{
	struct npc_lt_def_cfg defs, *ltdefs;

	ltdefs = &defs;
	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));

	/* Extract PCP and DEI fields from outer VLAN from byte offset
	 * 2 from the start of LB_PTR (i.e. TAG).
	 * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
	 * fields are considered when 'Tunnel enable' is set in the profile.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
		    (ltdefs->ovlan.ltype_match << 4) |
		    ltdefs->ovlan.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
		    (ltdefs->ivlan.ltype_match << 4) |
		    ltdefs->ivlan.ltype_mask);

	/* DSCP field in outer and tunneled IPv4 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
		    (ltdefs->rx_oip4.ltype_match << 4) |
		    ltdefs->rx_oip4.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
		    (ltdefs->rx_iip4.ltype_match << 4) |
		    ltdefs->rx_iip4.ltype_mask);

	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
		    (ltdefs->rx_oip6.ltype_match << 4) |
		    ltdefs->rx_oip6.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
		    (ltdefs->rx_iip6.ltype_match << 4) |
		    ltdefs->rx_iip6.ltype_mask);
}
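/* Encoding note (illustrative): the VLAN and IPv4 writes above place a
 * byte offset at bit 12: 2 for VLAN (past the TPID, landing on the TCI
 * byte holding PCP/DEI) and 1 for IPv4 (the TOS byte carrying DSCP). The
 * low bits reuse the (lid << 8) | (ltype_match << 4) | ltype_mask packing
 * used by the other NIX_AF_RX_DEF_* registers; the IPv6 writes encode
 * their offset with a different shift since the traffic class is not
 * byte aligned in the IPv6 header.
 */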
static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
				    int layer, int prof_idx)
{
	struct nix_cn10k_aq_enq_req aq_req;
	int rc;

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));

	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_INIT;

	/* Context is all zeros, submit to AQ */
	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc)
		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
			layer, prof_idx);

	return rc;
}
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_ipolicer *ipolicer;
	int err, layer, prof_idx;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	if (!(cfg & BIT_ULL(61))) {
		hw->cap.ipolicer = false;
		return 0;
	}

	hw->cap.ipolicer = true;
	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
					sizeof(*ipolicer), GFP_KERNEL);
	if (!nix_hw->ipolicer)
		return -ENOMEM;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];
		switch (layer) {
		case BAND_PROF_LEAF_LAYER:
			ipolicer->band_prof.max = cfg & 0XFFFF;
			break;
		case BAND_PROF_MID_LAYER:
			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
			break;
		case BAND_PROF_TOP_LAYER:
			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
			break;
		}

		if (!ipolicer->band_prof.max)
			continue;

		err = rvu_alloc_bitmap(&ipolicer->band_prof);
		if (err)
			return err;

		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->pfvf_map)
			return -ENOMEM;

		ipolicer->match_id = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->match_id)
			return -ENOMEM;

		for (prof_idx = 0;
		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
			/* Set AF as current owner for INIT ops to succeed */
			ipolicer->pfvf_map[prof_idx] = 0x00;

			/* There is no enable bit in the profile context,
			 * so no context disable. So let's INIT them here
			 * so that PF/VF later on have to just do WRITE to
			 * setup policer rates and config.
			 */
			err = nix_init_policer_context(rvu, nix_hw,
						       layer, prof_idx);
			if (err)
				return err;
		}

		/* Allocate memory for maintaining ref_counts for MID level
		 * profiles, this will be needed for leaf layer profiles'
		 * aggregation.
		 */
		if (layer != BAND_PROF_MID_LAYER)
			continue;

		ipolicer->ref_count = devm_kcalloc(rvu->dev,
						   ipolicer->band_prof.max,
						   sizeof(u16), GFP_KERNEL);
		if (!ipolicer->ref_count)
			return -ENOMEM;
	}

	/* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);

	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);

	return 0;
}
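/* Decoding example for NIX_AF_PL_CONST (illustrative, assumed value): if
 * the register read 0x0000004000400100, the switch above would size the
 * leaf layer at 0x100 (256) profiles from bits [15:0], the mid layer at
 * 0x40 (64) from bits [31:16] and the top layer at 0x40 (64) from bits
 * [47:32].
 */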
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_ipolicer *ipolicer;
	int layer;

	if (!rvu->hw->cap.ipolicer)
		return;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];

		if (!ipolicer->band_prof.max)
			continue;

		kfree(ipolicer->band_prof.bmap);
	}
}
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc)
{
	struct nix_ipolicer *ipolicer;
	int layer, hi_layer, prof_idx;

	/* Bits [15:14] in profile index represent layer */
	layer = (req->qidx >> 14) & 0x03;
	prof_idx = req->qidx & 0x3FFF;

	ipolicer = &nix_hw->ipolicer[layer];
	if (prof_idx >= ipolicer->band_prof.max)
		return -EINVAL;

	/* Check if the profile is allocated to the requesting PCIFUNC or not,
	 * with the exception of AF. AF is allowed to read and update contexts.
	 */
	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	/* If this profile is linked to a higher layer profile then check
	 * if that profile is also allocated to the requesting PCIFUNC
	 * or not.
	 */
	if (!req->prof.hl_en)
		return 0;

	/* Leaf layer profile can link only to mid layer and
	 * mid layer to top layer.
	 */
	if (layer == BAND_PROF_LEAF_LAYER)
		hi_layer = BAND_PROF_MID_LAYER;
	else if (layer == BAND_PROF_MID_LAYER)
		hi_layer = BAND_PROF_TOP_LAYER;
	else
		return -EINVAL;

	ipolicer = &nix_hw->ipolicer[hi_layer];
	prof_idx = req->prof.band_prof_id;
	if (prof_idx >= ipolicer->band_prof.max ||
	    ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	return 0;
}
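/* Index encoding example (illustrative): with the layer held in qidx bits
 * [15:14] and the profile index in bits [13:0], a request addressing
 * mid-layer profile 5 carries qidx = (BAND_PROF_MID_LAYER << 14) | 5,
 * which the checks above split back into layer and prof_idx.
 */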
int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
					struct nix_bandprof_alloc_req *req,
					struct nix_bandprof_alloc_rsp *rsp)
{
	int blkaddr, layer, prof, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
			if (prof < 0)
				break;
			rsp->prof_count[layer]++;
			rsp->prof_idx[layer][idx] = prof;
			ipolicer->pfvf_map[prof] = pcifunc;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, layer, prof_idx, err;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free all the profiles allocated to the PCIFUNC */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		ipolicer = &nix_hw->ipolicer[layer];

		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}
int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
				       struct nix_bandprof_free_req *req,
				       struct msg_rsp *rsp)
{
	int blkaddr, layer, prof_idx, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (req->free_all)
		return nix_free_all_bandprof(rvu, pcifunc);

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free the requested profile indices */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			prof_idx = req->prof_idx[layer][idx];
			if (prof_idx >= ipolicer->band_prof.max ||
			    ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}
int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
			struct nix_cn10k_aq_enq_req *aq_req,
			struct nix_cn10k_aq_enq_rsp *aq_rsp,
			u16 pcifunc, u8 ctype, u32 qidx)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = pcifunc;
	aq_req->ctype = ctype;
	aq_req->op = NIX_AQ_INSTOP_READ;
	aq_req->qidx = qidx;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}
static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_cn10k_aq_enq_req *aq_req,
					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
					  u32 leaf_prof, u16 mid_prof)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = 0x00;
	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req->op = NIX_AQ_INSTOP_WRITE;
	aq_req->qidx = leaf_prof;

	aq_req->prof.band_prof_id = mid_prof;
	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
	aq_req->prof.hl_en = 1;
	aq_req->prof_mask.hl_en = 1;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer prof index and map the leaf_prof index
		 * to it as well, so that flows that are being steered
		 * to different RQs and marked with the same match_id
		 * are rate limited in an aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);

		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize the mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}
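/* Scenario walk-through (illustrative): suppose two flows carrying the
 * same match_id are steered to RQ0 and RQ1, each policed by its own leaf
 * profile. The first call here only records the match_id. The second call
 * finds the matching leaf, then either reuses that leaf's existing mid
 * layer profile or allocates one initialized from the caller's leaf
 * context, links both leaves to it and takes a reference per leaf, so the
 * two flows are policed against a single aggregate rate.
 */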
/* Called with the rsrc_lock mutex held */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free the mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}
int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
					     struct nix_bandprof_get_hwinfo_rsp *rsp)
{
	struct nix_ipolicer *ipolicer;
	int blkaddr, layer, err;
	struct nix_hw *nix_hw;
	u64 tu;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	/* Return the number of bandwidth profiles free at each layer */
	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Set the policer timeunit in nanosec */
	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
	rsp->policer_timeunit = (tu + 1) * 100;

	return 0;
}
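/* Arithmetic note: with the value 19 programmed into NIX_AF_PL_TS by
 * nix_setup_ipolicers(), the timeunit reported here is
 * (19 + 1) * 100 = 2000 nanoseconds, matching the 2us policer tick
 * configured there.
 */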