// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"

#include "rvu_trace.h"
#include "rvu_npc_hash.h"

#define DRV_NAME	"rvu_af"
#define DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *));

enum {
	TYPE_AFVF,
	TYPE_AFPF,
};
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, }	/* end of table */
};
MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static char *kpu_profile; /* KPU profile name */
module_param(kpu_profile, charp, 0000);
MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
	hw->cap.nix_fixed_txschq_mapping = false;
	hw->cap.nix_shaping = true;
	hw->cap.nix_tx_link_bp = true;
	hw->cap.nix_rx_multicast = true;
	hw->cap.nix_shaper_toggle_wait = false;
	hw->cap.npc_hash_extract = false;
	hw->cap.npc_exact_match_enabled = false;

	if (is_rvu_pre_96xx_C0(rvu)) {
		hw->cap.nix_fixed_txschq_mapping = true;
		hw->cap.nix_txsch_per_cgx_lmac = 4;
		hw->cap.nix_txsch_per_lbk_lmac = 132;
		hw->cap.nix_txsch_per_sdp_lmac = 76;
		hw->cap.nix_shaping = false;
		hw->cap.nix_tx_link_bp = false;
		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
			hw->cap.nix_rx_multicast = false;
	}
	if (!is_rvu_pre_96xx_C0(rvu))
		hw->cap.nix_shaper_toggle_wait = true;

	if (!is_rvu_otx2(rvu))
		hw->cap.per_pf_mbox_regs = true;

	if (is_rvu_npc_hash_extract_en(rvu))
		hw->cap.npc_hash_extract = true;
}
/* Poll an RVU block's register 'offset' for a 'zero'
 * or 'nonzero' at the bits specified by 'mask'.
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	bool twice = false;
	void __iomem *reg;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
again:
	reg_val = readq(reg);
	if (zero && !(reg_val & mask))
		return 0;
	if (!zero && (reg_val & mask))
		return 0;
	if (time_before(jiffies, timeout)) {
		usleep_range(1, 5);
		goto again;
	}
	/* In scenarios where the CPU is scheduled out before checking
	 * 'time_before' (above) and gets scheduled back in after jiffies
	 * have moved beyond the timeout value, check once more whether
	 * HW finished the operation in the meantime.
	 */
	if (!twice) {
		twice = true;
		goto again;
	}
	return -EBUSY;
}
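/* Callers typically use this to wait for a self-clearing hardware bit,
 * e.g. an LF reset (illustrative only, mirroring rvu_lf_reset() below):
 *
 *	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
 *	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg,
 *			   BIT_ULL(12), true);	// wait for bit 12 -> 0
 */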
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

	__set_bit(id, rsrc->bmap);

	return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}

bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return false;

	return !test_bit(id, rsrc->bmap);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}

void rvu_free_bitmap(struct rsrc_bmap *rsrc)
{
	kfree(rsrc->bmap);
}
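/* Usage sketch for the rsrc_bmap helpers above (illustrative only, not
 * driver code): a bitmap sized to 'max' is backed once and then carved
 * into single or contiguous resource ranges:
 *
 *	struct rsrc_bmap lfs = { .max = 128 };
 *	int lf, base;
 *
 *	if (!rvu_alloc_bitmap(&lfs)) {
 *		lf = rvu_alloc_rsrc(&lfs);		// one LF
 *		base = rvu_alloc_rsrc_contig(&lfs, 4);	// 4 contiguous LFs
 *		rvu_free_rsrc(&lfs, lf);
 *		rvu_free_bitmap(&lfs);
 *	}
 */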
/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	mutex_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				mutex_unlock(&rvu->rsrc_lock);
				return lf;
			}
			match++;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}
/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * For a pcifunc, if LFs are attached from multiple blocks of the same
 * type, return the blkaddr of the first encountered block.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
	 * 'BLKADDR_NIX1'.
	 */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
			RVU_PRIV_HWVFX_NIXX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
			RVU_PRIV_HWVFX_NIXX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX1;
	}

	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
			RVU_PRIV_HWVFX_CPTX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
			RVU_PRIV_HWVFX_CPTX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT1;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}
static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				int lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->addr) {
	case BLKADDR_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKADDR_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKADDR_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKADDR_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKADDR_CPT0:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	case BLKADDR_CPT1:
		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
		num_lfs = pfvf->cpt1_lfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	if (numvfs)
		*numvfs = (cfg >> 12) & 0xFF;
	if (hwvf)
		*hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}
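/* Note on pcifunc encoding, as used by the helpers above: the FUNC bits
 * select a VF (1-based) and the PF number sits above them, i.e.
 * pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1) for a VF, and
 * pcifunc = (pf << RVU_PFVF_PF_SHIFT) for the PF itself; a zero FUNC
 * field therefore means "the PF".
 */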
static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
	int pf, vf, nvfs;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (pf >= rvu->hw->total_pfs)
		return false;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return true;

	/* Check if VF is within number of VFs attached to this PF */
	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	nvfs = (cfg >> 12) & 0xFF;
	if (vf >= nvfs)
		return false;

	return true;
}
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}
static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}
static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}
static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];
	int err;

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
	if (err) {
		dev_err(rvu->dev, "HW block:%d reset timed out, retrying\n", blkaddr);
		while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
			;
	}
}
static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get to whom this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
			 pf, vf - 1, nvecs);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (!(nvecs < min_vecs))
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}
static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;
	rvu->msixtr_base_phy = phy_addr;

	return 0;
}
static void rvu_reset_msix(struct rvu *rvu)
{
	/* Restore msixtr base register */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}
static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);

	rvu_reset_msix(rvu);
	mutex_destroy(&rvu->rsrc_lock);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0 (AF), assign MAC addresses only to VFs (LBK VFs) */
		if (!pf)
			goto lbkvf;
		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		/* Assign MAC address to PF */
		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}
		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
		/* Assign MAC address to VFs */
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
		}
	}
}
static int rvu_fwdata_init(struct rvu *rvu)
{
	u64 fwdbase;
	int err;

	/* Get firmware data base address */
	err = cgx_get_fwdata_base(&fwdbase);
	if (err)
		goto fail;
	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
	if (!rvu->fwdata)
		goto fail;
	if (!is_rvu_fwdata_valid(rvu)) {
		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct between kernel and firmware\n");
		iounmap(rvu->fwdata);
		rvu->fwdata = NULL;
		return -EINVAL;
	}
	return 0;
fail:
	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
	return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}
static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init NIX LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	block->rvu = rvu;
	sprintf(block->name, "NIX%d", blkid);
	rvu->nix_blkaddr[blkid] = blkaddr;
	return rvu_alloc_bitmap(&block->lf);
}
static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init CPT LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	block->rvu = rvu;
	sprintf(block->name, "CPT%d", blkid);
	return rvu_alloc_bitmap(&block->lf);
}
static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_OCTEONTX2_LBK, pdev);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	lbk_const = readq(base + LBK_CONST);

	/* cache fifo size */
	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

	iounmap(base);
err_put:
	pci_dev_put(pdev);
}
static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	block->rvu = rvu;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NPA LF bitmap\n", __func__);
		return err;
	}

nix:
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
		return err;
	}

	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
		return err;
	}

	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	block->rvu = rvu;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSO LF bitmap\n", __func__);
		return err;
	}

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	block->rvu = rvu;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
		return err;
	}

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	block->rvu = rvu;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate TIM LF bitmap\n", __func__);
		return err;
	}

cpt:
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
		return err;
	}
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
		return err;
	}

	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	mutex_init(&rvu->rsrc_lock);

	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to setup MSIX resources\n", __func__);
		return err;
	}

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map) {
			err = -ENOMEM;
			goto msix_err;
		}

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_set_channels_base(rvu);
	if (err)
		goto msix_err;

	err = rvu_npc_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
		goto npc_err;
	}

	err = rvu_cgx_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
		goto cgx_err;
	}

	/* Assign MACs for CGX mapped functions */
	rvu_setup_pfvf_macaddress(rvu);

	err = rvu_npa_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
		goto npa_err;
	}

	rvu_get_lbk_bufsize(rvu);

	err = rvu_nix_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
		goto nix_err;
	}

	err = rvu_sdp_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
		goto nix_err;
	}

	rvu_program_channels(rvu);

	return 0;

nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
	rvu_cgx_exit(rvu);
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
}
/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size)
{
	struct admin_queue *aq;
	int err;

	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
	if (!*ad_queue)
		return -ENOMEM;
	aq = *ad_queue;

	/* Alloc memory for instructions i.e AQ */
	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
	if (err) {
		devm_kfree(rvu->dev, aq);
		return err;
	}

	/* Alloc memory for results */
	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
	if (err) {
		rvu_aq_free(rvu, aq);
		return err;
	}

	spin_lock_init(&aq->lock);
	return 0;
}
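/* Typical pairing (illustrative only): an AQ sized for 128 instructions,
 * with instruction/result sizes taken from the block's descriptor formats:
 *
 *	struct admin_queue *aq;
 *
 *	if (!rvu_aq_alloc(rvu, &aq, 128, sizeof(struct npa_aq_inst_s),
 *			  sizeof(struct npa_aq_res_s)))
 *		rvu_aq_free(rvu, aq);
 */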
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
			   struct ready_msg_rsp *rsp)
{
	if (rvu->fwdata) {
		rsp->rclk_freq = rvu->fwdata->rclk;
		rsp->sclk_freq = rvu->fwdata->sclk;
	}
	return 0;
}
/* Get current count of an RVU block's LFs/slots
 * provisioned to a given RVU func.
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
	switch (blkaddr) {
	case BLKADDR_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		return pfvf->nixlf ? 1 : 0;
	case BLKADDR_SSO:
		return pfvf->sso;
	case BLKADDR_SSOW:
		return pfvf->ssow;
	case BLKADDR_TIM:
		return pfvf->timlfs;
	case BLKADDR_CPT0:
		return pfvf->cptlfs;
	case BLKADDR_CPT1:
		return pfvf->cpt1_lfs;
	}
	return 0;
}

/* Return true if LFs of block type are attached to pcifunc */
static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return !!pfvf->sso;
	case BLKTYPE_SSOW:
		return !!pfvf->ssow;
	case BLKTYPE_TIM:
		return !!pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs || pfvf->cpt1_lfs;
	}

	return false;
}
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf;

	if (!is_pf_func_valid(rvu, pcifunc))
		return false;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Check if this PFFUNC has an LF of type blktype attached */
	if (!is_blktype_attached(pfvf, blktype))
		return false;

	return true;
}
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);

	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
			      u16 global_slot, u16 *slot_in_block)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int numlfs, total_lfs = 0, nr_blocks = 0;
	int i, num_blkaddr[BLK_COUNT] = { 0 };
	struct rvu_block *block;
	int blkaddr;
	u16 start_slot;

	if (!is_blktype_attached(pfvf, blktype))
		return -ENODEV;

	/* Get all the block addresses from which LFs are attached to
	 * the given pcifunc in num_blkaddr[].
	 */
	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
		block = &rvu->hw->block[blkaddr];
		if (block->type != blktype)
			continue;
		if (!is_block_implemented(rvu->hw, blkaddr))
			continue;

		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
		if (numlfs) {
			total_lfs += numlfs;
			num_blkaddr[nr_blocks] = blkaddr;
			nr_blocks++;
		}
	}

	if (global_slot >= total_lfs)
		return -ENODEV;

	/* Based on the given global slot number retrieve the
	 * correct block address out of all attached block
	 * addresses and slot number in that block.
	 */
	total_lfs = 0;
	blkaddr = -ENODEV;
	for (i = 0; i < nr_blocks; i++) {
		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
		total_lfs += numlfs;
		if (global_slot < total_lfs) {
			blkaddr = num_blkaddr[i];
			start_slot = total_lfs - numlfs;
			*slot_in_block = global_slot - start_slot;
			break;
		}
	}

	return blkaddr;
}
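/* Worked example: if a pcifunc has 2 LFs from CPT0 and 2 from CPT1,
 * global slots 0-3 map to CPT0 slots 0-1 followed by CPT1 slots 0-1,
 * so global_slot = 3 returns blkaddr = BLKADDR_CPT1 with
 * *slot_in_block = 1.
 */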
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	if (blktype == BLKTYPE_NIX)
		rvu_nix_reset_mac(pfvf, pcifunc);

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}
static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	mutex_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for RVU block's LFs attached to this func,
	 * if so, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
				      struct rsrc_detach *detach,
				      struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr = BLKADDR_NIX0, vf;
	struct rvu_pfvf *pf;

	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

	/* All CGX mapped PFs are set with assigned NIX block during init */
	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
		blkaddr = pf->nix_blkaddr;
	} else if (is_afvf(pcifunc)) {
		vf = pcifunc - 1;
		/* Assign NIX based on VF number. All even numbered VFs get
		 * NIX0 and odd numbered ones get NIX1.
		 */
		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
		/* NIX1 is not present on all silicons */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			blkaddr = BLKADDR_NIX0;
	}

	/* if SDP1 then the blkaddr is NIX1 */
	if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
		blkaddr = BLKADDR_NIX1;

	switch (blkaddr) {
	case BLKADDR_NIX1:
		pfvf->nix_blkaddr = BLKADDR_NIX1;
		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
		break;
	case BLKADDR_NIX0:
	default:
		pfvf->nix_blkaddr = BLKADDR_NIX0;
		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
		break;
	}

	return pfvf->nix_blkaddr;
}
static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
				  u16 pcifunc, struct rsrc_attach *attach)
{
	int blkaddr;

	switch (blktype) {
	case BLKTYPE_NIX:
		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
		break;
	case BLKTYPE_CPT:
		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
			return rvu_get_blkaddr(rvu, blktype, 0);
		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
			  BLKADDR_CPT0;
		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
			return -ENODEV;
		break;
	default:
		return rvu_get_blkaddr(rvu, blktype, 0);
	}

	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;

	return -ENODEV;
}
static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
			     int num_lfs, struct rsrc_attach *attach)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int free_lfs, mappedlfs, blkaddr;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;

	/* Only one NPA LF can be attached */
	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits ? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}
static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
				       struct rsrc_attach *attach)
{
	int blkaddr, num_lfs;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
					 attach->hdr.pcifunc, attach);
	if (blkaddr < 0)
		return false;

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
					blkaddr);
	/* Requester already has LFs from given block ? */
	return !!num_lfs;
}
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees them as slots 0, 1, 2. So for a
		 * 'modify' request, simply detach all existing attached
		 * LFs/slots and attach afresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}
static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (!pfvf->msix_lfmap)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}
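/* Example of the resulting bookkeeping (illustrative): if an LF's MSIX
 * config reports nvecs = 4 and the contiguous allocation returns offset
 * 12, vectors 12-15 of the PF/VF are programmed into the LF's INT_CFG
 * and msix_lfmap[12..15] all record MSIX_BLKLF(block->addr, lf), which
 * is what rvu_get_msix_offset() later searches for.
 */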
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

	/* Update the mapping */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the same in MSIX bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
				 struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot, blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	/* Get BLKADDR from which LFs are attached to pcifunc */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0) {
		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
	} else {
		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
	}

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}

	rsp->cpt1_lfs = pfvf->cpt1_lfs;
	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
		rsp->cpt1_lf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
	}

	return 0;
}
int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
				   struct free_rsrcs_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	mutex_lock(&rvu->rsrc_lock);

	block = &hw->block[BLKADDR_NPA];
	rsp->npa = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX0];
	rsp->nix = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX1];
	rsp->nix1 = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSO];
	rsp->sso = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSOW];
	rsp->ssow = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_TIM];
	rsp->tim = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT0];
	rsp->cpt = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT1];
	rsp->cpt1 = rvu_rsrc_free_count(&block->lf);

	if (rvu->hw->cap.nix_fixed_txschq_mapping) {
		rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
		/* NIX1 */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
	} else {
		nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);
		/* NIX1 */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;

		nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);
	}

	rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
out:
	rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}
int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
			    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	u16 vf, numvfs;
	u64 cfg;

	vf = pcifunc & RVU_PFVF_FUNC_MASK;
	cfg = rvu_read64(rvu, BLKADDR_RVUM,
			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
	numvfs = (cfg >> 12) & 0xFF;

	if (vf && vf <= numvfs)
		__rvu_flr_handler(rvu, pcifunc);
	else
		return RVU_INVALID_VF_ID;

	return 0;
}
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
				struct get_hw_cap_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;

	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
	rsp->nix_shaping = hw->cap.nix_shaping;
	rsp->npc_hash_extract = hw->cap.npc_hash_extract;

	return 0;
}
int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, nixlf;
	u16 target;

	/* Only PF can add VF permissions */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
		return -EOPNOTSUPP;

	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
	pfvf = rvu_get_pfvf(rvu, target);

	if (req->flags & RESET_VF_PERM) {
		pfvf->flags &= RVU_CLEAR_VF_PERM;
	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
		 (req->flags & VF_TRUSTED)) {
		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
		/* disable multicast and promisc entries */
		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
			if (blkaddr < 0)
				return 0;
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   target, 0);
			if (nixlf < 0)
				return 0;
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_ALLMULTI_ENTRY,
						     false);
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_PROMISC_ENTRY,
						     false);
		}
	}

	return 0;
}
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
				struct mbox_msghdr *req)
{
	struct rvu *rvu = pci_get_drvdata(mbox->pdev);

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			mbox, devid,					\
			sizeof(struct _rsp_type));			\
		/* some handlers should complete even if reply */	\
		/* could not be allocated */				\
		if (!rsp &&						\
		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
		    _id != MBOX_MSG_VF_FLR)				\
			return -ENOMEM;					\
		if (rsp) {						\
			rsp->hdr.id = _id;				\
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
			rsp->hdr.pcifunc = req->pcifunc;		\
			rsp->hdr.rc = 0;				\
		}							\
									\
		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
						    (struct _req_type *)req, \
						    rsp);		\
		if (rsp && err)						\
			rsp->hdr.rc = err;				\
									\
		trace_otx2_msg_process(mbox->pdev, _id, err);		\
		return rsp ? err : -ENOMEM;				\
	}
MBOX_MESSAGES
#undef M

bad_message:
	default:
		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
		return -ENODEV;
	}
}
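/* For reference, one hand-expanded instance of the M() macro above
 * (illustrative only): the MBOX_MSG_READY entry becomes
 *
 *	case MBOX_MSG_READY: {
 *		struct ready_msg_rsp *rsp;
 *		int err;
 *
 *		rsp = (struct ready_msg_rsp *)otx2_mbox_alloc_msg(
 *			mbox, devid, sizeof(struct ready_msg_rsp));
 *		...
 *		err = rvu_mbox_handler_ready(rvu,
 *					     (struct msg_req *)req, rsp);
 *		...
 *	}
 */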
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	int offset, err, id, devid;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk;
	mbox = &mw->mbox;
	mdev = &mbox->dev[devid];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk[devid].num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF/VF sent this message based on mbox IRQ */
		switch (type) {
		case TYPE_AFPF:
			msg->pcifunc &=
				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
			break;
		case TYPE_AFVF:
			msg->pcifunc &=
				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
			break;
		}

		err = rvu_process_mbox_msg(mbox, devid, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc),
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, devid);
	}
	mw->mbox_wrk[devid].num_msgs = 0;

	/* Send mbox responses to VF/PF */
	otx2_mbox_msg_send(mbox, devid);
}
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFVF);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;
	int offset, id, devid;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk_up;
	mbox = &mw->mbox_up;
	mdev = &mbox->dev[devid];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}
	mw->mbox_wrk_up[devid].up_num_msgs = 0;

	otx2_mbox_reset(mbox, devid);
}
static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
				int num, int type)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int region;
	u64 bar4;

	/* For cn10k platform VF mailbox regions of a PF follow after the
	 * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
	 * RVU_PF_VF_BAR4_ADDR register.
	 */
	if (type == TYPE_AFVF) {
		for (region = 0; region < num; region++) {
			if (hw->cap.per_pf_mbox_regs) {
				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
						  RVU_AF_PFX_BAR4_ADDR(0)) +
						  RVU_PF_VF_MBOX_ADDR;
				bar4 += region * MBOX_SIZE;
			} else {
				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
				bar4 += region * MBOX_SIZE;
			}
			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
			if (!mbox_addr[region])
				goto error;
		}
		return 0;
	}

	/* For cn10k platform AF <-> PF mailbox region of a PF is read from per
	 * PF registers. Whereas for Octeontx2 it is read from
	 * RVU_AF_PF_BAR4_ADDR register.
	 */
	for (region = 0; region < num; region++) {
		if (hw->cap.per_pf_mbox_regs) {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PFX_BAR4_ADDR(region));
		} else {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PF_BAR4_ADDR);
			bar4 += region * MBOX_SIZE;
		}
		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
		if (!mbox_addr[region])
			goto error;
	}
	return 0;

error:
	while (region--)
		iounmap((void __iomem *)mbox_addr[region]);
	return -ENOMEM;
}
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *))
{
	int err = -EINVAL, i, dir, dir_up;
	void __iomem *reg_base;
	struct rvu_work *mwork;
	void **mbox_regions;
	const char *name;

	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
	if (!mbox_regions)
		return -ENOMEM;

	switch (type) {
	case TYPE_AFPF:
		name = "rvu_afpf_mailbox";
		dir = MBOX_DIR_AFPF;
		dir_up = MBOX_DIR_AFPF_UP;
		reg_base = rvu->afreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
		if (err)
			goto free_regions;
		break;
	case TYPE_AFVF:
		name = "rvu_afvf_mailbox";
		dir = MBOX_DIR_PFVF;
		dir_up = MBOX_DIR_PFVF_UP;
		reg_base = rvu->pfreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
		if (err)
			goto free_regions;
		break;
	default:
		goto free_regions;
	}

	mw->mbox_wq = alloc_workqueue(name,
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      num);
	if (!mw->mbox_wq) {
		err = -ENOMEM;
		goto unmap_regions;
	}

	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk) {
		err = -ENOMEM;
		goto exit;
	}

	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
				       sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk_up) {
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
				     reg_base, dir, num);
	if (err)
		goto exit;

	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
				     reg_base, dir_up, num);
	if (err)
		goto exit;

	for (i = 0; i < num; i++) {
		mwork = &mw->mbox_wrk[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_handler);

		mwork = &mw->mbox_wrk_up[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_up_handler);
	}
	kfree(mbox_regions);
	return 0;

exit:
	destroy_workqueue(mw->mbox_wq);
unmap_regions:
	while (num--)
		iounmap((void __iomem *)mbox_regions[num]);
free_regions:
	kfree(mbox_regions);
	return err;
}
static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
	struct otx2_mbox *mbox = &mw->mbox;
	struct otx2_mbox_dev *mdev;
	int devid;

	if (mw->mbox_wq) {
		destroy_workqueue(mw->mbox_wq);
		mw->mbox_wq = NULL;
	}

	for (devid = 0; devid < mbox->ndevs; devid++) {
		mdev = &mbox->dev[devid];
		if (mdev->hwbase)
			iounmap((void __iomem *)mdev->hwbase);
	}

	otx2_mbox_destroy(&mw->mbox);
	otx2_mbox_destroy(&mw->mbox_up);
}
static void rvu_queue_work(struct mbox_wq_info *mw, int first,
			   int mdevs, u64 intr)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		/* start from 0 */
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;

		/* The hdr->num_msgs is set to zero immediately in the
		 * interrupt handler to ensure that it holds a correct value
		 * next time when the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in
		 * pfaf_mbox_handler and pf->mbox.up_num_msgs holds the data
		 * for use in pfaf_mbox_up_handler.
		 */
		if (hdr->num_msgs) {
			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
		}
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
		}
	}
}
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfs = rvu->vfs;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);

	/* Sync with mbox memory region */
	rmb();

	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);

	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);

		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs -= 64;
	}

	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);

	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);

	return IRQ_HANDLED;
}
static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}
static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int err;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return;
	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (lf < 0)
			continue;

		/* Cleanup LF and reset it */
		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
		else if (block->addr == BLKADDR_NPA)
			rvu_npa_lf_teardown(rvu, pcifunc, lf);
		else if ((block->addr == BLKADDR_CPT0) ||
			 (block->addr == BLKADDR_CPT1))
			rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
					    slot);

		err = rvu_lf_reset(rvu, block, lf);
		if (err) {
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, lf);
		}
	}
}

static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	mutex_lock(&rvu->flr_lock);
	/* Reset order should reflect inter-block dependencies:
	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
	 * 2. Flush and reset SSO/SSOW
	 * 3. Cleanup pools (NPA)
	 */
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
	rvu_reset_lmt_map_tbl(rvu, pcifunc);
	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	mutex_unlock(&rvu->flr_lock);
}
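
/* AF's VFs carry PF number 0 in their pcifunc. Assuming the usual RVU
 * encoding (PF number at RVU_PFVF_PF_SHIFT, function number in the low
 * bits, with function 0 meaning the PF itself), VF0 of PF0 is pcifunc 1,
 * VF1 is pcifunc 2, and so on - hence the 'vf + 1' below.
 */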

static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
{
	int reg = 0;

	/* pcifunc = 0(PF0) | (vf + 1) */
	__rvu_flr_handler(rvu, vf + 1);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}

	/* Signal FLR finish and enable IRQ */
	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

static void rvu_flr_handler(struct work_struct *work)
{
	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = flrwork->rvu;
	u16 pcifunc, numvfs, vf;
	u64 cfg;
	int pf;

	pf = flrwork - rvu->flr_wrk;
	if (pf >= rvu->hw->total_pfs) {
		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
		return;
	}

	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	numvfs = (cfg >> 12) & 0xFF;
	pcifunc = pf << RVU_PFVF_PF_SHIFT;

	for (vf = 0; vf < numvfs; vf++)
		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));

	__rvu_flr_handler(rvu, pcifunc);

	/* Signal FLR finish */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));

	/* Enable interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
}
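
/* The flr_wrk array holds one entry per PF followed by one entry per AF VF,
 * which is why rvu_flr_handler() above treats indices at or beyond
 * total_pfs as AF-VF FLRs, and why 'dev' below is offset by
 * rvu->hw->total_pfs.
 */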

static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
{
	int dev, vf, reg = 0;
	u64 intr;

	if (start_vf >= 64)
		reg = 1;

	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
	if (!intr)
		return;

	for (vf = 0; vf < numvfs; vf++) {
		if (!(intr & BIT_ULL(vf)))
			continue;
		/* Clear and disable the interrupt */
		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));

		dev = vf + start_vf + rvu->hw->total_pfs;
		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
	}
}

static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
	if (!intr)
		goto afvf_flr;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
				    BIT_ULL(pf));
			/* Disable the interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
				    BIT_ULL(pf));
			/* PF is already dead do only AF related operations */
			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
		}
	}

afvf_flr:
	rvu_afvf_queue_flr_work(rvu, 0, 64);
	if (rvu->vfs > 64)
		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);

	return IRQ_HANDLED;
}

static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
{
	int vf;

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (vf = 0; vf < 64; vf++) {
		if (intr & (1ULL << vf)) {
			/* clear the trpend due to ME(master enable) */
			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
			/* clear interrupt */
			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
		}
	}
}

/* Handles ME interrupts from VFs of AF */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfset;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	for (vfset = 0; vfset <= 1; vfset++) {
		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
		if (intr)
			rvu_me_handle_vfset(rvu, vfset, intr);
	}

	return IRQ_HANDLED;
}

/* Handles ME interrupts from PFs */
static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear the trpend due to ME(master enable) */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
				    BIT_ULL(pf));
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
				    BIT_ULL(pf));
		}
	}

	return IRQ_HANDLED;
}

static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	rvu_cpt_unregister_interrupts(rvu);

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF FLR interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF ME interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq]) {
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
			rvu->irq_allocated[irq] = false;
		}
	}

	pci_free_irq_vectors(rvu->pdev);
	rvu->num_vec = 0;
}

static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
{
	struct rvu_pfvf *pfvf = &rvu->pf[0];
	int offset;

	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. Offset equal to zero means
	 * that PF vectors are not configured and overlapping AF vectors.
	 */
	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
	       offset;
}
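
/* AF vectors sit at the start of PF0's MSI-X table (the RVU_AF_INT_VEC_*
 * enums are used directly as vector indices below), while the vectors for
 * AF's VFs begin at the offset programmed in RVU_PRIV_PF(0)_INT_CFG. Both
 * sets must fit within PF0's MSI-X budget, which is what the
 * RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT check above verifies.
 */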

static int rvu_register_interrupts(struct rvu *rvu)
{
	int ret, offset, pf_vec_start;

	rvu->num_vec = pci_msix_vec_count(rvu->pdev);

	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
					   NAME_SIZE, GFP_KERNEL);
	if (!rvu->irq_name)
		return -ENOMEM;

	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
					  sizeof(bool), GFP_KERNEL);
	if (!rvu->irq_allocated)
		return -ENOMEM;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
				    rvu->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(rvu->dev,
			"RVUAF: Request for %d msix vectors failed, ret %d\n",
			rvu->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	/* Register FLR interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
		"RVUAF FLR");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for FLR\n");
		goto fail;
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;

	/* Enable FLR interrupt for all PFs*/
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Register ME interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
		"RVUAF ME");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
			  rvu_me_pf_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for ME\n");
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;

	/* Clear TRPEND bit for all PF */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
	/* Enable ME interrupt for all PFs*/
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	if (!rvu_afvf_msix_vectors_num_ok(rvu))
		return 0;

	/* Get PF MSIX vectors offset. */
	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Register MBOX0 interrupt. */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox0\n");

	rvu->irq_allocated[offset] = true;

	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
	 * simply increment current offset by 1.
	 */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox1\n");

	rvu->irq_allocated[offset] = true;

	/* Register FLR interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	/* Register ME interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	ret = rvu_cpt_register_interrupts(rvu);
	if (ret)
		goto fail;

	return 0;

fail:
	rvu_unregister_interrupts(rvu);
	return ret;
}
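
/* A note on rvu_register_interrupts() above: irq_name is a flat char array
 * carved into NAME_SIZE slots indexed by MSI-X vector number, which is what
 * the 'vector * NAME_SIZE' arithmetic in the sprintf()/request_irq() pairs
 * relies on.
 */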

static void rvu_flr_wq_destroy(struct rvu *rvu)
{
	if (rvu->flr_wq) {
		destroy_workqueue(rvu->flr_wq);
		rvu->flr_wq = NULL;
	}
}

static int rvu_flr_init(struct rvu *rvu)
{
	int dev, num_devs;
	u64 cfg;
	int pf;

	/* Enable FLR for all PFs*/
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
			    cfg | BIT_ULL(22));
	}

	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      1);
	if (!rvu->flr_wq)
		return -ENOMEM;

	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->flr_wrk) {
		destroy_workqueue(rvu->flr_wq);
		return -ENOMEM;
	}

	for (dev = 0; dev < num_devs; dev++) {
		rvu->flr_wrk[dev].rvu = rvu;
		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
	}

	mutex_init(&rvu->flr_lock);

	return 0;
}
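
/* The per-VF mbox/FLR/ME interrupt CSRs come in pairs: index 0 covers
 * VFs 0-63 and index 1 covers VFs 64-127, one bit per VF. The enable and
 * disable helpers below mirror that split.
 */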

static void rvu_disable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
		      INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}

static void rvu_enable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	/* Mbox */
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
		      INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}

int rvu_get_num_lbk_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Read number of available LBK channels from LBK(0)_CONST register. */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}
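
/* Per the shift/mask above, the channel count is taken from bits 47:32 of
 * the LBK(0)_CONST CSR at BAR0 offset 0x10. Callers must treat a negative
 * return as an error, as rvu_enable_sriov() below does when limiting the
 * number of AF VFs to the available LBK channels.
 */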

static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = rvu_get_num_lbk_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Limit VFs in case we have more VFs than LBK channels available. */
	if (vfs > chans)
		vfs = chans;

	if (!vfs)
		return 0;

	/* LBK channel number 63 is used for switching packets between
	 * CGX mapped VFs. Hence limit LBK pairs till 62 only.
	 */
	if (vfs > 62)
		vfs = 62;

	/* Save VFs number for reference in VF interrupts handlers.
	 * Since interrupts might start arriving during SRIOV enablement
	 * ordinary API cannot be used to get number of enabled VFs.
	 */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Make sure IRQs are enabled before SRIOV. */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}
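
/* Note the ordering in rvu_enable_sriov() above: the AF-VF mailbox and its
 * interrupts must be live before pci_enable_sriov(), since VF driver probes
 * (and therefore mailbox traffic) can begin as soon as SRIOV is enabled.
 */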

static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}

static void rvu_update_module_params(struct rvu *rvu)
{
	const char *default_pfl_name = "default";

	strscpy(rvu->mkex_pfl_name,
		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
	strscpy(rvu->kpu_pfl_name,
		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
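
/* Usage sketch (the profile names below are hypothetical): custom NPC
 * profiles are selected at module load time via the mkex_profile and
 * kpu_profile parameters, e.g.:
 *
 *   modprobe rvu_af mkex_profile=my_mkex kpu_profile=my_kpu
 *
 * When a parameter is not given, the built-in "default" profile is used.
 */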

static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	rvu->ptp = ptp_get();
	if (IS_ERR(rvu->ptp)) {
		err = PTR_ERR(rvu->ptp);
		if (err == -EPROBE_DEFER)
			goto err_release_regions;
		rvu->ptp = NULL;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_put_ptp;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_put_ptp;

	/* Init mailbox btw AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err) {
		dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
		goto err_hwsetup;
	}

	err = rvu_flr_init(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to initialize flr\n", __func__);
		goto err_mbox;
	}

	err = rvu_register_interrupts(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register interrupts\n", __func__);
		goto err_flr;
	}

	err = rvu_register_dl(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register devlink\n", __func__);
		goto err_irq;
	}

	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to enable sriov\n", __func__);
		goto err_dl;
	}

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	mutex_init(&rvu->rswitch.switch_lock);

	if (rvu->fwdata)
		ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
			  rvu->fwdata->ptp_ext_tstamp);

	return 0;
err_dl:
	rvu_unregister_dl(rvu);
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
	ptp_put(rvu->ptp);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}

static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_dl(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
	ptp_put(rvu->ptp);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};

static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&ptp_driver);
	if (err < 0)
		goto ptp_err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		goto rvu_err;

	return 0;
rvu_err:
	pci_unregister_driver(&ptp_driver);
ptp_err:
	pci_unregister_driver(&cgx_driver);

	return err;
}

static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);