1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/delay.h>
14 #include <linux/irq.h>
15 #include <linux/pci.h>
16 #include <linux/sysfs.h>
23 #include "rvu_trace.h"
25 #define DRV_NAME "rvu_af"
26 #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
28 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
30 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
31 struct rvu_block *block, int lf);
32 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
33 struct rvu_block *block, int lf);
34 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
36 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
38 void (mbox_handler)(struct work_struct *),
39 void (mbox_up_handler)(struct work_struct *));
45 /* Supported devices */
46 static const struct pci_device_id rvu_id_table[] = {
47 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
48 { 0, } /* end of table */
51 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
52 MODULE_DESCRIPTION(DRV_STRING);
53 MODULE_LICENSE("GPL v2");
54 MODULE_DEVICE_TABLE(pci, rvu_id_table);
56 static char *mkex_profile; /* MKEX profile name */
57 module_param(mkex_profile, charp, 0000);
58 MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
60 static char *kpu_profile; /* KPU profile name */
61 module_param(kpu_profile, charp, 0000);
62 MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
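
/* Illustrative usage (not part of the original source; module name assumed
 * from DRV_NAME): both parser profiles can be overridden at load time, e.g.
 *
 *   modprobe rvu_af mkex_profile=<name> kpu_profile=<name>
 *
 * The AF then looks the named profiles up in firmware data during init.
 */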
64 static void rvu_setup_hw_capabilities(struct rvu *rvu)
66 struct rvu_hwinfo *hw = rvu->hw;
68 hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
69 hw->cap.nix_fixed_txschq_mapping = false;
70 hw->cap.nix_shaping = true;
71 hw->cap.nix_tx_link_bp = true;
72 hw->cap.nix_rx_multicast = true;
75 if (is_rvu_96xx_B0(rvu)) {
76 hw->cap.nix_fixed_txschq_mapping = true;
77 hw->cap.nix_txsch_per_cgx_lmac = 4;
78 hw->cap.nix_txsch_per_lbk_lmac = 132;
79 hw->cap.nix_txsch_per_sdp_lmac = 76;
80 hw->cap.nix_shaping = false;
81 hw->cap.nix_tx_link_bp = false;
82 if (is_rvu_96xx_A0(rvu))
83 hw->cap.nix_rx_multicast = false;
86 if (!is_rvu_otx2(rvu))
87 hw->cap.per_pf_mbox_regs = true;
90 /* Poll an RVU block's register 'offset' for a 'zero'
91  * or 'nonzero' value at the bits specified by 'mask'
93 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
95 unsigned long timeout = jiffies + usecs_to_jiffies(10000);
99 reg = rvu->afreg_base + ((block << 28) | offset);
101 reg_val = readq(reg);
102 if (zero && !(reg_val & mask))
104 if (!zero && (reg_val & mask))
106 if (time_before(jiffies, timeout)) {
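
/* Illustrative sketch (not part of the driver): with 'zero == true' the
 * poll above succeeds once every bit in 'mask' reads back as zero, e.g.
 * waiting for a hypothetical busy bit to clear. Compare rvu_lf_reset()
 * further below for a real caller.
 */
static int __maybe_unused rvu_example_wait_idle(struct rvu *rvu, u64 blkaddr,
						u64 status_reg)
{
	/* Returns 0 once bit 63 of 'status_reg' reads as zero */
	return rvu_poll_reg(rvu, blkaddr, status_reg, BIT_ULL(63), true);
}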
113 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
120 id = find_first_zero_bit(rsrc->bmap, rsrc->max);
124 __set_bit(id, rsrc->bmap);
129 int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
136 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
137 if (start >= rsrc->max)
140 bitmap_set(rsrc->bmap, start, nrsrc);
144 static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
148 if (start >= rsrc->max)
151 bitmap_clear(rsrc->bmap, start, nrsrc);
154 bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
161 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
162 if (start >= rsrc->max)
168 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
173 __clear_bit(id, rsrc->bmap);
176 int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
183 used = bitmap_weight(rsrc->bmap, rsrc->max);
184 return (rsrc->max - used);
187 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
189 rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
190 sizeof(long), GFP_KERNEL);
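
/* Illustrative sketch (not part of the driver): typical life cycle of a
 * resource bitmap as used throughout this file. 'max' must be set before
 * rvu_alloc_bitmap(); rvu_alloc_rsrc_contig() is assumed to return a
 * negative errno when no contiguous range is free.
 */
static int __maybe_unused rvu_example_rsrc_lifecycle(struct rsrc_bmap *rsrc)
{
	int start, err;

	rsrc->max = 32;				/* hypothetical resource count */
	err = rvu_alloc_bitmap(rsrc);
	if (err)
		return err;

	start = rvu_alloc_rsrc_contig(rsrc, 4);	/* grab 4 contiguous IDs */
	if (start >= 0)
		rvu_free_rsrc_contig(rsrc, 4, start); /* release them again */

	kfree(rsrc->bmap);
	return 0;
}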
196 /* Get block LF's HW index from a PF_FUNC's block slot number */
197 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
202 mutex_lock(&rvu->rsrc_lock);
203 for (lf = 0; lf < block->lf.max; lf++) {
204 if (block->fn_map[lf] == pcifunc) {
206 mutex_unlock(&rvu->rsrc_lock);
212 mutex_unlock(&rvu->rsrc_lock);
216 /* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
217 * Some silicon variants of OcteonTx2 support
218 * multiple blocks of the same type.
220 * @pcifunc has to be zero when no LF is yet attached.
222 * If a pcifunc has LFs attached from multiple blocks of the same type,
223 * return the blkaddr of the first encountered block.
225 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
227 int devnum, blkaddr = -ENODEV;
233 blkaddr = BLKADDR_NPC;
236 blkaddr = BLKADDR_NPA;
239 /* For now assume NIX0 */
241 blkaddr = BLKADDR_NIX0;
246 blkaddr = BLKADDR_SSO;
249 blkaddr = BLKADDR_SSOW;
252 blkaddr = BLKADDR_TIM;
255 /* For now assume CPT0 */
257 blkaddr = BLKADDR_CPT0;
263 /* Check if this is an RVU PF or VF */
264 if (pcifunc & RVU_PFVF_FUNC_MASK) {
266 devnum = rvu_get_hwvf(rvu, pcifunc);
269 devnum = rvu_get_pf(pcifunc);
272 /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
275 if (blktype == BLKTYPE_NIX) {
276 reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
277 RVU_PRIV_HWVFX_NIXX_CFG(0);
278 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
280 blkaddr = BLKADDR_NIX0;
284 reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
285 RVU_PRIV_HWVFX_NIXX_CFG(1);
286 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
288 blkaddr = BLKADDR_NIX1;
291 if (blktype == BLKTYPE_CPT) {
292 reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
293 RVU_PRIV_HWVFX_CPTX_CFG(0);
294 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
296 blkaddr = BLKADDR_CPT0;
300 reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
301 RVU_PRIV_HWVFX_CPTX_CFG(1);
302 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
304 blkaddr = BLKADDR_CPT1;
308 if (is_block_implemented(rvu->hw, blkaddr))
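
/* Illustrative sketch (not part of the driver): resolving a block address.
 * With pcifunc == 0 the first implemented block of the type is returned;
 * with a non-zero pcifunc the block whose LF is attached to that function
 * is returned, or -ENODEV if none is.
 */
static void __maybe_unused rvu_example_blkaddr_lookup(struct rvu *rvu,
						      u16 pcifunc)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	if (blkaddr < 0)
		return;	/* no NIX LF attached or block not implemented */
}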
313 static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
314 struct rvu_block *block, u16 pcifunc,
317 int devnum, num_lfs = 0;
321 if (lf >= block->lf.max) {
322 dev_err(&rvu->pdev->dev,
323 "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
324 __func__, lf, block->name, block->lf.max);
328 /* Check if this is for an RVU PF or VF */
329 if (pcifunc & RVU_PFVF_FUNC_MASK) {
331 devnum = rvu_get_hwvf(rvu, pcifunc);
334 devnum = rvu_get_pf(pcifunc);
337 block->fn_map[lf] = attach ? pcifunc : 0;
339 switch (block->addr) {
341 pfvf->npalf = attach;
342 num_lfs = pfvf->npalf;
346 pfvf->nixlf = attach;
347 num_lfs = pfvf->nixlf;
350 attach ? pfvf->sso++ : pfvf->sso--;
354 attach ? pfvf->ssow++ : pfvf->ssow--;
355 num_lfs = pfvf->ssow;
358 attach ? pfvf->timlfs++ : pfvf->timlfs--;
359 num_lfs = pfvf->timlfs;
362 attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
363 num_lfs = pfvf->cptlfs;
366 attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
367 num_lfs = pfvf->cpt1_lfs;
371 reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
372 rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
375 inline int rvu_get_pf(u16 pcifunc)
377 return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
380 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
384 /* Get numVFs attached to this PF and first HWVF */
385 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
386 *numvfs = (cfg >> 12) & 0xFF;
390 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
395 pf = rvu_get_pf(pcifunc);
396 func = pcifunc & RVU_PFVF_FUNC_MASK;
398 /* Get first HWVF attached to this PF */
399 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
401 return ((cfg & 0xFFF) + func - 1);
404 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
406 /* Check if it is a PF or VF */
407 if (pcifunc & RVU_PFVF_FUNC_MASK)
408 return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
410 return &rvu->pf[rvu_get_pf(pcifunc)];
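
/* Worked example (illustrative; shift/mask values assumed from rvu.h):
 * with RVU_PFVF_PF_SHIFT == 10 and RVU_PFVF_FUNC_MASK == 0x3FF, pcifunc
 * 0x0401 decodes as PF1 (0x0401 >> 10 == 1) with func == 1, i.e. VF0 of
 * PF1, so rvu_get_pfvf() indexes rvu->hwvf[]. pcifunc 0x0400 has
 * func == 0 and indexes rvu->pf[1] instead.
 */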
413 static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
418 pf = rvu_get_pf(pcifunc);
419 if (pf >= rvu->hw->total_pfs)
422 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
425 /* Check if VF is within number of VFs attached to this PF */
426 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
427 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
428 nvfs = (cfg >> 12) & 0xFF;
435 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
437 struct rvu_block *block;
439 if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
442 block = &hw->block[blkaddr];
443 return block->implemented;
446 static void rvu_check_block_implemented(struct rvu *rvu)
448 struct rvu_hwinfo *hw = rvu->hw;
449 struct rvu_block *block;
453 /* For each block check if 'implemented' bit is set */
454 for (blkid = 0; blkid < BLK_COUNT; blkid++) {
455 block = &hw->block[blkid];
456 cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
457 if (cfg & BIT_ULL(11))
458 block->implemented = true;
462 static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
464 rvu_write64(rvu, BLKADDR_RVUM,
465 RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
469 static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
471 rvu_write64(rvu, BLKADDR_RVUM,
472 RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
475 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
479 if (!block->implemented)
482 rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
483 err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
488 static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
490 struct rvu_block *block = &rvu->hw->block[blkaddr];
492 if (!block->implemented)
495 rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
496 rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
499 static void rvu_reset_all_blocks(struct rvu *rvu)
501 /* Do a HW reset of all RVU blocks */
502 rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
503 rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
504 rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
505 rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
506 rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
507 rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
508 rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
509 rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
510 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
511 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
512 rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
513 rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
514 rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
517 static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
519 struct rvu_pfvf *pfvf;
523 for (lf = 0; lf < block->lf.max; lf++) {
524 cfg = rvu_read64(rvu, block->addr,
525 block->lfcfg_reg | (lf << block->lfshift));
526 if (!(cfg & BIT_ULL(63)))
529 /* Set this resource as being used */
530 __set_bit(lf, block->lf.bmap);
532 /* Get to whom this LF is attached */
533 pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
534 rvu_update_rsrc_map(rvu, pfvf, block,
535 (cfg >> 8) & 0xFFFF, lf, true);
537 /* Set start MSIX vector for this LF within this PF/VF */
538 rvu_set_msix_offset(rvu, pfvf, block, lf);
542 static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
551 "PF%d:VF%d is configured with zero msix vectors, %d\n",
558 min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
560 min_vecs = RVU_PF_INT_VEC_CNT;
562 if (nvecs >= min_vecs)
565 "PF%d is configured with too few vectors, %d, min is %d\n",
566 pf, nvecs, min_vecs);
569 static int rvu_setup_msix_resources(struct rvu *rvu)
571 struct rvu_hwinfo *hw = rvu->hw;
572 int pf, vf, numvfs, hwvf, err;
573 int nvecs, offset, max_msix;
574 struct rvu_pfvf *pfvf;
578 for (pf = 0; pf < hw->total_pfs; pf++) {
579 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
580 /* If PF is not enabled, nothing to do */
581 if (!((cfg >> 20) & 0x01))
584 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
587 /* Get num of MSIX vectors attached to this PF */
588 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
589 pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
590 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
592 /* Alloc msix bitmap for this PF */
593 err = rvu_alloc_bitmap(&pfvf->msix);
597 /* Allocate memory for MSIX vector to RVU block LF mapping */
598 pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
599 sizeof(u16), GFP_KERNEL);
600 if (!pfvf->msix_lfmap)
603 /* For PF0 (AF), firmware will set MSIX vector offsets for
604 * the AF, block AF and PF0_INT vectors, so jump to VFs.
609 /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
610 * These are allocated on driver init and never freed,
611 * so no need to set 'msix_lfmap' for these.
613 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
614 nvecs = (cfg >> 12) & 0xFF;
616 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
617 rvu_write64(rvu, BLKADDR_RVUM,
618 RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
620 /* Alloc msix bitmap for VFs */
621 for (vf = 0; vf < numvfs; vf++) {
622 pfvf = &rvu->hwvf[hwvf + vf];
623 /* Get num of MSIX vectors attached to this VF */
624 cfg = rvu_read64(rvu, BLKADDR_RVUM,
625 RVU_PRIV_PFX_MSIX_CFG(pf));
626 pfvf->msix.max = (cfg & 0xFFF) + 1;
627 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
629 /* Alloc msix bitmap for this VF */
630 err = rvu_alloc_bitmap(&pfvf->msix);
635 devm_kcalloc(rvu->dev, pfvf->msix.max,
636 sizeof(u16), GFP_KERNEL);
637 if (!pfvf->msix_lfmap)
640 /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
641 * These are allocated on driver init and never freed,
642 * so no need to set 'msix_lfmap' for these.
644 cfg = rvu_read64(rvu, BLKADDR_RVUM,
645 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
646 nvecs = (cfg >> 12) & 0xFF;
648 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
649 rvu_write64(rvu, BLKADDR_RVUM,
650 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
655 /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
656 * create an IOMMU mapping for the physical address configured by
657 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
659 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
660 max_msix = cfg & 0xFFFFF;
661 if (rvu->fwdata && rvu->fwdata->msixtr_base)
662 phy_addr = rvu->fwdata->msixtr_base;
664 phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
666 iova = dma_map_resource(rvu->dev, phy_addr,
667 max_msix * PCI_MSIX_ENTRY_SIZE,
668 DMA_BIDIRECTIONAL, 0);
670 if (dma_mapping_error(rvu->dev, iova))
673 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
674 rvu->msix_base_iova = iova;
675 rvu->msixtr_base_phy = phy_addr;
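
/* Worked example (illustrative): if RVU_PRIV_CONST reports max_msix ==
 * 0x8000 and MSIX entries are 16 bytes (PCI_MSIX_ENTRY_SIZE), the mapping
 * above spans 0x8000 * 16 == 512KB of IOVA space; rvu_free_hw_resources()
 * must unmap exactly the same size on teardown.
 */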
680 static void rvu_reset_msix(struct rvu *rvu)
682 /* Restore msixtr base register */
683 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
684 rvu->msixtr_base_phy);
687 static void rvu_free_hw_resources(struct rvu *rvu)
689 struct rvu_hwinfo *hw = rvu->hw;
690 struct rvu_block *block;
691 struct rvu_pfvf *pfvf;
695 rvu_npa_freemem(rvu);
696 rvu_npc_freemem(rvu);
697 rvu_nix_freemem(rvu);
699 /* Free block LF bitmaps */
700 for (id = 0; id < BLK_COUNT; id++) {
701 block = &hw->block[id];
702 kfree(block->lf.bmap);
705 /* Free MSIX bitmaps */
706 for (id = 0; id < hw->total_pfs; id++) {
708 kfree(pfvf->msix.bmap);
711 for (id = 0; id < hw->total_vfs; id++) {
712 pfvf = &rvu->hwvf[id];
713 kfree(pfvf->msix.bmap);
716 /* Unmap MSIX vector base IOVA mapping */
717 if (!rvu->msix_base_iova)
719 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
720 max_msix = cfg & 0xFFFFF;
721 dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
722 max_msix * PCI_MSIX_ENTRY_SIZE,
723 DMA_BIDIRECTIONAL, 0);
726 mutex_destroy(&rvu->rsrc_lock);
729 static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
731 struct rvu_hwinfo *hw = rvu->hw;
732 int pf, vf, numvfs, hwvf;
733 struct rvu_pfvf *pfvf;
736 for (pf = 0; pf < hw->total_pfs; pf++) {
737 /* For PF0 (AF), assign MAC addresses only to VFs (LBK VFs) */
741 if (!is_pf_cgxmapped(rvu, pf))
743 /* Assign MAC address to PF */
745 if (rvu->fwdata && pf < PF_MACNUM_MAX) {
746 mac = &rvu->fwdata->pf_macs[pf];
748 u64_to_ether_addr(*mac, pfvf->mac_addr);
750 eth_random_addr(pfvf->mac_addr);
752 eth_random_addr(pfvf->mac_addr);
754 ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
757 /* Assign MAC addresses to VFs */
758 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
759 for (vf = 0; vf < numvfs; vf++, hwvf++) {
760 pfvf = &rvu->hwvf[hwvf];
761 if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
762 mac = &rvu->fwdata->vf_macs[hwvf];
764 u64_to_ether_addr(*mac, pfvf->mac_addr);
766 eth_random_addr(pfvf->mac_addr);
768 eth_random_addr(pfvf->mac_addr);
770 ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
775 static int rvu_fwdata_init(struct rvu *rvu)
780 /* Get firmware data base address */
781 err = cgx_get_fwdata_base(&fwdbase);
784 rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
787 if (!is_rvu_fwdata_valid(rvu)) {
789 "Mismatch in 'fwdata' struct btw kernel and firmware\n");
790 iounmap(rvu->fwdata);
796 dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
800 static void rvu_fwdata_exit(struct rvu *rvu)
803 iounmap(rvu->fwdata);
806 static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
808 struct rvu_hwinfo *hw = rvu->hw;
809 struct rvu_block *block;
813 /* Init NIX LF's bitmap */
814 block = &hw->block[blkaddr];
815 if (!block->implemented)
817 blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
818 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
819 block->lf.max = cfg & 0xFFF;
820 block->addr = blkaddr;
821 block->type = BLKTYPE_NIX;
823 block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
824 block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
825 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
826 block->lfcfg_reg = NIX_PRIV_LFX_CFG;
827 block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
828 block->lfreset_reg = NIX_AF_LF_RST;
829 sprintf(block->name, "NIX%d", blkid);
830 rvu->nix_blkaddr[blkid] = blkaddr;
831 return rvu_alloc_bitmap(&block->lf);
834 static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
836 struct rvu_hwinfo *hw = rvu->hw;
837 struct rvu_block *block;
841 /* Init CPT LF's bitmap */
842 block = &hw->block[blkaddr];
843 if (!block->implemented)
845 blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
846 cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
847 block->lf.max = cfg & 0xFF;
848 block->addr = blkaddr;
849 block->type = BLKTYPE_CPT;
850 block->multislot = true;
852 block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
853 block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
854 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
855 block->lfcfg_reg = CPT_PRIV_LFX_CFG;
856 block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
857 block->lfreset_reg = CPT_AF_LF_RST;
858 sprintf(block->name, "CPT%d", blkid);
859 return rvu_alloc_bitmap(&block->lf);
862 static void rvu_get_lbk_bufsize(struct rvu *rvu)
864 struct pci_dev *pdev = NULL;
868 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
869 PCI_DEVID_OCTEONTX2_LBK, pdev);
873 base = pci_ioremap_bar(pdev, 0);
877 lbk_const = readq(base + LBK_CONST);
879 /* Cache the FIFO size */
880 rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
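
/* Illustrative note (not part of the driver): FIELD_GET() extracts the
 * field selected by a mask and shifts it down automatically. E.g. with a
 * hypothetical mask of GENMASK_ULL(23, 16) and lbk_const == 0x00990000,
 * FIELD_GET() would yield 0x99.
 */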
887 static int rvu_setup_hw_resources(struct rvu *rvu)
889 struct rvu_hwinfo *hw = rvu->hw;
890 struct rvu_block *block;
894 /* Get HW supported max RVU PF & VF count */
895 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
896 hw->total_pfs = (cfg >> 32) & 0xFF;
897 hw->total_vfs = (cfg >> 20) & 0xFFF;
898 hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
900 /* Init NPA LF's bitmap */
901 block = &hw->block[BLKADDR_NPA];
902 if (!block->implemented)
904 cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
905 block->lf.max = (cfg >> 16) & 0xFFF;
906 block->addr = BLKADDR_NPA;
907 block->type = BLKTYPE_NPA;
909 block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
910 block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
911 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
912 block->lfcfg_reg = NPA_PRIV_LFX_CFG;
913 block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
914 block->lfreset_reg = NPA_AF_LF_RST;
915 sprintf(block->name, "NPA");
916 err = rvu_alloc_bitmap(&block->lf);
921 err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
924 err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
928 /* Init SSO group's bitmap */
929 block = &hw->block[BLKADDR_SSO];
930 if (!block->implemented)
932 cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
933 block->lf.max = cfg & 0xFFFF;
934 block->addr = BLKADDR_SSO;
935 block->type = BLKTYPE_SSO;
936 block->multislot = true;
938 block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
939 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
940 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
941 block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
942 block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
943 block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
944 sprintf(block->name, "SSO GROUP");
945 err = rvu_alloc_bitmap(&block->lf);
950 /* Init SSO workslot's bitmap */
951 block = &hw->block[BLKADDR_SSOW];
952 if (!block->implemented)
954 block->lf.max = (cfg >> 56) & 0xFF;
955 block->addr = BLKADDR_SSOW;
956 block->type = BLKTYPE_SSOW;
957 block->multislot = true;
959 block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
960 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
961 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
962 block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
963 block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
964 block->lfreset_reg = SSOW_AF_LF_HWS_RST;
965 sprintf(block->name, "SSOWS");
966 err = rvu_alloc_bitmap(&block->lf);
971 /* Init TIM LF's bitmap */
972 block = &hw->block[BLKADDR_TIM];
973 if (!block->implemented)
975 cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
976 block->lf.max = cfg & 0xFFFF;
977 block->addr = BLKADDR_TIM;
978 block->type = BLKTYPE_TIM;
979 block->multislot = true;
981 block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
982 block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
983 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
984 block->lfcfg_reg = TIM_PRIV_LFX_CFG;
985 block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
986 block->lfreset_reg = TIM_AF_LF_RST;
987 sprintf(block->name, "TIM");
988 err = rvu_alloc_bitmap(&block->lf);
993 err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
996 err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
1000 /* Allocate memory for PFVF data */
1001 rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
1002 sizeof(struct rvu_pfvf), GFP_KERNEL);
1006 rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
1007 sizeof(struct rvu_pfvf), GFP_KERNEL);
1011 mutex_init(&rvu->rsrc_lock);
1013 rvu_fwdata_init(rvu);
1015 err = rvu_setup_msix_resources(rvu);
1019 for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1020 block = &hw->block[blkid];
1021 if (!block->lf.bmap)
1024 /* Allocate memory for block LF/slot to pcifunc mapping info */
1025 block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
1026 sizeof(u16), GFP_KERNEL);
1027 if (!block->fn_map) {
1032 /* Scan all blocks to check if low-level firmware has
1033 * already provisioned any of the resources to a PF/VF.
1035 rvu_scan_block(rvu, block);
1038 err = rvu_set_channels_base(rvu);
1042 err = rvu_npc_init(rvu);
1046 err = rvu_cgx_init(rvu);
1050 /* Assign MACs for CGX mapped functions */
1051 rvu_setup_pfvf_macaddress(rvu);
1053 err = rvu_npa_init(rvu);
1057 rvu_get_lbk_bufsize(rvu);
1059 err = rvu_nix_init(rvu);
1063 rvu_program_channels(rvu);
1068 rvu_nix_freemem(rvu);
1070 rvu_npa_freemem(rvu);
1074 rvu_npc_freemem(rvu);
1075 rvu_fwdata_exit(rvu);
1077 rvu_reset_msix(rvu);
1081 /* NPA and NIX admin queue APIs */
1082 void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
1087 qmem_free(rvu->dev, aq->inst);
1088 qmem_free(rvu->dev, aq->res);
1089 devm_kfree(rvu->dev, aq);
1092 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
1093 int qsize, int inst_size, int res_size)
1095 struct admin_queue *aq;
1098 *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
1103 /* Alloc memory for instructions, i.e. the AQ */
1104 err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
1106 devm_kfree(rvu->dev, aq);
1110 /* Alloc memory for results */
1111 err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
1113 rvu_aq_free(rvu, aq);
1117 spin_lock_init(&aq->lock);
1121 int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
1122 struct ready_msg_rsp *rsp)
1125 rsp->rclk_freq = rvu->fwdata->rclk;
1126 rsp->sclk_freq = rvu->fwdata->sclk;
1131 /* Get current count of an RVU block's LFs/slots
1132 * provisioned to a given RVU func.
1134 u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
1138 return pfvf->npalf ? 1 : 0;
1141 return pfvf->nixlf ? 1 : 0;
1147 return pfvf->timlfs;
1149 return pfvf->cptlfs;
1151 return pfvf->cpt1_lfs;
1156 /* Return true if LFs of block type are attached to pcifunc */
1157 static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
1161 return pfvf->npalf ? 1 : 0;
1163 return pfvf->nixlf ? 1 : 0;
1167 return !!pfvf->ssow;
1169 return !!pfvf->timlfs;
1171 return pfvf->cptlfs || pfvf->cpt1_lfs;
1177 bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
1179 struct rvu_pfvf *pfvf;
1181 if (!is_pf_func_valid(rvu, pcifunc))
1184 pfvf = rvu_get_pfvf(rvu, pcifunc);
1186 /* Check if this PFFUNC has an LF of type blktype attached */
1187 if (!is_blktype_attached(pfvf, blktype))
1193 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
1194 int pcifunc, int slot)
1198 val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
1199 rvu_write64(rvu, block->addr, block->lookup_reg, val);
1200 /* Wait for the lookup to finish */
1201 /* TODO: put some timeout here */
1202 while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
1205 val = rvu_read64(rvu, block->addr, block->lookup_reg);
1207 /* Check LF valid bit */
1208 if (!(val & (1ULL << 12)))
1211 return (val & 0xFFF);
1214 static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
1216 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1217 struct rvu_hwinfo *hw = rvu->hw;
1218 struct rvu_block *block;
1219 int slot, lf, num_lfs;
1222 blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
1226 if (blktype == BLKTYPE_NIX)
1227 rvu_nix_reset_mac(pfvf, pcifunc);
1229 block = &hw->block[blkaddr];
1231 num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1235 for (slot = 0; slot < num_lfs; slot++) {
1236 lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
1237 if (lf < 0) /* This should never happen */
1240 /* Disable the LF */
1241 rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1242 (lf << block->lfshift), 0x00ULL);
1244 /* Update SW maintained mapping info as well */
1245 rvu_update_rsrc_map(rvu, pfvf, block,
1246 pcifunc, lf, false);
1248 /* Free the resource */
1249 rvu_free_rsrc(&block->lf, lf);
1251 /* Clear MSIX vector offset for this LF */
1252 rvu_clear_msix_offset(rvu, pfvf, block, lf);
1256 static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
1259 struct rvu_hwinfo *hw = rvu->hw;
1260 bool detach_all = true;
1261 struct rvu_block *block;
1264 mutex_lock(&rvu->rsrc_lock);
1266 /* Check for partial resource detach */
1267 if (detach && detach->partial)
1270 /* Check for RVU block's LFs attached to this func,
1271 * if so, detach them.
1273 for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1274 block = &hw->block[blkid];
1275 if (!block->lf.bmap)
1277 if (!detach_all && detach) {
1278 if (blkid == BLKADDR_NPA && !detach->npalf)
1280 else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
1282 else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
1284 else if ((blkid == BLKADDR_SSO) && !detach->sso)
1286 else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
1288 else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
1290 else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
1292 else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
1295 rvu_detach_block(rvu, pcifunc, block->type);
1298 mutex_unlock(&rvu->rsrc_lock);
1302 int rvu_mbox_handler_detach_resources(struct rvu *rvu,
1303 struct rsrc_detach *detach,
1304 struct msg_rsp *rsp)
1306 return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
1309 static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
1311 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1312 int blkaddr = BLKADDR_NIX0, vf;
1313 struct rvu_pfvf *pf;
1315 /* All CGX-mapped PFs are assigned their NIX block during init */
1316 if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
1317 pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1318 blkaddr = pf->nix_blkaddr;
1319 } else if (is_afvf(pcifunc)) {
1321 /* Assign NIX based on VF number. All even-numbered VFs get
1322 * NIX0 and odd-numbered VFs get NIX1.
1324 blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
1325 /* NIX1 is not present on all silicons */
1326 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1327 blkaddr = BLKADDR_NIX0;
1332 pfvf->nix_blkaddr = BLKADDR_NIX1;
1333 pfvf->nix_rx_intf = NIX_INTFX_RX(1);
1334 pfvf->nix_tx_intf = NIX_INTFX_TX(1);
1338 pfvf->nix_blkaddr = BLKADDR_NIX0;
1339 pfvf->nix_rx_intf = NIX_INTFX_RX(0);
1340 pfvf->nix_tx_intf = NIX_INTFX_TX(0);
1344 return pfvf->nix_blkaddr;
1347 static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
1348 u16 pcifunc, struct rsrc_attach *attach)
1354 blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
1357 if (attach->hdr.ver < RVU_MULTI_BLK_VER)
1358 return rvu_get_blkaddr(rvu, blktype, 0);
1359 blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
1361 if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
1365 return rvu_get_blkaddr(rvu, blktype, 0);
1368 if (is_block_implemented(rvu->hw, blkaddr))
1374 static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
1375 int num_lfs, struct rsrc_attach *attach)
1377 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1378 struct rvu_hwinfo *hw = rvu->hw;
1379 struct rvu_block *block;
1387 blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
1391 block = &hw->block[blkaddr];
1392 if (!block->lf.bmap)
1395 for (slot = 0; slot < num_lfs; slot++) {
1396 /* Allocate the resource */
1397 lf = rvu_alloc_rsrc(&block->lf);
1401 cfg = (1ULL << 63) | (pcifunc << 8) | slot;
1402 rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1403 (lf << block->lfshift), cfg);
1404 rvu_update_rsrc_map(rvu, pfvf, block,
1407 /* Set start MSIX vector for this LF within this PF/VF */
1408 rvu_set_msix_offset(rvu, pfvf, block, lf);
1412 static int rvu_check_rsrc_availability(struct rvu *rvu,
1413 struct rsrc_attach *req, u16 pcifunc)
1415 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1416 int free_lfs, mappedlfs, blkaddr;
1417 struct rvu_hwinfo *hw = rvu->hw;
1418 struct rvu_block *block;
1420 /* Only one NPA LF can be attached */
1421 if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1422 block = &hw->block[BLKADDR_NPA];
1423 free_lfs = rvu_rsrc_free_count(&block->lf);
1426 } else if (req->npalf) {
1427 dev_err(&rvu->pdev->dev,
1428 "Func 0x%x: Invalid req, already has NPA\n",
1433 /* Only one NIX LF can be attached */
1434 if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1435 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
1439 block = &hw->block[blkaddr];
1440 free_lfs = rvu_rsrc_free_count(&block->lf);
1443 } else if (req->nixlf) {
1444 dev_err(&rvu->pdev->dev,
1445 "Func 0x%x: Invalid req, already has NIX\n",
1451 block = &hw->block[BLKADDR_SSO];
1452 /* Is the request within limits? */
1453 if (req->sso > block->lf.max) {
1454 dev_err(&rvu->pdev->dev,
1455 "Func 0x%x: Invalid SSO req, %d > max %d\n",
1456 pcifunc, req->sso, block->lf.max);
1459 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1460 free_lfs = rvu_rsrc_free_count(&block->lf);
1461 /* Check if additional resources are available */
1462 if (req->sso > mappedlfs &&
1463 ((req->sso - mappedlfs) > free_lfs))
1468 block = &hw->block[BLKADDR_SSOW];
1469 if (req->ssow > block->lf.max) {
1470 dev_err(&rvu->pdev->dev,
1471 "Func 0x%x: Invalid SSOW req, %d > max %d\n",
1472 pcifunc, req->ssow, block->lf.max);
1475 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1476 free_lfs = rvu_rsrc_free_count(&block->lf);
1477 if (req->ssow > mappedlfs &&
1478 ((req->ssow - mappedlfs) > free_lfs))
1483 block = &hw->block[BLKADDR_TIM];
1484 if (req->timlfs > block->lf.max) {
1485 dev_err(&rvu->pdev->dev,
1486 "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1487 pcifunc, req->timlfs, block->lf.max);
1490 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1491 free_lfs = rvu_rsrc_free_count(&block->lf);
1492 if (req->timlfs > mappedlfs &&
1493 ((req->timlfs - mappedlfs) > free_lfs))
1498 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
1502 block = &hw->block[blkaddr];
1503 if (req->cptlfs > block->lf.max) {
1504 dev_err(&rvu->pdev->dev,
1505 "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1506 pcifunc, req->cptlfs, block->lf.max);
1509 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1510 free_lfs = rvu_rsrc_free_count(&block->lf);
1511 if (req->cptlfs > mappedlfs &&
1512 ((req->cptlfs - mappedlfs) > free_lfs))
1519 dev_info(rvu->dev, "Request for %s failed\n", block->name);
1523 static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
1524 struct rsrc_attach *attach)
1526 int blkaddr, num_lfs;
1528 blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
1529 attach->hdr.pcifunc, attach);
1533 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
1535 /* Does the requester already have LFs from the given block? */
1539 int rvu_mbox_handler_attach_resources(struct rvu *rvu,
1540 struct rsrc_attach *attach,
1541 struct msg_rsp *rsp)
1543 u16 pcifunc = attach->hdr.pcifunc;
1546 /* If first request, detach all existing attached resources */
1547 if (!attach->modify)
1548 rvu_detach_rsrcs(rvu, NULL, pcifunc);
1550 mutex_lock(&rvu->rsrc_lock);
1552 /* Check if the request can be accommodated */
1553 err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
1557 /* Now attach the requested resources */
1559 rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
1562 rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
1565 /* An RVU func doesn't know which exact LF or slot is attached
1566 * to it; it always sees them as slots 0, 1, 2. So for a 'modify'
1567 * request, simply detach all existing attached LFs/slots
1568 * and attach afresh.
1571 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
1572 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
1573 attach->sso, attach);
1578 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
1579 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
1580 attach->ssow, attach);
1583 if (attach->timlfs) {
1585 rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
1586 rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
1587 attach->timlfs, attach);
1590 if (attach->cptlfs) {
1591 if (attach->modify &&
1592 rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
1593 rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
1594 rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
1595 attach->cptlfs, attach);
1599 mutex_unlock(&rvu->rsrc_lock);
1603 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1604 int blkaddr, int lf)
1609 return MSIX_VECTOR_INVALID;
1611 for (vec = 0; vec < pfvf->msix.max; vec++) {
1612 if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1615 return MSIX_VECTOR_INVALID;
1618 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1619 struct rvu_block *block, int lf)
1621 u16 nvecs, vec, offset;
1624 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1625 (lf << block->lfshift));
1626 nvecs = (cfg >> 12) & 0xFF;
1628 /* Check and alloc MSIX vectors, must be contiguous */
1629 if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
1632 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
1634 /* Config MSIX offset in LF */
1635 rvu_write64(rvu, block->addr, block->msixcfg_reg |
1636 (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
1638 /* Update the bitmap as well */
1639 for (vec = 0; vec < nvecs; vec++)
1640 pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
1643 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1644 struct rvu_block *block, int lf)
1646 u16 nvecs, vec, offset;
1649 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1650 (lf << block->lfshift));
1651 nvecs = (cfg >> 12) & 0xFF;
1653 /* Clear MSIX offset in LF */
1654 rvu_write64(rvu, block->addr, block->msixcfg_reg |
1655 (lf << block->lfshift), cfg & ~0x7FFULL);
1657 offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1659 /* Update the mapping */
1660 for (vec = 0; vec < nvecs; vec++)
1661 pfvf->msix_lfmap[offset + vec] = 0;
1663 /* Free the same in MSIX bitmap */
1664 rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
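
/* Illustrative note (not part of the driver): MSIX_BLKLF() is assumed to
 * pack (block address, LF number) into a single u16 key. Setting an offset
 * stamps that key into every msix_lfmap[] slot the LF owns; clearing, as
 * above, zeroes those slots and returns the vectors to the pfvf->msix
 * bitmap so they can be handed to the next LF.
 */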
1667 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
1668 struct msix_offset_rsp *rsp)
1670 struct rvu_hwinfo *hw = rvu->hw;
1671 u16 pcifunc = req->hdr.pcifunc;
1672 struct rvu_pfvf *pfvf;
1673 int lf, slot, blkaddr;
1675 pfvf = rvu_get_pfvf(rvu, pcifunc);
1676 if (!pfvf->msix.bmap)
1679 /* Set MSIX offsets for each block's LFs attached to this PF/VF */
1680 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
1681 rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
1683 /* Get the blkaddr from which NIX LFs are attached to this pcifunc */
1684 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1686 rsp->nix_msixoff = MSIX_VECTOR_INVALID;
1688 lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1689 rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
1692 rsp->sso = pfvf->sso;
1693 for (slot = 0; slot < rsp->sso; slot++) {
1694 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
1695 rsp->sso_msixoff[slot] =
1696 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
1699 rsp->ssow = pfvf->ssow;
1700 for (slot = 0; slot < rsp->ssow; slot++) {
1701 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
1702 rsp->ssow_msixoff[slot] =
1703 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
1706 rsp->timlfs = pfvf->timlfs;
1707 for (slot = 0; slot < rsp->timlfs; slot++) {
1708 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
1709 rsp->timlf_msixoff[slot] =
1710 rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
1713 rsp->cptlfs = pfvf->cptlfs;
1714 for (slot = 0; slot < rsp->cptlfs; slot++) {
1715 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
1716 rsp->cptlf_msixoff[slot] =
1717 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
1720 rsp->cpt1_lfs = pfvf->cpt1_lfs;
1721 for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
1722 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
1723 rsp->cpt1_lf_msixoff[slot] =
1724 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
1730 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1731 struct msg_rsp *rsp)
1733 u16 pcifunc = req->hdr.pcifunc;
1737 vf = pcifunc & RVU_PFVF_FUNC_MASK;
1738 cfg = rvu_read64(rvu, BLKADDR_RVUM,
1739 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
1740 numvfs = (cfg >> 12) & 0xFF;
1742 if (vf && vf <= numvfs)
1743 __rvu_flr_handler(rvu, pcifunc);
1745 return RVU_INVALID_VF_ID;
1750 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
1751 struct get_hw_cap_rsp *rsp)
1753 struct rvu_hwinfo *hw = rvu->hw;
1755 rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
1756 rsp->nix_shaping = hw->cap.nix_shaping;
1761 int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
1762 struct msg_rsp *rsp)
1764 struct rvu_hwinfo *hw = rvu->hw;
1765 u16 pcifunc = req->hdr.pcifunc;
1766 struct rvu_pfvf *pfvf;
1770 /* Only PF can add VF permissions */
1771 if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
1774 target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
1775 pfvf = rvu_get_pfvf(rvu, target);
1777 if (req->flags & RESET_VF_PERM) {
1778 pfvf->flags &= RVU_CLEAR_VF_PERM;
1779 } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
1780 (req->flags & VF_TRUSTED)) {
1781 change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
1782 /* disable multicast and promisc entries */
1783 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
1784 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
1787 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1791 npc_enadis_default_mce_entry(rvu, target, nixlf,
1792 NIXLF_ALLMULTI_ENTRY,
1794 npc_enadis_default_mce_entry(rvu, target, nixlf,
1795 NIXLF_PROMISC_ENTRY,
1803 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
1804 struct mbox_msghdr *req)
1806 struct rvu *rvu = pci_get_drvdata(mbox->pdev);
1808 /* Check if valid; if not, reply with an invalid msg */
1809 if (req->sig != OTX2_MBOX_REQ_SIG)
1813 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
1815 struct _rsp_type *rsp; \
1818 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
1820 sizeof(struct _rsp_type)); \
1821 /* some handlers should complete even if reply */ \
1822 /* could not be allocated */ \
1824 _id != MBOX_MSG_DETACH_RESOURCES && \
1825 _id != MBOX_MSG_NIX_TXSCH_FREE && \
1826 _id != MBOX_MSG_VF_FLR) \
1829 rsp->hdr.id = _id; \
1830 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
1831 rsp->hdr.pcifunc = req->pcifunc; \
1835 err = rvu_mbox_handler_ ## _fn_name(rvu, \
1836 (struct _req_type *)req, \
1839 rsp->hdr.rc = err; \
1841 trace_otx2_msg_process(mbox->pdev, _id, err); \
1842 return rsp ? err : -ENOMEM; \
1849 otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
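
/* Illustrative expansion (not part of the driver; ID value assumed from
 * the mbox header): for an entry like M(READY, 0x001, ready, msg_req,
 * ready_msg_rsp), the macro above generates a case that allocates a
 * 'struct ready_msg_rsp' on the reply queue, fills rsp->hdr and then
 * calls rvu_mbox_handler_ready() with the request and response.
 */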
1854 static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
1856 struct rvu *rvu = mwork->rvu;
1857 int offset, err, id, devid;
1858 struct otx2_mbox_dev *mdev;
1859 struct mbox_hdr *req_hdr;
1860 struct mbox_msghdr *msg;
1861 struct mbox_wq_info *mw;
1862 struct otx2_mbox *mbox;
1866 mw = &rvu->afpf_wq_info;
1869 mw = &rvu->afvf_wq_info;
1875 devid = mwork - mw->mbox_wrk;
1877 mdev = &mbox->dev[devid];
1879 /* Process received mbox messages */
1880 req_hdr = mdev->mbase + mbox->rx_start;
1881 if (mw->mbox_wrk[devid].num_msgs == 0)
1884 offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
1886 for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
1887 msg = mdev->mbase + offset;
1889 /* Set which PF/VF sent this message based on mbox IRQ */
1893 ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
1894 msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
1898 ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
1899 msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
1903 err = rvu_process_mbox_msg(mbox, devid, msg);
1905 offset = mbox->rx_start + msg->next_msgoff;
1909 if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
1910 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
1911 err, otx2_mbox_id2name(msg->id),
1912 msg->id, rvu_get_pf(msg->pcifunc),
1913 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1915 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
1916 err, otx2_mbox_id2name(msg->id),
1919 mw->mbox_wrk[devid].num_msgs = 0;
1921 /* Send mbox responses to VF/PF */
1922 otx2_mbox_msg_send(mbox, devid);
1925 static inline void rvu_afpf_mbox_handler(struct work_struct *work)
1927 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1929 __rvu_mbox_handler(mwork, TYPE_AFPF);
1932 static inline void rvu_afvf_mbox_handler(struct work_struct *work)
1934 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1936 __rvu_mbox_handler(mwork, TYPE_AFVF);
1939 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
1941 struct rvu *rvu = mwork->rvu;
1942 struct otx2_mbox_dev *mdev;
1943 struct mbox_hdr *rsp_hdr;
1944 struct mbox_msghdr *msg;
1945 struct mbox_wq_info *mw;
1946 struct otx2_mbox *mbox;
1947 int offset, id, devid;
1951 mw = &rvu->afpf_wq_info;
1954 mw = &rvu->afvf_wq_info;
1960 devid = mwork - mw->mbox_wrk_up;
1961 mbox = &mw->mbox_up;
1962 mdev = &mbox->dev[devid];
1964 rsp_hdr = mdev->mbase + mbox->rx_start;
1965 if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
1966 dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
1970 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
1972 for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
1973 msg = mdev->mbase + offset;
1975 if (msg->id >= MBOX_MSG_MAX) {
1977 "Mbox msg with unknown ID 0x%x\n", msg->id);
1981 if (msg->sig != OTX2_MBOX_RSP_SIG) {
1983 "Mbox msg with wrong signature %x, ID 0x%x\n",
1989 case MBOX_MSG_CGX_LINK_EVENT:
1994 "Mbox msg response has err %d, ID 0x%x\n",
1999 offset = mbox->rx_start + msg->next_msgoff;
2002 mw->mbox_wrk_up[devid].up_num_msgs = 0;
2004 otx2_mbox_reset(mbox, devid);
2007 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2009 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2011 __rvu_mbox_up_handler(mwork, TYPE_AFPF);
2014 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2016 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2018 __rvu_mbox_up_handler(mwork, TYPE_AFVF);
2021 static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
2024 struct rvu_hwinfo *hw = rvu->hw;
2028 /* On the cn10k platform, a PF's VF mailbox regions follow the
2029 * PF <-> AF mailbox region, whereas on OcteonTx2 the base is read
2030 * from the RVU_PF_VF_BAR4_ADDR register.
2032 if (type == TYPE_AFVF) {
2033 for (region = 0; region < num; region++) {
2034 if (hw->cap.per_pf_mbox_regs) {
2035 bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2036 RVU_AF_PFX_BAR4_ADDR(0)) +
2038 bar4 += region * MBOX_SIZE;
2040 bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
2041 bar4 += region * MBOX_SIZE;
2043 mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2044 if (!mbox_addr[region])
2050 /* On the cn10k platform, a PF's AF <-> PF mailbox region is read
2051 * from per-PF registers, whereas on OcteonTx2 it is read from the
2052 * RVU_AF_PF_BAR4_ADDR register.
2054 for (region = 0; region < num; region++) {
2055 if (hw->cap.per_pf_mbox_regs) {
2056 bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2057 RVU_AF_PFX_BAR4_ADDR(region));
2059 bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2060 RVU_AF_PF_BAR4_ADDR);
2061 bar4 += region * MBOX_SIZE;
2063 mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2064 if (!mbox_addr[region])
2071 iounmap((void __iomem *)mbox_addr[region]);
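
/* Worked example (illustrative; MBOX_SIZE of 64KB assumed from the mbox
 * header): on OcteonTx2, VF region N sits at RVU_PF_VF_BAR4_ADDR +
 * N * MBOX_SIZE, so with a base of 0x840430000000 VF2's mailbox would be
 * ioremapped at 0x840430020000.
 */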
2075 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2077 void (mbox_handler)(struct work_struct *),
2078 void (mbox_up_handler)(struct work_struct *))
2080 int err = -EINVAL, i, dir, dir_up;
2081 void __iomem *reg_base;
2082 struct rvu_work *mwork;
2083 void **mbox_regions;
2086 mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
2092 name = "rvu_afpf_mailbox";
2093 dir = MBOX_DIR_AFPF;
2094 dir_up = MBOX_DIR_AFPF_UP;
2095 reg_base = rvu->afreg_base;
2096 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
2101 name = "rvu_afvf_mailbox";
2102 dir = MBOX_DIR_PFVF;
2103 dir_up = MBOX_DIR_PFVF_UP;
2104 reg_base = rvu->pfreg_base;
2105 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
2113 mw->mbox_wq = alloc_workqueue(name,
2114 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2121 mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2122 sizeof(struct rvu_work), GFP_KERNEL);
2123 if (!mw->mbox_wrk) {
2128 mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2129 sizeof(struct rvu_work), GFP_KERNEL);
2130 if (!mw->mbox_wrk_up) {
2135 err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2136 reg_base, dir, num);
2140 err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2141 reg_base, dir_up, num);
2145 for (i = 0; i < num; i++) {
2146 mwork = &mw->mbox_wrk[i];
2148 INIT_WORK(&mwork->work, mbox_handler);
2150 mwork = &mw->mbox_wrk_up[i];
2152 INIT_WORK(&mwork->work, mbox_up_handler);
2154 kfree(mbox_regions);
2158 destroy_workqueue(mw->mbox_wq);
2161 iounmap((void __iomem *)mbox_regions[num]);
2163 kfree(mbox_regions);
2167 static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2169 struct otx2_mbox *mbox = &mw->mbox;
2170 struct otx2_mbox_dev *mdev;
2174 flush_workqueue(mw->mbox_wq);
2175 destroy_workqueue(mw->mbox_wq);
2179 for (devid = 0; devid < mbox->ndevs; devid++) {
2180 mdev = &mbox->dev[devid];
2182 iounmap((void __iomem *)mdev->hwbase);
2185 otx2_mbox_destroy(&mw->mbox);
2186 otx2_mbox_destroy(&mw->mbox_up);
2189 static void rvu_queue_work(struct mbox_wq_info *mw, int first,
2190 int mdevs, u64 intr)
2192 struct otx2_mbox_dev *mdev;
2193 struct otx2_mbox *mbox;
2194 struct mbox_hdr *hdr;
2197 for (i = first; i < mdevs; i++) {
2199 if (!(intr & BIT_ULL(i - first)))
2203 mdev = &mbox->dev[i];
2204 hdr = mdev->mbase + mbox->rx_start;
2206 /* The hdr->num_msgs is set to zero immediately in the interrupt
2207 * handler to ensure that it holds a correct value the next time
2208 * the interrupt handler is called.
2209 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler;
2210 * pf->mbox.up_num_msgs holds the data for use in
2211 * pfaf_mbox_up_handler.
2214 if (hdr->num_msgs) {
2215 mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
2217 queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
2219 mbox = &mw->mbox_up;
2220 mdev = &mbox->dev[i];
2221 hdr = mdev->mbase + mbox->rx_start;
2222 if (hdr->num_msgs) {
2223 mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
2225 queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
2230 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
2232 struct rvu *rvu = (struct rvu *)rvu_irq;
2236 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
2237 /* Clear interrupts */
2238 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
2240 trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
2242 /* Sync with mbox memory region */
2245 rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
2247 /* Handle VF interrupts */
2249 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
2250 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
2252 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
2256 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
2257 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
2259 trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
2261 rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
2266 static void rvu_enable_mbox_intr(struct rvu *rvu)
2268 struct rvu_hwinfo *hw = rvu->hw;
2270 /* Clear spurious irqs, if any */
2271 rvu_write64(rvu, BLKADDR_RVUM,
2272 RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2274 /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
2275 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2276 INTR_MASK(hw->total_pfs) & ~1ULL);
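
/* Worked example (illustrative; INTR_MASK(n) assumed to set the n low
 * bits): with 16 PFs, INTR_MASK(16) == 0xFFFF, and masking with ~1ULL
 * gives 0xFFFE, leaving PF0 (the AF itself) without a mailbox interrupt
 * since the AF never mails itself.
 */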
2279 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2281 struct rvu_block *block;
2282 int slot, lf, num_lfs;
2285 block = &rvu->hw->block[blkaddr];
2286 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2290 for (slot = 0; slot < num_lfs; slot++) {
2291 lf = rvu_get_lf(rvu, block, pcifunc, slot);
2295 /* Cleanup LF and reset it */
2296 if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2297 rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2298 else if (block->addr == BLKADDR_NPA)
2299 rvu_npa_lf_teardown(rvu, pcifunc, lf);
2300 else if ((block->addr == BLKADDR_CPT0) ||
2301 (block->addr == BLKADDR_CPT1))
2302 rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
2304 err = rvu_lf_reset(rvu, block, lf);
2306 dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2312 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
2314 mutex_lock(&rvu->flr_lock);
2315 /* Reset order should reflect inter-block dependencies:
2316 * 1. Reset any packet/work sources (NIX, CPT, TIM)
2317 * 2. Flush and reset SSO/SSOW
2318 * 3. Cleanup pools (NPA)
2320 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
2321 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
2322 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
2323 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
2324 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
2325 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
2326 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
2327 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
2328 rvu_detach_rsrcs(rvu, NULL, pcifunc);
2329 mutex_unlock(&rvu->flr_lock);
2332 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
2336 /* pcifunc = 0(PF0) | (vf + 1) */
2337 __rvu_flr_handler(rvu, vf + 1);
2344 /* Signal FLR finish and enable IRQ */
2345 rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
2346 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
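
/* Worked example (illustrative): the VFTRPEND/VFFLR registers are banked
 * in 64-bit chunks, so VF 70 is signalled via register index 1, bit 6
 * (70 - 64); the elided code above is assumed to derive 'reg' and the
 * bit position that way before these writes.
 */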
2349 static void rvu_flr_handler(struct work_struct *work)
2351 struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2352 struct rvu *rvu = flrwork->rvu;
2353 u16 pcifunc, numvfs, vf;
2357 pf = flrwork - rvu->flr_wrk;
2358 if (pf >= rvu->hw->total_pfs) {
2359 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2363 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2364 numvfs = (cfg >> 12) & 0xFF;
2365 pcifunc = pf << RVU_PFVF_PF_SHIFT;
2367 for (vf = 0; vf < numvfs; vf++)
2368 __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2370 __rvu_flr_handler(rvu, pcifunc);
2372 /* Signal FLR finish */
2373 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2375 /* Enable interrupt */
2376 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
2379 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2381 int dev, vf, reg = 0;
2387 intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2391 for (vf = 0; vf < numvfs; vf++) {
2392 if (!(intr & BIT_ULL(vf)))
2394 dev = vf + start_vf + rvu->hw->total_pfs;
2395 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2396 /* Clear and disable the interrupt */
2397 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2398 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2402 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2404 struct rvu *rvu = (struct rvu *)rvu_irq;
2408 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
2412 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2413 if (intr & (1ULL << pf)) {
2414 /* PF is already dead, do only AF-related operations */
2415 queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
2416 /* clear interrupt */
2417 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
2419 /* Disable the interrupt */
2420 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2426 rvu_afvf_queue_flr_work(rvu, 0, 64);
2428 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
2433 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2437 /* Nothing to be done here other than clearing the
2440 for (vf = 0; vf < 64; vf++) {
2441 if (intr & (1ULL << vf)) {
2442 /* Clear the trpend due to ME (master enable) */
2443 rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2444 /* clear interrupt */
2445 rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2450 /* Handles ME interrupts from VFs of AF */
2451 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
2453 struct rvu *rvu = (struct rvu *)rvu_irq;
2457 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2459 for (vfset = 0; vfset <= 1; vfset++) {
2460 intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
2462 rvu_me_handle_vfset(rvu, vfset, intr);
2468 /* Handles ME interrupts from PFs */
2469 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2471 struct rvu *rvu = (struct rvu *)rvu_irq;
2475 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2477 /* Nothing to be done here other than clearing the
2480 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2481 if (intr & (1ULL << pf)) {
2482 /* Clear the trpend due to ME (master enable) */
2483 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2485 /* clear interrupt */
2486 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2494 static void rvu_unregister_interrupts(struct rvu *rvu)
2498 /* Disable the Mbox interrupt */
2499 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2500 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2502 /* Disable the PF FLR interrupt */
2503 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2504 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2506 /* Disable the PF ME interrupt */
2507 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2508 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2510 for (irq = 0; irq < rvu->num_vec; irq++) {
2511 if (rvu->irq_allocated[irq]) {
2512 free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2513 rvu->irq_allocated[irq] = false;
2517 pci_free_irq_vectors(rvu->pdev);
2521 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2523 struct rvu_pfvf *pfvf = &rvu->pf[0];
2527 offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2529 /* Make sure there are enough MSIX vectors configured so that
2530 * VF interrupts can be handled. An offset of zero means that
2531 * PF vectors are not configured and overlap the AF vectors.
2533 return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2537 static int rvu_register_interrupts(struct rvu *rvu)
2539 int ret, offset, pf_vec_start;
2541 rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2543 rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2544 NAME_SIZE, GFP_KERNEL);
2548 rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2549 sizeof(bool), GFP_KERNEL);
2550 if (!rvu->irq_allocated)
2554 ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2555 rvu->num_vec, PCI_IRQ_MSIX);
2558 "RVUAF: Request for %d msix vectors failed, ret %d\n",
	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	/* Register FLR interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
		"RVUAF FLR");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for FLR\n");
		goto fail;
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;

	/* Enable FLR interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
	/* Register ME interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
		"RVUAF ME");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
			  rvu_me_pf_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for ME\n");
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;

	/* Clear TRPEND bit for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
	/* Enable ME interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
	if (!rvu_afvf_msix_vectors_num_ok(rvu))
		return 0;

	/* Get PF MSIX vectors offset. */
	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
	/* Register MBOX0 interrupt. */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox0\n");

	rvu->irq_allocated[offset] = true;

	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
	 * simply increment current offset by 1.
	 */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox1\n");

	rvu->irq_allocated[offset] = true;
	/* Register FLR interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;
	/* Register ME interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;
	return 0;

fail:
	rvu_unregister_interrupts(rvu);
	return ret;
}
static void rvu_flr_wq_destroy(struct rvu *rvu)
{
	if (rvu->flr_wq) {
		flush_workqueue(rvu->flr_wq);
		destroy_workqueue(rvu->flr_wq);
		rvu->flr_wq = NULL;
	}
}
static int rvu_flr_init(struct rvu *rvu)
{
	int dev, num_devs;
	u64 cfg;
	int pf;

	/* Enable FLR for all PFs */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
			    cfg | BIT_ULL(22));
	}

	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      1);
	if (!rvu->flr_wq)
		return -ENOMEM;

	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->flr_wrk) {
		destroy_workqueue(rvu->flr_wq);
		return -ENOMEM;
	}

	for (dev = 0; dev < num_devs; dev++) {
		rvu->flr_wrk[dev].rvu = rvu;
		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
	}

	mutex_init(&rvu->flr_lock);

	return 0;
}
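/* Editorial note: the FLR workqueue above is WQ_UNBOUND | WQ_HIGHPRI |
 * WQ_MEM_RECLAIM, presumably so that function-level-reset teardown keeps
 * making progress under memory pressure, and one rvu_work slot is allocated
 * per possible PF and VF so that every function can have an FLR pending at
 * the same time.
 */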
static void rvu_disable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
		      INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}
static void rvu_enable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	/* Mbox */
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
		      INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}
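/* Editorial note (an assumption about the register naming convention): the
 * *_INTX writes above acknowledge stale pending bits before the matching
 * *_INT_ENA_W1SX ("write 1 to set") writes enable delivery; the disable path
 * in rvu_disable_afvf_intr() uses the *_INT_ENA_W1CX ("write 1 to clear")
 * counterparts.
 */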
int rvu_get_num_lbk_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Read number of available LBK channels from LBK(0)_CONST register. */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}
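/* Editorial note: pci_get_device() returns a referenced pci_dev, which is why
 * both exit paths above drop it with pci_dev_put(). The channel count is
 * assumed to sit in bits <47:32> of LBK(0)_CONST (BAR0 offset 0x10), hence
 * the shift-and-mask decode.
 */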
static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = rvu_get_num_lbk_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Limit VFs in case we have more VFs than LBK channels available. */
	if (vfs > chans)
		vfs = chans;

	if (!vfs)
		return 0;

	/* Save VFs number for reference in VF interrupts handlers.
	 * Since interrupts might start arriving during SRIOV enablement,
	 * the ordinary API cannot be used to get the number of enabled VFs.
	 */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Make sure IRQs are enabled before SRIOV. */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}
static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}
static void rvu_update_module_params(struct rvu *rvu)
{
	const char *default_pfl_name = "default";

	strscpy(rvu->mkex_pfl_name,
		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
	strscpy(rvu->kpu_pfl_name,
		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
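/* Editorial example (hypothetical invocation, not from the original source):
 * both profile names fall back to "default" unless overridden at load time:
 *
 *   modprobe rvu_af mkex_profile=my_mkex kpu_profile=my_kpu
 */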
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	rvu->ptp = ptp_get();
	if (IS_ERR(rvu->ptp)) {
		err = PTR_ERR(rvu->ptp);
		if (err == -EPROBE_DEFER)
			goto err_release_regions;
		rvu->ptp = NULL;
	}
	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_put_ptp;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_put_ptp;

	/* Init mailbox between AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err)
		goto err_hwsetup;

	err = rvu_flr_init(rvu);
	if (err)
		goto err_mbox;

	err = rvu_register_interrupts(rvu);
	if (err)
		goto err_flr;

	err = rvu_register_dl(rvu);
	if (err)
		goto err_irq;

	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err)
		goto err_dl;

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	return 0;
err_dl:
	rvu_unregister_dl(rvu);
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
	ptp_put(rvu->ptp);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}
static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_dl(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
	ptp_put(rvu->ptp);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}
static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};
static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&ptp_driver);
	if (err < 0)
		goto ptp_err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		goto rvu_err;

	return 0;
rvu_err:
	pci_unregister_driver(&ptp_driver);
ptp_err:
	pci_unregister_driver(&cgx_driver);

	return err;
}
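/* Editorial note: registration order matters here - the AF depends on the CGX
 * and PTP drivers at probe time, so they are registered first and torn down
 * in reverse order both on the error path above and in rvu_cleanup_module()
 * below.
 */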
static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
}
module_init(rvu_init_module);
module_exit(rvu_cleanup_module);