octeontx2-af: add new mailbox to configure VF trust mode
[platform/kernel/linux-starfive.git] / drivers / net / ethernet / marvell / octeontx2 / af / rvu.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/delay.h>
14 #include <linux/irq.h>
15 #include <linux/pci.h>
16 #include <linux/sysfs.h>
17
18 #include "cgx.h"
19 #include "rvu.h"
20 #include "rvu_reg.h"
21 #include "ptp.h"
22
23 #include "rvu_trace.h"
24
25 #define DRV_NAME        "rvu_af"
26 #define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"
27
28 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
29
30 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
31                                 struct rvu_block *block, int lf);
32 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
33                                   struct rvu_block *block, int lf);
34 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
35
36 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
37                          int type, int num,
38                          void (mbox_handler)(struct work_struct *),
39                          void (mbox_up_handler)(struct work_struct *));
40 enum {
41         TYPE_AFVF,
42         TYPE_AFPF,
43 };
44
45 /* Supported devices */
46 static const struct pci_device_id rvu_id_table[] = {
47         { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
48         { 0, }  /* end of table */
49 };
50
51 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
52 MODULE_DESCRIPTION(DRV_STRING);
53 MODULE_LICENSE("GPL v2");
54 MODULE_DEVICE_TABLE(pci, rvu_id_table);
55
56 static char *mkex_profile; /* MKEX profile name */
57 module_param(mkex_profile, charp, 0000);
58 MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
59
60 static char *kpu_profile; /* KPU profile name */
61 module_param(kpu_profile, charp, 0000);
62 MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
63
/* Record per-silicon hardware capabilities in rvu->hw->cap.
 *
 * Defaults assume the newer silicon feature set; 96xx B0 (and A0)
 * overrides then degrade individual capabilities to match known
 * hardware limitations of those steppings.
 */
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Optimistic defaults: flexible TX schedulers up to TL1,
	 * shaping, TX link backpressure and RX multicast replication.
	 */
	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
	hw->cap.nix_fixed_txschq_mapping = false;
	hw->cap.nix_shaping = true;
	hw->cap.nix_tx_link_bp = true;
	hw->cap.nix_rx_multicast = true;
	hw->rvu = rvu;

	if (is_rvu_96xx_B0(rvu)) {
		/* 96xx B0: fixed TX schedq-to-LMAC mapping with per-LMAC
		 * queue counts; no shaping or TX link backpressure.
		 */
		hw->cap.nix_fixed_txschq_mapping = true;
		hw->cap.nix_txsch_per_cgx_lmac = 4;
		hw->cap.nix_txsch_per_lbk_lmac = 132;
		hw->cap.nix_txsch_per_sdp_lmac = 76;
		hw->cap.nix_shaping = false;
		hw->cap.nix_tx_link_bp = false;
		if (is_rvu_96xx_A0(rvu))
			hw->cap.nix_rx_multicast = false;
	}

	/* Post-OcteonTx2 silicon has dedicated mailbox regs per PF */
	if (!is_rvu_otx2(rvu))
		hw->cap.per_pf_mbox_regs = true;
}
89
90 /* Poll a RVU block's register 'offset', for a 'zero'
91  * or 'nonzero' at bits specified by 'mask'
92  */
93 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
94 {
95         unsigned long timeout = jiffies + usecs_to_jiffies(10000);
96         void __iomem *reg;
97         u64 reg_val;
98
99         reg = rvu->afreg_base + ((block << 28) | offset);
100 again:
101         reg_val = readq(reg);
102         if (zero && !(reg_val & mask))
103                 return 0;
104         if (!zero && (reg_val & mask))
105                 return 0;
106         if (time_before(jiffies, timeout)) {
107                 usleep_range(1, 5);
108                 goto again;
109         }
110         return -EBUSY;
111 }
112
113 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
114 {
115         int id;
116
117         if (!rsrc->bmap)
118                 return -EINVAL;
119
120         id = find_first_zero_bit(rsrc->bmap, rsrc->max);
121         if (id >= rsrc->max)
122                 return -ENOSPC;
123
124         __set_bit(id, rsrc->bmap);
125
126         return id;
127 }
128
129 int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
130 {
131         int start;
132
133         if (!rsrc->bmap)
134                 return -EINVAL;
135
136         start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
137         if (start >= rsrc->max)
138                 return -ENOSPC;
139
140         bitmap_set(rsrc->bmap, start, nrsrc);
141         return start;
142 }
143
144 static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
145 {
146         if (!rsrc->bmap)
147                 return;
148         if (start >= rsrc->max)
149                 return;
150
151         bitmap_clear(rsrc->bmap, start, nrsrc);
152 }
153
154 bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
155 {
156         int start;
157
158         if (!rsrc->bmap)
159                 return false;
160
161         start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
162         if (start >= rsrc->max)
163                 return false;
164
165         return true;
166 }
167
168 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
169 {
170         if (!rsrc->bmap)
171                 return;
172
173         __clear_bit(id, rsrc->bmap);
174 }
175
176 int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
177 {
178         int used;
179
180         if (!rsrc->bmap)
181                 return 0;
182
183         used = bitmap_weight(rsrc->bmap, rsrc->max);
184         return (rsrc->max - used);
185 }
186
187 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
188 {
189         rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
190                              sizeof(long), GFP_KERNEL);
191         if (!rsrc->bmap)
192                 return -ENOMEM;
193         return 0;
194 }
195
/* Get block LF's HW index from a PF_FUNC's block slot number.
 *
 * Walks the block's LF-to-pcifunc map under rsrc_lock and returns the
 * HW LF index of the 'slot'-th LF owned by 'pcifunc', or -ENODEV if
 * that PF/VF owns fewer than slot+1 LFs of this block.
 */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	mutex_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				mutex_unlock(&rvu->rsrc_lock);
				return lf;
			}
			/* Not this slot yet; count matches seen so far */
			match++;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}
215
/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 supports
 * multiple blocks of same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * For a pcifunc if LFs are attached from multiple blocks of same type, then
 * return blkaddr of first encountered block.
 *
 * Returns a valid blkaddr only if that block is actually implemented
 * on this silicon, else -ENODEV.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
	 * 'BLKADDR_NIX1'. A non-zero LF-count CFG register means an LF
	 * from that block instance is attached.
	 */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
			RVU_PRIV_HWVFX_NIXX_CFG(0);
		/* Per-device register: device number goes in bits 16+ */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
			RVU_PRIV_HWVFX_NIXX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX1;
	}

	/* Same CPT0-vs-CPT1 disambiguation for CPT LFs */
	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
			RVU_PRIV_HWVFX_CPTX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
			RVU_PRIV_HWVFX_CPTX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT1;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}
312
/* Record the attach/detach of LF 'lf' of 'block' to 'pcifunc'.
 *
 * Updates the block's LF-to-owner map, the pfvf software counters for
 * that block type, and mirrors the new per-device LF count into the
 * appropriate RVU privileged CFG register.
 */
static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				u16 lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* 0 marks the LF as unowned */
	block->fn_map[lf] = attach ? pcifunc : 0;

	/* NPA/NIX allow at most one LF per device (bool flag); the
	 * remaining block types keep a running count.
	 */
	switch (block->addr) {
	case BLKADDR_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKADDR_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKADDR_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKADDR_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKADDR_CPT0:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	case BLKADDR_CPT1:
		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
		num_lfs = pfvf->cpt1_lfs;
		break;
	}

	/* Publish the updated LF count to HW (device id in bits 16+) */
	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
374
/* Extract the PF number from a pcifunc value. */
inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
379
/* Read PF 'pf's privileged CFG register and return via out-params the
 * number of VFs attached to it and the HW index of its first VF.
 */
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	*numvfs = (cfg >> 12) & 0xFF;	/* CFG[19:12] = numVFs */
	*hwvf = cfg & 0xFFF;		/* CFG[11:0]  = first HWVF */
}
389
/* Translate a VF pcifunc into its global HWVF index.
 *
 * The parent PF's CFG register holds the first HWVF index; VF 'func'
 * (1-based within the PF) maps to first_hwvf + func - 1. Caller must
 * pass a pcifunc with a non-zero FUNC field.
 */
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}
403
404 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
405 {
406         /* Check if it is a PF or VF */
407         if (pcifunc & RVU_PFVF_FUNC_MASK)
408                 return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
409         else
410                 return &rvu->pf[rvu_get_pf(pcifunc)];
411 }
412
/* Validate that 'pcifunc' refers to an existing PF, and for a VF
 * that the VF index is within the VFs provisioned to its parent PF.
 */
static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
	int pf, vf, nvfs;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (pf >= rvu->hw->total_pfs)
		return false;

	/* PF itself: no VF range to check */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return true;

	/* Check if VF is within number of VFs attached to this PF */
	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	nvfs = (cfg >> 12) & 0xFF;
	if (vf >= nvfs)
		return false;

	return true;
}
434
435 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
436 {
437         struct rvu_block *block;
438
439         if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
440                 return false;
441
442         block = &hw->block[blkaddr];
443         return block->implemented;
444 }
445
/* Probe every RVU block address and latch block->implemented from the
 * discovery register's 'implemented' bit (bit 11).
 */
static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}
461
/* Advertise the RVUM block revision id so other agents can detect
 * that the AF driver is loaded.
 */
static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}
468
/* Clear the RVUM block revision id (inverse of
 * rvu_setup_rvum_blk_revid), done on driver teardown.
 */
static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}
474
/* Trigger a HW reset of LF 'lf' in 'block' and wait for completion.
 *
 * Writing the LF index with bit 12 set starts the reset; HW clears
 * bit 12 when done. Returns 0 on success (or unimplemented block),
 * -EBUSY if the reset does not complete in time.
 */
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}
487
/* Issue a full block reset via 'rst_reg' and wait for HW to clear the
 * busy bit (bit 63). Skips blocks not implemented on this silicon.
 */
static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];

	if (!block->implemented)
		return;

	/* Bit 0 starts the reset; poll bit 63 (busy) until clear */
	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
}
498
/* Bring every RVU functional block (and its NDC caches) to a known
 * state with a HW reset, typically during AF probe.
 */
static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
516
/* Scan a block's LF config registers for LFs already provisioned
 * (e.g. by firmware) and import them into driver state: mark the LF
 * used, record its owner, and assign its MSIX vector offsets.
 */
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		/* Bit 63 = LF enabled/attached */
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get, to whom this LF is attached; CFG[23:8] = pcifunc */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
541
542 static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
543 {
544         int min_vecs;
545
546         if (!vf)
547                 goto check_pf;
548
549         if (!nvecs) {
550                 dev_warn(rvu->dev,
551                          "PF%d:VF%d is configured with zero msix vectors, %d\n",
552                          pf, vf - 1, nvecs);
553         }
554         return;
555
556 check_pf:
557         if (pf == 0)
558                 min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
559         else
560                 min_vecs = RVU_PF_INT_VEC_CNT;
561
562         if (!(nvecs < min_vecs))
563                 return;
564         dev_warn(rvu->dev,
565                  "PF%d is configured with too few vectors, %d, min is %d\n",
566                  pf, nvecs, min_vecs);
567 }
568
/* Set up MSIX vector bookkeeping for every enabled PF and its VFs,
 * then map the MSIX table base for the IOMMU.
 *
 * For each device: read its provisioned vector count, allocate a
 * vector bitmap plus a vector-to-LF map, and program the offset of the
 * fixed RVU_PF/VF_INT_VEC vectors into the INT_CFG register.
 * Finally, since HW treats RVU_AF_MSIXTR_BASE as an IOVA, create a DMA
 * mapping for the firmware-configured physical table address and write
 * the IOVA back.
 *
 * Returns 0 on success or a negative errno (allocation/mapping
 * failure).
 */
static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		/* Clear the old offset field before OR-ing the new one */
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf =  &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	/* Keep both addresses: iova for unmap, phys for restore on exit */
	rvu->msix_base_iova = iova;
	rvu->msixtr_base_phy = phy_addr;

	return 0;
}
679
/* Restore the firmware-configured physical MSIX table base, undoing
 * the IOVA written by rvu_setup_msix_resources().
 */
static void rvu_reset_msix(struct rvu *rvu)
{
	/* Restore msixtr base register */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}
686
/* Tear down all software state built by the HW-resource setup path:
 * per-block memories, LF bitmaps, PF/VF MSIX bitmaps, and the MSIX
 * table IOVA mapping.
 */
static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf  *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping; a zero iova means the
	 * mapping was never created, so nothing more to undo.
	 */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);

	rvu_reset_msix(rvu);
	mutex_destroy(&rvu->rsrc_lock);
}
728
/* Assign a MAC address to every CGX-mapped PF and to all VFs.
 *
 * Firmware-provided addresses (rvu->fwdata) are preferred; a random
 * address is generated when none is available. The assigned address is
 * also saved as the device's default_mac.
 */
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */
		if (!pf)
			goto lbkvf;

		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		/* Assign MAC address to PF */
		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}
		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
		/* Assign MAC address to VFs*/
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
		}
	}
}
774
/* Map the firmware-provided 'fwdata' region into rvu->fwdata.
 *
 * Returns 0 on success; -EIO if the base address cannot be fetched or
 * mapped; -EINVAL if the mapped struct fails the layout/version check
 * (in which case the mapping is released and rvu->fwdata stays NULL).
 */
static int rvu_fwdata_init(struct rvu *rvu)
{
	u64 fwdbase;
	int err;

	/* Get firmware data base address */
	err = cgx_get_fwdata_base(&fwdbase);
	if (err)
		goto fail;
	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
	if (!rvu->fwdata)
		goto fail;
	if (!is_rvu_fwdata_valid(rvu)) {
		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct btw kernel and firmware\n");
		iounmap(rvu->fwdata);
		rvu->fwdata = NULL;
		return -EINVAL;
	}
	return 0;
fail:
	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
	return -EIO;
}
799
/* Release the 'fwdata' mapping created by rvu_fwdata_init(), if any. */
static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}
805
/* Initialize the rvu_block descriptor and LF bitmap for one NIX block
 * instance (NIX0 or NIX1). Returns 0 if the block is unimplemented or
 * on success, else the bitmap allocation error.
 */
static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init NIX LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	/* NIX_AF_CONST2[11:0] = max number of LFs */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	sprintf(block->name, "NIX%d", blkid);
	rvu->nix_blkaddr[blkid] = blkaddr;
	return rvu_alloc_bitmap(&block->lf);
}
833
/* Initialize the rvu_block descriptor and LF bitmap for one CPT block
 * instance (CPT0 or CPT1). Returns 0 if the block is unimplemented or
 * on success, else the bitmap allocation error.
 */
static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init CPT LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	/* CPT_AF_CONSTANTS0[7:0] = max number of LFs */
	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_CPT;
	/* CPT LFs are attached via multiple slots per device */
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	sprintf(block->name, "CPT%d", blkid);
	return rvu_alloc_bitmap(&block->lf);
}
861
/* Read the loopback (LBK) device's FIFO buffer size from its CONST
 * register and cache it in hw->lbk_bufsize. Silently leaves the value
 * unset if the LBK PCI device or its BAR cannot be accessed.
 */
static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_OCTEONTX2_LBK, pdev);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	lbk_const = readq(base + LBK_CONST);

	/* cache fifo size */
	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

	iounmap(base);
err_put:
	/* Drop the reference taken by pci_get_device() */
	pci_dev_put(pdev);
}
886
/* Discover and initialize all RVU HW blocks (NPA, NIX0/1, SSO, SSOW, TIM,
 * CPT0/1): read each block's constants register, fill in its register
 * offsets and allocate its LF bitmap.  Then allocate PF/VF state, set up
 * MSIX, scan firmware-provisioned LFs, and bring up the NPC, CGX, NPA
 * and NIX subsystems in order.
 *
 * Returns 0 on success or a negative errno; on failure, subsystems that
 * were already initialized are torn down via the error labels below.
 */
static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

nix:
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
	if (err)
		return err;
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
	if (err)
		return err;

	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	/* NOTE(review): 'cfg' is expected to still hold SSO_AF_CONST here
	 * (workslot count in bits 63:56).  If SSOW were implemented without
	 * SSO, 'cfg' would be stale (last read from NPA_AF_CONST) — confirm
	 * that combination cannot occur on real silicon.
	 */
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

cpt:
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
	if (err)
		return err;
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
	if (err)
		return err;

	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf)
		return -ENOMEM;

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf)
		return -ENOMEM;

	mutex_init(&rvu->rsrc_lock);

	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	if (err)
		return err;

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map) {
			err = -ENOMEM;
			goto msix_err;
		}

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_set_channels_base(rvu);
	if (err)
		goto msix_err;

	err = rvu_npc_init(rvu);
	if (err)
		goto npc_err;

	err = rvu_cgx_init(rvu);
	if (err)
		goto cgx_err;

	/* Assign MACs for CGX mapped functions */
	rvu_setup_pfvf_macaddress(rvu);

	err = rvu_npa_init(rvu);
	if (err)
		goto npa_err;

	rvu_get_lbk_bufsize(rvu);

	err = rvu_nix_init(rvu);
	if (err)
		goto nix_err;

	rvu_program_channels(rvu);

	return 0;

nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
	rvu_cgx_exit(rvu);
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
}
1080
1081 /* NPA and NIX admin queue APIs */
1082 void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
1083 {
1084         if (!aq)
1085                 return;
1086
1087         qmem_free(rvu->dev, aq->inst);
1088         qmem_free(rvu->dev, aq->res);
1089         devm_kfree(rvu->dev, aq);
1090 }
1091
1092 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
1093                  int qsize, int inst_size, int res_size)
1094 {
1095         struct admin_queue *aq;
1096         int err;
1097
1098         *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
1099         if (!*ad_queue)
1100                 return -ENOMEM;
1101         aq = *ad_queue;
1102
1103         /* Alloc memory for instructions i.e AQ */
1104         err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
1105         if (err) {
1106                 devm_kfree(rvu->dev, aq);
1107                 return err;
1108         }
1109
1110         /* Alloc memory for results */
1111         err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
1112         if (err) {
1113                 rvu_aq_free(rvu, aq);
1114                 return err;
1115         }
1116
1117         spin_lock_init(&aq->lock);
1118         return 0;
1119 }
1120
1121 int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
1122                            struct ready_msg_rsp *rsp)
1123 {
1124         if (rvu->fwdata) {
1125                 rsp->rclk_freq = rvu->fwdata->rclk;
1126                 rsp->sclk_freq = rvu->fwdata->sclk;
1127         }
1128         return 0;
1129 }
1130
1131 /* Get current count of a RVU block's LF/slots
1132  * provisioned to a given RVU func.
1133  */
1134 u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
1135 {
1136         switch (blkaddr) {
1137         case BLKADDR_NPA:
1138                 return pfvf->npalf ? 1 : 0;
1139         case BLKADDR_NIX0:
1140         case BLKADDR_NIX1:
1141                 return pfvf->nixlf ? 1 : 0;
1142         case BLKADDR_SSO:
1143                 return pfvf->sso;
1144         case BLKADDR_SSOW:
1145                 return pfvf->ssow;
1146         case BLKADDR_TIM:
1147                 return pfvf->timlfs;
1148         case BLKADDR_CPT0:
1149                 return pfvf->cptlfs;
1150         case BLKADDR_CPT1:
1151                 return pfvf->cpt1_lfs;
1152         }
1153         return 0;
1154 }
1155
1156 /* Return true if LFs of block type are attached to pcifunc */
1157 static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
1158 {
1159         switch (blktype) {
1160         case BLKTYPE_NPA:
1161                 return pfvf->npalf ? 1 : 0;
1162         case BLKTYPE_NIX:
1163                 return pfvf->nixlf ? 1 : 0;
1164         case BLKTYPE_SSO:
1165                 return !!pfvf->sso;
1166         case BLKTYPE_SSOW:
1167                 return !!pfvf->ssow;
1168         case BLKTYPE_TIM:
1169                 return !!pfvf->timlfs;
1170         case BLKTYPE_CPT:
1171                 return pfvf->cptlfs || pfvf->cpt1_lfs;
1172         }
1173
1174         return false;
1175 }
1176
1177 bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
1178 {
1179         struct rvu_pfvf *pfvf;
1180
1181         if (!is_pf_func_valid(rvu, pcifunc))
1182                 return false;
1183
1184         pfvf = rvu_get_pfvf(rvu, pcifunc);
1185
1186         /* Check if this PFFUNC has a LF of type blktype attached */
1187         if (!is_blktype_attached(pfvf, blktype))
1188                 return false;
1189
1190         return true;
1191 }
1192
1193 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
1194                            int pcifunc, int slot)
1195 {
1196         u64 val;
1197
1198         val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
1199         rvu_write64(rvu, block->addr, block->lookup_reg, val);
1200         /* Wait for the lookup to finish */
1201         /* TODO: put some timeout here */
1202         while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
1203                 ;
1204
1205         val = rvu_read64(rvu, block->addr, block->lookup_reg);
1206
1207         /* Check LF valid bit */
1208         if (!(val & (1ULL << 12)))
1209                 return -1;
1210
1211         return (val & 0xFFF);
1212 }
1213
/* Detach all LFs of @blktype from @pcifunc: disable each LF in HW,
 * clear the SW LF-to-pcifunc mapping, return the LF to the block's
 * free bitmap and release its MSIX vector range.
 *
 * Caller is expected to hold rvu->rsrc_lock (all callers in this file
 * take it before calling here).
 */
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	/* Resolve which block address this func's LFs of @blktype live in */
	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	/* Restore default MAC before the NIX LF goes away */
	if (blktype == BLKTYPE_NIX)
		rvu_nix_reset_mac(pfvf, pcifunc);

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}
1255
1256 static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
1257                             u16 pcifunc)
1258 {
1259         struct rvu_hwinfo *hw = rvu->hw;
1260         bool detach_all = true;
1261         struct rvu_block *block;
1262         int blkid;
1263
1264         mutex_lock(&rvu->rsrc_lock);
1265
1266         /* Check for partial resource detach */
1267         if (detach && detach->partial)
1268                 detach_all = false;
1269
1270         /* Check for RVU block's LFs attached to this func,
1271          * if so, detach them.
1272          */
1273         for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1274                 block = &hw->block[blkid];
1275                 if (!block->lf.bmap)
1276                         continue;
1277                 if (!detach_all && detach) {
1278                         if (blkid == BLKADDR_NPA && !detach->npalf)
1279                                 continue;
1280                         else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
1281                                 continue;
1282                         else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
1283                                 continue;
1284                         else if ((blkid == BLKADDR_SSO) && !detach->sso)
1285                                 continue;
1286                         else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
1287                                 continue;
1288                         else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
1289                                 continue;
1290                         else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
1291                                 continue;
1292                         else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
1293                                 continue;
1294                 }
1295                 rvu_detach_block(rvu, pcifunc, block->type);
1296         }
1297
1298         mutex_unlock(&rvu->rsrc_lock);
1299         return 0;
1300 }
1301
1302 int rvu_mbox_handler_detach_resources(struct rvu *rvu,
1303                                       struct rsrc_detach *detach,
1304                                       struct msg_rsp *rsp)
1305 {
1306         return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
1307 }
1308
1309 static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
1310 {
1311         struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1312         int blkaddr = BLKADDR_NIX0, vf;
1313         struct rvu_pfvf *pf;
1314
1315         /* All CGX mapped PFs are set with assigned NIX block during init */
1316         if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
1317                 pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1318                 blkaddr = pf->nix_blkaddr;
1319         } else if (is_afvf(pcifunc)) {
1320                 vf = pcifunc - 1;
1321                 /* Assign NIX based on VF number. All even numbered VFs get
1322                  * NIX0 and odd numbered gets NIX1
1323                  */
1324                 blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
1325                 /* NIX1 is not present on all silicons */
1326                 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1327                         blkaddr = BLKADDR_NIX0;
1328         }
1329
1330         switch (blkaddr) {
1331         case BLKADDR_NIX1:
1332                 pfvf->nix_blkaddr = BLKADDR_NIX1;
1333                 pfvf->nix_rx_intf = NIX_INTFX_RX(1);
1334                 pfvf->nix_tx_intf = NIX_INTFX_TX(1);
1335                 break;
1336         case BLKADDR_NIX0:
1337         default:
1338                 pfvf->nix_blkaddr = BLKADDR_NIX0;
1339                 pfvf->nix_rx_intf = NIX_INTFX_RX(0);
1340                 pfvf->nix_tx_intf = NIX_INTFX_TX(0);
1341                 break;
1342         }
1343
1344         return pfvf->nix_blkaddr;
1345 }
1346
1347 static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
1348                                   u16 pcifunc, struct rsrc_attach *attach)
1349 {
1350         int blkaddr;
1351
1352         switch (blktype) {
1353         case BLKTYPE_NIX:
1354                 blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
1355                 break;
1356         case BLKTYPE_CPT:
1357                 if (attach->hdr.ver < RVU_MULTI_BLK_VER)
1358                         return rvu_get_blkaddr(rvu, blktype, 0);
1359                 blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
1360                           BLKADDR_CPT0;
1361                 if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
1362                         return -ENODEV;
1363                 break;
1364         default:
1365                 return rvu_get_blkaddr(rvu, blktype, 0);
1366         }
1367
1368         if (is_block_implemented(rvu->hw, blkaddr))
1369                 return blkaddr;
1370
1371         return -ENODEV;
1372 }
1373
/* Attach @num_lfs LFs of @blktype to @pcifunc: allocate each LF from
 * the block's free bitmap, program its HW config register to bind it to
 * the func/slot, update the SW mapping and assign MSIX vectors.
 *
 * Stops silently when the block runs out of free LFs; callers check
 * availability beforehand via rvu_check_rsrc_availability().  Caller
 * holds rvu->rsrc_lock.
 */
static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
			     int num_lfs, struct rsrc_attach *attach)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		/* Bit 63 enables the LF; bits 23:8 hold the owning
		 * pcifunc and the low bits the slot number.
		 */
		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
1411
1412 static int rvu_check_rsrc_availability(struct rvu *rvu,
1413                                        struct rsrc_attach *req, u16 pcifunc)
1414 {
1415         struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1416         int free_lfs, mappedlfs, blkaddr;
1417         struct rvu_hwinfo *hw = rvu->hw;
1418         struct rvu_block *block;
1419
1420         /* Only one NPA LF can be attached */
1421         if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1422                 block = &hw->block[BLKADDR_NPA];
1423                 free_lfs = rvu_rsrc_free_count(&block->lf);
1424                 if (!free_lfs)
1425                         goto fail;
1426         } else if (req->npalf) {
1427                 dev_err(&rvu->pdev->dev,
1428                         "Func 0x%x: Invalid req, already has NPA\n",
1429                          pcifunc);
1430                 return -EINVAL;
1431         }
1432
1433         /* Only one NIX LF can be attached */
1434         if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1435                 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
1436                                                  pcifunc, req);
1437                 if (blkaddr < 0)
1438                         return blkaddr;
1439                 block = &hw->block[blkaddr];
1440                 free_lfs = rvu_rsrc_free_count(&block->lf);
1441                 if (!free_lfs)
1442                         goto fail;
1443         } else if (req->nixlf) {
1444                 dev_err(&rvu->pdev->dev,
1445                         "Func 0x%x: Invalid req, already has NIX\n",
1446                         pcifunc);
1447                 return -EINVAL;
1448         }
1449
1450         if (req->sso) {
1451                 block = &hw->block[BLKADDR_SSO];
1452                 /* Is request within limits ? */
1453                 if (req->sso > block->lf.max) {
1454                         dev_err(&rvu->pdev->dev,
1455                                 "Func 0x%x: Invalid SSO req, %d > max %d\n",
1456                                  pcifunc, req->sso, block->lf.max);
1457                         return -EINVAL;
1458                 }
1459                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1460                 free_lfs = rvu_rsrc_free_count(&block->lf);
1461                 /* Check if additional resources are available */
1462                 if (req->sso > mappedlfs &&
1463                     ((req->sso - mappedlfs) > free_lfs))
1464                         goto fail;
1465         }
1466
1467         if (req->ssow) {
1468                 block = &hw->block[BLKADDR_SSOW];
1469                 if (req->ssow > block->lf.max) {
1470                         dev_err(&rvu->pdev->dev,
1471                                 "Func 0x%x: Invalid SSOW req, %d > max %d\n",
1472                                  pcifunc, req->sso, block->lf.max);
1473                         return -EINVAL;
1474                 }
1475                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1476                 free_lfs = rvu_rsrc_free_count(&block->lf);
1477                 if (req->ssow > mappedlfs &&
1478                     ((req->ssow - mappedlfs) > free_lfs))
1479                         goto fail;
1480         }
1481
1482         if (req->timlfs) {
1483                 block = &hw->block[BLKADDR_TIM];
1484                 if (req->timlfs > block->lf.max) {
1485                         dev_err(&rvu->pdev->dev,
1486                                 "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1487                                  pcifunc, req->timlfs, block->lf.max);
1488                         return -EINVAL;
1489                 }
1490                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1491                 free_lfs = rvu_rsrc_free_count(&block->lf);
1492                 if (req->timlfs > mappedlfs &&
1493                     ((req->timlfs - mappedlfs) > free_lfs))
1494                         goto fail;
1495         }
1496
1497         if (req->cptlfs) {
1498                 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
1499                                                  pcifunc, req);
1500                 if (blkaddr < 0)
1501                         return blkaddr;
1502                 block = &hw->block[blkaddr];
1503                 if (req->cptlfs > block->lf.max) {
1504                         dev_err(&rvu->pdev->dev,
1505                                 "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1506                                  pcifunc, req->cptlfs, block->lf.max);
1507                         return -EINVAL;
1508                 }
1509                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1510                 free_lfs = rvu_rsrc_free_count(&block->lf);
1511                 if (req->cptlfs > mappedlfs &&
1512                     ((req->cptlfs - mappedlfs) > free_lfs))
1513                         goto fail;
1514         }
1515
1516         return 0;
1517
1518 fail:
1519         dev_info(rvu->dev, "Request for %s failed\n", block->name);
1520         return -ENOSPC;
1521 }
1522
1523 static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
1524                                        struct rsrc_attach *attach)
1525 {
1526         int blkaddr, num_lfs;
1527
1528         blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
1529                                          attach->hdr.pcifunc, attach);
1530         if (blkaddr < 0)
1531                 return false;
1532
1533         num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
1534                                         blkaddr);
1535         /* Requester already has LFs from given block ? */
1536         return !!num_lfs;
1537 }
1538
/* Mbox ATTACH_RESOURCES handler.  A non-'modify' request first drops
 * everything the func currently owns; then, under the resource lock,
 * the whole request is validated up front and the requested LFs are
 * attached per block type.  For multislot blocks a 'modify' request
 * detaches existing LFs before re-attaching, since the requester only
 * ever sees slots 0..N-1.
 *
 * Returns 0 on success or the validation error.
 */
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees as slot 0,1,2. So for a 'modify'
		 * request, simply detach all existing attached LFs/slots
		 * and attach a fresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		/* Only detach-and-reattach when the request targets the
		 * same CPT block the func already has LFs from.
		 */
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}
1602
1603 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1604                                int blkaddr, int lf)
1605 {
1606         u16 vec;
1607
1608         if (lf < 0)
1609                 return MSIX_VECTOR_INVALID;
1610
1611         for (vec = 0; vec < pfvf->msix.max; vec++) {
1612                 if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1613                         return vec;
1614         }
1615         return MSIX_VECTOR_INVALID;
1616 }
1617
/* Allocate a contiguous range of MSIX vectors for LF @lf of @block
 * within @pfvf's vector space, program the starting offset into the
 * LF's MSIX config register and record the mapping in msix_lfmap.
 *
 * Silently returns without assigning vectors if a contiguous range of
 * the required size is not available.
 */
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	/* Bits 19:12 of the MSIX config hold the vector count for this LF */
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF (offset lives in bits 10:0) */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}
1642
1643 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1644                                   struct rvu_block *block, int lf)
1645 {
1646         u16 nvecs, vec, offset;
1647         u64 cfg;
1648
1649         cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1650                          (lf << block->lfshift));
1651         nvecs = (cfg >> 12) & 0xFF;
1652
1653         /* Clear MSIX offset in LF */
1654         rvu_write64(rvu, block->addr, block->msixcfg_reg |
1655                     (lf << block->lfshift), cfg & ~0x7FFULL);
1656
1657         offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1658
1659         /* Update the mapping */
1660         for (vec = 0; vec < nvecs; vec++)
1661                 pfvf->msix_lfmap[offset + vec] = 0;
1662
1663         /* Free the same in MSIX bitmap */
1664         rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
1665 }
1666
/* Mbox MSIX_OFFSET handler: report, for every LF attached to the
 * requesting PF/VF (NPA, NIX, SSO, SSOW, TIM, CPT0, CPT1), the starting
 * MSIX vector assigned to it.  Slots without a mapping report
 * MSIX_VECTOR_INVALID via rvu_get_msix_offset().
 */
int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
				 struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot, blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	/* Get BLKADDR from which LFs are attached to pcifunc */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0) {
		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
	} else {
		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
	}

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}

	rsp->cpt1_lfs = pfvf->cpt1_lfs;
	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
		rsp->cpt1_lf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
	}

	return 0;
}
1729
1730 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1731                             struct msg_rsp *rsp)
1732 {
1733         u16 pcifunc = req->hdr.pcifunc;
1734         u16 vf, numvfs;
1735         u64 cfg;
1736
1737         vf = pcifunc & RVU_PFVF_FUNC_MASK;
1738         cfg = rvu_read64(rvu, BLKADDR_RVUM,
1739                          RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
1740         numvfs = (cfg >> 12) & 0xFF;
1741
1742         if (vf && vf <= numvfs)
1743                 __rvu_flr_handler(rvu, pcifunc);
1744         else
1745                 return RVU_INVALID_VF_ID;
1746
1747         return 0;
1748 }
1749
1750 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
1751                                 struct get_hw_cap_rsp *rsp)
1752 {
1753         struct rvu_hwinfo *hw = rvu->hw;
1754
1755         rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
1756         rsp->nix_shaping = hw->cap.nix_shaping;
1757
1758         return 0;
1759 }
1760
/* Mbox handler: set per-VF permission flags (e.g. trusted mode) on
 * behalf of the owning PF. Only a PF may change its VFs' permissions.
 * Dropping trusted mode also disables the VF's allmulti/promisc NPC
 * entries, since only trusted VFs may keep them.
 */
int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, nixlf;
	u16 target;

	/* Only PF can add VF permissions */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
		return -EOPNOTSUPP;

	/* Build pcifunc of the VF being configured (VF index is 1-based) */
	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
	pfvf = rvu_get_pfvf(rvu, target);

	if (req->flags & RESET_VF_PERM) {
		/* Keep only the bits allowed by the clear mask */
		pfvf->flags &= RVU_CLEAR_VF_PERM;
	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
		 (req->flags & VF_TRUSTED)) {
		/* Requested trust state differs from current one: toggle it */
		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
		/* disable multicast and promisc entries */
		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
			if (blkaddr < 0)
				return 0;
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   target, 0);
			if (nixlf < 0)
				return 0;
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_ALLMULTI_ENTRY,
						     false);
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_PROMISC_ENTRY,
						     false);
		}
	}

	return 0;
}
1802
/* Dispatch one received mbox message to its rvu_mbox_handler_*()
 * implementation. The M() macro expands MBOX_MESSAGES into one switch
 * case per message ID: it allocates the response, fills the response
 * header, invokes the handler and propagates the handler's error code
 * in rsp->hdr.rc. Messages with a bad signature or unknown ID get an
 * "invalid msg" reply instead.
 */
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
				struct mbox_msghdr *req)
{
	struct rvu *rvu = pci_get_drvdata(mbox->pdev);

	/* Check if valid, if not reply with a invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
	case _id: {                                                     \
		struct _rsp_type *rsp;                                  \
		int err;                                                \
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(          \
			mbox, devid,                                    \
			sizeof(struct _rsp_type));                      \
		/* some handlers should complete even if reply */       \
		/* could not be allocated */                            \
		if (!rsp &&                                             \
		    _id != MBOX_MSG_DETACH_RESOURCES &&                 \
		    _id != MBOX_MSG_NIX_TXSCH_FREE &&                   \
		    _id != MBOX_MSG_VF_FLR)                             \
			return -ENOMEM;                                 \
		if (rsp) {                                              \
			rsp->hdr.id = _id;                              \
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;               \
			rsp->hdr.pcifunc = req->pcifunc;                \
			rsp->hdr.rc = 0;                                \
		}                                                       \
									\
		err = rvu_mbox_handler_ ## _fn_name(rvu,                \
						    (struct _req_type *)req, \
						    rsp);               \
		if (rsp && err)                                         \
			rsp->hdr.rc = err;                              \
									\
		trace_otx2_msg_process(mbox->pdev, _id, err);           \
		return rsp ? err : -ENOMEM;                             \
	}
MBOX_MESSAGES
#undef M

/* Bad signature or message ID no handler is registered for */
bad_message:
	default:
		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
		return -ENODEV;
	}
}
1853
/* Work-queue handler: walk all request messages received from one
 * PF (TYPE_AFPF) or one of AF's VFs (TYPE_AFVF), stamp the sender's
 * identity into each message header, dispatch them and finally send
 * the accumulated responses back.
 */
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	int offset, err, id, devid;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	/* The work item's index in mbox_wrk[] identifies the sender */
	devid = mwork - mw->mbox_wrk;
	mbox = &mw->mbox;
	mdev = &mbox->dev[devid];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk[devid].num_msgs == 0)
		return;

	/* First message follows the aligned mbox header */
	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF/VF sent this message based on mbox IRQ */
		switch (type) {
		case TYPE_AFPF:
			msg->pcifunc &=
				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
			break;
		case TYPE_AFVF:
			/* VF numbers are 1-based in pcifunc, hence the +1 */
			msg->pcifunc &=
				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
			break;
		}

		err = rvu_process_mbox_msg(mbox, devid, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc),
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, devid);
	}
	/* Mark this batch consumed before replying */
	mw->mbox_wrk[devid].num_msgs = 0;

	/* Send mbox responses to VF/PF */
	otx2_mbox_msg_send(mbox, devid);
}
1924
1925 static inline void rvu_afpf_mbox_handler(struct work_struct *work)
1926 {
1927         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1928
1929         __rvu_mbox_handler(mwork, TYPE_AFPF);
1930 }
1931
1932 static inline void rvu_afvf_mbox_handler(struct work_struct *work)
1933 {
1934         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1935
1936         __rvu_mbox_handler(mwork, TYPE_AFVF);
1937 }
1938
/* Work-queue handler for the "up" (AF-initiated notification) mailbox:
 * walk the responses a PF/VF sent back to AF's up-messages, validate
 * each one and log unexpected errors. No per-message processing is
 * needed beyond that; the mbox is reset at the end.
 */
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;
	int offset, id, devid;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	/* Work item index identifies which PF/VF replied */
	devid = mwork - mw->mbox_wrk_up;
	mbox = &mw->mbox_up;
	mdev = &mbox->dev[devid];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	/* First response follows the aligned mbox header */
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			/* Link-event acks carry no status worth checking */
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		/* Advance to the next message even if this one was bad */
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}
	mw->mbox_wrk_up[devid].up_num_msgs = 0;

	otx2_mbox_reset(mbox, devid);
}
2006
2007 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2008 {
2009         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2010
2011         __rvu_mbox_up_handler(mwork, TYPE_AFPF);
2012 }
2013
2014 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2015 {
2016         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2017
2018         __rvu_mbox_up_handler(mwork, TYPE_AFVF);
2019 }
2020
/* ioremap 'num' mailbox regions into mbox_addr[]. The base address
 * discovery differs per platform (per-PF BAR4 registers on cn10k vs a
 * single BAR4 register on OcteonTx2) and per direction (AF<->PF vs
 * PF<->VF). On failure, every region mapped so far is unmapped.
 */
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
				int num, int type)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int region;
	u64 bar4;

	/* For cn10k platform VF mailbox regions of a PF follows after the
	 * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
	 * RVU_PF_VF_BAR4_ADDR register.
	 */
	if (type == TYPE_AFVF) {
		for (region = 0; region < num; region++) {
			if (hw->cap.per_pf_mbox_regs) {
				/* VF regions start one MBOX_SIZE past PF0's */
				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
						  RVU_AF_PFX_BAR4_ADDR(0)) +
						  MBOX_SIZE;
				bar4 += region * MBOX_SIZE;
			} else {
				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
				bar4 += region * MBOX_SIZE;
			}
			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
			if (!mbox_addr[region])
				goto error;
		}
		return 0;
	}

	/* For cn10k platform AF <-> PF mailbox region of a PF is read from per
	 * PF registers. Whereas for Octeontx2 it is read from
	 * RVU_AF_PF_BAR4_ADDR register.
	 */
	for (region = 0; region < num; region++) {
		if (hw->cap.per_pf_mbox_regs) {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PFX_BAR4_ADDR(region));
		} else {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PF_BAR4_ADDR);
			bar4 += region * MBOX_SIZE;
		}
		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
		if (!mbox_addr[region])
			goto error;
	}
	return 0;

error:
	/* Unwind only the regions mapped before the failure */
	while (region--)
		iounmap((void __iomem *)mbox_addr[region]);
	return -ENOMEM;
}
2074
2075 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2076                          int type, int num,
2077                          void (mbox_handler)(struct work_struct *),
2078                          void (mbox_up_handler)(struct work_struct *))
2079 {
2080         int err = -EINVAL, i, dir, dir_up;
2081         void __iomem *reg_base;
2082         struct rvu_work *mwork;
2083         void **mbox_regions;
2084         const char *name;
2085
2086         mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
2087         if (!mbox_regions)
2088                 return -ENOMEM;
2089
2090         switch (type) {
2091         case TYPE_AFPF:
2092                 name = "rvu_afpf_mailbox";
2093                 dir = MBOX_DIR_AFPF;
2094                 dir_up = MBOX_DIR_AFPF_UP;
2095                 reg_base = rvu->afreg_base;
2096                 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
2097                 if (err)
2098                         goto free_regions;
2099                 break;
2100         case TYPE_AFVF:
2101                 name = "rvu_afvf_mailbox";
2102                 dir = MBOX_DIR_PFVF;
2103                 dir_up = MBOX_DIR_PFVF_UP;
2104                 reg_base = rvu->pfreg_base;
2105                 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
2106                 if (err)
2107                         goto free_regions;
2108                 break;
2109         default:
2110                 return err;
2111         }
2112
2113         mw->mbox_wq = alloc_workqueue(name,
2114                                       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2115                                       num);
2116         if (!mw->mbox_wq) {
2117                 err = -ENOMEM;
2118                 goto unmap_regions;
2119         }
2120
2121         mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2122                                     sizeof(struct rvu_work), GFP_KERNEL);
2123         if (!mw->mbox_wrk) {
2124                 err = -ENOMEM;
2125                 goto exit;
2126         }
2127
2128         mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2129                                        sizeof(struct rvu_work), GFP_KERNEL);
2130         if (!mw->mbox_wrk_up) {
2131                 err = -ENOMEM;
2132                 goto exit;
2133         }
2134
2135         err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2136                                      reg_base, dir, num);
2137         if (err)
2138                 goto exit;
2139
2140         err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2141                                      reg_base, dir_up, num);
2142         if (err)
2143                 goto exit;
2144
2145         for (i = 0; i < num; i++) {
2146                 mwork = &mw->mbox_wrk[i];
2147                 mwork->rvu = rvu;
2148                 INIT_WORK(&mwork->work, mbox_handler);
2149
2150                 mwork = &mw->mbox_wrk_up[i];
2151                 mwork->rvu = rvu;
2152                 INIT_WORK(&mwork->work, mbox_up_handler);
2153         }
2154         kfree(mbox_regions);
2155         return 0;
2156
2157 exit:
2158         destroy_workqueue(mw->mbox_wq);
2159 unmap_regions:
2160         while (num--)
2161                 iounmap((void __iomem *)mbox_regions[num]);
2162 free_regions:
2163         kfree(mbox_regions);
2164         return err;
2165 }
2166
2167 static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2168 {
2169         struct otx2_mbox *mbox = &mw->mbox;
2170         struct otx2_mbox_dev *mdev;
2171         int devid;
2172
2173         if (mw->mbox_wq) {
2174                 flush_workqueue(mw->mbox_wq);
2175                 destroy_workqueue(mw->mbox_wq);
2176                 mw->mbox_wq = NULL;
2177         }
2178
2179         for (devid = 0; devid < mbox->ndevs; devid++) {
2180                 mdev = &mbox->dev[devid];
2181                 if (mdev->hwbase)
2182                         iounmap((void __iomem *)mdev->hwbase);
2183         }
2184
2185         otx2_mbox_destroy(&mw->mbox);
2186         otx2_mbox_destroy(&mw->mbox_up);
2187 }
2188
2189 static void rvu_queue_work(struct mbox_wq_info *mw, int first,
2190                            int mdevs, u64 intr)
2191 {
2192         struct otx2_mbox_dev *mdev;
2193         struct otx2_mbox *mbox;
2194         struct mbox_hdr *hdr;
2195         int i;
2196
2197         for (i = first; i < mdevs; i++) {
2198                 /* start from 0 */
2199                 if (!(intr & BIT_ULL(i - first)))
2200                         continue;
2201
2202                 mbox = &mw->mbox;
2203                 mdev = &mbox->dev[i];
2204                 hdr = mdev->mbase + mbox->rx_start;
2205
2206                 /*The hdr->num_msgs is set to zero immediately in the interrupt
2207                  * handler to  ensure that it holds a correct value next time
2208                  * when the interrupt handler is called.
2209                  * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
2210                  * pf>mbox.up_num_msgs holds the data for use in
2211                  * pfaf_mbox_up_handler.
2212                  */
2213
2214                 if (hdr->num_msgs) {
2215                         mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
2216                         hdr->num_msgs = 0;
2217                         queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
2218                 }
2219                 mbox = &mw->mbox_up;
2220                 mdev = &mbox->dev[i];
2221                 hdr = mdev->mbase + mbox->rx_start;
2222                 if (hdr->num_msgs) {
2223                         mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
2224                         hdr->num_msgs = 0;
2225                         queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
2226                 }
2227         }
2228 }
2229
/* IRQ handler for all mailbox interrupts targeting AF: PF->AF
 * requests plus VF->AF requests from AF's own VFs (split across two
 * 64-bit interrupt registers when more than 64 VFs exist). Interrupts
 * are acked (W1C) before the work is queued.
 */
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfs = rvu->vfs;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);

	/* Sync with mbox memory region */
	rmb();

	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);

	/* Handle VF interrupts */
	if (vfs > 64) {
		/* Second register covers VFs 64..(vfs-1) */
		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);

		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs -= 64;
	}

	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);

	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);

	return IRQ_HANDLED;
}
2265
2266 static void rvu_enable_mbox_intr(struct rvu *rvu)
2267 {
2268         struct rvu_hwinfo *hw = rvu->hw;
2269
2270         /* Clear spurious irqs, if any */
2271         rvu_write64(rvu, BLKADDR_RVUM,
2272                     RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2273
2274         /* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */
2275         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2276                     INTR_MASK(hw->total_pfs) & ~1ULL);
2277 }
2278
2279 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2280 {
2281         struct rvu_block *block;
2282         int slot, lf, num_lfs;
2283         int err;
2284
2285         block = &rvu->hw->block[blkaddr];
2286         num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2287                                         block->addr);
2288         if (!num_lfs)
2289                 return;
2290         for (slot = 0; slot < num_lfs; slot++) {
2291                 lf = rvu_get_lf(rvu, block, pcifunc, slot);
2292                 if (lf < 0)
2293                         continue;
2294
2295                 /* Cleanup LF and reset it */
2296                 if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2297                         rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2298                 else if (block->addr == BLKADDR_NPA)
2299                         rvu_npa_lf_teardown(rvu, pcifunc, lf);
2300                 else if ((block->addr == BLKADDR_CPT0) ||
2301                          (block->addr == BLKADDR_CPT1))
2302                         rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
2303
2304                 err = rvu_lf_reset(rvu, block, lf);
2305                 if (err) {
2306                         dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2307                                 block->addr, lf);
2308                 }
2309         }
2310 }
2311
/* Common FLR cleanup: tear down every block LF owned by 'pcifunc' and
 * detach its resources. Serialized by flr_lock since FLR work items
 * for different PFs/VFs can run concurrently.
 */
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	mutex_lock(&rvu->flr_lock);
	/* Reset order should reflect inter-block dependencies:
	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
	 * 2. Flush and reset SSO/SSOW
	 * 3. Cleanup pools (NPA)
	 */
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
	/* Free all remaining resources attached to this PF/VF */
	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	mutex_unlock(&rvu->flr_lock);
}
2331
/* FLR handler for one of AF's own VFs */
static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
{
	int reg = 0;

	/* pcifunc = 0(PF0) | (vf + 1) */
	__rvu_flr_handler(rvu, vf + 1);

	/* TRPEND/FLR registers come in two banks of 64 VFs */
	if (vf >= 64) {
		reg = 1;
		vf -= 64;
	}

	/* Signal FLR finish and enable IRQ */
	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}
2348
2349 static void rvu_flr_handler(struct work_struct *work)
2350 {
2351         struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2352         struct rvu *rvu = flrwork->rvu;
2353         u16 pcifunc, numvfs, vf;
2354         u64 cfg;
2355         int pf;
2356
2357         pf = flrwork - rvu->flr_wrk;
2358         if (pf >= rvu->hw->total_pfs) {
2359                 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2360                 return;
2361         }
2362
2363         cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2364         numvfs = (cfg >> 12) & 0xFF;
2365         pcifunc  = pf << RVU_PFVF_PF_SHIFT;
2366
2367         for (vf = 0; vf < numvfs; vf++)
2368                 __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2369
2370         __rvu_flr_handler(rvu, pcifunc);
2371
2372         /* Signal FLR finish */
2373         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2374
2375         /* Enable interrupt */
2376         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,  BIT_ULL(pf));
2377 }
2378
2379 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2380 {
2381         int dev, vf, reg = 0;
2382         u64 intr;
2383
2384         if (start_vf >= 64)
2385                 reg = 1;
2386
2387         intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2388         if (!intr)
2389                 return;
2390
2391         for (vf = 0; vf < numvfs; vf++) {
2392                 if (!(intr & BIT_ULL(vf)))
2393                         continue;
2394                 dev = vf + start_vf + rvu->hw->total_pfs;
2395                 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2396                 /* Clear and disable the interrupt */
2397                 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2398                 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2399         }
2400 }
2401
/* IRQ handler for PF FLR interrupts. Queues per-PF FLR work (clearing
 * and masking each pending bit until the work re-enables it), then
 * scans AF's VF FLR interrupt banks as well.
 */
static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8  pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
	if (!intr)
		goto afvf_flr;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* PF is already dead do only AF related operations */
			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
				    BIT_ULL(pf));
			/* Disable the interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
				    BIT_ULL(pf));
		}
	}

afvf_flr:
	/* AF's VFs use two 64-bit interrupt banks */
	rvu_afvf_queue_flr_work(rvu, 0, 64);
	if (rvu->vfs > 64)
		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);

	return IRQ_HANDLED;
}
2432
2433 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2434 {
2435         int vf;
2436
2437         /* Nothing to be done here other than clearing the
2438          * TRPEND bit.
2439          */
2440         for (vf = 0; vf < 64; vf++) {
2441                 if (intr & (1ULL << vf)) {
2442                         /* clear the trpend due to ME(master enable) */
2443                         rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2444                         /* clear interrupt */
2445                         rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2446                 }
2447         }
2448 }
2449
/* Handles ME interrupts from VFs of AF */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfset;
	u64 intr;

	/* NOTE(review): this PFME_INT read result is overwritten in the
	 * loop below before it is ever used; it looks redundant unless
	 * the read itself has a hardware side effect - confirm against
	 * the RVU AF register specification.
	 */
	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	/* Ack ME events for both 64-VF interrupt banks */
	for (vfset = 0; vfset <= 1; vfset++) {
		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
		if (intr)
			rvu_me_handle_vfset(rvu, vfset, intr);
	}

	return IRQ_HANDLED;
}
2467
2468 /* Handles ME interrupts from PFs */
2469 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2470 {
2471         struct rvu *rvu = (struct rvu *)rvu_irq;
2472         u64 intr;
2473         u8  pf;
2474
2475         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2476
2477         /* Nothing to be done here other than clearing the
2478          * TRPEND bit.
2479          */
2480         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2481                 if (intr & (1ULL << pf)) {
2482                         /* clear the trpend due to ME(master enable) */
2483                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2484                                     BIT_ULL(pf));
2485                         /* clear interrupt */
2486                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2487                                     BIT_ULL(pf));
2488                 }
2489         }
2490
2491         return IRQ_HANDLED;
2492 }
2493
2494 static void rvu_unregister_interrupts(struct rvu *rvu)
2495 {
2496         int irq;
2497
2498         /* Disable the Mbox interrupt */
2499         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2500                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2501
2502         /* Disable the PF FLR interrupt */
2503         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2504                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2505
2506         /* Disable the PF ME interrupt */
2507         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2508                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2509
2510         for (irq = 0; irq < rvu->num_vec; irq++) {
2511                 if (rvu->irq_allocated[irq]) {
2512                         free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2513                         rvu->irq_allocated[irq] = false;
2514                 }
2515         }
2516
2517         pci_free_irq_vectors(rvu->pdev);
2518         rvu->num_vec = 0;
2519 }
2520
2521 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2522 {
2523         struct rvu_pfvf *pfvf = &rvu->pf[0];
2524         int offset;
2525
2526         pfvf = &rvu->pf[0];
2527         offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2528
2529         /* Make sure there are enough MSIX vectors configured so that
2530          * VF interrupts can be handled. Offset equal to zero means
2531          * that PF vectors are not configured and overlapping AF vectors.
2532          */
2533         return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2534                offset;
2535 }
2536
2537 static int rvu_register_interrupts(struct rvu *rvu)
2538 {
2539         int ret, offset, pf_vec_start;
2540
2541         rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2542
2543         rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2544                                            NAME_SIZE, GFP_KERNEL);
2545         if (!rvu->irq_name)
2546                 return -ENOMEM;
2547
2548         rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2549                                           sizeof(bool), GFP_KERNEL);
2550         if (!rvu->irq_allocated)
2551                 return -ENOMEM;
2552
2553         /* Enable MSI-X */
2554         ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2555                                     rvu->num_vec, PCI_IRQ_MSIX);
2556         if (ret < 0) {
2557                 dev_err(rvu->dev,
2558                         "RVUAF: Request for %d msix vectors failed, ret %d\n",
2559                         rvu->num_vec, ret);
2560                 return ret;
2561         }
2562
2563         /* Register mailbox interrupt handler */
2564         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2565         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2566                           rvu_mbox_intr_handler, 0,
2567                           &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2568         if (ret) {
2569                 dev_err(rvu->dev,
2570                         "RVUAF: IRQ registration failed for mbox irq\n");
2571                 goto fail;
2572         }
2573
2574         rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2575
2576         /* Enable mailbox interrupts from all PFs */
2577         rvu_enable_mbox_intr(rvu);
2578
2579         /* Register FLR interrupt handler */
2580         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2581                 "RVUAF FLR");
2582         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2583                           rvu_flr_intr_handler, 0,
2584                           &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2585                           rvu);
2586         if (ret) {
2587                 dev_err(rvu->dev,
2588                         "RVUAF: IRQ registration failed for FLR\n");
2589                 goto fail;
2590         }
2591         rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2592
2593         /* Enable FLR interrupt for all PFs*/
2594         rvu_write64(rvu, BLKADDR_RVUM,
2595                     RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2596
2597         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2598                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2599
2600         /* Register ME interrupt handler */
2601         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2602                 "RVUAF ME");
2603         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2604                           rvu_me_pf_intr_handler, 0,
2605                           &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2606                           rvu);
2607         if (ret) {
2608                 dev_err(rvu->dev,
2609                         "RVUAF: IRQ registration failed for ME\n");
2610         }
2611         rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2612
2613         /* Clear TRPEND bit for all PF */
2614         rvu_write64(rvu, BLKADDR_RVUM,
2615                     RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
2616         /* Enable ME interrupt for all PFs*/
2617         rvu_write64(rvu, BLKADDR_RVUM,
2618                     RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2619
2620         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2621                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2622
2623         if (!rvu_afvf_msix_vectors_num_ok(rvu))
2624                 return 0;
2625
2626         /* Get PF MSIX vectors offset. */
2627         pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2628                                   RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2629
2630         /* Register MBOX0 interrupt. */
2631         offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2632         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2633         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2634                           rvu_mbox_intr_handler, 0,
2635                           &rvu->irq_name[offset * NAME_SIZE],
2636                           rvu);
2637         if (ret)
2638                 dev_err(rvu->dev,
2639                         "RVUAF: IRQ registration failed for Mbox0\n");
2640
2641         rvu->irq_allocated[offset] = true;
2642
2643         /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
2644          * simply increment current offset by 1.
2645          */
2646         offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2647         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2648         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2649                           rvu_mbox_intr_handler, 0,
2650                           &rvu->irq_name[offset * NAME_SIZE],
2651                           rvu);
2652         if (ret)
2653                 dev_err(rvu->dev,
2654                         "RVUAF: IRQ registration failed for Mbox1\n");
2655
2656         rvu->irq_allocated[offset] = true;
2657
2658         /* Register FLR interrupt handler for AF's VFs */
2659         offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
2660         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
2661         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2662                           rvu_flr_intr_handler, 0,
2663                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2664         if (ret) {
2665                 dev_err(rvu->dev,
2666                         "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
2667                 goto fail;
2668         }
2669         rvu->irq_allocated[offset] = true;
2670
2671         offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
2672         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
2673         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2674                           rvu_flr_intr_handler, 0,
2675                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2676         if (ret) {
2677                 dev_err(rvu->dev,
2678                         "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
2679                 goto fail;
2680         }
2681         rvu->irq_allocated[offset] = true;
2682
2683         /* Register ME interrupt handler for AF's VFs */
2684         offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
2685         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
2686         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2687                           rvu_me_vf_intr_handler, 0,
2688                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2689         if (ret) {
2690                 dev_err(rvu->dev,
2691                         "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
2692                 goto fail;
2693         }
2694         rvu->irq_allocated[offset] = true;
2695
2696         offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
2697         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
2698         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2699                           rvu_me_vf_intr_handler, 0,
2700                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2701         if (ret) {
2702                 dev_err(rvu->dev,
2703                         "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
2704                 goto fail;
2705         }
2706         rvu->irq_allocated[offset] = true;
2707         return 0;
2708
2709 fail:
2710         rvu_unregister_interrupts(rvu);
2711         return ret;
2712 }
2713
2714 static void rvu_flr_wq_destroy(struct rvu *rvu)
2715 {
2716         if (rvu->flr_wq) {
2717                 flush_workqueue(rvu->flr_wq);
2718                 destroy_workqueue(rvu->flr_wq);
2719                 rvu->flr_wq = NULL;
2720         }
2721 }
2722
2723 static int rvu_flr_init(struct rvu *rvu)
2724 {
2725         int dev, num_devs;
2726         u64 cfg;
2727         int pf;
2728
2729         /* Enable FLR for all PFs*/
2730         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2731                 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2732                 rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
2733                             cfg | BIT_ULL(22));
2734         }
2735
2736         rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
2737                                       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2738                                        1);
2739         if (!rvu->flr_wq)
2740                 return -ENOMEM;
2741
2742         num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
2743         rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
2744                                     sizeof(struct rvu_work), GFP_KERNEL);
2745         if (!rvu->flr_wrk) {
2746                 destroy_workqueue(rvu->flr_wq);
2747                 return -ENOMEM;
2748         }
2749
2750         for (dev = 0; dev < num_devs; dev++) {
2751                 rvu->flr_wrk[dev].rvu = rvu;
2752                 INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
2753         }
2754
2755         mutex_init(&rvu->flr_lock);
2756
2757         return 0;
2758 }
2759
2760 static void rvu_disable_afvf_intr(struct rvu *rvu)
2761 {
2762         int vfs = rvu->vfs;
2763
2764         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
2765         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
2766         rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
2767         if (vfs <= 64)
2768                 return;
2769
2770         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
2771                       INTR_MASK(vfs - 64));
2772         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2773         rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2774 }
2775
2776 static void rvu_enable_afvf_intr(struct rvu *rvu)
2777 {
2778         int vfs = rvu->vfs;
2779
2780         /* Clear any pending interrupts and enable AF VF interrupts for
2781          * the first 64 VFs.
2782          */
2783         /* Mbox */
2784         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
2785         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
2786
2787         /* FLR */
2788         rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
2789         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
2790         rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
2791
2792         /* Same for remaining VFs, if any. */
2793         if (vfs <= 64)
2794                 return;
2795
2796         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
2797         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
2798                       INTR_MASK(vfs - 64));
2799
2800         rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
2801         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2802         rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2803 }
2804
2805 int rvu_get_num_lbk_chans(void)
2806 {
2807         struct pci_dev *pdev;
2808         void __iomem *base;
2809         int ret = -EIO;
2810
2811         pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
2812                               NULL);
2813         if (!pdev)
2814                 goto err;
2815
2816         base = pci_ioremap_bar(pdev, 0);
2817         if (!base)
2818                 goto err_put;
2819
2820         /* Read number of available LBK channels from LBK(0)_CONST register. */
2821         ret = (readq(base + 0x10) >> 32) & 0xffff;
2822         iounmap(base);
2823 err_put:
2824         pci_dev_put(pdev);
2825 err:
2826         return ret;
2827 }
2828
2829 static int rvu_enable_sriov(struct rvu *rvu)
2830 {
2831         struct pci_dev *pdev = rvu->pdev;
2832         int err, chans, vfs;
2833
2834         if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
2835                 dev_warn(&pdev->dev,
2836                          "Skipping SRIOV enablement since not enough IRQs are available\n");
2837                 return 0;
2838         }
2839
2840         chans = rvu_get_num_lbk_chans();
2841         if (chans < 0)
2842                 return chans;
2843
2844         vfs = pci_sriov_get_totalvfs(pdev);
2845
2846         /* Limit VFs in case we have more VFs than LBK channels available. */
2847         if (vfs > chans)
2848                 vfs = chans;
2849
2850         if (!vfs)
2851                 return 0;
2852
2853         /* Save VFs number for reference in VF interrupts handlers.
2854          * Since interrupts might start arriving during SRIOV enablement
2855          * ordinary API cannot be used to get number of enabled VFs.
2856          */
2857         rvu->vfs = vfs;
2858
2859         err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
2860                             rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
2861         if (err)
2862                 return err;
2863
2864         rvu_enable_afvf_intr(rvu);
2865         /* Make sure IRQs are enabled before SRIOV. */
2866         mb();
2867
2868         err = pci_enable_sriov(pdev, vfs);
2869         if (err) {
2870                 rvu_disable_afvf_intr(rvu);
2871                 rvu_mbox_destroy(&rvu->afvf_wq_info);
2872                 return err;
2873         }
2874
2875         return 0;
2876 }
2877
/* Tear down AF VF support. Order matters: VF interrupts are masked
 * first, then the AF<->VF mailbox workqueue is destroyed, and only
 * then is SRIOV disabled on the PCI device.
 */
static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}
2884
2885 static void rvu_update_module_params(struct rvu *rvu)
2886 {
2887         const char *default_pfl_name = "default";
2888
2889         strscpy(rvu->mkex_pfl_name,
2890                 mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
2891         strscpy(rvu->kpu_pfl_name,
2892                 kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
2893 }
2894
/* PCI probe for the RVU Admin Function device.
 *
 * Brings up the AF in strict order: PCI enable, BAR mapping, HW block
 * discovery/reset, AF<->PF mailbox, FLR workqueue, interrupts, devlink,
 * SRIOV (AF's VFs) and debugfs. The error labels below unwind in exact
 * reverse order of setup — do not reorder them.
 */
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int    err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Device uses 48-bit DMA addressing */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	/* PTP is optional: defer only while its driver is still probing,
	 * otherwise continue without PTP support (rvu->ptp = NULL).
	 */
	rvu->ptp = ptp_get();
	if (IS_ERR(rvu->ptp)) {
		err = PTR_ERR(rvu->ptp);
		if (err == -EPROBE_DEFER)
			goto err_release_regions;
		rvu->ptp = NULL;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_put_ptp;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_put_ptp;

	/* Init mailbox btw AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err)
		goto err_hwsetup;

	err = rvu_flr_init(rvu);
	if (err)
		goto err_mbox;

	err = rvu_register_interrupts(rvu);
	if (err)
		goto err_flr;

	err = rvu_register_dl(rvu);
	if (err)
		goto err_irq;

	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err)
		goto err_dl;

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	return 0;
err_dl:
	rvu_unregister_dl(rvu);
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
	ptp_put(rvu->ptp);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}
3022
/* PCI remove: tear everything down in reverse order of rvu_probe().
 * The sequence (debugfs, devlink, interrupts, FLR wq, CGX, fwdata,
 * mailbox, SRIOV, block reset, HW resources, PTP, PCI) is ordered so
 * that no interrupt or work item can run against freed state.
 */
static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_dl(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
	ptp_put(rvu->ptp);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}
3046
/* PCI driver registration glue for the RVU AF device */
static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};
3053
3054 static int __init rvu_init_module(void)
3055 {
3056         int err;
3057
3058         pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
3059
3060         err = pci_register_driver(&cgx_driver);
3061         if (err < 0)
3062                 return err;
3063
3064         err = pci_register_driver(&ptp_driver);
3065         if (err < 0)
3066                 goto ptp_err;
3067
3068         err =  pci_register_driver(&rvu_driver);
3069         if (err < 0)
3070                 goto rvu_err;
3071
3072         return 0;
3073 rvu_err:
3074         pci_unregister_driver(&ptp_driver);
3075 ptp_err:
3076         pci_unregister_driver(&cgx_driver);
3077
3078         return err;
3079 }
3080
/* Module exit: unregister the three drivers in reverse order of
 * registration (AF first, since it depends on PTP and CGX).
 */
static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
}
3087
3088 module_init(rvu_init_module);
3089 module_exit(rvu_cleanup_module);