// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"

#include "rvu_trace.h"
#include "rvu_npc_hash.h"

#define DRV_NAME        "rvu_af"
#define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
                         int type, int num,
                         void (mbox_handler)(struct work_struct *),
                         void (mbox_up_handler)(struct work_struct *));
enum {
        TYPE_AFVF,
        TYPE_AFPF,
};

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
        { 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static char *kpu_profile; /* KPU profile name */
module_param(kpu_profile, charp, 0000);
MODULE_PARM_DESC(kpu_profile, "KPU profile name string");

static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;

        hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
        hw->cap.nix_fixed_txschq_mapping = false;
        hw->cap.nix_shaping = true;
        hw->cap.nix_tx_link_bp = true;
        hw->cap.nix_rx_multicast = true;
        hw->cap.nix_shaper_toggle_wait = false;
        hw->cap.npc_hash_extract = false;
        hw->cap.npc_exact_match_enabled = false;
        hw->rvu = rvu;

        if (is_rvu_pre_96xx_C0(rvu)) {
                hw->cap.nix_fixed_txschq_mapping = true;
                hw->cap.nix_txsch_per_cgx_lmac = 4;
                hw->cap.nix_txsch_per_lbk_lmac = 132;
                hw->cap.nix_txsch_per_sdp_lmac = 76;
                hw->cap.nix_shaping = false;
                hw->cap.nix_tx_link_bp = false;
                if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
                        hw->cap.nix_rx_multicast = false;
        }
        if (!is_rvu_pre_96xx_C0(rvu))
                hw->cap.nix_shaper_toggle_wait = true;

        if (!is_rvu_otx2(rvu))
                hw->cap.per_pf_mbox_regs = true;

        if (is_rvu_npc_hash_extract_en(rvu))
                hw->cap.npc_hash_extract = true;
}

/* Poll an RVU block's register 'offset' for a 'zero' or
 * 'nonzero' value at the bits specified by 'mask'.
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
        unsigned long timeout = jiffies + usecs_to_jiffies(20000);
        bool twice = false;
        void __iomem *reg;
        u64 reg_val;

        reg = rvu->afreg_base + ((block << 28) | offset);
again:
        reg_val = readq(reg);
        if (zero && !(reg_val & mask))
                return 0;
        if (!zero && (reg_val & mask))
                return 0;
        if (time_before(jiffies, timeout)) {
                usleep_range(1, 5);
                goto again;
        }
        /* If the CPU was scheduled out just before the 'time_before'
         * check above and woke up only after the timeout had already
         * elapsed, the hardware may well have finished the operation
         * in the meantime, so sample the register once more before
         * giving up.
         */
        if (!twice) {
                twice = true;
                goto again;
        }
        return -EBUSY;
}

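/* Allocate a single free entry from the resource bitmap and mark it
 * used; returns the entry's index, or -ENOSPC when the map is full.
 * Callers typically serialize allocations via rvu->rsrc_lock.
 */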
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
        int id;

        if (!rsrc->bmap)
                return -EINVAL;

        id = find_first_zero_bit(rsrc->bmap, rsrc->max);
        if (id >= rsrc->max)
                return -ENOSPC;

        __set_bit(id, rsrc->bmap);

        return id;
}

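/* Allocate 'nrsrc' contiguous entries from the resource bitmap;
 * returns the index of the first entry, or -ENOSPC if no run of
 * that length is free.
 */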
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return -EINVAL;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return -ENOSPC;

        bitmap_set(rsrc->bmap, start, nrsrc);
        return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
        if (!rsrc->bmap)
                return;
        if (start >= rsrc->max)
                return;

        bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return false;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return false;

        return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
        if (!rsrc->bmap)
                return;

        __clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
        int used;

        if (!rsrc->bmap)
                return 0;

        used = bitmap_weight(rsrc->bmap, rsrc->max);
        return (rsrc->max - used);
}

bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
{
        if (!rsrc->bmap)
                return false;

        return !test_bit(id, rsrc->bmap);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
        rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
                             sizeof(long), GFP_KERNEL);
        if (!rsrc->bmap)
                return -ENOMEM;
        return 0;
}

void rvu_free_bitmap(struct rsrc_bmap *rsrc)
{
        kfree(rsrc->bmap);
}

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
        u16 match = 0;
        int lf;

        mutex_lock(&rvu->rsrc_lock);
        for (lf = 0; lf < block->lf.max; lf++) {
                if (block->fn_map[lf] == pcifunc) {
                        if (slot == match) {
                                mutex_unlock(&rvu->rsrc_lock);
                                return lf;
                        }
                        match++;
                }
        }
        mutex_unlock(&rvu->rsrc_lock);
        return -ENODEV;
}

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * For a pcifunc, if LFs are attached from multiple blocks of the same
 * type, return the blkaddr of the first block encountered.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
        int devnum, blkaddr = -ENODEV;
        u64 cfg, reg;
        bool is_pf;

        switch (blktype) {
        case BLKTYPE_NPC:
                blkaddr = BLKADDR_NPC;
                goto exit;
        case BLKTYPE_NPA:
                blkaddr = BLKADDR_NPA;
                goto exit;
        case BLKTYPE_NIX:
                /* For now assume NIX0 */
                if (!pcifunc) {
                        blkaddr = BLKADDR_NIX0;
                        goto exit;
                }
                break;
        case BLKTYPE_SSO:
                blkaddr = BLKADDR_SSO;
                goto exit;
        case BLKTYPE_SSOW:
                blkaddr = BLKADDR_SSOW;
                goto exit;
        case BLKTYPE_TIM:
                blkaddr = BLKADDR_TIM;
                goto exit;
        case BLKTYPE_CPT:
                /* For now assume CPT0 */
                if (!pcifunc) {
                        blkaddr = BLKADDR_CPT0;
                        goto exit;
                }
                break;
        }

        /* Check if this is an RVU PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK) {
                is_pf = false;
                devnum = rvu_get_hwvf(rvu, pcifunc);
        } else {
                is_pf = true;
                devnum = rvu_get_pf(pcifunc);
        }

        /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
         * 'BLKADDR_NIX1'.
         */
        if (blktype == BLKTYPE_NIX) {
                reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
                        RVU_PRIV_HWVFX_NIXX_CFG(0);
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg) {
                        blkaddr = BLKADDR_NIX0;
                        goto exit;
                }

                reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
                        RVU_PRIV_HWVFX_NIXX_CFG(1);
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg)
                        blkaddr = BLKADDR_NIX1;
        }

        if (blktype == BLKTYPE_CPT) {
                reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
                        RVU_PRIV_HWVFX_CPTX_CFG(0);
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg) {
                        blkaddr = BLKADDR_CPT0;
                        goto exit;
                }

                reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
                        RVU_PRIV_HWVFX_CPTX_CFG(1);
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg)
                        blkaddr = BLKADDR_CPT1;
        }

exit:
        if (is_block_implemented(rvu->hw, blkaddr))
                return blkaddr;
        return -ENODEV;
}

static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, u16 pcifunc,
                                u16 lf, bool attach)
{
        int devnum, num_lfs = 0;
        bool is_pf;
        u64 reg;

        if (lf >= block->lf.max) {
                dev_err(&rvu->pdev->dev,
                        "%s: FATAL: LF %d is >= %s's max LFs, i.e. %d\n",
                        __func__, lf, block->name, block->lf.max);
                return;
        }

        /* Check if this is for an RVU PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK) {
                is_pf = false;
                devnum = rvu_get_hwvf(rvu, pcifunc);
        } else {
                is_pf = true;
                devnum = rvu_get_pf(pcifunc);
        }

        block->fn_map[lf] = attach ? pcifunc : 0;

        switch (block->addr) {
        case BLKADDR_NPA:
                pfvf->npalf = attach;
                num_lfs = pfvf->npalf;
                break;
        case BLKADDR_NIX0:
        case BLKADDR_NIX1:
                pfvf->nixlf = attach;
                num_lfs = pfvf->nixlf;
                break;
        case BLKADDR_SSO:
                attach ? pfvf->sso++ : pfvf->sso--;
                num_lfs = pfvf->sso;
                break;
        case BLKADDR_SSOW:
                attach ? pfvf->ssow++ : pfvf->ssow--;
                num_lfs = pfvf->ssow;
                break;
        case BLKADDR_TIM:
                attach ? pfvf->timlfs++ : pfvf->timlfs--;
                num_lfs = pfvf->timlfs;
                break;
        case BLKADDR_CPT0:
                attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
                num_lfs = pfvf->cptlfs;
                break;
        case BLKADDR_CPT1:
                attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
                num_lfs = pfvf->cpt1_lfs;
                break;
        }

        reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
        rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}

inline int rvu_get_pf(u16 pcifunc)
{
        return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
        u64 cfg;

        /* Get numVFs attached to this PF and first HWVF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        if (numvfs)
                *numvfs = (cfg >> 12) & 0xFF;
        if (hwvf)
                *hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
        int pf, func;
        u64 cfg;

        pf = rvu_get_pf(pcifunc);
        func = pcifunc & RVU_PFVF_FUNC_MASK;

        /* Get first HWVF attached to this PF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

        return ((cfg & 0xFFF) + func - 1);
}

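/* Map a pcifunc to its per-function state: VFs index into the hwvf
 * array via their global HWVF number, PFs index into the pf array.
 */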
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
        /* Check if it is a PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK)
                return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
        else
                return &rvu->pf[rvu_get_pf(pcifunc)];
}

static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
        int pf, vf, nvfs;
        u64 cfg;

        pf = rvu_get_pf(pcifunc);
        if (pf >= rvu->hw->total_pfs)
                return false;

        if (!(pcifunc & RVU_PFVF_FUNC_MASK))
                return true;

        /* Check if VF is within number of VFs attached to this PF */
        vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        nvfs = (cfg >> 12) & 0xFF;
        if (vf >= nvfs)
                return false;

        return true;
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
        struct rvu_block *block;

        if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
                return false;

        block = &hw->block[blkaddr];
        return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid;
        u64 cfg;

        /* For each block check if 'implemented' bit is set */
        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
                if (cfg & BIT_ULL(11))
                        block->implemented = true;
        }
}

static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
        rvu_write64(rvu, BLKADDR_RVUM,
                    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
                    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
        rvu_write64(rvu, BLKADDR_RVUM,
                    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}

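/* Reset an individual LF: write the LF number with the trigger bit
 * (bit 12) set into the block's LF reset register, then poll until
 * hardware clears that bit to signal completion.
 */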
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
        int err;

        if (!block->implemented)
                return 0;

        rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
        err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
                           true);
        return err;
}

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
        struct rvu_block *block = &rvu->hw->block[blkaddr];
        int err;

        if (!block->implemented)
                return;

        rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
        err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
        if (err) {
                dev_err(rvu->dev, "HW block:%d reset timeout, retrying\n", blkaddr);
                while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
                        ;
        }
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
        /* Do a HW reset of all RVU blocks */
        rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}

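/* Walk every LF in the block and, for each one hardware reports as
 * already provisioned (bit 63 of its config register), mark it used
 * in the LF bitmap, record its owner pcifunc, and set up its MSIX
 * vector offsets.
 */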
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
        struct rvu_pfvf *pfvf;
        u64 cfg;
        int lf;

        for (lf = 0; lf < block->lf.max; lf++) {
                cfg = rvu_read64(rvu, block->addr,
                                 block->lfcfg_reg | (lf << block->lfshift));
                if (!(cfg & BIT_ULL(63)))
                        continue;

                /* Set this resource as being used */
                __set_bit(lf, block->lf.bmap);

                /* Get the PF/VF to which this LF is attached */
                pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    (cfg >> 8) & 0xFFFF, lf, true);

                /* Set start MSIX vector for this LF within this PF/VF */
                rvu_set_msix_offset(rvu, pfvf, block, lf);
        }
}

static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
        int min_vecs;

        if (!vf)
                goto check_pf;

        if (!nvecs) {
                dev_warn(rvu->dev,
                         "PF%d:VF%d is configured with zero msix vectors\n",
                         pf, vf - 1);
        }
        return;

check_pf:
        if (pf == 0)
                min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
        else
                min_vecs = RVU_PF_INT_VEC_CNT;

        if (nvecs >= min_vecs)
                return;
        dev_warn(rvu->dev,
                 "PF%d is configured with too few vectors, %d, min is %d\n",
                 pf, nvecs, min_vecs);
}

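/* Discover each enabled PF's (and its VFs') MSIX vector budget from
 * the privileged config registers, allocate tracking bitmaps, and
 * reserve the vectors that service the PF/VF interrupt sources.
 * Finally remap the MSIX table base through the IOMMU, since hardware
 * treats RVU_AF_MSIXTR_BASE as an IOVA.
 */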
static int rvu_setup_msix_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int pf, vf, numvfs, hwvf, err;
        int nvecs, offset, max_msix;
        struct rvu_pfvf *pfvf;
        u64 cfg, phy_addr;
        dma_addr_t iova;

        for (pf = 0; pf < hw->total_pfs; pf++) {
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
                /* If PF is not enabled, nothing to do */
                if (!((cfg >> 20) & 0x01))
                        continue;

                rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

                pfvf = &rvu->pf[pf];
                /* Get num of MSIX vectors attached to this PF */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
                pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
                rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

                /* Alloc msix bitmap for this PF */
                err = rvu_alloc_bitmap(&pfvf->msix);
                if (err)
                        return err;

                /* Allocate memory for MSIX vector to RVU block LF mapping */
                pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
                                                sizeof(u16), GFP_KERNEL);
                if (!pfvf->msix_lfmap)
                        return -ENOMEM;

                /* For PF0 (AF) firmware will set msix vector offsets for
                 * AF, block AF and PF0_INT vectors, so jump to VFs.
                 */
                if (!pf)
                        goto setup_vfmsix;

                /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
                 * These are allocated on driver init and never freed,
                 * so no need to set 'msix_lfmap' for these.
                 */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
                nvecs = (cfg >> 12) & 0xFF;
                cfg &= ~0x7FFULL;
                offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                rvu_write64(rvu, BLKADDR_RVUM,
                            RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
                /* Alloc msix bitmap for VFs */
                for (vf = 0; vf < numvfs; vf++) {
                        pfvf = &rvu->hwvf[hwvf + vf];
                        /* Get num of MSIX vectors attached to this VF */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_PFX_MSIX_CFG(pf));
                        pfvf->msix.max = (cfg & 0xFFF) + 1;
                        rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

                        /* Alloc msix bitmap for this VF */
                        err = rvu_alloc_bitmap(&pfvf->msix);
                        if (err)
                                return err;

                        pfvf->msix_lfmap =
                                devm_kcalloc(rvu->dev, pfvf->msix.max,
                                             sizeof(u16), GFP_KERNEL);
                        if (!pfvf->msix_lfmap)
                                return -ENOMEM;

                        /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
                         * These are allocated on driver init and never freed,
                         * so no need to set 'msix_lfmap' for these.
                         */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
                        nvecs = (cfg >> 12) & 0xFF;
                        cfg &= ~0x7FFULL;
                        offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                        rvu_write64(rvu, BLKADDR_RVUM,
                                    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
                                    cfg | offset);
                }
        }

        /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
         * create an IOMMU mapping for the physical address configured by
         * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
         */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        max_msix = cfg & 0xFFFFF;
        if (rvu->fwdata && rvu->fwdata->msixtr_base)
                phy_addr = rvu->fwdata->msixtr_base;
        else
                phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

        iova = dma_map_resource(rvu->dev, phy_addr,
                                max_msix * PCI_MSIX_ENTRY_SIZE,
                                DMA_BIDIRECTIONAL, 0);

        if (dma_mapping_error(rvu->dev, iova))
                return -ENOMEM;

        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
        rvu->msix_base_iova = iova;
        rvu->msixtr_base_phy = phy_addr;

        return 0;
}

static void rvu_reset_msix(struct rvu *rvu)
{
        /* Restore msixtr base register */
        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
                    rvu->msixtr_base_phy);
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        struct rvu_pfvf  *pfvf;
        int id, max_msix;
        u64 cfg;

        rvu_npa_freemem(rvu);
        rvu_npc_freemem(rvu);
        rvu_nix_freemem(rvu);

        /* Free block LF bitmaps */
        for (id = 0; id < BLK_COUNT; id++) {
                block = &hw->block[id];
                kfree(block->lf.bmap);
        }

        /* Free MSIX bitmaps */
        for (id = 0; id < hw->total_pfs; id++) {
                pfvf = &rvu->pf[id];
                kfree(pfvf->msix.bmap);
        }

        for (id = 0; id < hw->total_vfs; id++) {
                pfvf = &rvu->hwvf[id];
                kfree(pfvf->msix.bmap);
        }

        /* Unmap MSIX vector base IOVA mapping */
        if (!rvu->msix_base_iova)
                return;
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        max_msix = cfg & 0xFFFFF;
        dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
                           max_msix * PCI_MSIX_ENTRY_SIZE,
                           DMA_BIDIRECTIONAL, 0);

        rvu_reset_msix(rvu);
        mutex_destroy(&rvu->rsrc_lock);
}

static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int pf, vf, numvfs, hwvf;
        struct rvu_pfvf *pfvf;
        u64 *mac;

        for (pf = 0; pf < hw->total_pfs; pf++) {
                /* For PF0 (AF), assign MAC addresses only to VFs (LBK VFs) */
                if (!pf)
                        goto lbkvf;

                if (!is_pf_cgxmapped(rvu, pf))
                        continue;
                /* Assign MAC address to PF */
                pfvf = &rvu->pf[pf];
                if (rvu->fwdata && pf < PF_MACNUM_MAX) {
                        mac = &rvu->fwdata->pf_macs[pf];
                        if (*mac)
                                u64_to_ether_addr(*mac, pfvf->mac_addr);
                        else
                                eth_random_addr(pfvf->mac_addr);
                } else {
                        eth_random_addr(pfvf->mac_addr);
                }
                ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
                /* Assign MAC addresses to VFs */
                rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
                for (vf = 0; vf < numvfs; vf++, hwvf++) {
                        pfvf = &rvu->hwvf[hwvf];
                        if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
                                mac = &rvu->fwdata->vf_macs[hwvf];
                                if (*mac)
                                        u64_to_ether_addr(*mac, pfvf->mac_addr);
                                else
                                        eth_random_addr(pfvf->mac_addr);
                        } else {
                                eth_random_addr(pfvf->mac_addr);
                        }
                        ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
                }
        }
}

static int rvu_fwdata_init(struct rvu *rvu)
{
        u64 fwdbase;
        int err;

        /* Get firmware data base address */
        err = cgx_get_fwdata_base(&fwdbase);
        if (err)
                goto fail;
        rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
        if (!rvu->fwdata)
                goto fail;
        if (!is_rvu_fwdata_valid(rvu)) {
                dev_err(rvu->dev,
                        "Mismatch in 'fwdata' struct between kernel and firmware\n");
                iounmap(rvu->fwdata);
                rvu->fwdata = NULL;
                return -EINVAL;
        }
        return 0;
fail:
        dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
        return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
        if (rvu->fwdata)
                iounmap(rvu->fwdata);
}

static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid;
        u64 cfg;

        /* Init NIX LF's bitmap */
        block = &hw->block[blkaddr];
        if (!block->implemented)
                return 0;
        blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
        block->lf.max = cfg & 0xFFF;
        block->addr = blkaddr;
        block->type = BLKTYPE_NIX;
        block->lfshift = 8;
        block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
        block->lfcfg_reg = NIX_PRIV_LFX_CFG;
        block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
        block->lfreset_reg = NIX_AF_LF_RST;
        block->rvu = rvu;
        sprintf(block->name, "NIX%d", blkid);
        rvu->nix_blkaddr[blkid] = blkaddr;
        return rvu_alloc_bitmap(&block->lf);
}

static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid;
        u64 cfg;

        /* Init CPT LF's bitmap */
        block = &hw->block[blkaddr];
        if (!block->implemented)
                return 0;
        blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
        cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
        block->lf.max = cfg & 0xFF;
        block->addr = blkaddr;
        block->type = BLKTYPE_CPT;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
        block->lfcfg_reg = CPT_PRIV_LFX_CFG;
        block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
        block->lfreset_reg = CPT_AF_LF_RST;
        block->rvu = rvu;
        sprintf(block->name, "CPT%d", blkid);
        return rvu_alloc_bitmap(&block->lf);
}

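/* Read the loopback (LBK) block's CONST register over its BAR to
 * learn the buffer/FIFO size, and cache it in rvu->hw->lbk_bufsize.
 */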
static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
        struct pci_dev *pdev = NULL;
        void __iomem *base;
        u64 lbk_const;

        pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
                              PCI_DEVID_OCTEONTX2_LBK, pdev);
        if (!pdev)
                return;

        base = pci_ioremap_bar(pdev, 0);
        if (!base)
                goto err_put;

        lbk_const = readq(base + LBK_CONST);

        /* cache fifo size */
        rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

        iounmap(base);
err_put:
        pci_dev_put(pdev);
}

static int rvu_setup_hw_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid, err;
        u64 cfg;

        /* Get HW supported max RVU PF & VF count */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        hw->total_pfs = (cfg >> 32) & 0xFF;
        hw->total_vfs = (cfg >> 20) & 0xFFF;
        hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

        /* Init NPA LF's bitmap */
        block = &hw->block[BLKADDR_NPA];
        if (!block->implemented)
                goto nix;
        cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
        block->lf.max = (cfg >> 16) & 0xFFF;
        block->addr = BLKADDR_NPA;
        block->type = BLKTYPE_NPA;
        block->lfshift = 8;
        block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
        block->lfcfg_reg = NPA_PRIV_LFX_CFG;
        block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
        block->lfreset_reg = NPA_AF_LF_RST;
        block->rvu = rvu;
        sprintf(block->name, "NPA");
        err = rvu_alloc_bitmap(&block->lf);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate NPA LF bitmap\n", __func__);
                return err;
        }

nix:
        err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
                return err;
        }

        err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
                return err;
        }

        /* Init SSO group's bitmap */
        block = &hw->block[BLKADDR_SSO];
        if (!block->implemented)
                goto ssow;
        cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
        block->lf.max = cfg & 0xFFFF;
        block->addr = BLKADDR_SSO;
        block->type = BLKTYPE_SSO;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
        block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
        block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
        block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
        block->rvu = rvu;
        sprintf(block->name, "SSO GROUP");
        err = rvu_alloc_bitmap(&block->lf);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate SSO LF bitmap\n", __func__);
                return err;
        }

ssow:
        /* Init SSO workslot's bitmap */
        block = &hw->block[BLKADDR_SSOW];
        if (!block->implemented)
                goto tim;
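        /* Note: 'cfg' here still holds SSO_AF_CONST read above; the
         * workslot count is taken from its upper bits.
         */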
        block->lf.max = (cfg >> 56) & 0xFF;
        block->addr = BLKADDR_SSOW;
        block->type = BLKTYPE_SSOW;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
        block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
        block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
        block->lfreset_reg = SSOW_AF_LF_HWS_RST;
        block->rvu = rvu;
        sprintf(block->name, "SSOWS");
        err = rvu_alloc_bitmap(&block->lf);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate SSOW LF bitmap\n", __func__);
                return err;
        }

tim:
        /* Init TIM LF's bitmap */
        block = &hw->block[BLKADDR_TIM];
        if (!block->implemented)
                goto cpt;
        cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
        block->lf.max = cfg & 0xFFFF;
        block->addr = BLKADDR_TIM;
        block->type = BLKTYPE_TIM;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
        block->lfcfg_reg = TIM_PRIV_LFX_CFG;
        block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
        block->lfreset_reg = TIM_AF_LF_RST;
        block->rvu = rvu;
        sprintf(block->name, "TIM");
        err = rvu_alloc_bitmap(&block->lf);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate TIM LF bitmap\n", __func__);
                return err;
        }

cpt:
        err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate CPT0 LF bitmap\n", __func__);
                return err;
        }
        err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate CPT1 LF bitmap\n", __func__);
                return err;
        }

        /* Allocate memory for PFVF data */
        rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
                               sizeof(struct rvu_pfvf), GFP_KERNEL);
        if (!rvu->pf) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
                return -ENOMEM;
        }

        rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
                                 sizeof(struct rvu_pfvf), GFP_KERNEL);
        if (!rvu->hwvf) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
                return -ENOMEM;
        }

        mutex_init(&rvu->rsrc_lock);

        rvu_fwdata_init(rvu);

        err = rvu_setup_msix_resources(rvu);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to setup MSIX resources\n", __func__);
                return err;
        }

        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                if (!block->lf.bmap)
                        continue;

                /* Allocate memory for block LF/slot to pcifunc mapping info */
                block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
                                             sizeof(u16), GFP_KERNEL);
                if (!block->fn_map) {
                        err = -ENOMEM;
                        goto msix_err;
                }

                /* Scan all blocks to check if low level firmware has
                 * already provisioned any of the resources to a PF/VF.
                 */
                rvu_scan_block(rvu, block);
        }

        err = rvu_set_channels_base(rvu);
        if (err)
                goto msix_err;

        err = rvu_npc_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
                goto npc_err;
        }

        err = rvu_cgx_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
                goto cgx_err;
        }

        /* Assign MACs for CGX mapped functions */
        rvu_setup_pfvf_macaddress(rvu);

        err = rvu_npa_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
                goto npa_err;
        }

        rvu_get_lbk_bufsize(rvu);

        err = rvu_nix_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
                goto nix_err;
        }

        err = rvu_sdp_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
                goto nix_err;
        }

        rvu_program_channels(rvu);

        return 0;

nix_err:
        rvu_nix_freemem(rvu);
npa_err:
        rvu_npa_freemem(rvu);
cgx_err:
        rvu_cgx_exit(rvu);
npc_err:
        rvu_npc_freemem(rvu);
        rvu_fwdata_exit(rvu);
msix_err:
        rvu_reset_msix(rvu);
        return err;
}

/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
        if (!aq)
                return;

        qmem_free(rvu->dev, aq->inst);
        qmem_free(rvu->dev, aq->res);
        devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
                 int qsize, int inst_size, int res_size)
{
        struct admin_queue *aq;
        int err;

        *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
        if (!*ad_queue)
                return -ENOMEM;
        aq = *ad_queue;

        /* Alloc memory for instructions, i.e. the AQ */
        err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
        if (err) {
                devm_kfree(rvu->dev, aq);
                return err;
        }

        /* Alloc memory for results */
        err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
        if (err) {
                rvu_aq_free(rvu, aq);
                return err;
        }

        spin_lock_init(&aq->lock);
        return 0;
}

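/* READY mbox handler: reply with the rclk/sclk frequencies taken from
 * firmware data, when available.
 */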
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
                           struct ready_msg_rsp *rsp)
{
        if (rvu->fwdata) {
                rsp->rclk_freq = rvu->fwdata->rclk;
                rsp->sclk_freq = rvu->fwdata->sclk;
        }
        return 0;
}

/* Get current count of an RVU block's LF/slots
 * provisioned to a given RVU func.
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
        switch (blkaddr) {
        case BLKADDR_NPA:
                return pfvf->npalf ? 1 : 0;
        case BLKADDR_NIX0:
        case BLKADDR_NIX1:
                return pfvf->nixlf ? 1 : 0;
        case BLKADDR_SSO:
                return pfvf->sso;
        case BLKADDR_SSOW:
                return pfvf->ssow;
        case BLKADDR_TIM:
                return pfvf->timlfs;
        case BLKADDR_CPT0:
                return pfvf->cptlfs;
        case BLKADDR_CPT1:
                return pfvf->cpt1_lfs;
        }
        return 0;
}

/* Return true if LFs of block type are attached to pcifunc */
static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
        switch (blktype) {
        case BLKTYPE_NPA:
                return pfvf->npalf ? 1 : 0;
        case BLKTYPE_NIX:
                return pfvf->nixlf ? 1 : 0;
        case BLKTYPE_SSO:
                return !!pfvf->sso;
        case BLKTYPE_SSOW:
                return !!pfvf->ssow;
        case BLKTYPE_TIM:
                return !!pfvf->timlfs;
        case BLKTYPE_CPT:
                return pfvf->cptlfs || pfvf->cpt1_lfs;
        }

        return false;
}

bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
        struct rvu_pfvf *pfvf;

        if (!is_pf_func_valid(rvu, pcifunc))
                return false;

        pfvf = rvu_get_pfvf(rvu, pcifunc);

        /* Check if this PFFUNC has an LF of type blktype attached */
        if (!is_blktype_attached(pfvf, blktype))
                return false;

        return true;
}

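/* Look up the LF number backing a given (pcifunc, slot) pair via the
 * block's LF_CFG_DEBUG register: write the query with the trigger bit
 * (bit 13) set, wait for hardware to clear it, then check the valid
 * bit (bit 12) and read the LF index from the low 12 bits.
 */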
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
                           int pcifunc, int slot)
{
        u64 val;

        val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
        rvu_write64(rvu, block->addr, block->lookup_reg, val);
        /* Wait for the lookup to finish */
        /* TODO: put some timeout here */
        while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
                ;

        val = rvu_read64(rvu, block->addr, block->lookup_reg);

        /* Check LF valid bit */
        if (!(val & (1ULL << 12)))
                return -1;

        return (val & 0xFFF);
}

int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
                              u16 global_slot, u16 *slot_in_block)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        int numlfs, total_lfs = 0, nr_blocks = 0;
        int i, num_blkaddr[BLK_COUNT] = { 0 };
        struct rvu_block *block;
        int blkaddr;
        u16 start_slot;

        if (!is_blktype_attached(pfvf, blktype))
                return -ENODEV;

        /* Get all the block addresses from which LFs are attached to
         * the given pcifunc in num_blkaddr[].
         */
        for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
                block = &rvu->hw->block[blkaddr];
                if (block->type != blktype)
                        continue;
                if (!is_block_implemented(rvu->hw, blkaddr))
                        continue;

                numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
                if (numlfs) {
                        total_lfs += numlfs;
                        num_blkaddr[nr_blocks] = blkaddr;
                        nr_blocks++;
                }
        }

        if (global_slot >= total_lfs)
                return -ENODEV;

        /* Based on the given global slot number retrieve the
         * correct block address out of all attached block
         * addresses and slot number in that block.
         */
        total_lfs = 0;
        blkaddr = -ENODEV;
        for (i = 0; i < nr_blocks; i++) {
                numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
                total_lfs += numlfs;
                if (global_slot < total_lfs) {
                        blkaddr = num_blkaddr[i];
                        start_slot = total_lfs - numlfs;
                        *slot_in_block = global_slot - start_slot;
                        break;
                }
        }

        return blkaddr;
}

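/* Detach all LFs of 'blktype' from 'pcifunc': disable each LF's
 * config register, update the SW ownership map, return the LF to the
 * free bitmap, and clear its MSIX vector offset.
 */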
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int slot, lf, num_lfs;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
        if (blkaddr < 0)
                return;

        if (blktype == BLKTYPE_NIX)
                rvu_nix_reset_mac(pfvf, pcifunc);

        block = &hw->block[blkaddr];

        num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
        if (!num_lfs)
                return;

        for (slot = 0; slot < num_lfs; slot++) {
                lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
                if (lf < 0) /* This should never happen */
                        continue;

                /* Disable the LF */
                rvu_write64(rvu, blkaddr, block->lfcfg_reg |
                            (lf << block->lfshift), 0x00ULL);

                /* Update SW maintained mapping info as well */
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    pcifunc, lf, false);

                /* Free the resource */
                rvu_free_rsrc(&block->lf, lf);

                /* Clear MSIX vector offset for this LF */
                rvu_clear_msix_offset(rvu, pfvf, block, lf);
        }
}

static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
                            u16 pcifunc)
{
        struct rvu_hwinfo *hw = rvu->hw;
        bool detach_all = true;
        struct rvu_block *block;
        int blkid;

        mutex_lock(&rvu->rsrc_lock);

        /* Check for partial resource detach */
        if (detach && detach->partial)
                detach_all = false;

        /* Check for RVU block's LFs attached to this func,
         * if so, detach them.
         */
        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                if (!block->lf.bmap)
                        continue;
                if (!detach_all && detach) {
                        if (blkid == BLKADDR_NPA && !detach->npalf)
                                continue;
                        else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
                                continue;
                        else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
                                continue;
                        else if ((blkid == BLKADDR_SSO) && !detach->sso)
                                continue;
                        else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
                                continue;
                        else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
                                continue;
                        else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
                                continue;
                        else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
                                continue;
                }
                rvu_detach_block(rvu, pcifunc, block->type);
        }

        mutex_unlock(&rvu->rsrc_lock);
        return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
                                      struct rsrc_detach *detach,
                                      struct msg_rsp *rsp)
{
        return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}

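/* Pick which NIX block (NIX0 or NIX1) a pcifunc should use and cache
 * the choice along with the matching NIX RX/TX interface indices.
 */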
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        int blkaddr = BLKADDR_NIX0, vf;
        struct rvu_pfvf *pf;

        pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

        /* All CGX-mapped PFs are assigned their NIX block during init */
        if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
                blkaddr = pf->nix_blkaddr;
        } else if (is_afvf(pcifunc)) {
                vf = pcifunc - 1;
                /* Assign NIX based on VF number: all even-numbered VFs
                 * get NIX0 and odd-numbered VFs get NIX1.
                 */
                blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
                /* NIX1 is not present on all silicons */
                if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
                        blkaddr = BLKADDR_NIX0;
        }

        /* if SDP1 then the blkaddr is NIX1 */
        if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
                blkaddr = BLKADDR_NIX1;

        switch (blkaddr) {
        case BLKADDR_NIX1:
                pfvf->nix_blkaddr = BLKADDR_NIX1;
                pfvf->nix_rx_intf = NIX_INTFX_RX(1);
                pfvf->nix_tx_intf = NIX_INTFX_TX(1);
                break;
        case BLKADDR_NIX0:
        default:
                pfvf->nix_blkaddr = BLKADDR_NIX0;
                pfvf->nix_rx_intf = NIX_INTFX_RX(0);
                pfvf->nix_tx_intf = NIX_INTFX_TX(0);
                break;
        }

        return pfvf->nix_blkaddr;
}

static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
                                  u16 pcifunc, struct rsrc_attach *attach)
{
        int blkaddr;

        switch (blktype) {
        case BLKTYPE_NIX:
                blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
                break;
        case BLKTYPE_CPT:
                if (attach->hdr.ver < RVU_MULTI_BLK_VER)
                        return rvu_get_blkaddr(rvu, blktype, 0);
                blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
                          BLKADDR_CPT0;
                if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
                        return -ENODEV;
                break;
        default:
                return rvu_get_blkaddr(rvu, blktype, 0);
        }

        if (is_block_implemented(rvu->hw, blkaddr))
                return blkaddr;

        return -ENODEV;
}

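/* Attach 'num_lfs' LFs of 'blktype' to 'pcifunc': allocate each LF
 * from the block's bitmap, program its config register with the owner
 * and slot, update the SW map, and assign its MSIX vector offset.
 */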
1524 static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
1525                              int num_lfs, struct rsrc_attach *attach)
1526 {
1527         struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1528         struct rvu_hwinfo *hw = rvu->hw;
1529         struct rvu_block *block;
1530         int slot, lf;
1531         int blkaddr;
1532         u64 cfg;
1533
1534         if (!num_lfs)
1535                 return;
1536
1537         blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
1538         if (blkaddr < 0)
1539                 return;
1540
1541         block = &hw->block[blkaddr];
1542         if (!block->lf.bmap)
1543                 return;
1544
1545         for (slot = 0; slot < num_lfs; slot++) {
1546                 /* Allocate the resource */
1547                 lf = rvu_alloc_rsrc(&block->lf);
1548                 if (lf < 0)
1549                         return;
1550
1551                 cfg = (1ULL << 63) | (pcifunc << 8) | slot;
1552                 rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1553                             (lf << block->lfshift), cfg);
1554                 rvu_update_rsrc_map(rvu, pfvf, block,
1555                                     pcifunc, lf, true);
1556
1557                 /* Set start MSIX vector for this LF within this PF/VF */
1558                 rvu_set_msix_offset(rvu, pfvf, block, lf);
1559         }
1560 }
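/* A worked example of the LF config word written above, a sketch: bit 63
 * marks the mapping valid, bits 8 and up carry the owning PCI function and
 * the low bits the owner-local slot.  lf_cfg() is a made-up helper name.
 *
 *	static u64 lf_cfg(u16 pcifunc, u8 slot)
 *	{
 *		return (1ULL << 63) | ((u64)pcifunc << 8) | slot;
 *	}
 *
 * e.g. lf_cfg(0x400, 2) == 0x8000000000040002ULL.
 */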
1561
1562 static int rvu_check_rsrc_availability(struct rvu *rvu,
1563                                        struct rsrc_attach *req, u16 pcifunc)
1564 {
1565         struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1566         int free_lfs, mappedlfs, blkaddr;
1567         struct rvu_hwinfo *hw = rvu->hw;
1568         struct rvu_block *block;
1569
1570         /* Only one NPA LF can be attached */
1571         if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1572                 block = &hw->block[BLKADDR_NPA];
1573                 free_lfs = rvu_rsrc_free_count(&block->lf);
1574                 if (!free_lfs)
1575                         goto fail;
1576         } else if (req->npalf) {
1577                 dev_err(&rvu->pdev->dev,
1578                         "Func 0x%x: Invalid req, already has NPA\n",
1579                          pcifunc);
1580                 return -EINVAL;
1581         }
1582
1583         /* Only one NIX LF can be attached */
1584         if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1585                 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
1586                                                  pcifunc, req);
1587                 if (blkaddr < 0)
1588                         return blkaddr;
1589                 block = &hw->block[blkaddr];
1590                 free_lfs = rvu_rsrc_free_count(&block->lf);
1591                 if (!free_lfs)
1592                         goto fail;
1593         } else if (req->nixlf) {
1594                 dev_err(&rvu->pdev->dev,
1595                         "Func 0x%x: Invalid req, already has NIX\n",
1596                         pcifunc);
1597                 return -EINVAL;
1598         }
1599
1600         if (req->sso) {
1601                 block = &hw->block[BLKADDR_SSO];
1602                 /* Is request within limits? */
1603                 if (req->sso > block->lf.max) {
1604                         dev_err(&rvu->pdev->dev,
1605                                 "Func 0x%x: Invalid SSO req, %d > max %d\n",
1606                                  pcifunc, req->sso, block->lf.max);
1607                         return -EINVAL;
1608                 }
1609                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1610                 free_lfs = rvu_rsrc_free_count(&block->lf);
1611                 /* Check if additional resources are available */
1612                 if (req->sso > mappedlfs &&
1613                     ((req->sso - mappedlfs) > free_lfs))
1614                         goto fail;
1615         }
1616
1617         if (req->ssow) {
1618                 block = &hw->block[BLKADDR_SSOW];
1619                 if (req->ssow > block->lf.max) {
1620                         dev_err(&rvu->pdev->dev,
1621                                 "Func 0x%x: Invalid SSOW req, %d > max %d\n",
1622                                  pcifunc, req->ssow, block->lf.max);
1623                         return -EINVAL;
1624                 }
1625                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1626                 free_lfs = rvu_rsrc_free_count(&block->lf);
1627                 if (req->ssow > mappedlfs &&
1628                     ((req->ssow - mappedlfs) > free_lfs))
1629                         goto fail;
1630         }
1631
1632         if (req->timlfs) {
1633                 block = &hw->block[BLKADDR_TIM];
1634                 if (req->timlfs > block->lf.max) {
1635                         dev_err(&rvu->pdev->dev,
1636                                 "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1637                                  pcifunc, req->timlfs, block->lf.max);
1638                         return -EINVAL;
1639                 }
1640                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1641                 free_lfs = rvu_rsrc_free_count(&block->lf);
1642                 if (req->timlfs > mappedlfs &&
1643                     ((req->timlfs - mappedlfs) > free_lfs))
1644                         goto fail;
1645         }
1646
1647         if (req->cptlfs) {
1648                 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
1649                                                  pcifunc, req);
1650                 if (blkaddr < 0)
1651                         return blkaddr;
1652                 block = &hw->block[blkaddr];
1653                 if (req->cptlfs > block->lf.max) {
1654                         dev_err(&rvu->pdev->dev,
1655                                 "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1656                                  pcifunc, req->cptlfs, block->lf.max);
1657                         return -EINVAL;
1658                 }
1659                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1660                 free_lfs = rvu_rsrc_free_count(&block->lf);
1661                 if (req->cptlfs > mappedlfs &&
1662                     ((req->cptlfs - mappedlfs) > free_lfs))
1663                         goto fail;
1664         }
1665
1666         return 0;
1667
1668 fail:
1669         dev_info(rvu->dev, "Request for %s failed\n", block->name);
1670         return -ENOSPC;
1671 }
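/* The availability test repeated above, condensed as a sketch: a request
 * fits if it can be satisfied by the already-mapped LFs plus the block's
 * free pool.  fits() and its parameter names are illustration only.
 *
 *	static bool fits(int want, int mapped, int nfree)
 *	{
 *		return want <= mapped || (want - mapped) <= nfree;
 *	}
 *
 * e.g. fits(4, 2, 1) == false (the -ENOSPC path above), while
 * fits(4, 2, 2) == true because the two mapped slots are reused.
 */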
1672
1673 static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
1674                                        struct rsrc_attach *attach)
1675 {
1676         int blkaddr, num_lfs;
1677
1678         blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
1679                                          attach->hdr.pcifunc, attach);
1680         if (blkaddr < 0)
1681                 return false;
1682
1683         num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
1684                                         blkaddr);
1685         /* Requester already has LFs from given block? */
1686         return !!num_lfs;
1687 }
1688
1689 int rvu_mbox_handler_attach_resources(struct rvu *rvu,
1690                                       struct rsrc_attach *attach,
1691                                       struct msg_rsp *rsp)
1692 {
1693         u16 pcifunc = attach->hdr.pcifunc;
1694         int err;
1695
1696         /* If first request, detach all existing attached resources */
1697         if (!attach->modify)
1698                 rvu_detach_rsrcs(rvu, NULL, pcifunc);
1699
1700         mutex_lock(&rvu->rsrc_lock);
1701
1702         /* Check if the request can be accommodated */
1703         err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
1704         if (err)
1705                 goto exit;
1706
1707         /* Now attach the requested resources */
1708         if (attach->npalf)
1709                 rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
1710
1711         if (attach->nixlf)
1712                 rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
1713
1714         if (attach->sso) {
1715                 /* An RVU func doesn't know which exact LF or slot is
1716                  * attached to it; it always sees them as slots 0, 1, 2...
1717                  * So for a 'modify' request, simply detach all existing
1718                  * attached LFs/slots and attach afresh.
1719                  */
1720                 if (attach->modify)
1721                         rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
1722                 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
1723                                  attach->sso, attach);
1724         }
1725
1726         if (attach->ssow) {
1727                 if (attach->modify)
1728                         rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
1729                 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
1730                                  attach->ssow, attach);
1731         }
1732
1733         if (attach->timlfs) {
1734                 if (attach->modify)
1735                         rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
1736                 rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
1737                                  attach->timlfs, attach);
1738         }
1739
1740         if (attach->cptlfs) {
1741                 if (attach->modify &&
1742                     rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
1743                         rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
1744                 rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
1745                                  attach->cptlfs, attach);
1746         }
1747
1748 exit:
1749         mutex_unlock(&rvu->rsrc_lock);
1750         return err;
1751 }
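/* Usage sketch from the requester's side (hypothetical; only the struct
 * fields consumed by the handler above are shown, and the mbox send itself
 * is elided since it lives in the PF/VF driver):
 *
 *	struct rsrc_attach req = {
 *		.npalf  = 1,	// one NPA LF (at most one allowed)
 *		.nixlf  = 1,	// one NIX LF (at most one allowed)
 *		.sso    = 2,	// two SSO LFs, seen as slots 0 and 1
 *		.modify = 0,	// first request: prior rsrcs detached
 *	};
 *
 * A later request with .modify = 1 adds to or reshuffles what is already
 * attached instead of starting from scratch.
 */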
1752
1753 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1754                                int blkaddr, int lf)
1755 {
1756         u16 vec;
1757
1758         if (lf < 0)
1759                 return MSIX_VECTOR_INVALID;
1760
1761         for (vec = 0; vec < pfvf->msix.max; vec++) {
1762                 if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1763                         return vec;
1764         }
1765         return MSIX_VECTOR_INVALID;
1766 }
1767
1768 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1769                                 struct rvu_block *block, int lf)
1770 {
1771         u16 nvecs, vec, offset;
1772         u64 cfg;
1773
1774         cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1775                          (lf << block->lfshift));
1776         nvecs = (cfg >> 12) & 0xFF;
1777
1778         /* Check and alloc MSIX vectors, must be contiguous */
1779         if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
1780                 return;
1781
1782         offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
1783
1784         /* Config MSIX offset in LF */
1785         rvu_write64(rvu, block->addr, block->msixcfg_reg |
1786                     (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
1787
1788         /* Update the bitmap as well */
1789         for (vec = 0; vec < nvecs; vec++)
1790                 pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
1791 }
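/* Decode sketch of the per-LF MSIX config word used above and undone in
 * rvu_clear_msix_offset() below: bits [19:12] hold the vector count and
 * the low bits (the ~0x7FFULL mask) the vector offset.
 *
 *	nvecs  = (cfg >> 12) & 0xFF;
 *	offset = cfg & 0x7FF;
 *
 * e.g. for cfg == 0x4020: nvecs == 4, offset == 0x20, so the LF owns the
 * four contiguous vectors 0x20..0x23 in its PF/VF's MSIX space.
 */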
1792
1793 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1794                                   struct rvu_block *block, int lf)
1795 {
1796         u16 nvecs, vec, offset;
1797         u64 cfg;
1798
1799         cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1800                          (lf << block->lfshift));
1801         nvecs = (cfg >> 12) & 0xFF;
1802
1803         /* Clear MSIX offset in LF */
1804         rvu_write64(rvu, block->addr, block->msixcfg_reg |
1805                     (lf << block->lfshift), cfg & ~0x7FFULL);
1806
1807         offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1808
1809         /* Update the mapping */
1810         for (vec = 0; vec < nvecs; vec++)
1811                 pfvf->msix_lfmap[offset + vec] = 0;
1812
1813         /* Free the same in MSIX bitmap */
1814         rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
1815 }
1816
1817 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
1818                                  struct msix_offset_rsp *rsp)
1819 {
1820         struct rvu_hwinfo *hw = rvu->hw;
1821         u16 pcifunc = req->hdr.pcifunc;
1822         struct rvu_pfvf *pfvf;
1823         int lf, slot, blkaddr;
1824
1825         pfvf = rvu_get_pfvf(rvu, pcifunc);
1826         if (!pfvf->msix.bmap)
1827                 return 0;
1828
1829         /* Report MSIX offsets of each block's LFs attached to this PF/VF */
1830         lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
1831         rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
1832
1833         /* Get blkaddr of the NIX block whose LF is attached to pcifunc */
1834         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1835         if (blkaddr < 0) {
1836                 rsp->nix_msixoff = MSIX_VECTOR_INVALID;
1837         } else {
1838                 lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1839                 rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
1840         }
1841
1842         rsp->sso = pfvf->sso;
1843         for (slot = 0; slot < rsp->sso; slot++) {
1844                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
1845                 rsp->sso_msixoff[slot] =
1846                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
1847         }
1848
1849         rsp->ssow = pfvf->ssow;
1850         for (slot = 0; slot < rsp->ssow; slot++) {
1851                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
1852                 rsp->ssow_msixoff[slot] =
1853                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
1854         }
1855
1856         rsp->timlfs = pfvf->timlfs;
1857         for (slot = 0; slot < rsp->timlfs; slot++) {
1858                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
1859                 rsp->timlf_msixoff[slot] =
1860                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
1861         }
1862
1863         rsp->cptlfs = pfvf->cptlfs;
1864         for (slot = 0; slot < rsp->cptlfs; slot++) {
1865                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
1866                 rsp->cptlf_msixoff[slot] =
1867                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
1868         }
1869
1870         rsp->cpt1_lfs = pfvf->cpt1_lfs;
1871         for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
1872                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
1873                 rsp->cpt1_lf_msixoff[slot] =
1874                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
1875         }
1876
1877         return 0;
1878 }
1879
1880 int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
1881                                    struct free_rsrcs_rsp *rsp)
1882 {
1883         struct rvu_hwinfo *hw = rvu->hw;
1884         struct rvu_block *block;
1885         struct nix_txsch *txsch;
1886         struct nix_hw *nix_hw;
1887
1888         mutex_lock(&rvu->rsrc_lock);
1889
1890         block = &hw->block[BLKADDR_NPA];
1891         rsp->npa = rvu_rsrc_free_count(&block->lf);
1892
1893         block = &hw->block[BLKADDR_NIX0];
1894         rsp->nix = rvu_rsrc_free_count(&block->lf);
1895
1896         block = &hw->block[BLKADDR_NIX1];
1897         rsp->nix1 = rvu_rsrc_free_count(&block->lf);
1898
1899         block = &hw->block[BLKADDR_SSO];
1900         rsp->sso = rvu_rsrc_free_count(&block->lf);
1901
1902         block = &hw->block[BLKADDR_SSOW];
1903         rsp->ssow = rvu_rsrc_free_count(&block->lf);
1904
1905         block = &hw->block[BLKADDR_TIM];
1906         rsp->tim = rvu_rsrc_free_count(&block->lf);
1907
1908         block = &hw->block[BLKADDR_CPT0];
1909         rsp->cpt = rvu_rsrc_free_count(&block->lf);
1910
1911         block = &hw->block[BLKADDR_CPT1];
1912         rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
1913
1914         if (rvu->hw->cap.nix_fixed_txschq_mapping) {
1915                 rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
1916                 rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
1917                 rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
1918                 rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
1919                 /* NIX1 */
1920                 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1921                         goto out;
1922                 rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
1923                 rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
1924                 rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
1925                 rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
1926         } else {
1927                 nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
1928                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1929                 rsp->schq[NIX_TXSCH_LVL_SMQ] =
1930                                 rvu_rsrc_free_count(&txsch->schq);
1931
1932                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1933                 rsp->schq[NIX_TXSCH_LVL_TL4] =
1934                                 rvu_rsrc_free_count(&txsch->schq);
1935
1936                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1937                 rsp->schq[NIX_TXSCH_LVL_TL3] =
1938                                 rvu_rsrc_free_count(&txsch->schq);
1939
1940                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1941                 rsp->schq[NIX_TXSCH_LVL_TL2] =
1942                                 rvu_rsrc_free_count(&txsch->schq);
1943
1944                 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1945                         goto out;
1946
1947                 nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
1948                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1949                 rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
1950                                 rvu_rsrc_free_count(&txsch->schq);
1951
1952                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1953                 rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
1954                                 rvu_rsrc_free_count(&txsch->schq);
1955
1956                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1957                 rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
1958                                 rvu_rsrc_free_count(&txsch->schq);
1959
1960                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1961                 rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
1962                                 rvu_rsrc_free_count(&txsch->schq);
1963         }
1964
1965         rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
1966 out:
1967         rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
1968         mutex_unlock(&rvu->rsrc_lock);
1969
1970         return 0;
1971 }
1972
1973 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1974                             struct msg_rsp *rsp)
1975 {
1976         u16 pcifunc = req->hdr.pcifunc;
1977         u16 vf, numvfs;
1978         u64 cfg;
1979
1980         vf = pcifunc & RVU_PFVF_FUNC_MASK;
1981         cfg = rvu_read64(rvu, BLKADDR_RVUM,
1982                          RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
1983         numvfs = (cfg >> 12) & 0xFF;
1984
1985         if (vf && vf <= numvfs)
1986                 __rvu_flr_handler(rvu, pcifunc);
1987         else
1988                 return RVU_INVALID_VF_ID;
1989
1990         return 0;
1991 }
1992
1993 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
1994                                 struct get_hw_cap_rsp *rsp)
1995 {
1996         struct rvu_hwinfo *hw = rvu->hw;
1997
1998         rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
1999         rsp->nix_shaping = hw->cap.nix_shaping;
2000         rsp->npc_hash_extract = hw->cap.npc_hash_extract;
2001
2002         return 0;
2003 }
2004
2005 int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
2006                                  struct msg_rsp *rsp)
2007 {
2008         struct rvu_hwinfo *hw = rvu->hw;
2009         u16 pcifunc = req->hdr.pcifunc;
2010         struct rvu_pfvf *pfvf;
2011         int blkaddr, nixlf;
2012         u16 target;
2013
2014         /* Only PF can add VF permissions */
2015         if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
2016                 return -EOPNOTSUPP;
2017
2018         target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
2019         pfvf = rvu_get_pfvf(rvu, target);
2020
2021         if (req->flags & RESET_VF_PERM) {
2022                 pfvf->flags &= RVU_CLEAR_VF_PERM;
2023         } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
2024                  (req->flags & VF_TRUSTED)) {
2025                 change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
2026                 /* disable multicast and promisc entries */
2027                 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
2028                         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
2029                         if (blkaddr < 0)
2030                                 return 0;
2031                         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2032                                            target, 0);
2033                         if (nixlf < 0)
2034                                 return 0;
2035                         npc_enadis_default_mce_entry(rvu, target, nixlf,
2036                                                      NIXLF_ALLMULTI_ENTRY,
2037                                                      false);
2038                         npc_enadis_default_mce_entry(rvu, target, nixlf,
2039                                                      NIXLF_PROMISC_ENTRY,
2040                                                      false);
2041                 }
2042         }
2043
2044         return 0;
2045 }
2046
2047 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
2048                                 struct mbox_msghdr *req)
2049 {
2050         struct rvu *rvu = pci_get_drvdata(mbox->pdev);
2051
2052         /* Check if valid; if not, reply with an invalid msg */
2053         if (req->sig != OTX2_MBOX_REQ_SIG)
2054                 goto bad_message;
2055
2056         switch (req->id) {
2057 #define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
2058         case _id: {                                                     \
2059                 struct _rsp_type *rsp;                                  \
2060                 int err;                                                \
2061                                                                         \
2062                 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(          \
2063                         mbox, devid,                                    \
2064                         sizeof(struct _rsp_type));                      \
2065                 /* some handlers should complete even if reply */       \
2066                 /* could not be allocated */                            \
2067                 if (!rsp &&                                             \
2068                     _id != MBOX_MSG_DETACH_RESOURCES &&                 \
2069                     _id != MBOX_MSG_NIX_TXSCH_FREE &&                   \
2070                     _id != MBOX_MSG_VF_FLR)                             \
2071                         return -ENOMEM;                                 \
2072                 if (rsp) {                                              \
2073                         rsp->hdr.id = _id;                              \
2074                         rsp->hdr.sig = OTX2_MBOX_RSP_SIG;               \
2075                         rsp->hdr.pcifunc = req->pcifunc;                \
2076                         rsp->hdr.rc = 0;                                \
2077                 }                                                       \
2078                                                                         \
2079                 err = rvu_mbox_handler_ ## _fn_name(rvu,                \
2080                                                     (struct _req_type *)req, \
2081                                                     rsp);               \
2082                 if (rsp && err)                                         \
2083                         rsp->hdr.rc = err;                              \
2084                                                                         \
2085                 trace_otx2_msg_process(mbox->pdev, _id, err);           \
2086                 return rsp ? err : -ENOMEM;                             \
2087         }
2088 MBOX_MESSAGES
2089 #undef M
2090
2091 bad_message:
2092         default:
2093                 otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
2094                 return -ENODEV;
2095         }
2096 }
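/* Sketch of what one M() expansion above produces, for a made-up message
 * FOO (the real list comes from MBOX_MESSAGES in the mbox header):
 *
 *	case MBOX_MSG_FOO: {
 *		struct foo_rsp *rsp = (struct foo_rsp *)otx2_mbox_alloc_msg(
 *					mbox, devid, sizeof(struct foo_rsp));
 *		// ... fill rsp->hdr ...
 *		err = rvu_mbox_handler_foo(rvu, (struct foo_req *)req, rsp);
 *		// ...
 *	}
 *
 * i.e. each mbox ID is dispatched to a type-safe rvu_mbox_handler_<name>()
 * with the reply header pre-filled and the handler's return code copied
 * back into it.
 */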
2097
2098 static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
2099 {
2100         struct rvu *rvu = mwork->rvu;
2101         int offset, err, id, devid;
2102         struct otx2_mbox_dev *mdev;
2103         struct mbox_hdr *req_hdr;
2104         struct mbox_msghdr *msg;
2105         struct mbox_wq_info *mw;
2106         struct otx2_mbox *mbox;
2107
2108         switch (type) {
2109         case TYPE_AFPF:
2110                 mw = &rvu->afpf_wq_info;
2111                 break;
2112         case TYPE_AFVF:
2113                 mw = &rvu->afvf_wq_info;
2114                 break;
2115         default:
2116                 return;
2117         }
2118
2119         devid = mwork - mw->mbox_wrk;
2120         mbox = &mw->mbox;
2121         mdev = &mbox->dev[devid];
2122
2123         /* Process received mbox messages */
2124         req_hdr = mdev->mbase + mbox->rx_start;
2125         if (mw->mbox_wrk[devid].num_msgs == 0)
2126                 return;
2127
2128         offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
2129
2130         for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
2131                 msg = mdev->mbase + offset;
2132
2133                 /* Set which PF/VF sent this message based on mbox IRQ */
2134                 switch (type) {
2135                 case TYPE_AFPF:
2136                         msg->pcifunc &=
2137                                 ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
2138                         msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
2139                         break;
2140                 case TYPE_AFVF:
2141                         msg->pcifunc &=
2142                                 ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
2143                         msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
2144                         break;
2145                 }
2146
2147                 err = rvu_process_mbox_msg(mbox, devid, msg);
2148                 if (!err) {
2149                         offset = mbox->rx_start + msg->next_msgoff;
2150                         continue;
2151                 }
2152
2153                 if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
2154                         dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
2155                                  err, otx2_mbox_id2name(msg->id),
2156                                  msg->id, rvu_get_pf(msg->pcifunc),
2157                                  (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2158                 else
2159                         dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
2160                                  err, otx2_mbox_id2name(msg->id),
2161                                  msg->id, devid);
2162         }
2163         mw->mbox_wrk[devid].num_msgs = 0;
2164
2165         /* Send mbox responses to VF/PF */
2166         otx2_mbox_msg_send(mbox, devid);
2167 }
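/* Stamping sketch for the switch above, assuming the usual pcifunc layout
 * from rvu.h (a PF number above a 10-bit function field, where func 0 is
 * the PF itself and func N is VF N-1); 'stamped' is a made-up variable:
 *
 *	// TYPE_AFPF: trust the IRQ source for the PF number
 *	stamped = (pcifunc & RVU_PFVF_FUNC_MASK) |
 *		  (devid << RVU_PFVF_PF_SHIFT);
 *
 *	// TYPE_AFVF: AF's VF 'devid' is PF0, func devid + 1
 *	stamped = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (devid + 1);
 *
 * so a message arriving on AF-PF mbox 3 is always attributed to PF3, no
 * matter what the sender wrote in its header.
 */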
2168
2169 static inline void rvu_afpf_mbox_handler(struct work_struct *work)
2170 {
2171         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2172
2173         __rvu_mbox_handler(mwork, TYPE_AFPF);
2174 }
2175
2176 static inline void rvu_afvf_mbox_handler(struct work_struct *work)
2177 {
2178         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2179
2180         __rvu_mbox_handler(mwork, TYPE_AFVF);
2181 }
2182
2183 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
2184 {
2185         struct rvu *rvu = mwork->rvu;
2186         struct otx2_mbox_dev *mdev;
2187         struct mbox_hdr *rsp_hdr;
2188         struct mbox_msghdr *msg;
2189         struct mbox_wq_info *mw;
2190         struct otx2_mbox *mbox;
2191         int offset, id, devid;
2192
2193         switch (type) {
2194         case TYPE_AFPF:
2195                 mw = &rvu->afpf_wq_info;
2196                 break;
2197         case TYPE_AFVF:
2198                 mw = &rvu->afvf_wq_info;
2199                 break;
2200         default:
2201                 return;
2202         }
2203
2204         devid = mwork - mw->mbox_wrk_up;
2205         mbox = &mw->mbox_up;
2206         mdev = &mbox->dev[devid];
2207
2208         rsp_hdr = mdev->mbase + mbox->rx_start;
2209         if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
2210                 dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
2211                 return;
2212         }
2213
2214         offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
2215
2216         for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
2217                 msg = mdev->mbase + offset;
2218
2219                 if (msg->id >= MBOX_MSG_MAX) {
2220                         dev_err(rvu->dev,
2221                                 "Mbox msg with unknown ID 0x%x\n", msg->id);
2222                         goto end;
2223                 }
2224
2225                 if (msg->sig != OTX2_MBOX_RSP_SIG) {
2226                         dev_err(rvu->dev,
2227                                 "Mbox msg with wrong signature %x, ID 0x%x\n",
2228                                 msg->sig, msg->id);
2229                         goto end;
2230                 }
2231
2232                 switch (msg->id) {
2233                 case MBOX_MSG_CGX_LINK_EVENT:
2234                         break;
2235                 default:
2236                         if (msg->rc)
2237                                 dev_err(rvu->dev,
2238                                         "Mbox msg response has err %d, ID 0x%x\n",
2239                                         msg->rc, msg->id);
2240                         break;
2241                 }
2242 end:
2243                 offset = mbox->rx_start + msg->next_msgoff;
2244                 mdev->msgs_acked++;
2245         }
2246         mw->mbox_wrk_up[devid].up_num_msgs = 0;
2247
2248         otx2_mbox_reset(mbox, devid);
2249 }
2250
2251 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2252 {
2253         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2254
2255         __rvu_mbox_up_handler(mwork, TYPE_AFPF);
2256 }
2257
2258 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2259 {
2260         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2261
2262         __rvu_mbox_up_handler(mwork, TYPE_AFVF);
2263 }
2264
2265 static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
2266                                 int num, int type)
2267 {
2268         struct rvu_hwinfo *hw = rvu->hw;
2269         int region;
2270         u64 bar4;
2271
2272         /* On the cn10k platform the VF mailbox regions of a PF follow
2273          * the PF <-> AF mailbox region, whereas on OcteonTX2 the base is
2274          * read from the RVU_PF_VF_BAR4_ADDR register.
2275          */
2276         if (type == TYPE_AFVF) {
2277                 for (region = 0; region < num; region++) {
2278                         if (hw->cap.per_pf_mbox_regs) {
2279                                 bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2280                                                   RVU_AF_PFX_BAR4_ADDR(0)) +
2281                                                   MBOX_SIZE;
2282                                 bar4 += region * MBOX_SIZE;
2283                         } else {
2284                                 bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
2285                                 bar4 += region * MBOX_SIZE;
2286                         }
2287                         mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2288                         if (!mbox_addr[region])
2289                                 goto error;
2290                 }
2291                 return 0;
2292         }
2293
2294         /* On the cn10k platform the AF <-> PF mailbox region of a PF is
2295          * read from per-PF registers, whereas on OcteonTX2 it is read
2296          * from the RVU_AF_PF_BAR4_ADDR register.
2297          */
2298         for (region = 0; region < num; region++) {
2299                 if (hw->cap.per_pf_mbox_regs) {
2300                         bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2301                                           RVU_AF_PFX_BAR4_ADDR(region));
2302                 } else {
2303                         bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2304                                           RVU_AF_PF_BAR4_ADDR);
2305                         bar4 += region * MBOX_SIZE;
2306                 }
2307                 mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2308                 if (!mbox_addr[region])
2309                         goto error;
2310         }
2311         return 0;
2312
2313 error:
2314         while (region--)
2315                 iounmap((void __iomem *)mbox_addr[region]);
2316         return -ENOMEM;
2317 }
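/* Worked example of the address math above, with made-up values: assuming
 * a 64KB MBOX_SIZE and a PF0 BAR4 base of 0x10000000, cn10k VF region N
 * sits at base + MBOX_SIZE + N * MBOX_SIZE (one PF <-> AF region followed
 * by the VF regions back to back), so region 2 maps at 0x10030000.  On
 * OcteonTX2 the same region is RVU_PF_VF_BAR4_ADDR + 2 * MBOX_SIZE.
 */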
2318
2319 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2320                          int type, int num,
2321                          void (mbox_handler)(struct work_struct *),
2322                          void (mbox_up_handler)(struct work_struct *))
2323 {
2324         int err = -EINVAL, i, dir, dir_up;
2325         void __iomem *reg_base;
2326         struct rvu_work *mwork;
2327         void **mbox_regions;
2328         const char *name;
2329
2330         mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
2331         if (!mbox_regions)
2332                 return -ENOMEM;
2333
2334         switch (type) {
2335         case TYPE_AFPF:
2336                 name = "rvu_afpf_mailbox";
2337                 dir = MBOX_DIR_AFPF;
2338                 dir_up = MBOX_DIR_AFPF_UP;
2339                 reg_base = rvu->afreg_base;
2340                 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
2341                 if (err)
2342                         goto free_regions;
2343                 break;
2344         case TYPE_AFVF:
2345                 name = "rvu_afvf_mailbox";
2346                 dir = MBOX_DIR_PFVF;
2347                 dir_up = MBOX_DIR_PFVF_UP;
2348                 reg_base = rvu->pfreg_base;
2349                 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
2350                 if (err)
2351                         goto free_regions;
2352                 break;
2353         default:
2354                 goto free_regions;
2355         }
2356
2357         mw->mbox_wq = alloc_workqueue(name,
2358                                       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2359                                       num);
2360         if (!mw->mbox_wq) {
2361                 err = -ENOMEM;
2362                 goto unmap_regions;
2363         }
2364
2365         mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2366                                     sizeof(struct rvu_work), GFP_KERNEL);
2367         if (!mw->mbox_wrk) {
2368                 err = -ENOMEM;
2369                 goto exit;
2370         }
2371
2372         mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2373                                        sizeof(struct rvu_work), GFP_KERNEL);
2374         if (!mw->mbox_wrk_up) {
2375                 err = -ENOMEM;
2376                 goto exit;
2377         }
2378
2379         err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2380                                      reg_base, dir, num);
2381         if (err)
2382                 goto exit;
2383
2384         err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2385                                      reg_base, dir_up, num);
2386         if (err)
2387                 goto exit;
2388
2389         for (i = 0; i < num; i++) {
2390                 mwork = &mw->mbox_wrk[i];
2391                 mwork->rvu = rvu;
2392                 INIT_WORK(&mwork->work, mbox_handler);
2393
2394                 mwork = &mw->mbox_wrk_up[i];
2395                 mwork->rvu = rvu;
2396                 INIT_WORK(&mwork->work, mbox_up_handler);
2397         }
2398         kfree(mbox_regions);
2399         return 0;
2400
2401 exit:
2402         destroy_workqueue(mw->mbox_wq);
2403 unmap_regions:
2404         while (num--)
2405                 iounmap((void __iomem *)mbox_regions[num]);
2406 free_regions:
2407         kfree(mbox_regions);
2408         return err;
2409 }
2410
2411 static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2412 {
2413         struct otx2_mbox *mbox = &mw->mbox;
2414         struct otx2_mbox_dev *mdev;
2415         int devid;
2416
2417         if (mw->mbox_wq) {
2418                 destroy_workqueue(mw->mbox_wq);
2419                 mw->mbox_wq = NULL;
2420         }
2421
2422         for (devid = 0; devid < mbox->ndevs; devid++) {
2423                 mdev = &mbox->dev[devid];
2424                 if (mdev->hwbase)
2425                         iounmap((void __iomem *)mdev->hwbase);
2426         }
2427
2428         otx2_mbox_destroy(&mw->mbox);
2429         otx2_mbox_destroy(&mw->mbox_up);
2430 }
2431
2432 static void rvu_queue_work(struct mbox_wq_info *mw, int first,
2433                            int mdevs, u64 intr)
2434 {
2435         struct otx2_mbox_dev *mdev;
2436         struct otx2_mbox *mbox;
2437         struct mbox_hdr *hdr;
2438         int i;
2439
2440         for (i = first; i < mdevs; i++) {
2441                 /* intr bit 0 corresponds to device 'first' */
2442                 if (!(intr & BIT_ULL(i - first)))
2443                         continue;
2444
2445                 mbox = &mw->mbox;
2446                 mdev = &mbox->dev[i];
2447                 hdr = mdev->mbase + mbox->rx_start;
2448
2449                 /* hdr->num_msgs is set to zero immediately in the
2450                  * interrupt path so that it holds a correct value the
2451                  * next time the interrupt handler is called.
2452                  * mbox_wrk[i].num_msgs holds the count for use in the
2453                  * mbox handler and mbox_wrk_up[i].up_num_msgs the count
2454                  * for use in the mbox up handler.
2455                  */
2456
2457                 if (hdr->num_msgs) {
2458                         mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
2459                         hdr->num_msgs = 0;
2460                         queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
2461                 }
2462                 mbox = &mw->mbox_up;
2463                 mdev = &mbox->dev[i];
2464                 hdr = mdev->mbase + mbox->rx_start;
2465                 if (hdr->num_msgs) {
2466                         mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
2467                         hdr->num_msgs = 0;
2468                         queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
2469                 }
2470         }
2471 }
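/* Example of the bit <-> device mapping above, a sketch: with first == 64
 * and mdevs == 96 (AF VFs 64..95), intr bit 0 refers to mbox device 64, so
 * intr == 0x5 queues work for devices 64 and 66 only.
 */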
2472
2473 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
2474 {
2475         struct rvu *rvu = (struct rvu *)rvu_irq;
2476         int vfs = rvu->vfs;
2477         u64 intr;
2478
2479         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
2480         /* Clear interrupts */
2481         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
2482         if (intr)
2483                 trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
2484
2485         /* Sync with mbox memory region */
2486         rmb();
2487
2488         rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
2489
2490         /* Handle VF interrupts */
2491         if (vfs > 64) {
2492                 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
2493                 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
2494
2495                 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
2496                 vfs -= 64;
2497         }
2498
2499         intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
2500         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
2501         if (intr)
2502                 trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
2503
2504         rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
2505
2506         return IRQ_HANDLED;
2507 }
2508
2509 static void rvu_enable_mbox_intr(struct rvu *rvu)
2510 {
2511         struct rvu_hwinfo *hw = rvu->hw;
2512
2513         /* Clear spurious irqs, if any */
2514         rvu_write64(rvu, BLKADDR_RVUM,
2515                     RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2516
2517         /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
2518         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2519                     INTR_MASK(hw->total_pfs) & ~1ULL);
2520 }
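/* Worked example of the enable mask above: with total_pfs == 16,
 * INTR_MASK(16) is 0xFFFF (one bit per PF) and "& ~1ULL" drops bit 0,
 * leaving 0xFFFE, so mailbox interrupts are armed for PF1..PF15 but never
 * for PF0, the AF itself.
 */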
2521
2522 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2523 {
2524         struct rvu_block *block;
2525         int slot, lf, num_lfs;
2526         int err;
2527
2528         block = &rvu->hw->block[blkaddr];
2529         num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2530                                         block->addr);
2531         if (!num_lfs)
2532                 return;
2533         for (slot = 0; slot < num_lfs; slot++) {
2534                 lf = rvu_get_lf(rvu, block, pcifunc, slot);
2535                 if (lf < 0)
2536                         continue;
2537
2538                 /* Cleanup LF and reset it */
2539                 if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2540                         rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2541                 else if (block->addr == BLKADDR_NPA)
2542                         rvu_npa_lf_teardown(rvu, pcifunc, lf);
2543                 else if ((block->addr == BLKADDR_CPT0) ||
2544                          (block->addr == BLKADDR_CPT1))
2545                         rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
2546                                             slot);
2547
2548                 err = rvu_lf_reset(rvu, block, lf);
2549                 if (err) {
2550                         dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2551                                 block->addr, lf);
2552                 }
2553         }
2554 }
2555
2556 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
2557 {
2558         mutex_lock(&rvu->flr_lock);
2559         /* Reset order should reflect inter-block dependencies:
2560          * 1. Reset any packet/work sources (NIX, CPT, TIM)
2561          * 2. Flush and reset SSO/SSOW
2562          * 3. Cleanup pools (NPA)
2563          */
2564         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
2565         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
2566         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
2567         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
2568         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
2569         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
2570         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
2571         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
2572         rvu_reset_lmt_map_tbl(rvu, pcifunc);
2573         rvu_detach_rsrcs(rvu, NULL, pcifunc);
2574         mutex_unlock(&rvu->flr_lock);
2575 }
2576
2577 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
2578 {
2579         int reg = 0;
2580
2581         /* pcifunc = 0 (PF0) | (vf + 1) */
2582         __rvu_flr_handler(rvu, vf + 1);
2583
2584         if (vf >= 64) {
2585                 reg = 1;
2586                 vf = vf - 64;
2587         }
2588
2589         /* Signal FLR finish and enable IRQ */
2590         rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
2591         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
2592 }
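/* Example of the register-bank split above: the per-VF FLR bits live in
 * two 64-bit registers, so VF 70 uses bank 1 with bit 70 - 64 == 6, i.e.
 * VFTRPENDX(1) and VFFLR_INT_ENA_W1SX(1) with BIT_ULL(6); VF 5 stays in
 * bank 0 with BIT_ULL(5).
 */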
2593
2594 static void rvu_flr_handler(struct work_struct *work)
2595 {
2596         struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2597         struct rvu *rvu = flrwork->rvu;
2598         u16 pcifunc, numvfs, vf;
2599         u64 cfg;
2600         int pf;
2601
2602         pf = flrwork - rvu->flr_wrk;
2603         if (pf >= rvu->hw->total_pfs) {
2604                 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2605                 return;
2606         }
2607
2608         cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2609         numvfs = (cfg >> 12) & 0xFF;
2610         pcifunc  = pf << RVU_PFVF_PF_SHIFT;
2611
2612         for (vf = 0; vf < numvfs; vf++)
2613                 __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2614
2615         __rvu_flr_handler(rvu, pcifunc);
2616
2617         /* Signal FLR finish */
2618         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2619
2620         /* Enable interrupt */
2621         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,  BIT_ULL(pf));
2622 }
2623
2624 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2625 {
2626         int dev, vf, reg = 0;
2627         u64 intr;
2628
2629         if (start_vf >= 64)
2630                 reg = 1;
2631
2632         intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2633         if (!intr)
2634                 return;
2635
2636         for (vf = 0; vf < numvfs; vf++) {
2637                 if (!(intr & BIT_ULL(vf)))
2638                         continue;
2639                 /* Clear and disable the interrupt */
2640                 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2641                 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2642
2643                 dev = vf + start_vf + rvu->hw->total_pfs;
2644                 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2645         }
2646 }
2647
2648 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2649 {
2650         struct rvu *rvu = (struct rvu *)rvu_irq;
2651         u64 intr;
2652         u8  pf;
2653
2654         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
2655         if (!intr)
2656                 goto afvf_flr;
2657
2658         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2659                 if (intr & (1ULL << pf)) {
2660                         /* clear interrupt */
2661                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
2662                                     BIT_ULL(pf));
2663                         /* Disable the interrupt */
2664                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2665                                     BIT_ULL(pf));
2666                         /* PF is already dead; do only AF-related operations */
2667                         queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
2668                 }
2669         }
2670
2671 afvf_flr:
2672         rvu_afvf_queue_flr_work(rvu, 0, 64);
2673         if (rvu->vfs > 64)
2674                 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
2675
2676         return IRQ_HANDLED;
2677 }
2678
2679 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2680 {
2681         int vf;
2682
2683         /* Nothing to be done here other than clearing the
2684          * TRPEND bit.
2685          */
2686         for (vf = 0; vf < 64; vf++) {
2687                 if (intr & (1ULL << vf)) {
2688                         /* clear the trpend due to ME (master enable) */
2689                         rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2690                         /* clear interrupt */
2691                         rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2692                 }
2693         }
2694 }
2695
2696 /* Handles ME interrupts from VFs of AF */
2697 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
2698 {
2699         struct rvu *rvu = (struct rvu *)rvu_irq;
2700         int vfset;
2701         u64 intr;
2702
2703         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2704
2705         for (vfset = 0; vfset <= 1; vfset++) {
2706                 intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
2707                 if (intr)
2708                         rvu_me_handle_vfset(rvu, vfset, intr);
2709         }
2710
2711         return IRQ_HANDLED;
2712 }
2713
2714 /* Handles ME interrupts from PFs */
2715 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2716 {
2717         struct rvu *rvu = (struct rvu *)rvu_irq;
2718         u64 intr;
2719         u8  pf;
2720
2721         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2722
2723         /* Nothing to be done here other than clearing the
2724          * TRPEND bit.
2725          */
2726         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2727                 if (intr & (1ULL << pf)) {
2728                         /* clear the trpend due to ME (master enable) */
2729                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2730                                     BIT_ULL(pf));
2731                         /* clear interrupt */
2732                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2733                                     BIT_ULL(pf));
2734                 }
2735         }
2736
2737         return IRQ_HANDLED;
2738 }
2739
2740 static void rvu_unregister_interrupts(struct rvu *rvu)
2741 {
2742         int irq;
2743
2744         rvu_cpt_unregister_interrupts(rvu);
2745
2746         /* Disable the Mbox interrupt */
2747         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2748                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2749
2750         /* Disable the PF FLR interrupt */
2751         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2752                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2753
2754         /* Disable the PF ME interrupt */
2755         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2756                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2757
2758         for (irq = 0; irq < rvu->num_vec; irq++) {
2759                 if (rvu->irq_allocated[irq]) {
2760                         free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2761                         rvu->irq_allocated[irq] = false;
2762                 }
2763         }
2764
2765         pci_free_irq_vectors(rvu->pdev);
2766         rvu->num_vec = 0;
2767 }
2768
2769 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2770 {
2771         struct rvu_pfvf *pfvf = &rvu->pf[0];
2772         int offset;
2773
2775         offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2776
2777         /* Make sure there are enough MSIX vectors configured so that
2778          * VF interrupts can be handled. An offset of zero means the PF
2779          * vectors are not configured and would overlap the AF vectors.
2780          */
2781         return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2782                offset;
2783 }
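/* Worked example of the check above, with illustrative counts: if the AF
 * reserves 5 vectors and the PF set another 7 (the two *_INT_VEC_CNT
 * values), PF0 must expose at least 12 MSI-X vectors and a nonzero PF
 * vector offset; otherwise rvu_register_interrupts() below returns early
 * and the AF-VF mbox/FLR/ME handlers are never wired up.
 */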
2784
2785 static int rvu_register_interrupts(struct rvu *rvu)
2786 {
2787         int ret, offset, pf_vec_start;
2788
2789         rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2790
2791         rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2792                                            NAME_SIZE, GFP_KERNEL);
2793         if (!rvu->irq_name)
2794                 return -ENOMEM;
2795
2796         rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2797                                           sizeof(bool), GFP_KERNEL);
2798         if (!rvu->irq_allocated)
2799                 return -ENOMEM;
2800
2801         /* Enable MSI-X */
2802         ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2803                                     rvu->num_vec, PCI_IRQ_MSIX);
2804         if (ret < 0) {
2805                 dev_err(rvu->dev,
2806                         "RVUAF: Request for %d msix vectors failed, ret %d\n",
2807                         rvu->num_vec, ret);
2808                 return ret;
2809         }
2810
2811         /* Register mailbox interrupt handler */
2812         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2813         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2814                           rvu_mbox_intr_handler, 0,
2815                           &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2816         if (ret) {
2817                 dev_err(rvu->dev,
2818                         "RVUAF: IRQ registration failed for mbox irq\n");
2819                 goto fail;
2820         }
2821
2822         rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2823
2824         /* Enable mailbox interrupts from all PFs */
2825         rvu_enable_mbox_intr(rvu);
2826
2827         /* Register FLR interrupt handler */
2828         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2829                 "RVUAF FLR");
2830         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2831                           rvu_flr_intr_handler, 0,
2832                           &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2833                           rvu);
2834         if (ret) {
2835                 dev_err(rvu->dev,
2836                         "RVUAF: IRQ registration failed for FLR\n");
2837                 goto fail;
2838         }
2839         rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2840
2841         /* Enable FLR interrupt for all PFs */
2842         rvu_write64(rvu, BLKADDR_RVUM,
2843                     RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2844
2845         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2846                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2847
2848         /* Register ME interrupt handler */
2849         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2850                 "RVUAF ME");
2851         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2852                           rvu_me_pf_intr_handler, 0,
2853                           &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2854                           rvu);
2855         if (ret) {
2856                 dev_err(rvu->dev,
2857                         "RVUAF: IRQ registration failed for ME\n");
2858         }
2859         rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2860
2861         /* Clear TRPEND bit for all PFs */
2862         rvu_write64(rvu, BLKADDR_RVUM,
2863                     RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
2864         /* Enable ME interrupt for all PFs */
2865         rvu_write64(rvu, BLKADDR_RVUM,
2866                     RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2867
2868         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2869                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2870
2871         if (!rvu_afvf_msix_vectors_num_ok(rvu))
2872                 return 0;
2873
2874         /* Get PF MSIX vectors offset. */
2875         pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2876                                   RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2877
2878         /* Register MBOX0 interrupt. */
2879         offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2880         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2881         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2882                           rvu_mbox_intr_handler, 0,
2883                           &rvu->irq_name[offset * NAME_SIZE],
2884                           rvu);
2885         if (ret)
2886                 dev_err(rvu->dev,
2887                         "RVUAF: IRQ registration failed for Mbox0\n");
2888
2889         rvu->irq_allocated[offset] = true;
2890
2891         /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
2892          * simply increment current offset by 1.
2893          */
2894         offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2895         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2896         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2897                           rvu_mbox_intr_handler, 0,
2898                           &rvu->irq_name[offset * NAME_SIZE],
2899                           rvu);
2900         if (ret)
2901                 dev_err(rvu->dev,
2902                         "RVUAF: IRQ registration failed for Mbox1\n");
2903
2904         rvu->irq_allocated[offset] = true;
2905
        /* Register FLR interrupt handler for AF's VFs */
        offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_flr_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE], rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
                goto fail;
        }
        rvu->irq_allocated[offset] = true;

        offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_flr_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE], rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
                goto fail;
        }
        rvu->irq_allocated[offset] = true;

        /* Register ME interrupt handler for AF's VFs */
        offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_me_vf_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE], rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
                goto fail;
        }
        rvu->irq_allocated[offset] = true;

        offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_me_vf_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE], rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
                goto fail;
        }
        rvu->irq_allocated[offset] = true;

        ret = rvu_cpt_register_interrupts(rvu);
        if (ret)
                goto fail;

        return 0;

fail:
        rvu_unregister_interrupts(rvu);
        return ret;
}

static void rvu_flr_wq_destroy(struct rvu *rvu)
{
        if (rvu->flr_wq) {
                destroy_workqueue(rvu->flr_wq);
                rvu->flr_wq = NULL;
        }
}

static int rvu_flr_init(struct rvu *rvu)
{
        int dev, num_devs;
        u64 cfg;
        int pf;

        /* Enable FLR for all PFs */
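        /* BIT_ULL(22) of RVU_PRIV_PFX_CFG is taken to be the per-PF FLR
         * enable, going by the comment above rather than the HRM.
         */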
        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
                rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
                            cfg | BIT_ULL(22));
        }

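        /* max_active is 1, so queued FLR work items run one at a time. */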
        rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
                                      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
                                      1);
        if (!rvu->flr_wq)
                return -ENOMEM;

        num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
        rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
                                    sizeof(struct rvu_work), GFP_KERNEL);
        if (!rvu->flr_wrk) {
                destroy_workqueue(rvu->flr_wq);
                return -ENOMEM;
        }

        for (dev = 0; dev < num_devs; dev++) {
                rvu->flr_wrk[dev].rvu = rvu;
                INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
        }

        mutex_init(&rvu->flr_lock);

        return 0;
}

static void rvu_disable_afvf_intr(struct rvu *rvu)
{
        int vfs = rvu->vfs;

        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
        if (vfs <= 64)
                return;

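        /* The VF interrupt registers are 64 bits wide, so VFs 64 and above
         * are handled through the second register instance (index 1).
         */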
        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
                      INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}

static void rvu_enable_afvf_intr(struct rvu *rvu)
{
        int vfs = rvu->vfs;

        /* Clear any pending interrupts and enable AF VF interrupts for
         * the first 64 VFs.
         */
        /* Mbox */
        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

        /* FLR */
        rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

        /* Same for remaining VFs, if any. */
        if (vfs <= 64)
                return;

        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
                      INTR_MASK(vfs - 64));

        rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}

int rvu_get_num_lbk_chans(void)
{
        struct pci_dev *pdev;
        void __iomem *base;
        int ret = -EIO;

        pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
                              NULL);
        if (!pdev)
                goto err;

        base = pci_ioremap_bar(pdev, 0);
        if (!base)
                goto err_put;

        /* Read the number of available LBK channels from the LBK(0)_CONST
         * register at offset 0x10; the count sits in bits [47:32].
         */
        ret = (readq(base + 0x10) >> 32) & 0xffff;
        iounmap(base);
err_put:
        pci_dev_put(pdev);
err:
        return ret;
}

static int rvu_enable_sriov(struct rvu *rvu)
{
        struct pci_dev *pdev = rvu->pdev;
        int err, chans, vfs;

        if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
                dev_warn(&pdev->dev,
                         "Skipping SRIOV enablement since not enough IRQs are available\n");
                return 0;
        }
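
        /* AF's VFs are paired with LBK channels, so the usable VF count is
         * bounded by the channel count queried below.
         */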
        chans = rvu_get_num_lbk_chans();
        if (chans < 0)
                return chans;

        vfs = pci_sriov_get_totalvfs(pdev);

        /* Limit VFs in case we have more VFs than LBK channels available. */
        if (vfs > chans)
                vfs = chans;

        if (!vfs)
                return 0;

        /* LBK channel number 63 is used for switching packets between
         * CGX mapped VFs, hence limit the LBK pairs to 62.
         */
        if (vfs > 62)
                vfs = 62;

        /* Save the number of VFs for use by the VF interrupt handlers.
         * Since interrupts may start arriving while SR-IOV is being
         * enabled, the ordinary API cannot be used to query the number
         * of enabled VFs.
         */
        rvu->vfs = vfs;

        err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
                            rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
        if (err)
                return err;

        rvu_enable_afvf_intr(rvu);
        /* Make sure IRQs are enabled before SRIOV. */
        mb();

        err = pci_enable_sriov(pdev, vfs);
        if (err) {
                rvu_disable_afvf_intr(rvu);
                rvu_mbox_destroy(&rvu->afvf_wq_info);
                return err;
        }

        return 0;
}

static void rvu_disable_sriov(struct rvu *rvu)
{
        rvu_disable_afvf_intr(rvu);
        rvu_mbox_destroy(&rvu->afvf_wq_info);
        pci_disable_sriov(rvu->pdev);
}

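/* Copy the MKEX/KPU profile names passed as module parameters, falling
 * back to the built-in "default" profiles when none were given.
 */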
static void rvu_update_module_params(struct rvu *rvu)
{
        const char *default_pfl_name = "default";

        strscpy(rvu->mkex_pfl_name,
                mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
        strscpy(rvu->kpu_pfl_name,
                kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}

static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct rvu *rvu;
        int err;

        rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
        if (!rvu)
                return -ENOMEM;

        rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
        if (!rvu->hw) {
                devm_kfree(dev, rvu);
                return -ENOMEM;
        }

        pci_set_drvdata(pdev, rvu);
        rvu->pdev = pdev;
        rvu->dev = &pdev->dev;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                goto err_freemem;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

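        /* The device supports up to 48-bit DMA addresses, hence
         * DMA_BIT_MASK(48).
         */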
        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "DMA mask config failed, abort\n");
                goto err_release_regions;
        }

        pci_set_master(pdev);

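        /* PTP support is optional: any ptp_get() failure other than probe
         * deferral just leaves rvu->ptp NULL and probing continues.
         */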
        rvu->ptp = ptp_get();
        if (IS_ERR(rvu->ptp)) {
                err = PTR_ERR(rvu->ptp);
                if (err == -EPROBE_DEFER)
                        goto err_release_regions;
                rvu->ptp = NULL;
        }

        /* Map Admin function CSRs */
        rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
        rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
        if (!rvu->afreg_base || !rvu->pfreg_base) {
                dev_err(dev, "Unable to map admin function CSRs, aborting\n");
                err = -ENOMEM;
                goto err_put_ptp;
        }

        /* Store module params in rvu structure */
        rvu_update_module_params(rvu);

        /* Check which blocks the HW supports */
        rvu_check_block_implemented(rvu);

        rvu_reset_all_blocks(rvu);

        rvu_setup_hw_capabilities(rvu);

        err = rvu_setup_hw_resources(rvu);
        if (err)
                goto err_put_ptp;

        /* Init mailbox between AF and PFs */
        err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
                            rvu->hw->total_pfs, rvu_afpf_mbox_handler,
                            rvu_afpf_mbox_up_handler);
        if (err) {
                dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
                goto err_hwsetup;
        }

        err = rvu_flr_init(rvu);
        if (err) {
                dev_err(dev, "%s: Failed to initialize FLR\n", __func__);
                goto err_mbox;
        }

        err = rvu_register_interrupts(rvu);
        if (err) {
                dev_err(dev, "%s: Failed to register interrupts\n", __func__);
                goto err_flr;
        }

        err = rvu_register_dl(rvu);
        if (err) {
                dev_err(dev, "%s: Failed to register devlink\n", __func__);
                goto err_irq;
        }

        rvu_setup_rvum_blk_revid(rvu);

        /* Enable AF's VFs (if any) */
        err = rvu_enable_sriov(rvu);
        if (err) {
                dev_err(dev, "%s: Failed to enable SRIOV\n", __func__);
                goto err_dl;
        }

        /* Initialize debugfs */
        rvu_dbg_init(rvu);

        mutex_init(&rvu->rswitch.switch_lock);

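        /* Start PTP with the clock parameters supplied via firmware data,
         * when present.
         */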
        if (rvu->fwdata)
                ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
                          rvu->fwdata->ptp_ext_tstamp);

        return 0;
err_dl:
        rvu_unregister_dl(rvu);
err_irq:
        rvu_unregister_interrupts(rvu);
err_flr:
        rvu_flr_wq_destroy(rvu);
err_mbox:
        rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
        rvu_cgx_exit(rvu);
        rvu_fwdata_exit(rvu);
        rvu_reset_all_blocks(rvu);
        rvu_free_hw_resources(rvu);
        rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
        ptp_put(rvu->ptp);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
err_freemem:
        pci_set_drvdata(pdev, NULL);
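        /* devm allocations would be released automatically on probe
         * failure; freeing them here is merely explicit.
         */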
        devm_kfree(&pdev->dev, rvu->hw);
        devm_kfree(dev, rvu);
        return err;
}

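/* Teardown is roughly the probe sequence in reverse. */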
static void rvu_remove(struct pci_dev *pdev)
{
        struct rvu *rvu = pci_get_drvdata(pdev);

        rvu_dbg_exit(rvu);
        rvu_unregister_dl(rvu);
        rvu_unregister_interrupts(rvu);
        rvu_flr_wq_destroy(rvu);
        rvu_cgx_exit(rvu);
        rvu_fwdata_exit(rvu);
        rvu_mbox_destroy(&rvu->afpf_wq_info);
        rvu_disable_sriov(rvu);
        rvu_reset_all_blocks(rvu);
        rvu_free_hw_resources(rvu);
        rvu_clear_rvum_blk_revid(rvu);
        ptp_put(rvu->ptp);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        devm_kfree(&pdev->dev, rvu->hw);
        devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
        .name = DRV_NAME,
        .id_table = rvu_id_table,
        .probe = rvu_probe,
        .remove = rvu_remove,
};

static int __init rvu_init_module(void)
{
        int err;

        pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

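        /* Register the CGX and PTP drivers first; rvu_probe() depends on
         * their devices (e.g. via ptp_get()).
         */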
        err = pci_register_driver(&cgx_driver);
        if (err < 0)
                return err;

        err = pci_register_driver(&ptp_driver);
        if (err < 0)
                goto ptp_err;

        err = pci_register_driver(&rvu_driver);
        if (err < 0)
                goto rvu_err;

        return 0;
rvu_err:
        pci_unregister_driver(&ptp_driver);
ptp_err:
        pci_unregister_driver(&cgx_driver);

        return err;
}

static void __exit rvu_cleanup_module(void)
{
        pci_unregister_driver(&rvu_driver);
        pci_unregister_driver(&ptp_driver);
        pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);