drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7
8 #ifdef CONFIG_DEBUG_FS
9
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 #include "rvu_npc_hash.h"
22 #include "mcs.h"
23
24 #define DEBUGFS_DIR_NAME "octeontx2"
25
26 enum {
27         CGX_STAT0,
28         CGX_STAT1,
29         CGX_STAT2,
30         CGX_STAT3,
31         CGX_STAT4,
32         CGX_STAT5,
33         CGX_STAT6,
34         CGX_STAT7,
35         CGX_STAT8,
36         CGX_STAT9,
37         CGX_STAT10,
38         CGX_STAT11,
39         CGX_STAT12,
40         CGX_STAT13,
41         CGX_STAT14,
42         CGX_STAT15,
43         CGX_STAT16,
44         CGX_STAT17,
45         CGX_STAT18,
46 };
47
48 /* NIX TX stats */
49 enum nix_stat_lf_tx {
50         TX_UCAST        = 0x0,
51         TX_BCAST        = 0x1,
52         TX_MCAST        = 0x2,
53         TX_DROP         = 0x3,
54         TX_OCTS         = 0x4,
55         TX_STATS_ENUM_LAST,
56 };
57
58 /* NIX RX stats */
59 enum nix_stat_lf_rx {
60         RX_OCTS         = 0x0,
61         RX_UCAST        = 0x1,
62         RX_BCAST        = 0x2,
63         RX_MCAST        = 0x3,
64         RX_DROP         = 0x4,
65         RX_DROP_OCTS    = 0x5,
66         RX_FCS          = 0x6,
67         RX_ERR          = 0x7,
68         RX_DRP_BCAST    = 0x8,
69         RX_DRP_MCAST    = 0x9,
70         RX_DRP_L3BCAST  = 0xa,
71         RX_DRP_L3MCAST  = 0xb,
72         RX_STATS_ENUM_LAST,
73 };
74
75 static char *cgx_rx_stats_fields[] = {
76         [CGX_STAT0]     = "Received packets",
77         [CGX_STAT1]     = "Octets of received packets",
78         [CGX_STAT2]     = "Received PAUSE packets",
79         [CGX_STAT3]     = "Received PAUSE and control packets",
80         [CGX_STAT4]     = "Filtered DMAC0 (NIX-bound) packets",
81         [CGX_STAT5]     = "Filtered DMAC0 (NIX-bound) octets",
82         [CGX_STAT6]     = "Packets dropped due to RX FIFO full",
83         [CGX_STAT7]     = "Octets dropped due to RX FIFO full",
84         [CGX_STAT8]     = "Error packets",
85         [CGX_STAT9]     = "Filtered DMAC1 (NCSI-bound) packets",
86         [CGX_STAT10]    = "Filtered DMAC1 (NCSI-bound) octets",
87         [CGX_STAT11]    = "NCSI-bound packets dropped",
88         [CGX_STAT12]    = "NCSI-bound octets dropped",
89 };
90
91 static char *cgx_tx_stats_fields[] = {
92         [CGX_STAT0]     = "Packets dropped due to excessive collisions",
93         [CGX_STAT1]     = "Packets dropped due to excessive deferral",
94         [CGX_STAT2]     = "Multiple collisions before successful transmission",
95         [CGX_STAT3]     = "Single collisions before successful transmission",
96         [CGX_STAT4]     = "Total octets sent on the interface",
97         [CGX_STAT5]     = "Total frames sent on the interface",
98         [CGX_STAT6]     = "Packets sent with an octet count < 64",
99         [CGX_STAT7]     = "Packets sent with an octet count == 64",
100         [CGX_STAT8]     = "Packets sent with an octet count of 65-127",
101         [CGX_STAT9]     = "Packets sent with an octet count of 128-255",
102         [CGX_STAT10]    = "Packets sent with an octet count of 256-511",
103         [CGX_STAT11]    = "Packets sent with an octet count of 512-1023",
104         [CGX_STAT12]    = "Packets sent with an octet count of 1024-1518",
105         [CGX_STAT13]    = "Packets sent with an octet count of > 1518",
106         [CGX_STAT14]    = "Packets sent to a broadcast DMAC",
107         [CGX_STAT15]    = "Packets sent to the multicast DMAC",
108         [CGX_STAT16]    = "Packets truncated due to transmit underflow",
109         [CGX_STAT17]    = "Control/PAUSE packets sent",
110 };
111
112 static char *rpm_rx_stats_fields[] = {
113         "Octets of received packets",
114         "Octets of received packets with out error",
115         "Received packets with alignment errors",
116         "Control/PAUSE packets received",
117         "Packets received with frame too long errors",
118         "Packets received with in-range length errors",
119         "Received packets",
120         "Packets received with frame check sequence errors",
121         "Packets received with VLAN header",
122         "Error packets",
123         "Packets received with unicast DMAC",
124         "Packets received with multicast DMAC",
125         "Packets received with broadcast DMAC",
126         "Dropped packets",
127         "Total frames received on interface",
128         "Packets received with an octet count < 64",
129         "Packets received with an octet count == 64",
130         "Packets received with an octet count of 65-127",
131         "Packets received with an octet count of 128-255",
132         "Packets received with an octet count of 256-511",
133         "Packets received with an octet count of 512-1023",
134         "Packets received with an octet count of 1024-1518",
135         "Packets received with an octet count of > 1518",
136         "Oversized Packets",
137         "Jabber Packets",
138         "Fragmented Packets",
139         "CBFC (class-based flow control) pause frames received for class 0",
140         "CBFC pause frames received for class 1",
141         "CBFC pause frames received for class 2",
142         "CBFC pause frames received for class 3",
143         "CBFC pause frames received for class 4",
144         "CBFC pause frames received for class 5",
145         "CBFC pause frames received for class 6",
146         "CBFC pause frames received for class 7",
147         "CBFC pause frames received for class 8",
148         "CBFC pause frames received for class 9",
149         "CBFC pause frames received for class 10",
150         "CBFC pause frames received for class 11",
151         "CBFC pause frames received for class 12",
152         "CBFC pause frames received for class 13",
153         "CBFC pause frames received for class 14",
154         "CBFC pause frames received for class 15",
155         "MAC control packets received",
156 };
157
158 static char *rpm_tx_stats_fields[] = {
159         "Total octets sent on the interface",
160         "Total octets transmitted OK",
161         "Control/Pause frames sent",
162         "Total frames transmitted OK",
163         "Total frames sent with VLAN header",
164         "Error Packets",
165         "Packets sent to unicast DMAC",
166         "Packets sent to the multicast DMAC",
167         "Packets sent to a broadcast DMAC",
168         "Packets sent with an octet count == 64",
169         "Packets sent with an octet count of 65-127",
170         "Packets sent with an octet count of 128-255",
171         "Packets sent with an octet count of 256-511",
172         "Packets sent with an octet count of 512-1023",
173         "Packets sent with an octet count of 1024-1518",
174         "Packets sent with an octet count of > 1518",
175         "CBFC (class-based flow control) pause frames transmitted for class 0",
176         "CBFC pause frames transmitted for class 1",
177         "CBFC pause frames transmitted for class 2",
178         "CBFC pause frames transmitted for class 3",
179         "CBFC pause frames transmitted for class 4",
180         "CBFC pause frames transmitted for class 5",
181         "CBFC pause frames transmitted for class 6",
182         "CBFC pause frames transmitted for class 7",
183         "CBFC pause frames transmitted for class 8",
184         "CBFC pause frames transmitted for class 9",
185         "CBFC pause frames transmitted for class 10",
186         "CBFC pause frames transmitted for class 11",
187         "CBFC pause frames transmitted for class 12",
188         "CBFC pause frames transmitted for class 13",
189         "CBFC pause frames transmitted for class 14",
190         "CBFC pause frames transmitted for class 15",
191         "MAC control packets sent",
192         "Total frames sent on the interface"
193 };
194
195 enum cpt_eng_type {
196         CPT_AE_TYPE = 1,
197         CPT_SE_TYPE = 2,
198         CPT_IE_TYPE = 3,
199 };
200
201 #define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
202                                                 blk_addr, NDC_AF_CONST) & 0xFF)
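
/* Note (added for clarity): the low byte of the NDC_AF_CONST register is
 * used here as the number of banks implemented by the NDC block.
 */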
203
204 #define rvu_dbg_NULL NULL
205 #define rvu_dbg_open_NULL NULL
206
207 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)     \
208 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
209 { \
210         return single_open(file, rvu_dbg_##read_op, inode->i_private); \
211 } \
212 static const struct file_operations rvu_dbg_##name##_fops = { \
213         .owner          = THIS_MODULE, \
214         .open           = rvu_dbg_open_##name, \
215         .read           = seq_read, \
216         .write          = rvu_dbg_##write_op, \
217         .llseek         = seq_lseek, \
218         .release        = single_release, \
219 }
220
221 #define RVU_DEBUG_FOPS(name, read_op, write_op) \
222 static const struct file_operations rvu_dbg_##name##_fops = { \
223         .owner = THIS_MODULE, \
224         .open = simple_open, \
225         .read = rvu_dbg_##read_op, \
226         .write = rvu_dbg_##write_op \
227 }
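
/* Note (added for clarity): RVU_DEBUG_SEQ_FOPS(foo, foo_display, NULL)
 * expands roughly to
 *
 *   static int rvu_dbg_open_foo(struct inode *inode, struct file *file)
 *   {
 *           return single_open(file, rvu_dbg_foo_display, inode->i_private);
 *   }
 *   static const struct file_operations rvu_dbg_foo_fops = {
 *           .owner   = THIS_MODULE,  .open  = rvu_dbg_open_foo,
 *           .read    = seq_read,     .write = rvu_dbg_NULL,    // i.e. NULL
 *           .llseek  = seq_lseek,    .release = single_release,
 *   };
 *
 * The rvu_dbg_NULL / rvu_dbg_open_NULL defines above let a literal NULL be
 * passed for an unused read or write hook.
 */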
228
229 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
230
231 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
232 {
233         struct mcs *mcs = filp->private;
234         struct mcs_port_stats stats;
235         int lmac;
236
237         seq_puts(filp, "\n port stats\n");
238         mutex_lock(&mcs->stats_lock);
239         for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
240                 mcs_get_port_stats(mcs, &stats, lmac, dir);
241                 seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
242                 seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
243
244                 if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
245                         seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
246                                    stats.preempt_err_cnt);
247                 if (dir == MCS_TX)
248                         seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
249                                    stats.sectag_insert_err_cnt);
250         }
251         mutex_unlock(&mcs->stats_lock);
252         return 0;
253 }
254
255 static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
256 {
257         return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
258 }
259
260 RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
261
262 static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
263 {
264         return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
265 }
266
267 RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
268
269 static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
270 {
271         struct mcs *mcs = filp->private;
272         struct mcs_sa_stats stats;
273         struct rsrc_bmap *map;
274         int sa_id;
275
276         if (dir == MCS_TX) {
277                 map = &mcs->tx.sa;
278                 mutex_lock(&mcs->stats_lock);
279                 seq_puts(filp, "\n TX SA stats\n");
280                 for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
281                         mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
282                         seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
283                                    stats.pkt_encrypt_cnt);
284
285                         seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
286                                    stats.pkt_protected_cnt);
287                 }
288                 mutex_unlock(&mcs->stats_lock);
289                 return 0;
290         }
291
292         /* RX stats */
293         map = &mcs->rx.sa;
294         mutex_lock(&mcs->stats_lock);
295         seq_puts(filp, "\n RX SA stats\n");
296         for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
297                 mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
298                 seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
299                 seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
300                 seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
301                 seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
302                 seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
303         }
304         mutex_unlock(&mcs->stats_lock);
305         return 0;
306 }
307
308 static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
309 {
310         return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
311 }
312
313 RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
314
315 static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
316 {
317         return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
318 }
319
320 RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
321
322 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
323 {
324         struct mcs *mcs = filp->private;
325         struct mcs_sc_stats stats;
326         struct rsrc_bmap *map;
327         int sc_id;
328
329         map = &mcs->tx.sc;
330         seq_puts(filp, "\n SC stats\n");
331
332         mutex_lock(&mcs->stats_lock);
333         for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
334                 mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
335                 seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
336                 seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
337                 seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
338
339                 if (mcs->hw->mcs_blks == 1) {
340                         seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
341                                    stats.octet_encrypt_cnt);
342                         seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
343                                    stats.octet_protected_cnt);
344                 }
345         }
346         mutex_unlock(&mcs->stats_lock);
347         return 0;
348 }
349
350 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
351
352 static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
353 {
354         struct mcs *mcs = filp->private;
355         struct mcs_sc_stats stats;
356         struct rsrc_bmap *map;
357         int sc_id;
358
359         map = &mcs->rx.sc;
360         seq_puts(filp, "\n SC stats\n");
361
362         mutex_lock(&mcs->stats_lock);
363         for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
364                 mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
365                 seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
366                 seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
367                 seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
368                 seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
369                 seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
370                 seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
371
372                 if (mcs->hw->mcs_blks > 1) {
373                         seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
374                         seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
375                 }
376                 if (mcs->hw->mcs_blks == 1) {
377                         seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
378                                    stats.octet_decrypt_cnt);
379                         seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
380                                    stats.octet_validate_cnt);
381                 }
382         }
383         mutex_unlock(&mcs->stats_lock);
384         return 0;
385 }
386
387 RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
388
389 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
390 {
391         struct mcs *mcs = filp->private;
392         struct mcs_flowid_stats stats;
393         struct rsrc_bmap *map;
394         int flow_id;
395
396         seq_puts(filp, "\n Flowid stats\n");
397
398         if (dir == MCS_RX)
399                 map = &mcs->rx.flow_ids;
400         else
401                 map = &mcs->tx.flow_ids;
402
403         mutex_lock(&mcs->stats_lock);
404         for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
405                 mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
406                 seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
407         }
408         mutex_unlock(&mcs->stats_lock);
409         return 0;
410 }
411
412 static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
413 {
414         return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
415 }
416
417 RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
418
419 static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
420 {
421         return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
422 }
423
424 RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
425
426 static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
427 {
428         struct mcs *mcs = filp->private;
429         struct mcs_secy_stats stats;
430         struct rsrc_bmap *map;
431         int secy_id;
432
433         map = &mcs->tx.secy;
434         seq_puts(filp, "\n MCS TX secy stats\n");
435
436         mutex_lock(&mcs->stats_lock);
437         for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
438                 mcs_get_tx_secy_stats(mcs, &stats, secy_id);
439                 seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
440                 seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
441                            stats.ctl_pkt_bcast_cnt);
442                 seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
443                            stats.ctl_pkt_mcast_cnt);
444                 seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
445                            stats.ctl_pkt_ucast_cnt);
446                 seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
447                 seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
448                            stats.unctl_pkt_bcast_cnt);
449                 seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
450                            stats.unctl_pkt_mcast_cnt);
451                 seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
452                            stats.unctl_pkt_ucast_cnt);
453                 seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
454                 seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
455                            stats.octet_encrypted_cnt);
456                 seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
457                            stats.octet_protected_cnt);
458                 seq_printf(filp, "secy%d: Pkts with no active sa: %lld\n", secy_id,
459                            stats.pkt_noactivesa_cnt);
460                 seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
461                 seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
462         }
463         mutex_unlock(&mcs->stats_lock);
464         return 0;
465 }
466
467 RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
468
469 static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
470 {
471         struct mcs *mcs = filp->private;
472         struct mcs_secy_stats stats;
473         struct rsrc_bmap *map;
474         int secy_id;
475
476         map = &mcs->rx.secy;
477         seq_puts(filp, "\n MCS RX secy stats\n");
478
479         mutex_lock(&mcs->stats_lock);
480         for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
481                 mcs_get_rx_secy_stats(mcs, &stats, secy_id);
482                 seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
483                 seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
484                            stats.ctl_pkt_bcast_cnt);
485                 seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
486                            stats.ctl_pkt_mcast_cnt);
487                 seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
488                            stats.ctl_pkt_ucast_cnt);
489                 seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
490                 seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
491                            stats.unctl_pkt_bcast_cnt);
492                 seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
493                            stats.unctl_pkt_mcast_cnt);
494                 seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
495                            stats.unctl_pkt_ucast_cnt);
496                 seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
497                 seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
498                            stats.octet_decrypted_cnt);
499                 seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
500                            stats.octet_validated_cnt);
501                 seq_printf(filp, "secy%d: Pkts on disabled port: %lld\n", secy_id,
502                            stats.pkt_port_disabled_cnt);
503                 seq_printf(filp, "secy%d: Pkts with bad tag: %lld\n", secy_id, stats.pkt_badtag_cnt);
504                 seq_printf(filp, "secy%d: Pkts with no sa: %lld\n", secy_id, stats.pkt_nosa_cnt);
505                 seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
506                            stats.pkt_nosaerror_cnt);
507                 seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
508                            stats.pkt_tagged_ctl_cnt);
509                 seq_printf(filp, "secy%d: Untagged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
510                 seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
511                 if (mcs->hw->mcs_blks > 1)
512                         seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
513                                    stats.pkt_notag_cnt);
514         }
515         mutex_unlock(&mcs->stats_lock);
516         return 0;
517 }
518
519 RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
520
521 static void rvu_dbg_mcs_init(struct rvu *rvu)
522 {
523         struct mcs *mcs;
524         char dname[10];
525         int i;
526
527         if (!rvu->mcs_blk_cnt)
528                 return;
529
530         rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
531
532         for (i = 0; i < rvu->mcs_blk_cnt; i++) {
533                 mcs = mcs_get_pdata(i);
534
535                 sprintf(dname, "mcs%d", i);
536                 rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
537                                                       rvu->rvu_dbg.mcs_root);
538
539                 rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
540
541                 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
542                                     &rvu_dbg_mcs_rx_flowid_stats_fops);
543
544                 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
545                                     &rvu_dbg_mcs_rx_secy_stats_fops);
546
547                 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
548                                     &rvu_dbg_mcs_rx_sc_stats_fops);
549
550                 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
551                                     &rvu_dbg_mcs_rx_sa_stats_fops);
552
553                 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
554                                     &rvu_dbg_mcs_rx_port_stats_fops);
555
556                 rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
557
558                 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
559                                     &rvu_dbg_mcs_tx_flowid_stats_fops);
560
561                 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
562                                     &rvu_dbg_mcs_tx_secy_stats_fops);
563
564                 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
565                                     &rvu_dbg_mcs_tx_sc_stats_fops);
566
567                 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
568                                     &rvu_dbg_mcs_tx_sa_stats_fops);
569
570                 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
571                                     &rvu_dbg_mcs_tx_port_stats_fops);
572         }
573 }
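
/* Resulting debugfs layout (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug and the driver root directory is DEBUGFS_DIR_NAME):
 *   /sys/kernel/debug/octeontx2/mcs/mcs<i>/rx_stats/{flowid,secy,sc,sa,port}
 *   /sys/kernel/debug/octeontx2/mcs/mcs<i>/tx_stats/{flowid,secy,sc,sa,port}
 */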
574
575 #define LMT_MAPTBL_ENTRY_SIZE 16
576 /* Dump LMTST map table */
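/* Layout note (added for clarity): each map table slot is
 * LMT_MAPTBL_ENTRY_SIZE (16) bytes; word 0 holds the LMT line base address
 * and word 1 the map entry, which is why the loop below reads two 64-bit
 * words (readq) per PF/VF index.
 */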
577 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
578                                                char __user *buffer,
579                                                size_t count, loff_t *ppos)
580 {
581         struct rvu *rvu = filp->private_data;
582         u64 lmt_addr, val, tbl_base;
583         int pf, vf, num_vfs, hw_vfs;
584         void __iomem *lmt_map_base;
585         int buf_size = 10240;
586         size_t off = 0;
587         int index = 0;
588         char *buf;
589         int ret;
590
591         /* don't allow partial reads */
592         if (*ppos != 0)
593                 return 0;
594
595         buf = kzalloc(buf_size, GFP_KERNEL);
596         if (!buf)
597                 return -ENOMEM;
598
599         tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
600
601         lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
602         if (!lmt_map_base) {
603                 dev_err(rvu->dev, "Failed to set up LMT map table mapping\n");
604                 kfree(buf);
605                 return -ENOMEM;
606         }
607
608         off +=  scnprintf(&buf[off], buf_size - 1 - off,
609                           "\n\t\t\t\t\tLmtst Map Table Entries");
610         off +=  scnprintf(&buf[off], buf_size - 1 - off,
611                           "\n\t\t\t\t\t=======================");
612         off +=  scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
613         off +=  scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
614         off +=  scnprintf(&buf[off], buf_size - 1 - off,
615                           "Lmtline Base (word 0)\t\t");
616         off +=  scnprintf(&buf[off], buf_size - 1 - off,
617                           "Lmt Map Entry (word 1)");
618         off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
619         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
620                 off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
621                                     pf);
622
623                 index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
624                 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
625                                  (tbl_base + index));
626                 lmt_addr = readq(lmt_map_base + index);
627                 off += scnprintf(&buf[off], buf_size - 1 - off,
628                                  " 0x%016llx\t\t", lmt_addr);
629                 index += 8;
630                 val = readq(lmt_map_base + index);
631                 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
632                                  val);
633                 /* Reading num of VFs per PF */
634                 rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
635                 for (vf = 0; vf < num_vfs; vf++) {
636                         index = (pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE) +
637                                 ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
638                         off += scnprintf(&buf[off], buf_size - 1 - off,
639                                             "PF%d:VF%d  \t\t", pf, vf);
640                         off += scnprintf(&buf[off], buf_size - 1 - off,
641                                          " 0x%llx\t\t", (tbl_base + index));
642                         lmt_addr = readq(lmt_map_base + index);
643                         off += scnprintf(&buf[off], buf_size - 1 - off,
644                                          " 0x%016llx\t\t", lmt_addr);
645                         index += 8;
646                         val = readq(lmt_map_base + index);
647                         off += scnprintf(&buf[off], buf_size - 1 - off,
648                                          " 0x%016llx\n", val);
649                 }
650         }
651         off +=  scnprintf(&buf[off], buf_size - 1 - off, "\n");
652
653         ret = min(off, count);
654         if (copy_to_user(buffer, buf, ret))
655                 ret = -EFAULT;
656         kfree(buf);
657
658         iounmap(lmt_map_base);
659         if (ret < 0)
660                 return ret;
661
662         *ppos = ret;
663         return ret;
664 }
665
666 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
667
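/* Example (added for clarity): if LFs 0, 1, 2, 3 and 7 of a block are
 * attached to the given pcifunc, the helper below renders them as "0-3,7";
 * runs of consecutive LFs are collapsed into a range and gaps are comma
 * separated.
 */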
668 static void get_lf_str_list(struct rvu_block block, int pcifunc,
669                             char *lfs)
670 {
671         int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
672
673         for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
674                 if (lf >= block.lf.max)
675                         break;
676
677                 if (block.fn_map[lf] != pcifunc)
678                         continue;
679
680                 if (lf == prev_lf + 1) {
681                         prev_lf = lf;
682                         seq = 1;
683                         continue;
684                 }
685
686                 if (seq)
687                         len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
688                 else
689                         len += (len ? sprintf(lfs + len, ",%d", lf) :
690                                       sprintf(lfs + len, "%d", lf));
691
692                 prev_lf = lf;
693                 seq = 0;
694         }
695
696         if (seq)
697                 len += sprintf(lfs + len, "-%d", prev_lf);
698
699         lfs[len] = '\0';
700 }
701
702 static int get_max_column_width(struct rvu *rvu)
703 {
704         int index, pf, vf, lf_str_size = 12, buf_size = 256;
705         struct rvu_block block;
706         u16 pcifunc;
707         char *buf;
708
709         buf = kzalloc(buf_size, GFP_KERNEL);
710         if (!buf)
711                 return -ENOMEM;
712
713         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
714                 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
715                         pcifunc = pf << 10 | vf;
716                         if (!pcifunc)
717                                 continue;
718
719                         for (index = 0; index < BLK_COUNT; index++) {
720                                 block = rvu->hw->block[index];
721                                 if (!strlen(block.name))
722                                         continue;
723
724                                 get_lf_str_list(block, pcifunc, buf);
725                                 if (lf_str_size <= strlen(buf))
726                                         lf_str_size = strlen(buf) + 1;
727                         }
728                 }
729         }
730
731         kfree(buf);
732         return lf_str_size;
733 }
734
735 /* Dumps current provisioning status of all RVU block LFs */
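/* Example output (illustrative; the actual columns depend on the RVU blocks
 * implemented by the silicon):
 *   pcifunc      NPA     NIX0    SSO     ...
 *   PF1          0       0
 *   PF1:VF0      1       1
 */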
736 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
737                                           char __user *buffer,
738                                           size_t count, loff_t *ppos)
739 {
740         int index, off = 0, flag = 0, len = 0, i = 0;
741         struct rvu *rvu = filp->private_data;
742         int bytes_not_copied = 0;
743         struct rvu_block block;
744         int pf, vf, pcifunc;
745         int buf_size = 2048;
746         int lf_str_size;
747         char *lfs;
748         char *buf;
749
750         /* don't allow partial reads */
751         if (*ppos != 0)
752                 return 0;
753
754         buf = kzalloc(buf_size, GFP_KERNEL);
755         if (!buf)
756                 return -ENOMEM;
757
758         /* Get the maximum width of a column */
759         lf_str_size = get_max_column_width(rvu);
760
761         lfs = kzalloc(lf_str_size, GFP_KERNEL);
762         if (!lfs) {
763                 kfree(buf);
764                 return -ENOMEM;
765         }
766         off +=  scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
767                           "pcifunc");
768         for (index = 0; index < BLK_COUNT; index++)
769                 if (strlen(rvu->hw->block[index].name)) {
770                         off += scnprintf(&buf[off], buf_size - 1 - off,
771                                          "%-*s", lf_str_size,
772                                          rvu->hw->block[index].name);
773                 }
774
775         off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
776         bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
777         if (bytes_not_copied)
778                 goto out;
779
780         i++;
781         *ppos += off;
782         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
783                 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
784                         off = 0;
785                         flag = 0;
786                         pcifunc = pf << 10 | vf;
787                         if (!pcifunc)
788                                 continue;
789
790                         if (vf) {
791                                 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
792                                 off = scnprintf(&buf[off],
793                                                 buf_size - 1 - off,
794                                                 "%-*s", lf_str_size, lfs);
795                         } else {
796                                 sprintf(lfs, "PF%d", pf);
797                                 off = scnprintf(&buf[off],
798                                                 buf_size - 1 - off,
799                                                 "%-*s", lf_str_size, lfs);
800                         }
801
802                         for (index = 0; index < BLK_COUNT; index++) {
803                                 block = rvu->hw->block[index];
804                                 if (!strlen(block.name))
805                                         continue;
806                                 len = 0;
807                                 lfs[len] = '\0';
808                                 get_lf_str_list(block, pcifunc, lfs);
809                                 if (strlen(lfs))
810                                         flag = 1;
811
812                                 off += scnprintf(&buf[off], buf_size - 1 - off,
813                                                  "%-*s", lf_str_size, lfs);
814                         }
815                         if (flag) {
816                                 off +=  scnprintf(&buf[off],
817                                                   buf_size - 1 - off, "\n");
818                                 bytes_not_copied = copy_to_user(buffer +
819                                                                 (i * off),
820                                                                 buf, off);
821                                 if (bytes_not_copied)
822                                         goto out;
823
824                                 i++;
825                                 *ppos += off;
826                         }
827                 }
828         }
829
830 out:
831         kfree(lfs);
832         kfree(buf);
833         if (bytes_not_copied)
834                 return -EFAULT;
835
836         return *ppos;
837 }
838
839 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
840
841 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
842 {
843         struct rvu *rvu = filp->private;
844         struct pci_dev *pdev = NULL;
845         struct mac_ops *mac_ops;
846         char cgx[10], lmac[10];
847         struct rvu_pfvf *pfvf;
848         int pf, domain, blkid;
849         u8 cgx_id, lmac_id;
850         u16 pcifunc;
851
852         domain = 2;
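        /* RVU PF devices are looked up in PCI domain 2, one bus per PF
         * (bus number pf + 1), via pci_get_domain_bus_and_slot() below.
         */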
853         mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
854         /* There can be no CGX devices at all */
855         if (!mac_ops)
856                 return 0;
857         seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
858                    mac_ops->name);
859         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
860                 if (!is_pf_cgxmapped(rvu, pf))
861                         continue;
862
863                 pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
864                 if (!pdev)
865                         continue;
866
867                 cgx[0] = 0;
868                 lmac[0] = 0;
869                 pcifunc = pf << 10;
870                 pfvf = rvu_get_pfvf(rvu, pcifunc);
871
872                 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
873                         blkid = 0;
874                 else
875                         blkid = 1;
876
877                 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
878                                     &lmac_id);
879                 sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
880                 sprintf(lmac, "LMAC%d", lmac_id);
881                 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
882                            dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
883
884                 pci_dev_put(pdev);
885         }
886         return 0;
887 }
888
889 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
890
891 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
892                                 u16 *pcifunc)
893 {
894         struct rvu_block *block;
895         struct rvu_hwinfo *hw;
896
897         hw = rvu->hw;
898         block = &hw->block[blkaddr];
899
900         if (lf < 0 || lf >= block->lf.max) {
901                 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
902                          block->lf.max - 1);
903                 return false;
904         }
905
906         *pcifunc = block->fn_map[lf];
907         if (!*pcifunc) {
908                 dev_warn(rvu->dev,
909                          "This LF is not attached to any RVU PFFUNC\n");
910                 return false;
911         }
912         return true;
913 }
914
915 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
916 {
917         char *buf;
918
919         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
920         if (!buf)
921                 return;
922
923         if (!pfvf->aura_ctx) {
924                 seq_puts(m, "Aura context is not initialized\n");
925         } else {
926                 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
927                                         pfvf->aura_ctx->qsize);
928                 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
929                 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
930         }
931
932         if (!pfvf->pool_ctx) {
933                 seq_puts(m, "Pool context is not initialized\n");
934         } else {
935                 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
936                                         pfvf->pool_ctx->qsize);
937                 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
938                 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
939         }
940         kfree(buf);
941 }
942
943 /* The 'qsize' entry dumps current Aura/Pool context Qsize
944  * and each context's current enable/disable status in a bitmap.
945  */
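/* Example usage (illustrative paths, assuming debugfs is mounted at
 * /sys/kernel/debug and the npa/nix directories created elsewhere in this
 * file):
 *   echo 0 > /sys/kernel/debug/octeontx2/npa/qsize    - select NPA LF 0
 *   cat  /sys/kernel/debug/octeontx2/npa/qsize        - dump its Aura/Pool
 *                                                       counts and bitmaps
 */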
946 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
947                                  int blktype)
948 {
949         void (*print_qsize)(struct seq_file *filp,
950                             struct rvu_pfvf *pfvf) = NULL;
951         struct dentry *current_dir;
952         struct rvu_pfvf *pfvf;
953         struct rvu *rvu;
954         int qsize_id;
955         u16 pcifunc;
956         int blkaddr;
957
958         rvu = filp->private;
959         switch (blktype) {
960         case BLKTYPE_NPA:
961                 qsize_id = rvu->rvu_dbg.npa_qsize_id;
962                 print_qsize = print_npa_qsize;
963                 break;
964
965         case BLKTYPE_NIX:
966                 qsize_id = rvu->rvu_dbg.nix_qsize_id;
967                 print_qsize = print_nix_qsize;
968                 break;
969
970         default:
971                 return -EINVAL;
972         }
973
974         if (blktype == BLKTYPE_NPA) {
975                 blkaddr = BLKADDR_NPA;
976         } else {
977                 current_dir = filp->file->f_path.dentry->d_parent;
978                 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
979                                    BLKADDR_NIX1 : BLKADDR_NIX0);
980         }
981
982         if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
983                 return -EINVAL;
984
985         pfvf = rvu_get_pfvf(rvu, pcifunc);
986         print_qsize(filp, pfvf);
987
988         return 0;
989 }
990
991 static ssize_t rvu_dbg_qsize_write(struct file *filp,
992                                    const char __user *buffer, size_t count,
993                                    loff_t *ppos, int blktype)
994 {
995         char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
996         struct seq_file *seqfile = filp->private_data;
997         char *cmd_buf, *cmd_buf_tmp, *subtoken;
998         struct rvu *rvu = seqfile->private;
999         struct dentry *current_dir;
1000         int blkaddr;
1001         u16 pcifunc;
1002         int ret, lf;
1003
1004         cmd_buf = memdup_user(buffer, count + 1);
1005         if (IS_ERR(cmd_buf))
1006                 return PTR_ERR(cmd_buf);
1007
1008         cmd_buf[count] = '\0';
1009
1010         cmd_buf_tmp = strchr(cmd_buf, '\n');
1011         if (cmd_buf_tmp) {
1012                 *cmd_buf_tmp = '\0';
1013                 count = cmd_buf_tmp - cmd_buf + 1;
1014         }
1015
1016         cmd_buf_tmp = cmd_buf;
1017         subtoken = strsep(&cmd_buf, " ");
1018         ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1019         if (cmd_buf)
1020                 ret = -EINVAL;
1021
1022         if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1023                 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1024                 goto qsize_write_done;
1025         }
1026
1027         if (blktype == BLKTYPE_NPA) {
1028                 blkaddr = BLKADDR_NPA;
1029         } else {
1030                 current_dir = filp->f_path.dentry->d_parent;
1031                 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
1032                                    BLKADDR_NIX1 : BLKADDR_NIX0);
1033         }
1034
1035         if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1036                 ret = -EINVAL;
1037                 goto qsize_write_done;
1038         }
1039         if (blktype  == BLKTYPE_NPA)
1040                 rvu->rvu_dbg.npa_qsize_id = lf;
1041         else
1042                 rvu->rvu_dbg.nix_qsize_id = lf;
1043
1044 qsize_write_done:
1045         kfree(cmd_buf_tmp);
1046         return ret ? ret : count;
1047 }
1048
1049 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1050                                        const char __user *buffer,
1051                                        size_t count, loff_t *ppos)
1052 {
1053         return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1054                                             BLKTYPE_NPA);
1055 }
1056
1057 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1058 {
1059         return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1060 }
1061
1062 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1063
1064 /* Dumps given NPA Aura's context */
1065 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
1066 {
1067         struct npa_aura_s *aura = &rsp->aura;
1068         struct rvu *rvu = m->private;
1069
1070         seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
1071
1072         seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
1073                    aura->ena, aura->pool_caching);
1074         seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
1075                    aura->pool_way_mask, aura->avg_con);
1076         seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
1077                    aura->pool_drop_ena, aura->aura_drop_ena);
1078         seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
1079                    aura->bp_ena, aura->aura_drop);
1080         seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
1081                    aura->shift, aura->avg_level);
1082
1083         seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
1084                    (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
1085
1086         seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
1087                    (u64)aura->limit, aura->bp, aura->fc_ena);
1088
1089         if (!is_rvu_otx2(rvu))
1090                 seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
1091         seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
1092                    aura->fc_up_crossing, aura->fc_stype);
1093         seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
1094
1095         seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
1096
1097         seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
1098                    aura->pool_drop, aura->update_time);
1099         seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
1100                    aura->err_int, aura->err_int_ena);
1101         seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
1102                    aura->thresh_int, aura->thresh_int_ena);
1103         seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
1104                    aura->thresh_up, aura->thresh_qint_idx);
1105         seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
1106
1107         seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
1108         if (!is_rvu_otx2(rvu))
1109                 seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
1110 }
1111
1112 /* Dumps given NPA Pool's context */
1113 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
1114 {
1115         struct npa_pool_s *pool = &rsp->pool;
1116         struct rvu *rvu = m->private;
1117
1118         seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
1119
1120         seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
1121                    pool->ena, pool->nat_align);
1122         seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
1123                    pool->stack_caching, pool->stack_way_mask);
1124         seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
1125                    pool->buf_offset, pool->buf_size);
1126
1127         seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
1128                    pool->stack_max_pages, pool->stack_pages);
1129
1130         seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
1131
1132         seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
1133                    pool->stack_offset, pool->shift, pool->avg_level);
1134         seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
1135                    pool->avg_con, pool->fc_ena, pool->fc_stype);
1136         seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
1137                    pool->fc_hyst_bits, pool->fc_up_crossing);
1138         if (!is_rvu_otx2(rvu))
1139                 seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
1140         seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
1141
1142         seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
1143
1144         seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
1145
1146         seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
1147
1148         seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
1149                    pool->err_int, pool->err_int_ena);
1150         seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
1151         seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
1152                    pool->thresh_int_ena, pool->thresh_up);
1153         seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
1154                    pool->thresh_qint_idx, pool->err_qint_idx);
1155         if (!is_rvu_otx2(rvu))
1156                 seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
1157 }
1158
1159 /* Reads aura/pool's ctx from admin queue */
1160 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
1161 {
1162         void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
1163         struct npa_aq_enq_req aq_req;
1164         struct npa_aq_enq_rsp rsp;
1165         struct rvu_pfvf *pfvf;
1166         int aura, rc, max_id;
1167         int npalf, id, all;
1168         struct rvu *rvu;
1169         u16 pcifunc;
1170
1171         rvu = m->private;
1172
1173         switch (ctype) {
1174         case NPA_AQ_CTYPE_AURA:
1175                 npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
1176                 id = rvu->rvu_dbg.npa_aura_ctx.id;
1177                 all = rvu->rvu_dbg.npa_aura_ctx.all;
1178                 break;
1179
1180         case NPA_AQ_CTYPE_POOL:
1181                 npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
1182                 id = rvu->rvu_dbg.npa_pool_ctx.id;
1183                 all = rvu->rvu_dbg.npa_pool_ctx.all;
1184                 break;
1185         default:
1186                 return -EINVAL;
1187         }
1188
1189         if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1190                 return -EINVAL;
1191
1192         pfvf = rvu_get_pfvf(rvu, pcifunc);
1193         if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
1194                 seq_puts(m, "Aura context is not initialized\n");
1195                 return -EINVAL;
1196         } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
1197                 seq_puts(m, "Pool context is not initialized\n");
1198                 return -EINVAL;
1199         }
1200
1201         memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
1202         aq_req.hdr.pcifunc = pcifunc;
1203         aq_req.ctype = ctype;
1204         aq_req.op = NPA_AQ_INSTOP_READ;
1205         if (ctype == NPA_AQ_CTYPE_AURA) {
1206                 max_id = pfvf->aura_ctx->qsize;
1207                 print_npa_ctx = print_npa_aura_ctx;
1208         } else {
1209                 max_id = pfvf->pool_ctx->qsize;
1210                 print_npa_ctx = print_npa_pool_ctx;
1211         }
1212
1213         if (id < 0 || id >= max_id) {
1214                 seq_printf(m, "Invalid %s, valid range is 0-%d\n",
1215                            (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1216                            max_id - 1);
1217                 return -EINVAL;
1218         }
1219
1220         if (all)
1221                 id = 0;
1222         else
1223                 max_id = id + 1;
1224
1225         for (aura = id; aura < max_id; aura++) {
1226                 aq_req.aura_id = aura;
1227                 seq_printf(m, "======%s : %d=======\n",
1228                            (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
1229                            aq_req.aura_id);
1230                 rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
1231                 if (rc) {
1232                         seq_puts(m, "Failed to read context\n");
1233                         return -EINVAL;
1234                 }
1235                 print_npa_ctx(m, &rsp);
1236         }
1237         return 0;
1238 }
1239
1240 static int write_npa_ctx(struct rvu *rvu, bool all,
1241                          int npalf, int id, int ctype)
1242 {
1243         struct rvu_pfvf *pfvf;
1244         int max_id = 0;
1245         u16 pcifunc;
1246
1247         if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1248                 return -EINVAL;
1249
1250         pfvf = rvu_get_pfvf(rvu, pcifunc);
1251
1252         if (ctype == NPA_AQ_CTYPE_AURA) {
1253                 if (!pfvf->aura_ctx) {
1254                         dev_warn(rvu->dev, "Aura context is not initialized\n");
1255                         return -EINVAL;
1256                 }
1257                 max_id = pfvf->aura_ctx->qsize;
1258         } else if (ctype == NPA_AQ_CTYPE_POOL) {
1259                 if (!pfvf->pool_ctx) {
1260                         dev_warn(rvu->dev, "Pool context is not initialized\n");
1261                         return -EINVAL;
1262                 }
1263                 max_id = pfvf->pool_ctx->qsize;
1264         }
1265
1266         if (id < 0 || id >= max_id) {
1267                 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
1268                          (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1269                          max_id - 1);
1270                 return -EINVAL;
1271         }
1272
1273         switch (ctype) {
1274         case NPA_AQ_CTYPE_AURA:
1275                 rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
1276                 rvu->rvu_dbg.npa_aura_ctx.id = id;
1277                 rvu->rvu_dbg.npa_aura_ctx.all = all;
1278                 break;
1279
1280         case NPA_AQ_CTYPE_POOL:
1281                 rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
1282                 rvu->rvu_dbg.npa_pool_ctx.id = id;
1283                 rvu->rvu_dbg.npa_pool_ctx.all = all;
1284                 break;
1285         default:
1286                 return -EINVAL;
1287         }
1288         return 0;
1289 }
1290
1291 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
1292                                 const char __user *buffer, int *npalf,
1293                                 int *id, bool *all)
1294 {
1295         int bytes_not_copied;
1296         char *cmd_buf_tmp;
1297         char *subtoken;
1298         int ret;
1299
1300         bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
1301         if (bytes_not_copied)
1302                 return -EFAULT;
1303
1304         cmd_buf[*count] = '\0';
1305         cmd_buf_tmp = strchr(cmd_buf, '\n');
1306
1307         if (cmd_buf_tmp) {
1308                 *cmd_buf_tmp = '\0';
1309                 *count = cmd_buf_tmp - cmd_buf + 1;
1310         }
1311
1312         subtoken = strsep(&cmd_buf, " ");
1313         ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
1314         if (ret < 0)
1315                 return ret;
1316         subtoken = strsep(&cmd_buf, " ");
1317         if (subtoken && strcmp(subtoken, "all") == 0) {
1318                 *all = true;
1319         } else {
1320                 ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
1321                 if (ret < 0)
1322                         return ret;
1323         }
1324         if (cmd_buf)
1325                 return -EINVAL;
1326         return ret;
1327 }
1328
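/* Write handler for the aura_ctx/pool_ctx entries. Example usage
 * (illustrative paths):
 *   echo "0 5"   > .../octeontx2/npa/aura_ctx   - show aura 5 of NPA LF 0
 *   echo "0 all" > .../octeontx2/npa/pool_ctx   - show all pools of NPA LF 0
 * The chosen LF/id is cached in rvu->rvu_dbg and used by the display path.
 */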
1329 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1330                                      const char __user *buffer,
1331                                      size_t count, loff_t *ppos, int ctype)
1332 {
1333         char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1334                                         "aura" : "pool";
1335         struct seq_file *seqfp = filp->private_data;
1336         struct rvu *rvu = seqfp->private;
1337         int npalf, id = 0, ret;
1338         bool all = false;
1339
1340         if ((*ppos != 0) || !count)
1341                 return -EINVAL;
1342
1343         cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1344         if (!cmd_buf)
1345                 return count;
1346         ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1347                                    &npalf, &id, &all);
1348         if (ret < 0) {
1349                 dev_info(rvu->dev,
1350                          "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1351                          ctype_string, ctype_string);
1352                 goto done;
1353         } else {
1354                 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1355         }
1356 done:
1357         kfree(cmd_buf);
1358         return ret ? ret : count;
1359 }
1360
1361 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1362                                           const char __user *buffer,
1363                                           size_t count, loff_t *ppos)
1364 {
1365         return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1366                                      NPA_AQ_CTYPE_AURA);
1367 }
1368
1369 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1370 {
1371         return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1372 }
1373
1374 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1375
1376 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1377                                           const char __user *buffer,
1378                                           size_t count, loff_t *ppos)
1379 {
1380         return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1381                                      NPA_AQ_CTYPE_POOL);
1382 }
1383
1384 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1385 {
1386         return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1387 }
1388
1389 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1390
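/* Print per-port NDC request, latency, outstanding-request and
 * cant-alloc counters for the given line type (ctype) and transaction
 * direction (read/write).
 */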
1391 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1392                             int ctype, int transaction)
1393 {
1394         u64 req, out_req, lat, cant_alloc;
1395         struct nix_hw *nix_hw;
1396         struct rvu *rvu;
1397         int port;
1398
1399         if (blk_addr == BLKADDR_NDC_NPA0) {
1400                 rvu = s->private;
1401         } else {
1402                 nix_hw = s->private;
1403                 rvu = nix_hw->rvu;
1404         }
1405
1406         for (port = 0; port < NDC_MAX_PORT; port++) {
1407                 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1408                                                 (port, ctype, transaction));
1409                 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1410                                                 (port, ctype, transaction));
1411                 out_req = rvu_read64(rvu, blk_addr,
1412                                      NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1413                                      (port, ctype, transaction));
1414                 cant_alloc = rvu_read64(rvu, blk_addr,
1415                                         NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1416                                         (port, transaction));
1417                 seq_printf(s, "\nPort:%d\n", port);
1418                 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1419                 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1420                 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", req ? lat / req : 0);
1421                 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1422                 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1423         }
1424 }
1425
1426 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1427 {
1428         seq_puts(s, "\n***** CACHE mode read stats *****\n");
1429         ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1430         seq_puts(s, "\n***** CACHE mode write stats *****\n");
1431         ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1432         seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1433         ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1434         seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1435         ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1436         return 0;
1437 }
1438
1439 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1440 {
1441         return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1442 }
1443
1444 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1445
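/* Print per-bank hit/miss counters of the NDC block at 'blk_addr'. */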
1446 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1447 {
1448         struct nix_hw *nix_hw;
1449         struct rvu *rvu;
1450         int bank, max_bank;
1451
1452         if (blk_addr == BLKADDR_NDC_NPA0) {
1453                 rvu = s->private;
1454         } else {
1455                 nix_hw = s->private;
1456                 rvu = nix_hw->rvu;
1457         }
1458
1459         max_bank = NDC_MAX_BANK(rvu, blk_addr);
1460         for (bank = 0; bank < max_bank; bank++) {
1461                 seq_printf(s, "BANK:%d\n", bank);
1462                 seq_printf(s, "\tHits:\t%lld\n",
1463                            (u64)rvu_read64(rvu, blk_addr,
1464                            NDC_AF_BANKX_HIT_PC(bank)));
1465                 seq_printf(s, "\tMiss:\t%lld\n",
1466                            (u64)rvu_read64(rvu, blk_addr,
1467                             NDC_AF_BANKX_MISS_PC(bank)));
1468         }
1469         return 0;
1470 }
1471
1472 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1473 {
1474         struct nix_hw *nix_hw = filp->private;
1475         int blkaddr = 0;
1476         int ndc_idx = 0;
1477
1478         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1479                    BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1480         ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1481
1482         return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1483 }
1484
1485 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1486
1487 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1488 {
1489         struct nix_hw *nix_hw = filp->private;
1490         int blkaddr = 0;
1491         int ndc_idx = 0;
1492
1493         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1494                    BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1495         ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1496
1497         return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1498 }
1499
1500 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1501
1502 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1503                                              void *unused)
1504 {
1505         return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1506 }
1507
1508 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1509
1510 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1511                                                 void *unused)
1512 {
1513         struct nix_hw *nix_hw = filp->private;
1514         int ndc_idx = NPA0_U;
1515         int blkaddr = 0;
1516
1517         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1518                    BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1519
1520         return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1521 }
1522
1523 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1524
1525 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1526                                                 void *unused)
1527 {
1528         struct nix_hw *nix_hw = filp->private;
1529         int ndc_idx = NPA0_U;
1530         int blkaddr = 0;
1531
1532         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1533                    BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1534
1535         return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1536 }
1537
1538 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1539
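/* Dumps given nix_sq's context on post-OcteonTx2 (CN10K) silicon,
 * which uses the nix_cn10k_sq_ctx_s word layout.
 */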
1540 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1541                                    struct nix_cn10k_sq_ctx_s *sq_ctx)
1542 {
1543         seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1544                    sq_ctx->ena, sq_ctx->qint_idx);
1545         seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1546                    sq_ctx->substream, sq_ctx->sdp_mcast);
1547         seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1548                    sq_ctx->cq, sq_ctx->sqe_way_mask);
1549
1550         seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1551                    sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1552         seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1553                    sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1554         seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1555                    sq_ctx->default_chan, sq_ctx->sqb_count);
1556
1557         seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1558         seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1559         seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1560                    sq_ctx->sqb_aura, sq_ctx->sq_int);
1561         seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1562                    sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1563
1564         seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1565                    sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1566         seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1567                    sq_ctx->lmt_dis, sq_ctx->mnq_dis);
1568         seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1569                    sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1570         seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1571                    sq_ctx->tail_offset, sq_ctx->smenq_offset);
1572         seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1573                    sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1574
1575         seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1576                    sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1577         seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1578         seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1579         seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1580         seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1581                    sq_ctx->smenq_next_sqb);
1582
1583         seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1584
1585         seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1586         seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1587                    sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1588         seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1589                    sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1590         seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1591                    sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
1592
1593         seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1594                    (u64)sq_ctx->scm_lso_rem);
1595         seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1596         seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1597         seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1598                    (u64)sq_ctx->dropped_octs);
1599         seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1600                    (u64)sq_ctx->dropped_pkts);
1601 }
1602
1603 /* Dumps given nix_sq's context */
1604 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1605 {
1606         struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1607         struct nix_hw *nix_hw = m->private;
1608         struct rvu *rvu = nix_hw->rvu;
1609
1610         if (!is_rvu_otx2(rvu)) {
1611                 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
1612                 return;
1613         }
1614         seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
1615                    sq_ctx->sqe_way_mask, sq_ctx->cq);
1616         seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1617                    sq_ctx->sdp_mcast, sq_ctx->substream);
1618         seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
1619                    sq_ctx->qint_idx, sq_ctx->ena);
1620
1621         seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
1622                    sq_ctx->sqb_count, sq_ctx->default_chan);
1623         seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
1624                    sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
1625         seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
1626                    sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
1627
1628         seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
1629                    sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
1630         seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
1631                    sq_ctx->sq_int, sq_ctx->sqb_aura);
1632         seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
1633
1634         seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1635                    sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1636         seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
1637                    sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
1638         seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
1639                    sq_ctx->smenq_offset, sq_ctx->tail_offset);
1640         seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
1641                    sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
1642         seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
1643                    sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1644         seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
1645                    sq_ctx->cq_limit, sq_ctx->max_sqe_size);
1646
1647         seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1648         seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1649         seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1650         seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1651                    sq_ctx->smenq_next_sqb);
1652
1653         seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1654
1655         seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
1656                    sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1657         seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
1658                    sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
1659         seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
1660                    sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
1661         seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
1662
1663         seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1664                    (u64)sq_ctx->scm_lso_rem);
1665         seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1666         seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1667         seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1668                    (u64)sq_ctx->dropped_octs);
1669         seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1670                    (u64)sq_ctx->dropped_pkts);
1671 }
1672
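/* Dumps given nix_rq's context on post-OcteonTx2 (CN10K) silicon,
 * which uses the nix_cn10k_rq_ctx_s word layout.
 */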
1673 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1674                                    struct nix_cn10k_rq_ctx_s *rq_ctx)
1675 {
1676         seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1677                    rq_ctx->ena, rq_ctx->sso_ena);
1678         seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1679                    rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1680         seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1681                    rq_ctx->cq, rq_ctx->lenerr_dis);
1682         seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1683                    rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1684         seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1685                    rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1686         seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1687                    rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1688         seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1689
1690         seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1691                    rq_ctx->spb_aura, rq_ctx->lpb_aura);
1692         seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1693         seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1694                    rq_ctx->sso_grp, rq_ctx->sso_tt);
1695         seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1696                    rq_ctx->pb_caching, rq_ctx->wqe_caching);
1697         seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1698                    rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1699         seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1700                    rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1701         seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1702                    rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1703
1704         seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1705         seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1706         seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1707         seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
1708                    rq_ctx->wqe_skip, rq_ctx->spb_ena);
1709         seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1710                    rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1711         seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1712                    rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1713         seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1714                    rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1715
1716         seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1717                    rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1718         seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1719                    rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1720         seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1721                    rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1722         seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1723                    rq_ctx->spb_aura_drop, rq_ctx->spb_aura_pass);
1724
1725         seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1726                    rq_ctx->lpb_aura_drop, rq_ctx->lpb_aura_pass);
1727         seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1728                    rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1729         seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1730                    rq_ctx->rq_int, rq_ctx->rq_int_ena);
1731         seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1732
1733         seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1734                    rq_ctx->ltag, rq_ctx->good_utag);
1735         seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1736                    rq_ctx->bad_utag, rq_ctx->flow_tagw);
1737         seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1738                    rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1739         seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1740                    rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1741         seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1742
1743         seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1744         seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1745         seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1746         seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1747         seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1748 }
1749
1750 /* Dumps given nix_rq's context */
1751 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1752 {
1753         struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
1754         struct nix_hw *nix_hw = m->private;
1755         struct rvu *rvu = nix_hw->rvu;
1756
1757         if (!is_rvu_otx2(rvu)) {
1758                 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
1759                 return;
1760         }
1761
1762         seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1763                    rq_ctx->wqe_aura, rq_ctx->substream);
1764         seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1765                    rq_ctx->cq, rq_ctx->ena_wqwd);
1766         seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1767                    rq_ctx->ipsech_ena, rq_ctx->sso_ena);
1768         seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
1769
1770         seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1771                    rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
1772         seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
1773                    rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
1774         seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1775                    rq_ctx->pb_caching, rq_ctx->sso_tt);
1776         seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1777                    rq_ctx->sso_grp, rq_ctx->lpb_aura);
1778         seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
1779
1780         seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
1781                    rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
1782         seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
1783                    rq_ctx->xqe_imm_size, rq_ctx->later_skip);
1784         seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
1785                    rq_ctx->first_skip, rq_ctx->lpb_sizem1);
1786         seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
1787                    rq_ctx->spb_ena, rq_ctx->wqe_skip);
1788         seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
1789
1790         seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
1791                    rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
1792         seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
1793                    rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1794         seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
1795                    rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
1796         seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
1797                    rq_ctx->xqe_pass, rq_ctx->xqe_drop);
1798
1799         seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
1800                    rq_ctx->qint_idx, rq_ctx->rq_int_ena);
1801         seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1802                    rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
1803         seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1804                    rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
1805         seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
1806
1807         seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
1808                    rq_ctx->flow_tagw, rq_ctx->bad_utag);
1809         seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
1810                    rq_ctx->good_utag, rq_ctx->ltag);
1811
1812         seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1813         seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1814         seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1815         seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1816         seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1817 }
1818
1819 /* Dumps given nix_cq's context */
1820 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1821 {
1822         struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1823
1824         seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1825
1826         seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1827         seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1828                    cq_ctx->avg_con, cq_ctx->cint_idx);
1829         seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1830                    cq_ctx->cq_err, cq_ctx->qint_idx);
1831         seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1832                    cq_ctx->bpid, cq_ctx->bp_ena);
1833
1834         seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
1835                    cq_ctx->update_time, cq_ctx->avg_level);
1836         seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
1837                    cq_ctx->head, cq_ctx->tail);
1838
1839         seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
1840                    cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1841         seq_printf(m, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d\n",
1842                    cq_ctx->qsize, cq_ctx->caching);
1843         seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1844                    cq_ctx->substream, cq_ctx->ena);
1845         seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1846                    cq_ctx->drop_ena, cq_ctx->drop);
1847         seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1848 }
1849
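/* Read the previously selected SQ/RQ/CQ context(s) of a NIX LF through
 * the NIX admin queue mailbox handler and dump each word to the
 * seq_file.
 */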
1850 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1851                                          void *unused, int ctype)
1852 {
1853         void (*print_nix_ctx)(struct seq_file *filp,
1854                               struct nix_aq_enq_rsp *rsp) = NULL;
1855         struct nix_hw *nix_hw = filp->private;
1856         struct rvu *rvu = nix_hw->rvu;
1857         struct nix_aq_enq_req aq_req;
1858         struct nix_aq_enq_rsp rsp;
1859         char *ctype_string = NULL;
1860         int qidx, rc, max_id = 0;
1861         struct rvu_pfvf *pfvf;
1862         int nixlf, id, all;
1863         u16 pcifunc;
1864
1865         switch (ctype) {
1866         case NIX_AQ_CTYPE_CQ:
1867                 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1868                 id = rvu->rvu_dbg.nix_cq_ctx.id;
1869                 all = rvu->rvu_dbg.nix_cq_ctx.all;
1870                 break;
1871
1872         case NIX_AQ_CTYPE_SQ:
1873                 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1874                 id = rvu->rvu_dbg.nix_sq_ctx.id;
1875                 all = rvu->rvu_dbg.nix_sq_ctx.all;
1876                 break;
1877
1878         case NIX_AQ_CTYPE_RQ:
1879                 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1880                 id = rvu->rvu_dbg.nix_rq_ctx.id;
1881                 all = rvu->rvu_dbg.nix_rq_ctx.all;
1882                 break;
1883
1884         default:
1885                 return -EINVAL;
1886         }
1887
1888         if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1889                 return -EINVAL;
1890
1891         pfvf = rvu_get_pfvf(rvu, pcifunc);
1892         if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1893                 seq_puts(filp, "SQ context is not initialized\n");
1894                 return -EINVAL;
1895         } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1896                 seq_puts(filp, "RQ context is not initialized\n");
1897                 return -EINVAL;
1898         } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1899                 seq_puts(filp, "CQ context is not initialized\n");
1900                 return -EINVAL;
1901         }
1902
1903         if (ctype == NIX_AQ_CTYPE_SQ) {
1904                 max_id = pfvf->sq_ctx->qsize;
1905                 ctype_string = "sq";
1906                 print_nix_ctx = print_nix_sq_ctx;
1907         } else if (ctype == NIX_AQ_CTYPE_RQ) {
1908                 max_id = pfvf->rq_ctx->qsize;
1909                 ctype_string = "rq";
1910                 print_nix_ctx = print_nix_rq_ctx;
1911         } else if (ctype == NIX_AQ_CTYPE_CQ) {
1912                 max_id = pfvf->cq_ctx->qsize;
1913                 ctype_string = "cq";
1914                 print_nix_ctx = print_nix_cq_ctx;
1915         }
1916
1917         memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1918         aq_req.hdr.pcifunc = pcifunc;
1919         aq_req.ctype = ctype;
1920         aq_req.op = NIX_AQ_INSTOP_READ;
1921         if (all)
1922                 id = 0;
1923         else
1924                 max_id = id + 1;
1925         for (qidx = id; qidx < max_id; qidx++) {
1926                 aq_req.qidx = qidx;
1927                 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
1928                            ctype_string, nixlf, aq_req.qidx);
1929                 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1930                 if (rc) {
1931                         seq_puts(filp, "Failed to read the context\n");
1932                         return -EINVAL;
1933                 }
1934                 print_nix_ctx(filp, &rsp);
1935         }
1936         return 0;
1937 }
1938
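/* Validate the NIX LF and queue id written by the user and cache the
 * selection in rvu->rvu_dbg for the next read of the *_ctx file.
 */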
1939 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1940                                int id, int ctype, char *ctype_string,
1941                                struct seq_file *m)
1942 {
1943         struct nix_hw *nix_hw = m->private;
1944         struct rvu_pfvf *pfvf;
1945         int max_id = 0;
1946         u16 pcifunc;
1947
1948         if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1949                 return -EINVAL;
1950
1951         pfvf = rvu_get_pfvf(rvu, pcifunc);
1952
1953         if (ctype == NIX_AQ_CTYPE_SQ) {
1954                 if (!pfvf->sq_ctx) {
1955                         dev_warn(rvu->dev, "SQ context is not initialized\n");
1956                         return -EINVAL;
1957                 }
1958                 max_id = pfvf->sq_ctx->qsize;
1959         } else if (ctype == NIX_AQ_CTYPE_RQ) {
1960                 if (!pfvf->rq_ctx) {
1961                         dev_warn(rvu->dev, "RQ context is not initialized\n");
1962                         return -EINVAL;
1963                 }
1964                 max_id = pfvf->rq_ctx->qsize;
1965         } else if (ctype == NIX_AQ_CTYPE_CQ) {
1966                 if (!pfvf->cq_ctx) {
1967                         dev_warn(rvu->dev, "CQ context is not initialized\n");
1968                         return -EINVAL;
1969                 }
1970                 max_id = pfvf->cq_ctx->qsize;
1971         }
1972
1973         if (id < 0 || id >= max_id) {
1974                 dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1975                          ctype_string, max_id - 1);
1976                 return -EINVAL;
1977         }
1978         switch (ctype) {
1979         case NIX_AQ_CTYPE_CQ:
1980                 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1981                 rvu->rvu_dbg.nix_cq_ctx.id = id;
1982                 rvu->rvu_dbg.nix_cq_ctx.all = all;
1983                 break;
1984
1985         case NIX_AQ_CTYPE_SQ:
1986                 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1987                 rvu->rvu_dbg.nix_sq_ctx.id = id;
1988                 rvu->rvu_dbg.nix_sq_ctx.all = all;
1989                 break;
1990
1991         case NIX_AQ_CTYPE_RQ:
1992                 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1993                 rvu->rvu_dbg.nix_rq_ctx.id = id;
1994                 rvu->rvu_dbg.nix_rq_ctx.all = all;
1995                 break;
1996         default:
1997                 return -EINVAL;
1998         }
1999         return 0;
2000 }
2001
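/* Write handler shared by the NIX sq_ctx, rq_ctx and cq_ctx debugfs
 * files. Example usage (assuming debugfs at /sys/kernel/debug):
 *   echo "0 5" > /sys/kernel/debug/octeontx2/nix/sq_ctx
 * selects SQ 5 of NIX LF 0; "echo 0 all" selects every SQ of that LF.
 */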
2002 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2003                                            const char __user *buffer,
2004                                            size_t count, loff_t *ppos,
2005                                            int ctype)
2006 {
2007         struct seq_file *m = filp->private_data;
2008         struct nix_hw *nix_hw = m->private;
2009         struct rvu *rvu = nix_hw->rvu;
2010         char *cmd_buf, *ctype_string;
2011         int nixlf, id = 0, ret;
2012         bool all = false;
2013
2014         if ((*ppos != 0) || !count)
2015                 return -EINVAL;
2016
2017         switch (ctype) {
2018         case NIX_AQ_CTYPE_SQ:
2019                 ctype_string = "sq";
2020                 break;
2021         case NIX_AQ_CTYPE_RQ:
2022                 ctype_string = "rq";
2023                 break;
2024         case NIX_AQ_CTYPE_CQ:
2025                 ctype_string = "cq";
2026                 break;
2027         default:
2028                 return -EINVAL;
2029         }
2030
2031         cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2032
2033         if (!cmd_buf)
2034                 return count;
2035
2036         ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2037                                    &nixlf, &id, &all);
2038         if (ret < 0) {
2039                 dev_info(rvu->dev,
2040                          "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2041                          ctype_string, ctype_string);
2042                 goto done;
2043         } else {
2044                 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2045                                           ctype_string, m);
2046         }
2047 done:
2048         kfree(cmd_buf);
2049         return ret ? ret : count;
2050 }
2051
2052 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
2053                                         const char __user *buffer,
2054                                         size_t count, loff_t *ppos)
2055 {
2056         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2057                                             NIX_AQ_CTYPE_SQ);
2058 }
2059
2060 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
2061 {
2062         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
2063 }
2064
2065 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2066
2067 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
2068                                         const char __user *buffer,
2069                                         size_t count, loff_t *ppos)
2070 {
2071         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2072                                             NIX_AQ_CTYPE_RQ);
2073 }
2074
2075 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
2076 {
2077         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
2078 }
2079
2080 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2081
2082 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
2083                                         const char __user *buffer,
2084                                         size_t count, loff_t *ppos)
2085 {
2086         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2087                                             NIX_AQ_CTYPE_CQ);
2088 }
2089
2090 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
2091 {
2092         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
2093 }
2094
2095 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2096
2097 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2098                                  unsigned long *bmap, char *qtype)
2099 {
2100         char *buf;
2101
2102         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2103         if (!buf)
2104                 return;
2105
2106         bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2107         seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2108         seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2109                    qtype, buf);
2110         kfree(buf);
2111 }
2112
2113 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2114 {
2115         if (!pfvf->cq_ctx)
2116                 seq_puts(filp, "cq context is not initialized\n");
2117         else
2118                 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2119                                      "cq");
2120
2121         if (!pfvf->rq_ctx)
2122                 seq_puts(filp, "rq context is not initialized\n");
2123         else
2124                 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2125                                      "rq");
2126
2127         if (!pfvf->sq_ctx)
2128                 seq_puts(filp, "sq context is not initialized\n");
2129         else
2130                 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2131                                      "sq");
2132 }
2133
2134 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
2135                                        const char __user *buffer,
2136                                        size_t count, loff_t *ppos)
2137 {
2138         return rvu_dbg_qsize_write(filp, buffer, count, ppos,
2139                                    BLKTYPE_NIX);
2140 }
2141
2142 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
2143 {
2144         return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
2145 }
2146
2147 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2148
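/* Dump one NIX ingress bandwidth (policer) profile context. */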
2149 static void print_band_prof_ctx(struct seq_file *m,
2150                                 struct nix_bandprof_s *prof)
2151 {
2152         char *str;
2153
2154         switch (prof->pc_mode) {
2155         case NIX_RX_PC_MODE_VLAN:
2156                 str = "VLAN";
2157                 break;
2158         case NIX_RX_PC_MODE_DSCP:
2159                 str = "DSCP";
2160                 break;
2161         case NIX_RX_PC_MODE_GEN:
2162                 str = "Generic";
2163                 break;
2164         case NIX_RX_PC_MODE_RSVD:
2165                 str = "Reserved";
2166                 break;
2167         }
2168         seq_printf(m, "W0: pc_mode\t\t%s\n", str);
2169         str = (prof->icolor == 3) ? "Color blind" :
2170                 (prof->icolor == 0) ? "Green" :
2171                 (prof->icolor == 1) ? "Yellow" : "Red";
2172         seq_printf(m, "W0: icolor\t\t%s\n", str);
2173         seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2174         seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2175         seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2176         seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2177         seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2178         seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2179         seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2180         seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2181
2182         seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
2183         str = (prof->lmode == 0) ? "byte" : "packet";
2184         seq_printf(m, "W1: lmode\t\t%s\n", str);
2185         seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2186         seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2187         seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2188         seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
2189         str = (prof->gc_action == 0) ? "PASS" :
2190                 (prof->gc_action == 1) ? "DROP" : "RED";
2191         seq_printf(m, "W1: gc_action\t\t%s\n", str);
2192         str = (prof->yc_action == 0) ? "PASS" :
2193                 (prof->yc_action == 1) ? "DROP" : "RED";
2194         seq_printf(m, "W1: yc_action\t\t%s\n", str);
2195         str = (prof->rc_action == 0) ? "PASS" :
2196                 (prof->rc_action == 1) ? "DROP" : "RED";
2197         seq_printf(m, "W1: rc_action\t\t%s\n", str);
2198         seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2199         seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
2200         seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
2201
2202         seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2203         seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2204         seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2205         seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2206                    (u64)prof->green_pkt_pass);
2207         seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2208                    (u64)prof->yellow_pkt_pass);
2209         seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2210         seq_printf(m, "W7: green_octs_pass\t%lld\n",
2211                    (u64)prof->green_octs_pass);
2212         seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2213                    (u64)prof->yellow_octs_pass);
2214         seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2215         seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2216                    (u64)prof->green_pkt_drop);
2217         seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2218                    (u64)prof->yellow_pkt_drop);
2219         seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2220         seq_printf(m, "W13: green_octs_drop\t%lld\n",
2221                    (u64)prof->green_octs_drop);
2222         seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2223                    (u64)prof->yellow_octs_drop);
2224         seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2225         seq_puts(m, "==============================\n");
2226 }
2227
2228 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
2229 {
2230         struct nix_hw *nix_hw = m->private;
2231         struct nix_cn10k_aq_enq_req aq_req;
2232         struct nix_cn10k_aq_enq_rsp aq_rsp;
2233         struct rvu *rvu = nix_hw->rvu;
2234         struct nix_ipolicer *ipolicer;
2235         int layer, prof_idx, idx, rc;
2236         u16 pcifunc;
2237         char *str;
2238
2239         /* Ingress policers do not exist on all platforms */
2240         if (!nix_hw->ipolicer)
2241                 return 0;
2242
2243         for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2244                 if (layer == BAND_PROF_INVAL_LAYER)
2245                         continue;
2246                 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2247                         (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
2248
2249                 seq_printf(m, "\n%s bandwidth profiles\n", str);
2250                 seq_puts(m, "=======================\n");
2251
2252                 ipolicer = &nix_hw->ipolicer[layer];
2253
2254                 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
2255                         if (is_rsrc_free(&ipolicer->band_prof, idx))
2256                                 continue;
2257
2258                         prof_idx = (idx & 0x3FFF) | (layer << 14);
2259                         rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
2260                                                  0x00, NIX_AQ_CTYPE_BANDPROF,
2261                                                  prof_idx);
2262                         if (rc) {
2263                                 dev_err(rvu->dev,
2264                                         "%s: Failed to fetch context of %s profile %d, err %d\n",
2265                                         __func__, str, idx, rc);
2266                                 return 0;
2267                         }
2268                         seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
2269                         pcifunc = ipolicer->pfvf_map[idx];
2270                         if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2271                                 seq_printf(m, "Allocated to :: PF %d\n",
2272                                            rvu_get_pf(pcifunc));
2273                         else
2274                                 seq_printf(m, "Allocated to :: PF %d VF %d\n",
2275                                            rvu_get_pf(pcifunc),
2276                                            (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2277                         print_band_prof_ctx(m, &aq_rsp.prof);
2278                 }
2279         }
2280         return 0;
2281 }
2282
2283 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2284
2285 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2286 {
2287         struct nix_hw *nix_hw = m->private;
2288         struct nix_ipolicer *ipolicer;
2289         int layer;
2290         char *str;
2291
2292         /* Ingress policers do not exist on all platforms */
2293         if (!nix_hw->ipolicer)
2294                 return 0;
2295
2296         seq_puts(m, "\nBandwidth profile resource free count\n");
2297         seq_puts(m, "=====================================\n");
2298         for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2299                 if (layer == BAND_PROF_INVAL_LAYER)
2300                         continue;
2301                 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2302                         (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2303
2304                 ipolicer = &nix_hw->ipolicer[layer];
2305                 seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
2306                            ipolicer->band_prof.max,
2307                            rvu_rsrc_free_count(&ipolicer->band_prof));
2308         }
2309         seq_puts(m, "=====================================\n");
2310
2311         return 0;
2312 }
2313
2314 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2315
2316 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2317 {
2318         struct nix_hw *nix_hw;
2319
2320         if (!is_block_implemented(rvu->hw, blkaddr))
2321                 return;
2322
2323         if (blkaddr == BLKADDR_NIX0) {
2324                 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2325                 nix_hw = &rvu->hw->nix[0];
2326         } else {
2327                 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2328                                                       rvu->rvu_dbg.root);
2329                 nix_hw = &rvu->hw->nix[1];
2330         }
2331
2332         debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2333                             &rvu_dbg_nix_sq_ctx_fops);
2334         debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2335                             &rvu_dbg_nix_rq_ctx_fops);
2336         debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2337                             &rvu_dbg_nix_cq_ctx_fops);
2338         debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2339                             &rvu_dbg_nix_ndc_tx_cache_fops);
2340         debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2341                             &rvu_dbg_nix_ndc_rx_cache_fops);
2342         debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2343                             &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2344         debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2345                             &rvu_dbg_nix_ndc_rx_hits_miss_fops);
2346         debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2347                             &rvu_dbg_nix_qsize_fops);
2348         debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2349                             &rvu_dbg_nix_band_prof_ctx_fops);
2350         debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2351                             &rvu_dbg_nix_band_prof_rsrc_fops);
2352 }
2353
2354 static void rvu_dbg_npa_init(struct rvu *rvu)
2355 {
2356         rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2357
2358         debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2359                             &rvu_dbg_npa_qsize_fops);
2360         debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2361                             &rvu_dbg_npa_aura_ctx_fops);
2362         debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2363                             &rvu_dbg_npa_pool_ctx_fops);
2364         debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2365                             &rvu_dbg_npa_ndc_cache_fops);
2366         debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2367                             &rvu_dbg_npa_ndc_hits_miss_fops);
2368 }
2369
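/* Fetch a cumulative NIX RX/TX stat for the LMAC and print it on
 * success; each macro evaluates to the counter value. 'err', 'rvu',
 * 'cgxd' and 'lmac_id' come from the caller's scope.
 */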
2370 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)                          \
2371         ({                                                              \
2372                 u64 cnt;                                                \
2373                 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2374                                              NIX_STATS_RX, &(cnt));     \
2375                 if (!err)                                               \
2376                         seq_printf(s, "%s: %llu\n", name, cnt);         \
2377                 cnt;                                                    \
2378         })
2379
2380 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)                  \
2381         ({                                                              \
2382                 u64 cnt;                                                \
2383                 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2384                                           NIX_STATS_TX, &(cnt));        \
2385                 if (!err)                                               \
2386                         seq_printf(s, "%s: %llu\n", name, cnt);         \
2387                 cnt;                                                    \
2388         })
2389
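/* Dump link status, cumulative NIX port level RX/TX stats and CGX/RPM
 * MAC level RX/TX stats for the given LMAC.
 */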
2390 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2391 {
2392         struct cgx_link_user_info linfo;
2393         struct mac_ops *mac_ops;
2394         void *cgxd = s->private;
2395         u64 ucast, mcast, bcast;
2396         int stat = 0, err = 0;
2397         u64 tx_stat, rx_stat;
2398         struct rvu *rvu;
2399
2400         rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2401                                              PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2402         if (!rvu)
2403                 return -ENODEV;
2404
2405         mac_ops = get_mac_ops(cgxd);
2406         /* There can be no CGX devices at all */
2407         if (!mac_ops)
2408                 return 0;
2409
2410         /* Link status */
2411         seq_puts(s, "\n=======Link Status======\n\n");
2412         err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2413         if (err)
2414                 seq_puts(s, "Failed to read link status\n");
2415         seq_printf(s, "\nLink is %s %d Mbps\n\n",
2416                    linfo.link_up ? "UP" : "DOWN", linfo.speed);
2417
2418         /* Rx stats */
2419         seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2420                    mac_ops->name);
2421         ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2422         if (err)
2423                 return err;
2424         mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2425         if (err)
2426                 return err;
2427         bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2428         if (err)
2429                 return err;
2430         seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2431         PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2432         if (err)
2433                 return err;
2434         PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2435         if (err)
2436                 return err;
2437         PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2438         if (err)
2439                 return err;
2440
2441         /* Tx stats */
2442         seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2443                    mac_ops->name);
2444         ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2445         if (err)
2446                 return err;
2447         mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2448         if (err)
2449                 return err;
2450         bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2451         if (err)
2452                 return err;
2453         seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2454         PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2455         if (err)
2456                 return err;
2457         PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2458         if (err)
2459                 return err;
2460
2461         /* Rx stats */
2462         seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2463         while (stat < mac_ops->rx_stats_cnt) {
2464                 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2465                 if (err)
2466                         return err;
2467                 if (is_rvu_otx2(rvu))
2468                         seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2469                                    rx_stat);
2470                 else
2471                         seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2472                                    rx_stat);
2473                 stat++;
2474         }
2475
2476         /* Tx stats */
2477         stat = 0;
2478         seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2479         while (stat < mac_ops->tx_stats_cnt) {
2480                 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2481                 if (err)
2482                         return err;
2483
2484                 if (is_rvu_otx2(rvu))
2485                         seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2486                                    tx_stat);
2487                 else
2488                         seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2489                                    tx_stat);
2490                 stat++;
2491         }
2492
2493         return err;
2494 }
2495
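/* Derive the LMAC id from the parent debugfs directory name ("lmacX"). */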
2496 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2497 {
2498         struct dentry *current_dir;
2499         char *buf;
2500
2501         current_dir = filp->file->f_path.dentry->d_parent;
2502         buf = strrchr(current_dir->d_name.name, 'c');
2503         if (!buf)
2504                 return -EINVAL;
2505
2506         return kstrtoint(buf + 1, 10, lmac_id);
2507 }
2508
2509 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
2510 {
2511         int lmac_id, err;
2512
2513         err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2514         if (!err)
2515                 return cgx_print_stats(filp, lmac_id);
2516
2517         return err;
2518 }
2519
2520 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2521
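/* Dump the DMAC filter mode (broadcast/multicast/unicast accept) and
 * the enabled DMAC CAM entries of an LMAC.
 */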
2522 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2523 {
2524         struct pci_dev *pdev = NULL;
2525         void *cgxd = s->private;
2526         char *bcast, *mcast;
2527         u16 index, domain;
2528         u8 dmac[ETH_ALEN];
2529         struct rvu *rvu;
2530         u64 cfg, mac;
2531         int pf;
2532
2533         rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2534                                              PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2535         if (!rvu)
2536                 return -ENODEV;
2537
2538         pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
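        /* PF's PCI device is at domain 2, bus 'pf + 1', devfn 0 */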
2539         domain = 2;
2540
2541         pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2542         if (!pdev)
2543                 return 0;
2544
2545         cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2546         bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2547         mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2548
2549         seq_puts(s,
2550                  "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2551         seq_printf(s, "%s  PF%d  %9s  %9s",
2552                    dev_name(&pdev->dev), pf, bcast, mcast);
2553         if (cfg & CGX_DMAC_CAM_ACCEPT)
2554                 seq_printf(s, "%12s\n\n", "UNICAST");
2555         else
2556                 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2557
2558         seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2559
2560         for (index = 0; index < 32; index++) {
2561                 cfg = cgx_read_dmac_entry(cgxd, index);
2562                 /* Display enabled dmac entries associated with current lmac */
2563                 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2564                     FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2565                         mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2566                         u64_to_ether_addr(mac, dmac);
2567                         seq_printf(s, "%7d     %pM\n", index, dmac);
2568                 }
2569         }
2570
2571         pci_dev_put(pdev);
2572         return 0;
2573 }
2574
2575 static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
2576 {
2577         int err, lmac_id;
2578
2579         err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2580         if (!err)
2581                 return cgx_print_dmac_flt(filp, lmac_id);
2582
2583         return err;
2584 }
2585
2586 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2587
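/* Create the CGX/RPM debugfs hierarchy: per-CGX directories with
 * per-LMAC sub-directories holding "stats" and "mac_filter" files.
 */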
2588 static void rvu_dbg_cgx_init(struct rvu *rvu)
2589 {
2590         struct mac_ops *mac_ops;
2591         unsigned long lmac_bmap;
2592         int i, lmac_id;
2593         char dname[20];
2594         void *cgx;
2595
2596         if (!cgx_get_cgxcnt_max())
2597                 return;
2598
2599         mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2600         if (!mac_ops)
2601                 return;
2602
2603         rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2604                                                    rvu->rvu_dbg.root);
2605
2606         for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2607                 cgx = rvu_cgx_pdata(i, rvu);
2608                 if (!cgx)
2609                         continue;
2610                 lmac_bmap = cgx_get_lmac_bmap(cgx);
2611                 /* cgx debugfs dir */
2612                 sprintf(dname, "%s%d", mac_ops->name, i);
2613                 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2614                                                       rvu->rvu_dbg.cgx_root);
2615
2616                 for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
2617                         /* lmac debugfs dir */
2618                         sprintf(dname, "lmac%d", lmac_id);
2619                         rvu->rvu_dbg.lmac =
2620                                 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2621
2622                         debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2623                                             cgx, &rvu_dbg_cgx_stat_fops);
2624                         debugfs_create_file("mac_filter", 0600,
2625                                             rvu->rvu_dbg.lmac, cgx,
2626                                             &rvu_dbg_cgx_dmac_flt_fops);
2627                 }
2628         }
2629 }
2630
2631 /* NPC debugfs APIs */
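/* Print MCAM entry and counter allocation of a given PF/VF */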
2632 static void rvu_print_npc_mcam_info(struct seq_file *s,
2633                                     u16 pcifunc, int blkaddr)
2634 {
2635         struct rvu *rvu = s->private;
2636         int entry_acnt, entry_ecnt;
2637         int cntr_acnt, cntr_ecnt;
2638
2639         rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2640                                           &entry_acnt, &entry_ecnt);
2641         rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2642                                             &cntr_acnt, &cntr_ecnt);
2643         if (!entry_acnt && !cntr_acnt)
2644                 return;
2645
2646         if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2647                 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2648                            rvu_get_pf(pcifunc));
2649         else
2650                 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2651                            rvu_get_pf(pcifunc),
2652                            (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2653
2654         if (entry_acnt) {
2655                 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2656                 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2657         }
2658         if (cntr_acnt) {
2659                 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2660                 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2661         }
2662 }
2663
2664 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
2665 {
2666         struct rvu *rvu = filp->private;
2667         int pf, vf, numvfs, blkaddr;
2668         struct npc_mcam *mcam;
2669         u16 pcifunc, counters;
2670         u64 cfg;
2671
2672         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2673         if (blkaddr < 0)
2674                 return -ENODEV;
2675
2676         mcam = &rvu->hw->mcam;
2677         counters = rvu->hw->npc_counters;
2678
2679         seq_puts(filp, "\nNPC MCAM info:\n");
2680         /* MCAM keywidth on receive and transmit sides */
2681         cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2682         cfg = (cfg >> 32) & 0x07;
2683         seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2684                    "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2685                    "224bits" : "448bits"));
2686         cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2687         cfg = (cfg >> 32) & 0x07;
2688         seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2689                    "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2690                    "224bits" : "448bits"));
2691
2692         mutex_lock(&mcam->lock);
2693         /* MCAM entries */
2694         seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2695         seq_printf(filp, "\t\t Reserved \t: %d\n",
2696                    mcam->total_entries - mcam->bmap_entries);
2697         seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2698
2699         /* MCAM counters */
2700         seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2701         seq_printf(filp, "\t\t Reserved \t: %d\n",
2702                    counters - mcam->counters.max);
2703         seq_printf(filp, "\t\t Available \t: %d\n",
2704                    rvu_rsrc_free_count(&mcam->counters));
2705
2706         if (mcam->bmap_entries == mcam->bmap_fcnt) {
2707                 mutex_unlock(&mcam->lock);
2708                 return 0;
2709         }
2710
2711         seq_puts(filp, "\n\t\t Current allocation\n");
2712         seq_puts(filp, "\t\t====================\n");
2713         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2714                 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2715                 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2716
2717                 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2718                 numvfs = (cfg >> 12) & 0xFF;
2719                 for (vf = 0; vf < numvfs; vf++) {
2720                         pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2721                         rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2722                 }
2723         }
2724
2725         mutex_unlock(&mcam->lock);
2726         return 0;
2727 }
2728
2729 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2730
2731 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2732                                              void *unused)
2733 {
2734         struct rvu *rvu = filp->private;
2735         struct npc_mcam *mcam;
2736         int blkaddr;
2737
2738         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2739         if (blkaddr < 0)
2740                 return -ENODEV;
2741
2742         mcam = &rvu->hw->mcam;
2743
2744         seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2745         seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2746                    rvu_read64(rvu, blkaddr,
2747                               NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2748
2749         return 0;
2750 }
2751
2752 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2753
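/* Print each match field (value and mask) programmed in an MCAM rule */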
2754 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
2755                                         struct rvu_npc_mcam_rule *rule)
2756 {
2757         u8 bit;
2758
2759         for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
2760                 seq_printf(s, "\t%s  ", npc_get_field_name(bit));
2761                 switch (bit) {
2762                 case NPC_LXMB:
2763                         if (rule->lxmb == 1)
2764                                 seq_puts(s, "\tL2M nibble is set\n");
2765                         else
2766                                 seq_puts(s, "\tL2B nibble is set\n");
2767                         break;
2768                 case NPC_DMAC:
2769                         seq_printf(s, "%pM ", rule->packet.dmac);
2770                         seq_printf(s, "mask %pM\n", rule->mask.dmac);
2771                         break;
2772                 case NPC_SMAC:
2773                         seq_printf(s, "%pM ", rule->packet.smac);
2774                         seq_printf(s, "mask %pM\n", rule->mask.smac);
2775                         break;
2776                 case NPC_ETYPE:
2777                         seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
2778                         seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
2779                         break;
2780                 case NPC_OUTER_VID:
2781                         seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
2782                         seq_printf(s, "mask 0x%x\n",
2783                                    ntohs(rule->mask.vlan_tci));
2784                         break;
2785                 case NPC_TOS:
2786                         seq_printf(s, "%d ", rule->packet.tos);
2787                         seq_printf(s, "mask 0x%x\n", rule->mask.tos);
2788                         break;
2789                 case NPC_SIP_IPV4:
2790                         seq_printf(s, "%pI4 ", &rule->packet.ip4src);
2791                         seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
2792                         break;
2793                 case NPC_DIP_IPV4:
2794                         seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
2795                         seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
2796                         break;
2797                 case NPC_SIP_IPV6:
2798                         seq_printf(s, "%pI6 ", rule->packet.ip6src);
2799                         seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
2800                         break;
2801                 case NPC_DIP_IPV6:
2802                         seq_printf(s, "%pI6 ", rule->packet.ip6dst);
2803                         seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
2804                         break;
2805                 case NPC_IPFRAG_IPV6:
2806                         seq_printf(s, "0x%x ", rule->packet.next_header);
2807                         seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
2808                         break;
2809                 case NPC_IPFRAG_IPV4:
2810                         seq_printf(s, "0x%x ", rule->packet.ip_flag);
2811                         seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
2812                         break;
2813                 case NPC_SPORT_TCP:
2814                 case NPC_SPORT_UDP:
2815                 case NPC_SPORT_SCTP:
2816                         seq_printf(s, "%d ", ntohs(rule->packet.sport));
2817                         seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
2818                         break;
2819                 case NPC_DPORT_TCP:
2820                 case NPC_DPORT_UDP:
2821                 case NPC_DPORT_SCTP:
2822                         seq_printf(s, "%d ", ntohs(rule->packet.dport));
2823                         seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
2824                         break;
2825                 default:
2826                         seq_puts(s, "\n");
2827                         break;
2828                 }
2829         }
2830 }
2831
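/* Print the NIX TX/RX action configured for an MCAM rule */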
2832 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2833                                          struct rvu_npc_mcam_rule *rule)
2834 {
2835         if (is_npc_intf_tx(rule->intf)) {
2836                 switch (rule->tx_action.op) {
2837                 case NIX_TX_ACTIONOP_DROP:
2838                         seq_puts(s, "\taction: Drop\n");
2839                         break;
2840                 case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2841                         seq_puts(s, "\taction: Unicast to default channel\n");
2842                         break;
2843                 case NIX_TX_ACTIONOP_UCAST_CHAN:
2844                         seq_printf(s, "\taction: Unicast to channel %d\n",
2845                                    rule->tx_action.index);
2846                         break;
2847                 case NIX_TX_ACTIONOP_MCAST:
2848                         seq_puts(s, "\taction: Multicast\n");
2849                         break;
2850                 case NIX_TX_ACTIONOP_DROP_VIOL:
2851                         seq_puts(s, "\taction: Lockdown Violation Drop\n");
2852                         break;
2853                 default:
2854                         break;
2855                 }
2856         } else {
2857                 switch (rule->rx_action.op) {
2858                 case NIX_RX_ACTIONOP_DROP:
2859                         seq_puts(s, "\taction: Drop\n");
2860                         break;
2861                 case NIX_RX_ACTIONOP_UCAST:
2862                         seq_printf(s, "\taction: Direct to queue %d\n",
2863                                    rule->rx_action.index);
2864                         break;
2865                 case NIX_RX_ACTIONOP_RSS:
2866                         seq_puts(s, "\taction: RSS\n");
2867                         break;
2868                 case NIX_RX_ACTIONOP_UCAST_IPSEC:
2869                         seq_puts(s, "\taction: Unicast ipsec\n");
2870                         break;
2871                 case NIX_RX_ACTIONOP_MCAST:
2872                         seq_puts(s, "\taction: Multicast\n");
2873                         break;
2874                 default:
2875                         break;
2876                 }
2877         }
2878 }
2879
2880 static const char *rvu_dbg_get_intf_name(int intf)
2881 {
2882         switch (intf) {
2883         case NIX_INTFX_RX(0):
2884                 return "NIX0_RX";
2885         case NIX_INTFX_RX(1):
2886                 return "NIX1_RX";
2887         case NIX_INTFX_TX(0):
2888                 return "NIX0_TX";
2889         case NIX_INTFX_TX(1):
2890                 return "NIX1_TX";
2891         default:
2892                 break;
2893         }
2894
2895         return "unknown";
2896 }
2897
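/* List installed MCAM rules with their owner, match fields, action,
 * enable state and hit count.
 */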
2898 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
2899 {
2900         struct rvu_npc_mcam_rule *iter;
2901         struct rvu *rvu = s->private;
2902         struct npc_mcam *mcam;
2903         int pf, vf = -1;
2904         bool enabled;
2905         int blkaddr;
2906         u16 target;
2907         u64 hits;
2908
2909         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2910         if (blkaddr < 0)
2911                 return 0;
2912
2913         mcam = &rvu->hw->mcam;
2914
2915         mutex_lock(&mcam->lock);
2916         list_for_each_entry(iter, &mcam->mcam_rules, list) {
2917                 pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2918                 seq_printf(s, "\n\tInstalled by: PF%d ", pf);
2919
2920                 if (iter->owner & RVU_PFVF_FUNC_MASK) {
2921                         vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
2922                         seq_printf(s, "VF%d", vf);
2923                 }
2924                 seq_puts(s, "\n");
2925
2926                 seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
2927                                                     "RX" : "TX");
2928                 seq_printf(s, "\tinterface: %s\n",
2929                            rvu_dbg_get_intf_name(iter->intf));
2930                 seq_printf(s, "\tmcam entry: %d\n", iter->entry);
2931
2932                 rvu_dbg_npc_mcam_show_flows(s, iter);
2933                 if (is_npc_intf_rx(iter->intf)) {
2934                         target = iter->rx_action.pf_func;
2935                         pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2936                         seq_printf(s, "\tForward to: PF%d ", pf);
2937
2938                         if (target & RVU_PFVF_FUNC_MASK) {
2939                                 vf = (target & RVU_PFVF_FUNC_MASK) - 1;
2940                                 seq_printf(s, "VF%d", vf);
2941                         }
2942                         seq_puts(s, "\n");
2943                         seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
2944                         seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
2945                 }
2946
2947                 rvu_dbg_npc_mcam_show_action(s, iter);
2948
2949                 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
2950                 seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
2951
2952                 if (!iter->has_cntr)
2953                         continue;
2954                 seq_printf(s, "\tcounter: %d\n", iter->cntr);
2955
2956                 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
2957                 seq_printf(s, "\thits: %lld\n", hits);
2958         }
2959         mutex_unlock(&mcam->lock);
2960
2961         return 0;
2962 }
2963
2964 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2965
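/* Dump the exact match MEM table way-wise, followed by the CAM table */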
2966 static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
2967 {
2968         struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
2969         struct npc_exact_table_entry *cam_entry;
2970         struct npc_exact_table *table;
2971         struct rvu *rvu = s->private;
2972         int i, j;
2973
2974         u8 bitmap = 0;
2975
2976         table = rvu->hw->table;
2977
2978         mutex_lock(&table->lock);
2979
2980         /* Check if there is at least one entry in mem table */
2981         if (!table->mem_tbl_entry_cnt)
2982                 goto dump_cam_table;
2983
2984         /* Print table headers */
2985         seq_puts(s, "\n\tExact Match MEM Table\n");
2986         seq_puts(s, "Index\t");
2987
2988         for (i = 0; i < table->mem_table.ways; i++) {
2989                 mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
2990                                                         struct npc_exact_table_entry, list);
2991
2992                 seq_printf(s, "Way-%d\t\t\t\t\t", i);
2993         }
2994
2995         seq_puts(s, "\n");
2996         for (i = 0; i < table->mem_table.ways; i++)
2997                 seq_puts(s, "\tChan  MAC                     \t");
2998
2999         seq_puts(s, "\n\n");
3000
3001         /* Print mem table entries */
3002         for (i = 0; i < table->mem_table.depth; i++) {
3003                 bitmap = 0;
3004                 for (j = 0; j < table->mem_table.ways; j++) {
3005                         if (!mem_entry[j])
3006                                 continue;
3007
3008                         if (mem_entry[j]->index != i)
3009                                 continue;
3010
3011                         bitmap |= BIT(j);
3012                 }
3013
3014                 /* No valid entries */
3015                 if (!bitmap)
3016                         continue;
3017
3018                 seq_printf(s, "%d\t", i);
3019                 for (j = 0; j < table->mem_table.ways; j++) {
3020                         if (!(bitmap & BIT(j))) {
3021                                 seq_puts(s, "nil\t\t\t\t\t");
3022                                 continue;
3023                         }
3024
3025                         seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
3026                                    mem_entry[j]->mac);
3027                         mem_entry[j] = list_next_entry(mem_entry[j], list);
3028                 }
3029                 seq_puts(s, "\n");
3030         }
3031
3032 dump_cam_table:
3033
3034         if (!table->cam_tbl_entry_cnt)
3035                 goto done;
3036
3037         seq_puts(s, "\n\tExact Match CAM Table\n");
3038         seq_puts(s, "index\tchan\tMAC\n");
3039
3040         /* Traverse cam table entries */
3041         list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
3042                 seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
3043                            cam_entry->mac);
3044         }
3045
3046 done:
3047         mutex_unlock(&table->lock);
3048         return 0;
3049 }
3050
3051 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3052
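/* Show exact match feature status, per-rule MAC filter counts and
 * MEM/CAM table geometry.
 */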
3053 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3054 {
3055         struct npc_exact_table *table;
3056         struct rvu *rvu = s->private;
3057         int i;
3058
3059         table = rvu->hw->table;
3060
3061         seq_puts(s, "\n\tExact Table Info\n");
3062         seq_printf(s, "Exact Match Feature : %s\n",
3063                    rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disabled");
3064         if (!rvu->hw->cap.npc_exact_match_enabled)
3065                 return 0;
3066
3067         seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3068         for (i = 0; i < table->num_drop_rules; i++)
3069                 seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3070
3071         seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3072         for (i = 0; i < table->num_drop_rules; i++)
3073                 seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3074
3075         seq_puts(s, "\n\tMEM Table Info\n");
3076         seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3077         seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3078         seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3079         seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3080         seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3081
3082         seq_puts(s, "\n\tCAM Table Info\n");
3083         seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3084
3085         return 0;
3086 }
3087
3088 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3089
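/* Show hit counters and enable status of the exact match drop rules */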
3090 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3091 {
3092         struct npc_exact_table *table;
3093         struct rvu *rvu = s->private;
3094         struct npc_key_field *field;
3095         u16 chan, pcifunc;
3096         int blkaddr, i;
3097         u64 cfg, cam1;
3098         char *str;
3099
3100         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3101         table = rvu->hw->table;
3102
3103         field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3104
3105         seq_puts(s, "\n\t Exact Hit on drop status\n");
3106         seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3107
3108         for (i = 0; i < table->num_drop_rules; i++) {
3109                 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3110                 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3111
3112                 /* The channel is always in keyword 0 */
3113                 cam1 = rvu_read64(rvu, blkaddr,
3114                                   NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3115                 chan = field->kw_mask[0] & cam1;
3116
3117                 str = (cfg & 1) ? "enabled" : "disabled";
3118
3119                 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3120                            rvu_read64(rvu, blkaddr,
3121                                       NPC_AF_MATCH_STATX(table->counter_idx[i])),
3122                            chan, str);
3123         }
3124
3125         return 0;
3126 }
3127
3128 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3129
3130 static void rvu_dbg_npc_init(struct rvu *rvu)
3131 {
3132         rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3133
3134         debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3135                             &rvu_dbg_npc_mcam_info_fops);
3136         debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3137                             &rvu_dbg_npc_mcam_rules_fops);
3138
3139         debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3140                             &rvu_dbg_npc_rx_miss_act_fops);
3141
3142         if (!rvu->hw->cap.npc_exact_match_enabled)
3143                 return;
3144
3145         debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3146                             &rvu_dbg_npc_exact_entries_fops);
3147
3148         debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3149                             &rvu_dbg_npc_exact_info_fops);
3150
3151         debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3152                             &rvu_dbg_npc_exact_drop_cnt_fops);
3154 }
3155
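/* Build and print FREE/BUSY status bitmaps for all CPT engines of the
 * given type (SE, IE or AE).
 */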
3156 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
3157 {
3158         struct cpt_ctx *ctx = filp->private;
3159         u64 busy_sts = 0, free_sts = 0;
3160         u32 e_min = 0, e_max = 0, e, i;
3161         u16 max_ses, max_ies, max_aes;
3162         struct rvu *rvu = ctx->rvu;
3163         int blkaddr = ctx->blkaddr;
3164         u64 reg;
3165
3166         reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3167         max_ses = reg & 0xffff;
3168         max_ies = (reg >> 16) & 0xffff;
3169         max_aes = (reg >> 32) & 0xffff;
3170
3171         switch (eng_type) {
3172         case CPT_AE_TYPE:
3173                 e_min = max_ses + max_ies;
3174                 e_max = max_ses + max_ies + max_aes;
3175                 break;
3176         case CPT_SE_TYPE:
3177                 e_min = 0;
3178                 e_max = max_ses;
3179                 break;
3180         case CPT_IE_TYPE:
3181                 e_min = max_ses;
3182                 e_max = max_ses + max_ies;
3183                 break;
3184         default:
3185                 return -EINVAL;
3186         }
3187
3188         for (e = e_min, i = 0; e < e_max; e++, i++) {
3189                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
3190                 if (reg & 0x1)
3191                         busy_sts |= 1ULL << i;
3192
3193                 if (reg & 0x2)
3194                         free_sts |= 1ULL << i;
3195         }
3196         seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
3197         seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
3198
3199         return 0;
3200 }
3201
3202 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
3203 {
3204         return cpt_eng_sts_display(filp, CPT_AE_TYPE);
3205 }
3206
3207 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3208
3209 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
3210 {
3211         return cpt_eng_sts_display(filp, CPT_SE_TYPE);
3212 }
3213
3214 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3215
3216 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
3217 {
3218         return cpt_eng_sts_display(filp, CPT_IE_TYPE);
3219 }
3220
3221 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3222
3223 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3224 {
3225         struct cpt_ctx *ctx = filp->private;
3226         u16 max_ses, max_ies, max_aes;
3227         struct rvu *rvu = ctx->rvu;
3228         int blkaddr = ctx->blkaddr;
3229         u32 e_max, e;
3230         u64 reg;
3231
3232         reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3233         max_ses = reg & 0xffff;
3234         max_ies = (reg >> 16) & 0xffff;
3235         max_aes = (reg >> 32) & 0xffff;
3236
3237         e_max = max_ses + max_ies + max_aes;
3238
3239         seq_puts(filp, "===========================================\n");
3240         for (e = 0; e < e_max; e++) {
3241                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3242                 seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
3243                            reg & 0xff);
3244                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3245                 seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
3246                            reg);
3247                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3248                 seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
3249                            reg);
3250                 seq_puts(filp, "===========================================\n");
3251         }
3252         return 0;
3253 }
3254
3255 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3256
3257 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3258 {
3259         struct cpt_ctx *ctx = filp->private;
3260         int blkaddr = ctx->blkaddr;
3261         struct rvu *rvu = ctx->rvu;
3262         struct rvu_block *block;
3263         struct rvu_hwinfo *hw;
3264         u64 reg;
3265         u32 lf;
3266
3267         hw = rvu->hw;
3268         block = &hw->block[blkaddr];
3269         if (!block->lf.bmap)
3270                 return -ENODEV;
3271
3272         seq_puts(filp, "===========================================\n");
3273         for (lf = 0; lf < block->lf.max; lf++) {
3274                 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3275                 seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
3276                 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3277                 seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
3278                 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3279                 seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
3280                 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3281                                 (lf << block->lfshift));
3282                 seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
3283                 seq_puts(filp, "===========================================\n");
3284         }
3285         return 0;
3286 }
3287
3288 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3289
3290 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
3291 {
3292         struct cpt_ctx *ctx = filp->private;
3293         struct rvu *rvu = ctx->rvu;
3294         int blkaddr = ctx->blkaddr;
3295         u64 reg0, reg1;
3296
3297         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
3298         reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
3299         seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
3300         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
3301         reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
3302         seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
3303         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
3304         seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
3305         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
3306         seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
3307         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
3308         seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
3309         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
3310         seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
3311
3312         return 0;
3313 }
3314
3315 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3316
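/* Dump CPT performance counters */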
3317 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
3318 {
3319         struct cpt_ctx *ctx = filp->private;
3320         struct rvu *rvu = ctx->rvu;
3321         int blkaddr = ctx->blkaddr;
3322         u64 reg;
3323
3324         reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
3325         seq_printf(filp, "CPT instruction requests   %llu\n", reg);
3326         reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
3327         seq_printf(filp, "CPT instruction latency    %llu\n", reg);
3328         reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
3329         seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
3330         reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
3331         seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
3332         reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
3333         seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
3334         reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
3335         seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
3336         reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
3337         seq_printf(filp, "CPT clock count pc         %llu\n", reg);
3338
3339         return 0;
3340 }
3341
3342 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3343
3344 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3345 {
3346         struct cpt_ctx *ctx;
3347
3348         if (!is_block_implemented(rvu->hw, blkaddr))
3349                 return;
3350
3351         if (blkaddr == BLKADDR_CPT0) {
3352                 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3353                 ctx = &rvu->rvu_dbg.cpt_ctx[0];
3354                 ctx->blkaddr = BLKADDR_CPT0;
3355                 ctx->rvu = rvu;
3356         } else {
3357                 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3358                                                       rvu->rvu_dbg.root);
3359                 ctx = &rvu->rvu_dbg.cpt_ctx[1];
3360                 ctx->blkaddr = BLKADDR_CPT1;
3361                 ctx->rvu = rvu;
3362         }
3363
3364         debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3365                             &rvu_dbg_cpt_pc_fops);
3366         debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3367                             &rvu_dbg_cpt_ae_sts_fops);
3368         debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3369                             &rvu_dbg_cpt_se_sts_fops);
3370         debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3371                             &rvu_dbg_cpt_ie_sts_fops);
3372         debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3373                             &rvu_dbg_cpt_engines_info_fops);
3374         debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3375                             &rvu_dbg_cpt_lfs_info_fops);
3376         debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3377                             &rvu_dbg_cpt_err_info_fops);
3378 }
3379
3380 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
3381 {
3382         if (!is_rvu_otx2(rvu))
3383                 return "cn10k";
3384         else
3385                 return "octeontx2";
3386 }
3387
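/* Create the debugfs root directory and all block sub-directories */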
3388 void rvu_dbg_init(struct rvu *rvu)
3389 {
3390         rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3391
3392         debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3393                             &rvu_dbg_rsrc_status_fops);
3394
3395         if (!is_rvu_otx2(rvu))
3396                 debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3397                                     rvu, &rvu_dbg_lmtst_map_table_fops);
3398
3399         if (!cgx_get_cgxcnt_max())
3400                 goto create;
3401
3402         if (is_rvu_otx2(rvu))
3403                 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3404                                     rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3405         else
3406                 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3407                                     rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3408
3409 create:
3410         rvu_dbg_npa_init(rvu);
3411         rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3412
3413         rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3414         rvu_dbg_cgx_init(rvu);
3415         rvu_dbg_npc_init(rvu);
3416         rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3417         rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3418         rvu_dbg_mcs_init(rvu);
3419 }
3420
3421 void rvu_dbg_exit(struct rvu *rvu)
3422 {
3423         debugfs_remove_recursive(rvu->rvu_dbg.root);
3424 }
3425
3426 #endif /* CONFIG_DEBUG_FS */