octeontx2-af: CN10KB: Add USGMII LMAC mode
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME        "Marvell-CGX/RPM"
#define DRV_STRING      "Marvell CGX/RPM Driver"

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
        [CGX_LINK_NONE] = 0,
        [CGX_LINK_10M] = 10,
        [CGX_LINK_100M] = 100,
        [CGX_LINK_1G] = 1000,
        [CGX_LINK_2HG] = 2500,
        [CGX_LINK_5G] = 5000,
        [CGX_LINK_10G] = 10000,
        [CGX_LINK_20G] = 20000,
        [CGX_LINK_25G] = 25000,
        [CGX_LINK_40G] = 40000,
        [CGX_LINK_50G] = 50000,
        [CGX_LINK_80G] = 80000,
        [CGX_LINK_100G] = 100000,
};

/* Convert firmware lmac type encoding to string */
static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
        [LMAC_MODE_SGMII] = "SGMII",
        [LMAC_MODE_XAUI] = "XAUI",
        [LMAC_MODE_RXAUI] = "RXAUI",
        [LMAC_MODE_10G_R] = "10G_R",
        [LMAC_MODE_40G_R] = "40G_R",
        [LMAC_MODE_QSGMII] = "QSGMII",
        [LMAC_MODE_25G_R] = "25G_R",
        [LMAC_MODE_50G_R] = "50G_R",
        [LMAC_MODE_100G_R] = "100G_R",
        [LMAC_MODE_USXGMII] = "USXGMII",
        [LMAC_MODE_USGMII] = "USGMII",
};

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
        { 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
        struct cgx *cgx = cgxd;

        return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
               (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
}

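/* Check that the LMAC index is in range and enabled in this CGX's LMAC
 * bitmap; used as the gate before touching per-LMAC CSRs.
 */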
bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
        if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
                return false;
        return test_bit(lmac_id, &cgx->lmac_bmap);
}

/* Helper function to get the sequential index of an enabled LMAC
 * within a CGX
 */
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
        int tmp, id = 0;

        for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
                if (tmp == lmac_id)
                        break;
                id++;
        }

        return id;
}

struct mac_ops *get_mac_ops(void *cgxd)
{
        if (!cgxd)
                return cgxd;

        return ((struct cgx *)cgxd)->mac_ops;
}

void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
        writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
               offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
        return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
                     offset);
}

struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
        if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
                return NULL;

        return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgxcnt_max(void)
{
        struct cgx *cgx_dev;
        int idmax = -ENODEV;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
                if (cgx_dev->cgx_id > idmax)
                        idmax = cgx_dev->cgx_id;

        if (idmax < 0)
                return 0;

        return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
        struct cgx *cgx_dev;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
                if (cgx_dev->cgx_id == cgx_id)
                        return cgx_dev;
        }
        return NULL;
}

void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

        /* Software must not access disabled LMAC registers */
        if (!is_lmac_valid(cgx_dev, lmac_id))
                return;
        cgx_write(cgx_dev, lmac_id, offset, val);
}

u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

        /* Software must not access disabled LMAC registers */
        if (!is_lmac_valid(cgx_dev, lmac_id))
                return 0;

        return cgx_read(cgx_dev, lmac_id, offset);
}

int cgx_get_cgxid(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -EINVAL;

        return cgx->cgx_id;
}

u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

        return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

/* Ensure the required lock for the event queue (where asynchronous events
 * are posted) is acquired before calling this API. Otherwise an asynchronous
 * event (with the latest link status) can reach the destination before this
 * function returns and could make the link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
                      struct cgx_link_user_info *linfo)
{
        struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

        if (!lmac)
                return -ENODEV;

        *linfo = lmac->link_info;
        return 0;
}

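/* Install the interface MAC address in the LMAC's reserved DMAC CAM slot
 * (index 0 of its region) and enable CAM filtering along with broadcast
 * and multicast acceptance.
 */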
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index, id;
        u64 cfg;

        /* access mac_ops to know csr_offset */
        mac_ops = cgx_dev->mac_ops;

        /* copy 6 bytes of the MAC address into the CAM entry */
        cfg = ether_addr_to_u64(mac_addr);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max;

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
                  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
                CGX_DMAC_MCAST_MODE);
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}

u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
{
        struct mac_ops *mac_ops;
        struct cgx *cgx = cgxd;

        if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
                return 0;

        /* Get mac_ops to know csr offset */
        mac_ops = cgx->mac_ops;

        return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
}

u64 cgx_read_dmac_entry(void *cgxd, int index)
{
        struct mac_ops *mac_ops;
        struct cgx *cgx;

        if (!cgxd)
                return 0;

        cgx = cgxd;
        mac_ops = cgx->mac_ops;
        return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
}

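/* Allocate a free slot in the LMAC's DMAC filter region and program the
 * given address into it. Multicast addresses switch the LMAC to CAM-based
 * multicast filtering. Returns the slot index on success or a negative
 * errno.
 */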
int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index, idx;
        u64 cfg = 0;
        int id;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Get available index where entry is to be installed */
        idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
        if (idx < 0)
                return idx;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + idx;

        cfg = ether_addr_to_u64(mac_addr);
        cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
        cfg |= ((u64)lmac_id << 49);
        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);

        if (is_multicast_ether_addr(mac_addr)) {
                cfg &= ~GENMASK_ULL(2, 1);
                cfg |= CGX_DMAC_MCAST_MODE_CAM;
                lmac->mcast_filters_count++;
        } else if (!lmac->mcast_filters_count) {
                cfg |= CGX_DMAC_MCAST_MODE;
        }

        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return idx;
}

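/* Clear the LMAC's reserved CAM entry (index 0) and restore
 * CGXX_CMRX_RX_DMAC_CTL0 to its cgx_lmac_init() defaults.
 */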
int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        u8 index = 0, id;
        u64 cfg;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Restore index 0 to its default init value as done during
         * cgx_lmac_init
         */
        set_bit(0, lmac->mac_to_index_bmap.bmap);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;
        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

        /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg &= ~CGX_DMAC_CAM_ACCEPT;
        cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}

/* Allows the caller to change the MAC address associated with an index
 * in the DMAC filter table, including index 0, which is reserved for the
 * interface MAC address
 */
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct mac_ops *mac_ops;
        struct lmac *lmac;
        u64 cfg;
        int id;

        lmac = lmac_pdata(lmac_id, cgx_dev);
        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Validate the index */
        if (index >= lmac->mac_to_index_bmap.max)
                return -EINVAL;

        /* ensure index is already set */
        if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
                return -EINVAL;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;

        cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
        cfg &= ~CGX_RX_DMAC_ADR_MASK;
        cfg |= ether_addr_to_u64(mac_addr);

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
        return 0;
}

int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        u8 mac[ETH_ALEN];
        u64 cfg;
        int id;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Validate the index */
        if (index >= lmac->mac_to_index_bmap.max)
                return -EINVAL;

        /* Skip deletion for reserved index, i.e. index 0 */
        if (index == 0)
                return 0;

        rvu_free_rsrc(&lmac->mac_to_index_bmap, index);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;

        /* Read MAC address to check whether it is ucast or mcast */
        cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));

        u64_to_ether_addr(cfg, mac);
        if (is_multicast_ether_addr(mac))
                lmac->mcast_filters_count--;

        if (!lmac->mcast_filters_count) {
                cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~GENMASK_ULL(2, 1);
                cfg |= CGX_DMAC_MCAST_MODE;
                cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
        }

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

        return 0;
}

int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);

        if (lmac)
                return lmac->mac_to_index_bmap.max;

        return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index;
        u64 cfg;
        int id;

        /* Guard against an invalid LMAC, as the sibling accessors do */
        if (!lmac)
                return 0;

        mac_ops = cgx_dev->mac_ops;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max;

        cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
        return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
        return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

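/* Per-LMAC share of the MAC's receive FIFO: a single LMAC gets the whole
 * FIFO, two LMACs split it in half, with three LMACs LMAC0 keeps half and
 * the rest get a quarter each, and four LMACs get a quarter each.
 */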
static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u8 num_lmacs;
        u32 fifo_len;

        fifo_len = cgx->mac_ops->fifo_len;
        num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);

        switch (num_lmacs) {
        case 1:
                return fifo_len;
        case 2:
                return fifo_len / 2;
        case 3:
                /* LMAC0 gets half of the FIFO, the rest get 1/4th each */
                if (lmac_id == 0)
                        return fifo_len / 2;
                return fifo_len / 4;
        case 4:
        default:
                return fifo_len / 4;
        }
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        lmac = lmac_pdata(lmac_id, cgx);
        if (lmac->lmac_type == LMAC_MODE_SGMII ||
            lmac->lmac_type == LMAC_MODE_QSGMII) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
                if (enable)
                        cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
                else
                        cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
                cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
                if (enable)
                        cfg |= CGXX_SPUX_CONTROL1_LBK;
                else
                        cfg &= ~CGXX_SPUX_CONTROL1_LBK;
                cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
        }
        return 0;
}

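/* Promiscuous mode is entered by dropping CAM-based acceptance and
 * clearing the enable bit of every CAM entry owned by this LMAC; on exit,
 * CAM accept is restored and entries holding a non-zero address are
 * re-enabled.
 */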
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
        struct cgx *cgx = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx);
        struct mac_ops *mac_ops;
        u16 max_dmac;
        int index, i;
        u64 cfg = 0;
        int id;

        if (!cgx || !lmac)
                return;

        /* Only dereference the LMAC after it has been validated */
        max_dmac = lmac->mac_to_index_bmap.max;

        id = get_sequence_id_of_lmac(cgx, lmac_id);

        mac_ops = cgx->mac_ops;
        if (enable) {
                /* Enable promiscuous mode on LMAC */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~CGX_DMAC_CAM_ACCEPT;
                cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

                for (i = 0; i < max_dmac; i++) {
                        index = id * max_dmac + i;
                        cfg = cgx_read(cgx, 0,
                                       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
                        cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
                        cgx_write(cgx, 0,
                                  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
                }
        } else {
                /* Disable promiscuous mode */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
                for (i = 0; i < max_dmac; i++) {
                        index = id * max_dmac + i;
                        cfg = cgx_read(cgx, 0,
                                       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
                        if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
                                cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
                                cgx_write(cgx, 0,
                                          (CGXX_CMRX_RX_DMAC_CAM0 +
                                           index * 0x8),
                                          cfg);
                        }
                }
        }
}

static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
                                         u8 *tx_pause, u8 *rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (is_dev_rpm(cgx))
                return 0;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
        return 0;
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u8 rx_pause, tx_pause;
        bool is_pfc_enabled;
        struct lmac *lmac;
        u64 cfg;

        if (!cgx)
                return;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return;

        /* Pause frames are not enabled, just return */
        if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
                return;

        cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
        is_pfc_enabled = !rx_pause;

        if (enable) {
                if (!is_pfc_enabled) {
                        cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                        cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                        cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
                } else {
                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
                        cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
                }
        } else {
                if (!is_pfc_enabled) {
                        cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                        cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
                } else {
                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
                        cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
                }
        }
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;
        *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
        return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;
        *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
        return 0;
}

u64 cgx_features_get(void *cgxd)
{
        return ((struct cgx *)cgxd)->hw_features;
}

static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
        if (!linfo->fec)
                return 0;

        switch (linfo->lmac_type_id) {
        case LMAC_MODE_SGMII:
        case LMAC_MODE_XAUI:
        case LMAC_MODE_RXAUI:
        case LMAC_MODE_QSGMII:
                return 0;
        case LMAC_MODE_10G_R:
        case LMAC_MODE_25G_R:
        case LMAC_MODE_100G_R:
        case LMAC_MODE_USXGMII:
                return 1;
        case LMAC_MODE_40G_R:
                return 4;
        case LMAC_MODE_50G_R:
                if (linfo->fec == OTX2_FEC_BASER)
                        return 2;
                else
                        return 1;
        default:
                return 0;
        }
}

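/* Sum the per-lane corrected/uncorrected FEC block counters; the number of
 * lanes read depends on the LMAC mode (see cgx_set_fec_stats_count()) and
 * the counter pair depends on whether BASE-R or RS FEC is active.
 */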
int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
        int stats, fec_stats_count = 0;
        int corr_reg, uncorr_reg;
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
                return 0;

        fec_stats_count =
                cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
        if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
                corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
                uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
        } else {
                corr_reg = CGXX_SPUX_RSFEC_CORR;
                uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
        }
        for (stats = 0; stats < fec_stats_count; stats++) {
                rsp->fec_corr_blks +=
                        cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
                rsp->fec_uncorr_blks +=
                        cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
        }
        return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        if (enable)
                cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
        else
                cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
        cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return 0;
}

int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg, last;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        last = cfg;
        if (enable)
                cfg |= DATA_PKT_TX_EN;
        else
                cfg &= ~DATA_PKT_TX_EN;

        if (cfg != last)
                cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return !!(last & DATA_PKT_TX_EN);
}

static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
                                     u8 tx_pause, u8 rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (is_dev_rpm(cgx))
                return 0;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
        cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
        cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

        cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
        if (tx_pause) {
                cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
        } else {
                cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
                cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
        }
        cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
        return 0;
}

static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return;

        if (enable) {
                /* Set pause time and interval */
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));

                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);

                cfg = cgx_read(cgx, lmac_id,
                               CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));
        }

        /* ALL pause frames received are completely ignored */
        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

        cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
        cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
        cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

        /* Disable pause frames transmission */
        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
        cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

        cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
        cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
        cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
        cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);

        /* Disable all PFC classes by default */
        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
        cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
}

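/* Track per-PFVF pause requests in the LMAC's rx/tx flow-control bitmaps
 * and refuse to disable flow control while other PFVFs still depend on it.
 */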
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
                       int pfvf_idx)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        if (!rx_pause)
                clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
        else
                set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);

        if (!tx_pause)
                clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
        else
                set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);

        /* check if other pfvfs are using flow control */
        if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
                dev_warn(&cgx->pdev->dev,
                         "Receive Flow control disable not permitted as it is used by other PFVFs\n");
                return -EPERM;
        }

        if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
                dev_warn(&cgx->pdev->dev,
                         "Transmit Flow control disable not permitted as it is used by other PFVFs\n");
                return -EPERM;
        }

        return 0;
}

int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
                        u8 rx_pause, u16 pfc_en)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        /* Return if no traffic classes are requested */
        if (tx_pause && !pfc_en)
                return 0;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
        pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);

        if (rx_pause) {
                cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
                        CGXX_SMUX_CBFC_CTL_BCK_EN |
                        CGXX_SMUX_CBFC_CTL_DRP_EN);
        } else {
                cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
                        CGXX_SMUX_CBFC_CTL_BCK_EN |
                        CGXX_SMUX_CBFC_CTL_DRP_EN);
        }

        if (tx_pause) {
                cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
                cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
        } else {
                cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
                cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
        }

        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);

        /* Write source MAC address which will be filled into the PFC packet */
        cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
        cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);

        return 0;
}

int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
                             u8 *rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);

        *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
        *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);

        return 0;
}

void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx)
                return;

        if (enable) {
                /* Enable inbound PTP timestamping */
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        } else {
                /* Disable inbound PTP timestamping */
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        }
}

/* CGX Firmware interface low level support */
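/* Command/response handshake with firmware: the caller serializes on the
 * LMAC's cmd_lock, verifies the command register is owned by the non-secure
 * side (CMDREG_OWN == CGX_CMD_OWN_NS), hands ownership to firmware along
 * with the request, and sleeps on wq_cmd_cmplt until
 * cgx_fwi_event_handler() posts the response in lmac->resp and clears
 * cmd_pend, or until CGX_CMD_TIMEOUT expires.
 */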
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
        struct cgx *cgx = lmac->cgx;
        struct device *dev;
        int err = 0;
        u64 cmd;

        /* Ensure no other command is in progress */
        err = mutex_lock_interruptible(&lmac->cmd_lock);
        if (err)
                return err;

        /* Ensure command register is free */
        cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
        if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
                err = -EBUSY;
                goto unlock;
        }

        /* Update ownership in command request */
        req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

        /* Mark this lmac as pending, before we start */
        lmac->cmd_pend = true;

        /* Start command in hardware */
        cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

        /* Ensure command is completed without errors */
        if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
                                msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
                dev = &cgx->pdev->dev;
                dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
                        cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
                err = LMAC_AF_ERR_CMD_TIMEOUT;
                goto unlock;
        }

        /* we have a valid command response */
        smp_rmb(); /* Ensure the latest updates are visible */
        *resp = lmac->resp;

unlock:
        mutex_unlock(&lmac->cmd_lock);

        return err;
}

int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
        struct lmac *lmac;
        int err;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        err = cgx_fwi_cmd_send(req, resp, lmac);

        /* Check for valid response */
        if (!err) {
                if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
                        return -EIO;
                else
                        return 0;
        }

        return err;
}

static int cgx_link_usertable_index_map(int speed)
{
        switch (speed) {
        case SPEED_10:
                return CGX_LINK_10M;
        case SPEED_100:
                return CGX_LINK_100M;
        case SPEED_1000:
                return CGX_LINK_1G;
        case SPEED_2500:
                return CGX_LINK_2HG;
        case SPEED_5000:
                return CGX_LINK_5G;
        case SPEED_10000:
                return CGX_LINK_10G;
        case SPEED_20000:
                return CGX_LINK_20G;
        case SPEED_25000:
                return CGX_LINK_25G;
        case SPEED_40000:
                return CGX_LINK_40G;
        case SPEED_50000:
                return CGX_LINK_50G;
        case 80000: /* ethtool has no SPEED_80000 define */
                return CGX_LINK_80G;
        case SPEED_100000:
                return CGX_LINK_100G;
        case SPEED_UNKNOWN:
                return CGX_LINK_NONE;
        }
        return CGX_LINK_NONE;
}

static void set_mod_args(struct cgx_set_link_mode_args *args,
                         u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
        /* Fill in default values in case the user did not pass
         * valid parameters
         */
        if (args->duplex == DUPLEX_UNKNOWN)
                args->duplex = duplex;
        if (args->speed == SPEED_UNKNOWN)
                args->speed = speed;
        if (args->an == AUTONEG_UNKNOWN)
                args->an = autoneg;
        args->mode = mode;
        args->ports = 0;
}

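/* Translate a single ethtool link-mode bit into default speed/duplex/AN
 * values plus a CGX_MODE_* bit understood by firmware.
 */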
static void otx2_map_ethtool_link_modes(u64 bitmask,
                                        struct cgx_set_link_mode_args *args)
{
        switch (bitmask) {
        case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
                set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
                set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
                set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
                set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
                set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
                set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
                set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
                break;
        case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
                set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
                break;
        case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
                set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
                break;
        case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
                set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
                break;
        case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
                set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
                break;
        case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
                set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
                break;
        case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
                set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
                break;
        case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
                set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
                break;
        case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
                set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
                break;
        case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
                set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
                break;
        case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
                set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
                break;
        case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
                set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
                break;
        case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
                set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
                break;
        case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
                set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
                break;
        case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
                set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
                break;
        case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
                set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
                break;
        case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
                set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
                break;
        case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
                set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
                break;
        case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
                set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
                break;
        case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
                set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
                break;
        default:
                set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
                break;
        }
}

static inline void link_status_user_format(u64 lstat,
                                           struct cgx_link_user_info *linfo,
                                           struct cgx *cgx, u8 lmac_id)
{
        const char *lmac_string;

        linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
        linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
        linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
        linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
        linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
        linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);

        if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
                dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d\n",
                        linfo->lmac_type_id, cgx->cgx_id, lmac_id);
                strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
                return;
        }

        lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
        strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
                                           struct lmac *lmac)
{
        struct cgx_link_user_info *linfo;
        struct cgx *cgx = lmac->cgx;
        struct cgx_link_event event;
        struct device *dev;
        int err_type;

        dev = &cgx->pdev->dev;

        link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
        err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

        event.cgx_id = cgx->cgx_id;
        event.lmac_id = lmac->lmac_id;

        /* update the local copy of link status */
        lmac->link_info = event.link_uinfo;
        linfo = &lmac->link_info;

        if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
                return;

        /* Ensure callback doesn't get unregistered until we finish it */
        spin_lock(&lmac->event_cb_lock);

        if (!lmac->event_cb.notify_link_chg) {
                dev_dbg(dev, "cgx port %d:%d Link change handler null",
                        cgx->cgx_id, lmac->lmac_id);
                if (err_type != CGX_ERR_NONE) {
                        dev_err(dev, "cgx port %d:%d Link error %d\n",
                                cgx->cgx_id, lmac->lmac_id, err_type);
                }
                dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
                         cgx->cgx_id, lmac->lmac_id,
                         linfo->link_up ? "UP" : "DOWN", linfo->speed);
                goto err;
        }

        if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
                dev_err(dev, "event notification failure\n");
err:
        spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
        u8 id;

        id = FIELD_GET(EVTREG_ID, event);
        if (id == CGX_CMD_LINK_BRING_UP ||
            id == CGX_CMD_LINK_BRING_DOWN ||
            id == CGX_CMD_MODE_CHANGE)
                return true;
        else
                return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
        if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
                return true;
        else
                return false;
}

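/* Firmware interrupt handler: reads CGX_EVENT_REG and either completes a
 * pending command (storing the response and waking the waiter) or
 * dispatches an asynchronous link-change event, then acks both the event
 * register and the interrupt so firmware can post the next one.
 */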
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
        u64 event, offset, clear_bit;
        struct lmac *lmac = data;
        struct cgx *cgx;

        cgx = lmac->cgx;

        /* Clear SW_INT for RPM and CMR_INT for CGX */
        offset     = cgx->mac_ops->int_register;
        clear_bit  = cgx->mac_ops->int_ena_bit;

        event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

        if (!FIELD_GET(EVTREG_ACK, event))
                return IRQ_NONE;

        switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
        case CGX_EVT_CMD_RESP:
                /* Copy the response. Since only one command is active at a
                 * time, there is no way a response can get overwritten
                 */
                lmac->resp = event;
                /* Ensure response is updated before thread context starts */
                smp_wmb();

                /* There won't be separate events for link change initiated
                 * from software; hence report the command responses as events
                 */
                if (cgx_cmdresp_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);

                /* Release thread waiting for completion */
                lmac->cmd_pend = false;
                wake_up_interruptible(&lmac->wq_cmd_cmplt);
                break;
        case CGX_EVT_ASYNC:
                if (cgx_event_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);
                break;
        }

        /* Any new event or command response will be posted by firmware
         * only after the current status is acked.
         * Ack the interrupt register as well.
         */
        cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
        cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);

        return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        lmac->event_cb = *cb;

        return 0;
}

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
        struct lmac *lmac;
        unsigned long flags;
        struct cgx *cgx = cgxd;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        spin_lock_irqsave(&lmac->event_cb_lock, flags);
        lmac->event_cb.notify_link_chg = NULL;
        lmac->event_cb.data = NULL;
        spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

        return 0;
}

int cgx_get_fwdata_base(u64 *base)
{
        u64 req = 0, resp;
        struct cgx *cgx;
        int first_lmac;
        int err;

        cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
        if (!cgx)
                return -ENXIO;

        first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
        if (!err)
                *base = FIELD_GET(RESP_FWD_BASE, resp);

        return err;
}

int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
                      int cgx_id, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 req = 0, resp;

        if (!cgx)
                return -ENODEV;

        if (args.mode)
                otx2_map_ethtool_link_modes(args.mode, &args);
        if (!args.speed && args.duplex && !args.an)
                return -EINVAL;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
        req = FIELD_SET(CMDMODECHANGE_SPEED,
                        cgx_link_usertable_index_map(args.speed), req);
        req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
        req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
        req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
        req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);

        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}
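
/* Request a FEC mode change from firmware and cache the mode it actually
 * applied; returns the new FEC setting or a negative errno.
 */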
int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
{
        u64 req = 0, resp;
        struct cgx *cgx;
        int err = 0;

        cgx = cgx_get_pdata(cgx_id);
        if (!cgx)
                return -ENXIO;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
        req = FIELD_SET(CMDSETFEC, fec, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
        if (err)
                return err;

        cgx->lmac_idmap[lmac_id]->link_info.fec =
                        FIELD_GET(RESP_LINKSTAT_FEC, resp);
        return cgx->lmac_idmap[lmac_id]->link_info.fec;
}

int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 req = 0, resp;

        if (!cgx)
                return -ENODEV;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
        u64 req = 0;
        u64 resp;

        if (enable) {
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
                /* On CN10K, firmware offloads link bring-up/down operations
                 * to the ECP. On OcteonTx2, link operations are handled by
                 * the firmware itself, which can cause mbox errors, so cap
                 * the time firmware polls for the link at 1000 ms.
                 */
                if (!is_dev_rpm(cgx))
                        req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);

        } else {
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
        }
        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
        int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
        u64 req = 0;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
        return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
        struct device *dev = &cgx->pdev->dev;
        int major_ver, minor_ver;
        u64 resp;
        int err;

        if (!cgx->lmac_count)
                return 0;

        err = cgx_fwi_read_version(&resp, cgx);
        if (err)
                return err;

        major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
        minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
        dev_dbg(dev, "Firmware command interface version = %d.%d\n",
                major_ver, minor_ver);
        if (major_ver != CGX_FIRMWARE_MAJOR_VER)
                return -EIO;
        else
                return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
        struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
        struct device *dev = &cgx->pdev->dev;
        int i, err;

        /* Do link up for all the enabled LMACs */
        for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
                err = cgx_fwi_link_change(cgx, i, true);
                if (err)
                        dev_info(dev, "cgx port %d:%d Link up command failed\n",
                                 cgx->cgx_id, i);
        }
}

int cgx_lmac_linkup_start(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

        return 0;
}

int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        /* Reset PFC-related CSRs */
        cfg = 0xff;
        cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg);

        if (pf_req_flr)
                cgx_lmac_internal_loopback(cgxd, lmac_id, false);
        return 0;
}

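/* Request (or free, when req_free is set) the per-LMAC firmware-interface
 * IRQ and enable it in the MAC's interrupt-enable CSR.
 */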
1567 static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
1568                                    int cnt, bool req_free)
1569 {
1570         struct mac_ops *mac_ops = cgx->mac_ops;
1571         u64 offset, ena_bit;
1572         unsigned int irq;
1573         int err;
1574
1575         irq      = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
1576                                   cnt * mac_ops->irq_offset);
1577         offset   = mac_ops->int_set_reg;
1578         ena_bit  = mac_ops->int_ena_bit;
1579
1580         if (req_free) {
1581                 free_irq(irq, lmac);
1582                 return 0;
1583         }
1584
1585         err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
1586         if (err)
1587                 return err;
1588
1589         /* Enable interrupt */
1590         cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
1591         return 0;
1592 }
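
/* Vector math example: with the cgx_mac_ops values below
 * (lmac_fwi = CGX_LMAC_FWI, irq_offset = 9), the firmware interrupt
 * for LMAC n is MSI-X vector CGX_LMAC_FWI + n * 9; RPM devices supply
 * their own lmac_fwi/irq_offset pair through rpm_get_mac_ops().
 */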

int cgx_get_nr_lmacs(void *cgxd)
{
        struct cgx *cgx = cgxd;

        return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
}

u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
{
        struct cgx *cgx = cgxd;

        return cgx->lmac_idmap[lmac_index]->lmac_id;
}

unsigned long cgx_get_lmac_bmap(void *cgxd)
{
        struct cgx *cgx = cgxd;

        return cgx->lmac_bmap;
}

static int cgx_lmac_init(struct cgx *cgx)
{
        struct lmac *lmac;
        u64 lmac_list;
        int i, err;

        /* lmac_list specifies which LMACs are enabled: when bit n is
         * set to 1, LMAC[n] is enabled (e.g. 0x5 enables LMAC0 and
         * LMAC2).  RPM2 blocks support more LMACs, hence the wider
         * 0xFF mask.
         */
        if (cgx->mac_ops->non_contiguous_serdes_lane) {
                if (is_dev_rpm2(cgx))
                        lmac_list =
                                cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
                else
                        lmac_list =
                                cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
        }

        if (cgx->lmac_count > cgx->max_lmac_per_mac)
                cgx->lmac_count = cgx->max_lmac_per_mac;

        for (i = 0; i < cgx->lmac_count; i++) {
                lmac = kzalloc(sizeof(*lmac), GFP_KERNEL);
                if (!lmac)
                        return -ENOMEM;
                lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
                if (!lmac->name) {
                        err = -ENOMEM;
                        goto err_lmac_free;
                }
                sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
                if (cgx->mac_ops->non_contiguous_serdes_lane) {
                        /* Consume the lowest set bit of lmac_list */
                        lmac->lmac_id = __ffs64(lmac_list);
                        lmac_list &= ~BIT_ULL(lmac->lmac_id);
                } else {
                        lmac->lmac_id = i;
                }

                lmac->cgx = cgx;
                lmac->mac_to_index_bmap.max =
                                cgx->mac_ops->dmac_filter_count /
                                cgx->lmac_count;

                err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
                if (err)
                        goto err_name_free;

                /* Reserve first entry for default MAC address */
                set_bit(0, lmac->mac_to_index_bmap.bmap);

                lmac->rx_fc_pfvf_bmap.max = 128;
                err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
                if (err)
                        goto err_dmac_bmap_free;

                lmac->tx_fc_pfvf_bmap.max = 128;
                err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
                if (err)
                        goto err_rx_fc_bmap_free;

                init_waitqueue_head(&lmac->wq_cmd_cmplt);
                mutex_init(&lmac->cmd_lock);
                spin_lock_init(&lmac->event_cb_lock);
                err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
                if (err)
                        goto err_bitmap_free;

                /* Add reference */
                cgx->lmac_idmap[lmac->lmac_id] = lmac;
                set_bit(lmac->lmac_id, &cgx->lmac_bmap);
                cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
                lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
        }

        return cgx_lmac_verify_fwi_version(cgx);

err_bitmap_free:
        rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
err_rx_fc_bmap_free:
        rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
err_dmac_bmap_free:
        rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
        kfree(lmac->name);
err_lmac_free:
        kfree(lmac);
        return err;
}
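
/* DMAC filter accounting example (illustrative numbers only): on CGX,
 * dmac_filter_count is 32, so a MAC block with four enabled LMACs gives
 * each LMAC a mac_to_index_bmap of 32 / 4 = 8 entries, with entry 0
 * reserved for the default MAC address.
 */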

static int cgx_lmac_exit(struct cgx *cgx)
{
        struct lmac *lmac;
        int i;

        if (cgx->cgx_cmd_workq) {
                destroy_workqueue(cgx->cgx_cmd_workq);
                cgx->cgx_cmd_workq = NULL;
        }

        /* Free all lmac related resources */
        for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
                lmac = cgx->lmac_idmap[i];
                if (!lmac)
                        continue;
                cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
                cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
                kfree(lmac->mac_to_index_bmap.bmap);
                kfree(lmac->rx_fc_pfvf_bmap.bmap);
                kfree(lmac->tx_fc_pfvf_bmap.bmap);
                kfree(lmac->name);
                kfree(lmac);
        }

        return 0;
}

static void cgx_populate_features(struct cgx *cgx)
{
        u64 cfg;

        cfg = cgx_read(cgx, 0, CGX_CONST);
        cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
        cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);

        if (is_dev_rpm(cgx))
                cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
                                    RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
        else
                cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
                                    RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}
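
/* Note: the RX FIFO size and maximum LMAC count are read back from
 * CGX_CONST rather than hardcoded, so the same driver copes with parts
 * such as CN10KB that expose a different LMAC count per MAC block.
 */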

static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
{
        if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
            is_dev_rpm2(cgx))
                return 0x80;

        return 0x60;
}

static struct mac_ops cgx_mac_ops = {
        .name           =       "cgx",
        .csr_offset     =       0,
        .lmac_offset    =       18,
        .int_register   =       CGXX_CMRX_INT,
        .int_set_reg    =       CGXX_CMRX_INT_ENA_W1S,
        .irq_offset     =       9,
        .int_ena_bit    =       FW_CGX_INT,
        .lmac_fwi       =       CGX_LMAC_FWI,
        .non_contiguous_serdes_lane = false,
        .rx_stats_cnt   =       9,
        .tx_stats_cnt   =       18,
        .dmac_filter_count =    32,
        .get_nr_lmacs   =       cgx_get_nr_lmacs,
        .get_lmac_type  =       cgx_get_lmac_type,
        .lmac_fifo_len  =       cgx_get_lmac_fifo_len,
        .mac_lmac_intl_lbk =    cgx_lmac_internal_loopback,
        .mac_get_rx_stats  =    cgx_get_rx_stats,
        .mac_get_tx_stats  =    cgx_get_tx_stats,
        .get_fec_stats     =    cgx_get_fec_stats,
        .mac_enadis_rx_pause_fwding =   cgx_lmac_enadis_rx_pause_fwding,
        .mac_get_pause_frm_status =     cgx_lmac_get_pause_frm_status,
        .mac_enadis_pause_frm =         cgx_lmac_enadis_pause_frm,
        .mac_pause_frm_config =         cgx_lmac_pause_frm_config,
        .mac_enadis_ptp_config =        cgx_lmac_ptp_config,
        .mac_rx_tx_enable =             cgx_lmac_rx_tx_enable,
        .mac_tx_enable =                cgx_lmac_tx_enable,
        .pfc_config =                   cgx_lmac_pfc_config,
        .mac_get_pfc_frm_cfg   =        cgx_lmac_get_pfc_frm_cfg,
        .mac_reset   =                  cgx_lmac_reset,
};
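
/* With lmac_offset = 18, each LMAC's CSRs occupy their own
 * 256 KB (1 << 18 byte) window within the mapped BAR.  RPM devices
 * install their own ops table via rpm_get_mac_ops() in cgx_probe()
 * below instead of using this one.
 */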

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct cgx *cgx;
        int err, nvec;

        cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
        if (!cgx)
                return -ENOMEM;
        cgx->pdev = pdev;

        pci_set_drvdata(pdev, cgx);

        /* Use mac_ops to get MAC specific features */
        if (is_dev_rpm(cgx))
                cgx->mac_ops = rpm_get_mac_ops(cgx);
        else
                cgx->mac_ops = &cgx_mac_ops;

        cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        /* Map configuration registers */
        cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!cgx->reg_base) {
                dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
                err = -ENOMEM;
                goto err_release_regions;
        }

        /* Derive this block's id before it is used in log messages */
        cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
                & CGX_ID_MASK;

        cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
        if (!cgx->lmac_count) {
                dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n",
                           cgx->cgx_id);
                err = -EOPNOTSUPP;
                goto err_release_regions;
        }

        nvec = pci_msix_vec_count(cgx->pdev);
        err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
        if (err != nvec) {
                dev_err(dev, "Request for %d msix vectors failed, err %d\n",
                        nvec, err);
                goto err_release_regions;
        }

        /* Init workqueue for processing link-up requests */
        INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
        cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
        if (!cgx->cgx_cmd_workq) {
                dev_err(dev, "alloc workqueue failed for cgx cmd\n");
                err = -ENOMEM;
                goto err_free_irq_vectors;
        }

        list_add(&cgx->cgx_list, &cgx_list);

        cgx_populate_features(cgx);

        mutex_init(&cgx->lock);

        err = cgx_lmac_init(cgx);
        if (err)
                goto err_release_lmac;

        return 0;

err_release_lmac:
        cgx_lmac_exit(cgx);
        list_del(&cgx->cgx_list);
err_free_irq_vectors:
        pci_free_irq_vectors(pdev);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
        struct cgx *cgx = pci_get_drvdata(pdev);

        if (cgx) {
                cgx_lmac_exit(cgx);
                list_del(&cgx->cgx_list);
        }
        pci_free_irq_vectors(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
        .name = DRV_NAME,
        .id_table = cgx_id_table,
        .probe = cgx_probe,
        .remove = cgx_remove,
};