// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Helper functions for common, but complicated tasks.
 */
9 #include <linux/delay.h>
11 #include <mach/cvmx-regs.h>
12 #include <mach/cvmx-csr.h>
13 #include <mach/cvmx-bootmem.h>
14 #include <mach/octeon-model.h>
15 #include <mach/cvmx-fuse.h>
16 #include <mach/octeon-feature.h>
17 #include <mach/cvmx-qlm.h>
18 #include <mach/octeon_qlm.h>
19 #include <mach/cvmx-pcie.h>
20 #include <mach/cvmx-coremask.h>
22 #include <mach/cvmx-agl-defs.h>
23 #include <mach/cvmx-asxx-defs.h>
24 #include <mach/cvmx-bgxx-defs.h>
25 #include <mach/cvmx-dbg-defs.h>
26 #include <mach/cvmx-gmxx-defs.h>
27 #include <mach/cvmx-gserx-defs.h>
28 #include <mach/cvmx-ipd-defs.h>
29 #include <mach/cvmx-l2c-defs.h>
30 #include <mach/cvmx-npi-defs.h>
31 #include <mach/cvmx-pcsx-defs.h>
32 #include <mach/cvmx-pexp-defs.h>
33 #include <mach/cvmx-pki-defs.h>
34 #include <mach/cvmx-pko-defs.h>
35 #include <mach/cvmx-smix-defs.h>
36 #include <mach/cvmx-sriox-defs.h>
37 #include <mach/cvmx-helper.h>
38 #include <mach/cvmx-helper-board.h>
39 #include <mach/cvmx-helper-fdt.h>
40 #include <mach/cvmx-helper-bgx.h>
41 #include <mach/cvmx-helper-cfg.h>
42 #include <mach/cvmx-helper-ipd.h>
43 #include <mach/cvmx-helper-util.h>
44 #include <mach/cvmx-helper-pki.h>
45 #include <mach/cvmx-helper-pko.h>
46 #include <mach/cvmx-helper-pko3.h>
47 #include <mach/cvmx-global-resources.h>
48 #include <mach/cvmx-pko-internal-ports-range.h>
49 #include <mach/cvmx-pko3-queue.h>
50 #include <mach/cvmx-gmx.h>
51 #include <mach/cvmx-hwpko.h>
52 #include <mach/cvmx-ilk.h>
53 #include <mach/cvmx-ipd.h>
54 #include <mach/cvmx-pip.h>
58 * This structure specifies the interface methods used by an interface.
60 * @param mode Interface mode.
62 * @param enumerate Method the get number of interface ports.
64 * @param probe Method to probe an interface to get the number of
67 * @param enable Method to enable an interface
69 * @param link_get Method to get the state of an interface link.
71 * @param link_set Method to configure an interface link to the specified
74 * @param loopback Method to configure a port in loopback.
77 cvmx_helper_interface_mode_t mode;
78 int (*enumerate)(int xiface);
79 int (*probe)(int xiface);
80 int (*enable)(int xiface);
81 cvmx_helper_link_info_t (*link_get)(int ipd_port);
82 int (*link_set)(int ipd_port, cvmx_helper_link_info_t link_info);
83 int (*loopback)(int ipd_port, int en_in, int en_ex);
88 * This structure is used by disabled interfaces.
90 static const struct iface_ops iface_ops_dis = {
91 .mode = CVMX_HELPER_INTERFACE_MODE_DISABLED,
96 * This structure specifies the interface methods used by interfaces
99 static const struct iface_ops iface_ops_gmii = {
100 .mode = CVMX_HELPER_INTERFACE_MODE_GMII,
101 .enumerate = __cvmx_helper_rgmii_probe,
102 .probe = __cvmx_helper_rgmii_probe,
103 .enable = __cvmx_helper_rgmii_enable,
104 .link_get = __cvmx_helper_gmii_link_get,
105 .link_set = __cvmx_helper_rgmii_link_set,
106 .loopback = __cvmx_helper_rgmii_configure_loopback,
111 * This structure specifies the interface methods used by interfaces
112 * configured as rgmii.
114 static const struct iface_ops iface_ops_rgmii = {
115 .mode = CVMX_HELPER_INTERFACE_MODE_RGMII,
116 .enumerate = __cvmx_helper_rgmii_probe,
117 .probe = __cvmx_helper_rgmii_probe,
118 .enable = __cvmx_helper_rgmii_enable,
119 .link_get = __cvmx_helper_rgmii_link_get,
120 .link_set = __cvmx_helper_rgmii_link_set,
121 .loopback = __cvmx_helper_rgmii_configure_loopback,
126 * This structure specifies the interface methods used by interfaces
127 * configured as sgmii that use the gmx mac.
129 static const struct iface_ops iface_ops_sgmii = {
130 .mode = CVMX_HELPER_INTERFACE_MODE_SGMII,
131 .enumerate = __cvmx_helper_sgmii_enumerate,
132 .probe = __cvmx_helper_sgmii_probe,
133 .enable = __cvmx_helper_sgmii_enable,
134 .link_get = __cvmx_helper_sgmii_link_get,
135 .link_set = __cvmx_helper_sgmii_link_set,
136 .loopback = __cvmx_helper_sgmii_configure_loopback,
141 * This structure specifies the interface methods used by interfaces
142 * configured as sgmii that use the bgx mac.
144 static const struct iface_ops iface_ops_bgx_sgmii = {
145 .mode = CVMX_HELPER_INTERFACE_MODE_SGMII,
146 .enumerate = __cvmx_helper_bgx_enumerate,
147 .probe = __cvmx_helper_bgx_probe,
148 .enable = __cvmx_helper_bgx_sgmii_enable,
149 .link_get = __cvmx_helper_bgx_sgmii_link_get,
150 .link_set = __cvmx_helper_bgx_sgmii_link_set,
151 .loopback = __cvmx_helper_bgx_sgmii_configure_loopback,
156 * This structure specifies the interface methods used by interfaces
157 * configured as qsgmii.
159 static const struct iface_ops iface_ops_qsgmii = {
160 .mode = CVMX_HELPER_INTERFACE_MODE_QSGMII,
161 .enumerate = __cvmx_helper_sgmii_enumerate,
162 .probe = __cvmx_helper_sgmii_probe,
163 .enable = __cvmx_helper_sgmii_enable,
164 .link_get = __cvmx_helper_sgmii_link_get,
165 .link_set = __cvmx_helper_sgmii_link_set,
166 .loopback = __cvmx_helper_sgmii_configure_loopback,
171 * This structure specifies the interface methods used by interfaces
172 * configured as xaui using the gmx mac.
174 static const struct iface_ops iface_ops_xaui = {
175 .mode = CVMX_HELPER_INTERFACE_MODE_XAUI,
176 .enumerate = __cvmx_helper_xaui_enumerate,
177 .probe = __cvmx_helper_xaui_probe,
178 .enable = __cvmx_helper_xaui_enable,
179 .link_get = __cvmx_helper_xaui_link_get,
180 .link_set = __cvmx_helper_xaui_link_set,
181 .loopback = __cvmx_helper_xaui_configure_loopback,
186 * This structure specifies the interface methods used by interfaces
187 * configured as xaui using the gmx mac.
189 static const struct iface_ops iface_ops_bgx_xaui = {
190 .mode = CVMX_HELPER_INTERFACE_MODE_XAUI,
191 .enumerate = __cvmx_helper_bgx_enumerate,
192 .probe = __cvmx_helper_bgx_probe,
193 .enable = __cvmx_helper_bgx_xaui_enable,
194 .link_get = __cvmx_helper_bgx_xaui_link_get,
195 .link_set = __cvmx_helper_bgx_xaui_link_set,
196 .loopback = __cvmx_helper_bgx_xaui_configure_loopback,
201 * This structure specifies the interface methods used by interfaces
202 * configured as rxaui.
204 static const struct iface_ops iface_ops_rxaui = {
205 .mode = CVMX_HELPER_INTERFACE_MODE_RXAUI,
206 .enumerate = __cvmx_helper_xaui_enumerate,
207 .probe = __cvmx_helper_xaui_probe,
208 .enable = __cvmx_helper_xaui_enable,
209 .link_get = __cvmx_helper_xaui_link_get,
210 .link_set = __cvmx_helper_xaui_link_set,
211 .loopback = __cvmx_helper_xaui_configure_loopback,
216 * This structure specifies the interface methods used by interfaces
217 * configured as xaui using the gmx mac.
219 static const struct iface_ops iface_ops_bgx_rxaui = {
220 .mode = CVMX_HELPER_INTERFACE_MODE_RXAUI,
221 .enumerate = __cvmx_helper_bgx_enumerate,
222 .probe = __cvmx_helper_bgx_probe,
223 .enable = __cvmx_helper_bgx_xaui_enable,
224 .link_get = __cvmx_helper_bgx_xaui_link_get,
225 .link_set = __cvmx_helper_bgx_xaui_link_set,
226 .loopback = __cvmx_helper_bgx_xaui_configure_loopback,
231 * This structure specifies the interface methods used by interfaces
232 * configured as xlaui.
234 static const struct iface_ops iface_ops_bgx_xlaui = {
235 .mode = CVMX_HELPER_INTERFACE_MODE_XLAUI,
236 .enumerate = __cvmx_helper_bgx_enumerate,
237 .probe = __cvmx_helper_bgx_probe,
238 .enable = __cvmx_helper_bgx_xaui_enable,
239 .link_get = __cvmx_helper_bgx_xaui_link_get,
240 .link_set = __cvmx_helper_bgx_xaui_link_set,
241 .loopback = __cvmx_helper_bgx_xaui_configure_loopback,
246 * This structure specifies the interface methods used by interfaces
249 static const struct iface_ops iface_ops_bgx_xfi = {
250 .mode = CVMX_HELPER_INTERFACE_MODE_XFI,
251 .enumerate = __cvmx_helper_bgx_enumerate,
252 .probe = __cvmx_helper_bgx_probe,
253 .enable = __cvmx_helper_bgx_xaui_enable,
254 .link_get = __cvmx_helper_bgx_xaui_link_get,
255 .link_set = __cvmx_helper_bgx_xaui_link_set,
256 .loopback = __cvmx_helper_bgx_xaui_configure_loopback,
259 static const struct iface_ops iface_ops_bgx_10G_KR = {
260 .mode = CVMX_HELPER_INTERFACE_MODE_10G_KR,
261 .enumerate = __cvmx_helper_bgx_enumerate,
262 .probe = __cvmx_helper_bgx_probe,
263 .enable = __cvmx_helper_bgx_xaui_enable,
264 .link_get = __cvmx_helper_bgx_xaui_link_get,
265 .link_set = __cvmx_helper_bgx_xaui_link_set,
266 .loopback = __cvmx_helper_bgx_xaui_configure_loopback,
269 static const struct iface_ops iface_ops_bgx_40G_KR4 = {
270 .mode = CVMX_HELPER_INTERFACE_MODE_40G_KR4,
271 .enumerate = __cvmx_helper_bgx_enumerate,
272 .probe = __cvmx_helper_bgx_probe,
273 .enable = __cvmx_helper_bgx_xaui_enable,
274 .link_get = __cvmx_helper_bgx_xaui_link_get,
275 .link_set = __cvmx_helper_bgx_xaui_link_set,
276 .loopback = __cvmx_helper_bgx_xaui_configure_loopback,
281 * This structure specifies the interface methods used by interfaces
284 static const struct iface_ops iface_ops_ilk = {
285 .mode = CVMX_HELPER_INTERFACE_MODE_ILK,
286 .enumerate = __cvmx_helper_ilk_enumerate,
287 .probe = __cvmx_helper_ilk_probe,
288 .enable = __cvmx_helper_ilk_enable,
289 .link_get = __cvmx_helper_ilk_link_get,
290 .link_set = __cvmx_helper_ilk_link_set,
295 * This structure specifies the interface methods used by interfaces
298 static const struct iface_ops iface_ops_npi = {
299 .mode = CVMX_HELPER_INTERFACE_MODE_NPI,
300 .enumerate = __cvmx_helper_npi_probe,
301 .probe = __cvmx_helper_npi_probe,
302 .enable = __cvmx_helper_npi_enable,
307 * This structure specifies the interface methods used by interfaces
308 * configured as srio.
310 static const struct iface_ops iface_ops_srio = {
311 .mode = CVMX_HELPER_INTERFACE_MODE_SRIO,
312 .enumerate = __cvmx_helper_srio_probe,
313 .probe = __cvmx_helper_srio_probe,
314 .enable = __cvmx_helper_srio_enable,
315 .link_get = __cvmx_helper_srio_link_get,
316 .link_set = __cvmx_helper_srio_link_set,
321 * This structure specifies the interface methods used by interfaces
324 static const struct iface_ops iface_ops_agl = {
325 .mode = CVMX_HELPER_INTERFACE_MODE_AGL,
326 .enumerate = __cvmx_helper_agl_enumerate,
327 .probe = __cvmx_helper_agl_probe,
328 .enable = __cvmx_helper_agl_enable,
329 .link_get = __cvmx_helper_agl_link_get,
330 .link_set = __cvmx_helper_agl_link_set,
335 * This structure specifies the interface methods used by interfaces
336 * configured as mixed mode, some ports are sgmii and some are xfi.
338 static const struct iface_ops iface_ops_bgx_mixed = {
339 .mode = CVMX_HELPER_INTERFACE_MODE_MIXED,
340 .enumerate = __cvmx_helper_bgx_enumerate,
341 .probe = __cvmx_helper_bgx_probe,
342 .enable = __cvmx_helper_bgx_mixed_enable,
343 .link_get = __cvmx_helper_bgx_mixed_link_get,
344 .link_set = __cvmx_helper_bgx_mixed_link_set,
345 .loopback = __cvmx_helper_bgx_mixed_configure_loopback,
350 * This structure specifies the interface methods used by interfaces
351 * configured as loop.
353 static const struct iface_ops iface_ops_loop = {
354 .mode = CVMX_HELPER_INTERFACE_MODE_LOOP,
355 .enumerate = __cvmx_helper_loop_enumerate,
356 .probe = __cvmx_helper_loop_probe,
359 const struct iface_ops *iface_node_ops[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE];
360 #define iface_ops iface_node_ops[0]
364 int cvif_has_fcs; /* PKO fcs for this interface. */
365 enum cvmx_pko_padding cvif_padding;
366 cvmx_helper_link_info_t *cvif_ipd_port_link_info;
370 * This has to be static as u-boot expects to probe an interface and
371 * gets the number of its ports.
373 static struct cvmx_iface cvmx_interfaces[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE];
375 int __cvmx_helper_get_num_ipd_ports(int xiface)
377 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
378 struct cvmx_iface *piface;
380 if (xi.interface >= cvmx_helper_get_number_of_interfaces())
383 piface = &cvmx_interfaces[xi.node][xi.interface];
384 return piface->cvif_ipd_nports;
387 enum cvmx_pko_padding __cvmx_helper_get_pko_padding(int xiface)
389 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
390 struct cvmx_iface *piface;
392 if (xi.interface >= cvmx_helper_get_number_of_interfaces())
393 return CVMX_PKO_PADDING_NONE;
395 piface = &cvmx_interfaces[xi.node][xi.interface];
396 return piface->cvif_padding;
399 int __cvmx_helper_init_interface(int xiface, int num_ipd_ports, int has_fcs,
400 enum cvmx_pko_padding pad)
402 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
403 struct cvmx_iface *piface;
404 cvmx_helper_link_info_t *p;
410 if (xi.interface >= cvmx_helper_get_number_of_interfaces())
413 piface = &cvmx_interfaces[xi.node][xi.interface];
414 piface->cvif_ipd_nports = num_ipd_ports;
415 piface->cvif_padding = pad;
417 piface->cvif_has_fcs = has_fcs;
420 * allocate the per-ipd_port link_info structure
422 sz = piface->cvif_ipd_nports * sizeof(cvmx_helper_link_info_t);
423 snprintf(name, sizeof(name), "__int_%d_link_info", xi.interface);
424 addr = CAST64(cvmx_bootmem_alloc_named_range_once(sz, 0, 0,
425 __alignof(cvmx_helper_link_info_t),
427 piface->cvif_ipd_port_link_info =
428 (cvmx_helper_link_info_t *)__cvmx_phys_addr_to_ptr(addr, sz);
429 if (!piface->cvif_ipd_port_link_info) {
431 debug("iface %d failed to alloc link info\n", xi.interface);
435 /* Initialize them */
436 p = piface->cvif_ipd_port_link_info;
438 for (i = 0; i < piface->cvif_ipd_nports; i++) {
446 * Shut down the interfaces; free the resources.
449 void __cvmx_helper_shutdown_interfaces_node(unsigned int node)
452 int nifaces; /* number of interfaces */
453 struct cvmx_iface *piface;
455 nifaces = cvmx_helper_get_number_of_interfaces();
456 for (i = 0; i < nifaces; i++) {
457 piface = &cvmx_interfaces[node][i];
460 * For SE apps, bootmem was meant to be allocated and never
463 piface->cvif_ipd_port_link_info = 0;
/* Shut down interfaces on the local node only. */
void __cvmx_helper_shutdown_interfaces(void)
{
	unsigned int node = cvmx_get_node_num();

	__cvmx_helper_shutdown_interfaces_node(node);
}
474 int __cvmx_helper_set_link_info(int xiface, int index, cvmx_helper_link_info_t link_info)
476 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
477 struct cvmx_iface *piface;
479 if (xi.interface >= cvmx_helper_get_number_of_interfaces())
482 piface = &cvmx_interfaces[xi.node][xi.interface];
484 if (piface->cvif_ipd_port_link_info) {
485 piface->cvif_ipd_port_link_info[index] = link_info;
492 cvmx_helper_link_info_t __cvmx_helper_get_link_info(int xiface, int port)
494 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
495 struct cvmx_iface *piface;
496 cvmx_helper_link_info_t err;
500 if (xi.interface >= cvmx_helper_get_number_of_interfaces())
502 piface = &cvmx_interfaces[xi.node][xi.interface];
504 if (piface->cvif_ipd_port_link_info)
505 return piface->cvif_ipd_port_link_info[port];
511 * Returns if FCS is enabled for the specified interface and port
513 * @param xiface - interface to check
515 * @return zero if FCS is not used, otherwise FCS is used.
517 int __cvmx_helper_get_has_fcs(int xiface)
519 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
520 return cvmx_interfaces[xi.node][xi.interface].cvif_has_fcs;
523 u64 cvmx_rgmii_backpressure_dis = 1;
525 typedef int (*cvmx_export_config_t)(void);
526 cvmx_export_config_t cvmx_export_app_config;
528 void cvmx_rgmii_set_back_pressure(uint64_t backpressure_dis)
530 cvmx_rgmii_backpressure_dis = backpressure_dis;
534 * internal functions that are not exported in the .h file but must be
535 * declared to make gcc happy.
537 extern cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface, int port);
540 * cvmx_override_iface_phy_mode(int interface, int index) is a function pointer.
541 * It is meant to allow customization of interfaces which do not have a PHY.
543 * @returns 0 if MAC decides TX_CONFIG_REG or 1 if PHY decides TX_CONFIG_REG.
545 * If this function pointer is NULL then it defaults to the MAC.
547 int (*cvmx_override_iface_phy_mode)(int interface, int index);
550 * cvmx_override_ipd_port_setup(int ipd_port) is a function
551 * pointer. It is meant to allow customization of the IPD
552 * port/port kind setup before packet input/output comes online.
553 * It is called after cvmx-helper does the default IPD configuration,
554 * but before IPD is enabled. Users should set this pointer to a
555 * function before calling any cvmx-helper operations.
557 void (*cvmx_override_ipd_port_setup)(int ipd_port) = NULL;
560 * Return the number of interfaces the chip has. Each interface
561 * may have multiple ports. Most chips support two interfaces,
562 * but the CNX0XX and CNX1XX are exceptions. These only support
565 * @return Number of interfaces on chip
567 int cvmx_helper_get_number_of_interfaces(void)
569 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
571 else if (OCTEON_IS_MODEL(OCTEON_CN66XX))
572 if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
576 else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
578 else if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
580 else if (OCTEON_IS_MODEL(OCTEON_CN70XX))
582 else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
584 else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
586 else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
592 int __cvmx_helper_early_ports_on_interface(int interface)
596 if (octeon_has_feature(OCTEON_FEATURE_PKND))
597 return cvmx_helper_interface_enumerate(interface);
599 ports = cvmx_helper_interface_enumerate(interface);
600 ports = __cvmx_helper_board_interface_probe(interface, ports);
606 * Return the number of ports on an interface. Depending on the
607 * chip and configuration, this can be 1-16. A value of 0
608 * specifies that the interface doesn't exist or isn't usable.
610 * @param xiface xiface to get the port count for
612 * @return Number of ports on interface. Can be Zero.
614 int cvmx_helper_ports_on_interface(int xiface)
616 if (octeon_has_feature(OCTEON_FEATURE_PKND))
617 return cvmx_helper_interface_enumerate(xiface);
619 return __cvmx_helper_get_num_ipd_ports(xiface);
624 * Return interface mode for CN70XX.
626 static cvmx_helper_interface_mode_t __cvmx_get_mode_cn70xx(int interface)
628 /* SGMII/RXAUI/QSGMII */
630 enum cvmx_qlm_mode qlm_mode =
631 cvmx_qlm_get_dlm_mode(0, interface);
633 if (qlm_mode == CVMX_QLM_MODE_SGMII)
634 iface_ops[interface] = &iface_ops_sgmii;
635 else if (qlm_mode == CVMX_QLM_MODE_QSGMII)
636 iface_ops[interface] = &iface_ops_qsgmii;
637 else if (qlm_mode == CVMX_QLM_MODE_RXAUI)
638 iface_ops[interface] = &iface_ops_rxaui;
640 iface_ops[interface] = &iface_ops_dis;
641 } else if (interface == 2) { /* DPI */
642 iface_ops[interface] = &iface_ops_npi;
643 } else if (interface == 3) { /* LOOP */
644 iface_ops[interface] = &iface_ops_loop;
645 } else if (interface == 4) { /* RGMII (AGL) */
646 cvmx_agl_prtx_ctl_t prtx_ctl;
648 prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(0));
649 if (prtx_ctl.s.mode == 0)
650 iface_ops[interface] = &iface_ops_agl;
652 iface_ops[interface] = &iface_ops_dis;
654 iface_ops[interface] = &iface_ops_dis;
657 return iface_ops[interface]->mode;
662 * Return interface mode for CN78XX.
664 static cvmx_helper_interface_mode_t __cvmx_get_mode_cn78xx(int xiface)
666 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
667 /* SGMII/RXAUI/XAUI */
668 if (xi.interface < 6) {
669 int qlm = cvmx_qlm_lmac(xiface, 0);
670 enum cvmx_qlm_mode qlm_mode;
673 iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
674 return iface_node_ops[xi.node][xi.interface]->mode;
676 qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, qlm);
678 if (qlm_mode == CVMX_QLM_MODE_SGMII)
679 iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_sgmii;
680 else if (qlm_mode == CVMX_QLM_MODE_XAUI)
681 iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xaui;
682 else if (qlm_mode == CVMX_QLM_MODE_XLAUI)
683 iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xlaui;
684 else if (qlm_mode == CVMX_QLM_MODE_XFI)
685 iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xfi;
686 else if (qlm_mode == CVMX_QLM_MODE_RXAUI)
687 iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_rxaui;
689 iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
690 } else if (xi.interface < 8) {
691 enum cvmx_qlm_mode qlm_mode;
696 if (xi.interface == 6) {
698 lane_mask = cvmx_ilk_lane_mask[xi.node][0];
701 lane_mask = cvmx_ilk_lane_mask[xi.node][1];
706 iface_node_ops[xi.node][intf] = &iface_ops_dis;
709 qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 4);
710 if (qlm_mode == CVMX_QLM_MODE_ILK)
711 iface_node_ops[xi.node][intf] = &iface_ops_ilk;
713 iface_node_ops[xi.node][intf] = &iface_ops_dis;
717 for (i = 4; i < 6; i++) {
718 qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
719 if (qlm_mode == CVMX_QLM_MODE_ILK)
723 iface_node_ops[xi.node][intf] = &iface_ops_ilk;
725 iface_node_ops[xi.node][intf] = &iface_ops_dis;
729 for (i = 4; i < 7; i++) {
730 qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
731 if (qlm_mode == CVMX_QLM_MODE_ILK)
735 iface_node_ops[xi.node][intf] = &iface_ops_ilk;
737 iface_node_ops[xi.node][intf] = &iface_ops_dis;
741 for (i = 6; i < 8; i++) {
742 qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
743 if (qlm_mode == CVMX_QLM_MODE_ILK)
747 iface_node_ops[xi.node][intf] = &iface_ops_ilk;
749 iface_node_ops[xi.node][intf] = &iface_ops_dis;
752 qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 5);
753 if (qlm_mode == CVMX_QLM_MODE_ILK)
754 iface_node_ops[xi.node][intf] = &iface_ops_ilk;
756 iface_node_ops[xi.node][intf] = &iface_ops_dis;
759 qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 6);
760 if (qlm_mode == CVMX_QLM_MODE_ILK)
761 iface_node_ops[xi.node][intf] = &iface_ops_ilk;
763 iface_node_ops[xi.node][intf] = &iface_ops_dis;
766 qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 7);
767 if (qlm_mode == CVMX_QLM_MODE_ILK)
768 iface_node_ops[xi.node][intf] = &iface_ops_ilk;
770 iface_node_ops[xi.node][intf] = &iface_ops_dis;
774 for (i = 5; i < 8; i++) {
775 qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
776 if (qlm_mode == CVMX_QLM_MODE_ILK)
780 iface_node_ops[xi.node][intf] = &iface_ops_ilk;
782 iface_node_ops[xi.node][intf] = &iface_ops_dis;
785 } else if (xi.interface == 8) { /* DPI */
788 for (qlm = 0; qlm < 5; qlm++) {
789 /* if GSERX_CFG[pcie] == 1, then enable npi */
790 if (csr_rd_node(xi.node, CVMX_GSERX_CFG(qlm)) & 0x1) {
791 iface_node_ops[xi.node][xi.interface] =
793 return iface_node_ops[xi.node][xi.interface]->mode;
796 iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
797 } else if (xi.interface == 9) { /* LOOP */
798 iface_node_ops[xi.node][xi.interface] = &iface_ops_loop;
800 iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
803 return iface_node_ops[xi.node][xi.interface]->mode;
808 * Return interface mode for CN73XX.
810 static cvmx_helper_interface_mode_t __cvmx_get_mode_cn73xx(int xiface)
812 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
813 int interface = xi.interface;
815 /* SGMII/XAUI/XLAUI/XFI */
817 int qlm = cvmx_qlm_lmac(xiface, 0);
818 enum cvmx_qlm_mode qlm_mode;
821 iface_ops[interface] = &iface_ops_dis;
822 return iface_ops[interface]->mode;
824 qlm_mode = cvmx_qlm_get_mode(qlm);
827 case CVMX_QLM_MODE_SGMII:
828 case CVMX_QLM_MODE_SGMII_2X1:
829 case CVMX_QLM_MODE_RGMII_SGMII:
830 case CVMX_QLM_MODE_RGMII_SGMII_1X1:
831 iface_ops[interface] = &iface_ops_bgx_sgmii;
833 case CVMX_QLM_MODE_XAUI:
834 case CVMX_QLM_MODE_RGMII_XAUI:
835 iface_ops[interface] = &iface_ops_bgx_xaui;
837 case CVMX_QLM_MODE_RXAUI:
838 case CVMX_QLM_MODE_RXAUI_1X2:
839 case CVMX_QLM_MODE_RGMII_RXAUI:
840 iface_ops[interface] = &iface_ops_bgx_rxaui;
842 case CVMX_QLM_MODE_XLAUI:
843 case CVMX_QLM_MODE_RGMII_XLAUI:
844 iface_ops[interface] = &iface_ops_bgx_xlaui;
846 case CVMX_QLM_MODE_XFI:
847 case CVMX_QLM_MODE_XFI_1X2:
848 case CVMX_QLM_MODE_RGMII_XFI:
849 iface_ops[interface] = &iface_ops_bgx_xfi;
851 case CVMX_QLM_MODE_10G_KR:
852 case CVMX_QLM_MODE_10G_KR_1X2:
853 case CVMX_QLM_MODE_RGMII_10G_KR:
854 iface_ops[interface] = &iface_ops_bgx_10G_KR;
856 case CVMX_QLM_MODE_40G_KR4:
857 case CVMX_QLM_MODE_RGMII_40G_KR4:
858 iface_ops[interface] = &iface_ops_bgx_40G_KR4;
860 case CVMX_QLM_MODE_MIXED:
861 iface_ops[interface] = &iface_ops_bgx_mixed;
864 iface_ops[interface] = &iface_ops_dis;
867 } else if (interface == 3) { /* DPI */
868 iface_ops[interface] = &iface_ops_npi;
869 } else if (interface == 4) { /* LOOP */
870 iface_ops[interface] = &iface_ops_loop;
872 iface_ops[interface] = &iface_ops_dis;
875 return iface_ops[interface]->mode;
880 * Return interface mode for CNF75XX.
882 * CNF75XX has a single BGX block, which is attached to two DLMs,
883 * the first, GSER4 only supports SGMII mode, while the second,
884 * GSER5 supports 1G/10G single late modes, i.e. SGMII, XFI, 10G-KR.
885 * Each half-BGX is thus designated as a separate interface with two ports each.
887 static cvmx_helper_interface_mode_t __cvmx_get_mode_cnf75xx(int xiface)
889 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
890 int interface = xi.interface;
892 /* BGX0: SGMII (DLM4/DLM5)/XFI(DLM5) */
894 enum cvmx_qlm_mode qlm_mode;
895 int qlm = cvmx_qlm_lmac(xiface, 0);
898 iface_ops[interface] = &iface_ops_dis;
899 return iface_ops[interface]->mode;
901 qlm_mode = cvmx_qlm_get_mode(qlm);
904 case CVMX_QLM_MODE_SGMII:
905 case CVMX_QLM_MODE_SGMII_2X1:
906 iface_ops[interface] = &iface_ops_bgx_sgmii;
908 case CVMX_QLM_MODE_XFI_1X2:
909 iface_ops[interface] = &iface_ops_bgx_xfi;
911 case CVMX_QLM_MODE_10G_KR_1X2:
912 iface_ops[interface] = &iface_ops_bgx_10G_KR;
914 case CVMX_QLM_MODE_MIXED:
915 iface_ops[interface] = &iface_ops_bgx_mixed;
918 iface_ops[interface] = &iface_ops_dis;
921 } else if ((interface < 3) && OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
922 cvmx_sriox_status_reg_t sriox_status_reg;
923 int srio_port = interface - 1;
925 sriox_status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(srio_port));
927 if (sriox_status_reg.s.srio)
928 iface_ops[interface] = &iface_ops_srio;
930 iface_ops[interface] = &iface_ops_dis;
931 } else if (interface == 3) { /* DPI */
932 iface_ops[interface] = &iface_ops_npi;
933 } else if (interface == 4) { /* LOOP */
934 iface_ops[interface] = &iface_ops_loop;
936 iface_ops[interface] = &iface_ops_dis;
939 return iface_ops[interface]->mode;
944 * Return interface mode for CN68xx.
946 static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface)
948 union cvmx_mio_qlmx_cfg qlm_cfg;
952 qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
953 /* QLM is disabled when QLM SPD is 15. */
954 if (qlm_cfg.s.qlm_spd == 15)
955 iface_ops[interface] = &iface_ops_dis;
956 else if (qlm_cfg.s.qlm_cfg == 7)
957 iface_ops[interface] = &iface_ops_rxaui;
958 else if (qlm_cfg.s.qlm_cfg == 2)
959 iface_ops[interface] = &iface_ops_sgmii;
960 else if (qlm_cfg.s.qlm_cfg == 3)
961 iface_ops[interface] = &iface_ops_xaui;
963 iface_ops[interface] = &iface_ops_dis;
967 qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
968 /* QLM is disabled when QLM SPD is 15. */
969 if (qlm_cfg.s.qlm_spd == 15)
970 iface_ops[interface] = &iface_ops_dis;
971 else if (qlm_cfg.s.qlm_cfg == 7)
972 iface_ops[interface] = &iface_ops_rxaui;
974 iface_ops[interface] = &iface_ops_dis;
980 qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(interface));
981 /* QLM is disabled when QLM SPD is 15. */
982 if (qlm_cfg.s.qlm_spd == 15)
983 iface_ops[interface] = &iface_ops_dis;
984 else if (qlm_cfg.s.qlm_cfg == 2)
985 iface_ops[interface] = &iface_ops_sgmii;
986 else if (qlm_cfg.s.qlm_cfg == 3)
987 iface_ops[interface] = &iface_ops_xaui;
989 iface_ops[interface] = &iface_ops_dis;
994 qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(interface - 4));
995 /* QLM is disabled when QLM SPD is 15. */
996 if (qlm_cfg.s.qlm_spd == 15)
997 iface_ops[interface] = &iface_ops_dis;
998 else if (qlm_cfg.s.qlm_cfg == 1)
999 iface_ops[interface] = &iface_ops_ilk;
1001 iface_ops[interface] = &iface_ops_dis;
1005 union cvmx_mio_qlmx_cfg qlm_cfg1;
1006 /* Check if PCIe0/PCIe1 is configured for PCIe */
1007 qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(3));
1008 qlm_cfg1.u64 = csr_rd(CVMX_MIO_QLMX_CFG(1));
1009 /* QLM is disabled when QLM SPD is 15. */
1010 if ((qlm_cfg.s.qlm_spd != 15 && qlm_cfg.s.qlm_cfg == 0) ||
1011 (qlm_cfg1.s.qlm_spd != 15 && qlm_cfg1.s.qlm_cfg == 0))
1012 iface_ops[interface] = &iface_ops_npi;
1014 iface_ops[interface] = &iface_ops_dis;
1018 iface_ops[interface] = &iface_ops_loop;
1022 iface_ops[interface] = &iface_ops_dis;
1026 return iface_ops[interface]->mode;
1031 * Return interface mode for an Octeon II
1033 static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
1035 union cvmx_gmxx_inf_mode mode;
1037 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1038 return __cvmx_get_mode_cn68xx(interface);
1040 if (interface == 2) {
1041 iface_ops[interface] = &iface_ops_npi;
1042 } else if (interface == 3) {
1043 iface_ops[interface] = &iface_ops_loop;
1044 } else if ((OCTEON_IS_MODEL(OCTEON_CN63XX) &&
1045 (interface == 4 || interface == 5)) ||
1046 (OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 &&
1048 /* Only present in CN63XX & CN66XX Octeon model */
1049 union cvmx_sriox_status_reg sriox_status_reg;
1051 /* cn66xx pass1.0 has only 2 SRIO interfaces. */
1052 if ((interface == 5 || interface == 7) &&
1053 OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0)) {
1054 iface_ops[interface] = &iface_ops_dis;
1055 } else if (interface == 5 && OCTEON_IS_MODEL(OCTEON_CN66XX)) {
1057 * Later passes of cn66xx support SRIO0 - x4/x2/x1,
1058 * SRIO2 - x2/x1, SRIO3 - x1
1060 iface_ops[interface] = &iface_ops_dis;
1062 sriox_status_reg.u64 =
1063 csr_rd(CVMX_SRIOX_STATUS_REG(interface - 4));
1064 if (sriox_status_reg.s.srio)
1065 iface_ops[interface] = &iface_ops_srio;
1067 iface_ops[interface] = &iface_ops_dis;
1069 } else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
1070 union cvmx_mio_qlmx_cfg mio_qlm_cfg;
1072 /* QLM2 is SGMII0 and QLM1 is SGMII1 */
1073 if (interface == 0) {
1074 mio_qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(2));
1075 } else if (interface == 1) {
1076 mio_qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(1));
1078 iface_ops[interface] = &iface_ops_dis;
1079 return iface_ops[interface]->mode;
1082 if (mio_qlm_cfg.s.qlm_spd == 15)
1083 iface_ops[interface] = &iface_ops_dis;
1084 else if (mio_qlm_cfg.s.qlm_cfg == 9)
1085 iface_ops[interface] = &iface_ops_sgmii;
1086 else if (mio_qlm_cfg.s.qlm_cfg == 11)
1087 iface_ops[interface] = &iface_ops_xaui;
1089 iface_ops[interface] = &iface_ops_dis;
1090 } else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
1091 union cvmx_mio_qlmx_cfg qlm_cfg;
1093 if (interface == 0) {
1094 qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(2));
1095 } else if (interface == 1) {
1096 qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
1098 iface_ops[interface] = &iface_ops_dis;
1099 return iface_ops[interface]->mode;
1102 if (qlm_cfg.s.qlm_spd == 15)
1103 iface_ops[interface] = &iface_ops_dis;
1104 else if (qlm_cfg.s.qlm_cfg == 2)
1105 iface_ops[interface] = &iface_ops_sgmii;
1106 else if (qlm_cfg.s.qlm_cfg == 3)
1107 iface_ops[interface] = &iface_ops_xaui;
1109 iface_ops[interface] = &iface_ops_dis;
1110 } else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
1111 if (interface == 0) {
1112 union cvmx_mio_qlmx_cfg qlm_cfg;
1114 qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
1115 if (qlm_cfg.s.qlm_cfg == 2)
1116 iface_ops[interface] = &iface_ops_sgmii;
1118 iface_ops[interface] = &iface_ops_dis;
1120 iface_ops[interface] = &iface_ops_dis;
1122 } else if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX)) {
1123 iface_ops[interface] = &iface_ops_dis;
1125 mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
1127 if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
1128 switch (mode.cn63xx.mode) {
1130 iface_ops[interface] = &iface_ops_sgmii;
1134 iface_ops[interface] = &iface_ops_xaui;
1138 iface_ops[interface] = &iface_ops_dis;
1143 iface_ops[interface] = &iface_ops_dis;
1144 else if (mode.s.type)
1145 iface_ops[interface] = &iface_ops_gmii;
1147 iface_ops[interface] = &iface_ops_rgmii;
1151 return iface_ops[interface]->mode;
/**
 * Get the operating mode of an interface. Depending on the Octeon
 * chip and configuration, this function returns an enumeration
 * of the type of packet I/O supported by an interface.
 *
 * @param xiface Interface to probe
 *
 * @return Mode of the interface. Out-of-range interfaces return
 *	   CVMX_HELPER_INTERFACE_MODE_DISABLED.
 */
1164 cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int xiface)
1166 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
/* Reject interfaces outside the valid range */
1168 if (xi.interface < 0 ||
1169 xi.interface >= cvmx_helper_get_number_of_interfaces())
1170 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
/*
 * Check if the interface mode has been already cached. If it has,
 * simply return it. Otherwise, fall through the rest of the code to
 * determine the interface mode and cache it in iface_ops.
 */
1177 if (iface_node_ops[xi.node][xi.interface]) {
1178 cvmx_helper_interface_mode_t mode;
1180 mode = iface_node_ops[xi.node][xi.interface]->mode;
/* Dispatch to the model-specific mode detection helpers */
1187 if (OCTEON_IS_MODEL(OCTEON_CN70XX))
1188 return __cvmx_get_mode_cn70xx(xi.interface);
1190 if (OCTEON_IS_MODEL(OCTEON_CN78XX))
1191 return __cvmx_get_mode_cn78xx(xiface);
1193 if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
1194 cvmx_helper_interface_mode_t mode;
1196 mode = __cvmx_get_mode_cnf75xx(xiface);
1200 if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
1201 cvmx_helper_interface_mode_t mode;
1203 mode = __cvmx_get_mode_cn73xx(xiface);
1210 if (OCTEON_IS_OCTEON2())
1211 return __cvmx_get_mode_octeon2(xi.interface);
/*
 * Octeon and Octeon Plus models: interface 2 is NPI, interface 3 is
 * disabled, anything else is decided by the GMX mode CSR (type bit
 * selects GMII vs. RGMII).
 */
1216 if (xi.interface == 2) {
1217 iface_ops[xi.interface] = &iface_ops_npi;
1218 } else if (xi.interface == 3) {
1219 iface_ops[xi.interface] = &iface_ops_dis;
1221 union cvmx_gmxx_inf_mode mode;
1223 mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(xi.interface));
1226 iface_ops[xi.interface] = &iface_ops_dis;
1227 else if (mode.s.type)
1228 iface_ops[xi.interface] = &iface_ops_gmii;
1230 iface_ops[xi.interface] = &iface_ops_rgmii;
1233 return iface_ops[xi.interface]->mode;
/**
 * Determine the actual number of hardware ports connected to an
 * interface. It doesn't setup the ports or enable them.
 *
 * @param xiface Interface to enumerate
 *
 * @return The number of ports on the interface, negative on failure
 */
1244 int cvmx_helper_interface_enumerate(int xiface)
1246 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
/* Mode detection populates iface_node_ops for this interface */
1249 cvmx_helper_interface_get_mode(xiface);
1250 if (iface_node_ops[xi.node][xi.interface]->enumerate)
1251 result = iface_node_ops[xi.node][xi.interface]->enumerate(xiface);
/**
 * This function probes an interface to determine the actual number of
 * hardware ports connected to it. It does some setup of the ports but
 * doesn't enable them. The main goal here is to set the global
 * interface_port_count[interface] correctly. Final hardware setup of
 * the ports will be performed later.
 *
 * @param xiface Interface to probe
 *
 * @return Zero on success, negative on failure
 */
1267 int cvmx_helper_interface_probe(int xiface)
/*
 * At this stage in the game we don't want packets to be
 * moving yet. The following probe calls should perform
 * hardware setup needed to determine port counts. Receive
 * must still be disabled.
 */
1277 enum cvmx_pko_padding padding = CVMX_PKO_PADDING_NONE;
1278 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
/* Mode detection also installs the per-interface ops used below */
1283 cvmx_helper_interface_get_mode(xiface);
1284 if (iface_node_ops[xi.node][xi.interface]->probe)
1285 nports = iface_node_ops[xi.node][xi.interface]->probe(xiface);
/* Select the PKO padding required by the interface type */
1287 switch (iface_node_ops[xi.node][xi.interface]->mode) {
1288 /* These types don't support ports to IPD/PKO */
1289 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
1290 case CVMX_HELPER_INTERFACE_MODE_PCIE:
1293 /* XAUI is a single high speed port */
1294 case CVMX_HELPER_INTERFACE_MODE_XAUI:
1295 case CVMX_HELPER_INTERFACE_MODE_RXAUI:
1296 case CVMX_HELPER_INTERFACE_MODE_XLAUI:
1297 case CVMX_HELPER_INTERFACE_MODE_XFI:
1298 case CVMX_HELPER_INTERFACE_MODE_10G_KR:
1299 case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
1300 case CVMX_HELPER_INTERFACE_MODE_MIXED:
1302 padding = CVMX_PKO_PADDING_60;
/*
 * RGMII/GMII/MII are all treated about the same. Most
 * functions refer to these ports as RGMII.
 */
1308 case CVMX_HELPER_INTERFACE_MODE_RGMII:
1309 case CVMX_HELPER_INTERFACE_MODE_GMII:
1310 padding = CVMX_PKO_PADDING_60;
/*
 * SPI4 can have 1-16 ports depending on the device at
 * the other end of the link.
 */
1316 case CVMX_HELPER_INTERFACE_MODE_SPI:
1317 padding = CVMX_PKO_PADDING_60;
/*
 * SGMII can have 1-4 ports depending on how many are
 * hooked up.
 */
1323 case CVMX_HELPER_INTERFACE_MODE_SGMII:
1324 case CVMX_HELPER_INTERFACE_MODE_QSGMII:
1325 padding = CVMX_PKO_PADDING_60;
1326 case CVMX_HELPER_INTERFACE_MODE_PICMG:
1329 /* PCI target Network Packet Interface */
1330 case CVMX_HELPER_INTERFACE_MODE_NPI:
/*
 * Special loopback only ports. These are not the same
 * as other ports in loopback mode.
 */
1336 case CVMX_HELPER_INTERFACE_MODE_LOOP:
1338 /* SRIO has 2^N ports, where N is number of interfaces */
1339 case CVMX_HELPER_INTERFACE_MODE_SRIO:
1341 case CVMX_HELPER_INTERFACE_MODE_ILK:
1342 padding = CVMX_PKO_PADDING_60;
1345 case CVMX_HELPER_INTERFACE_MODE_AGL:
/* Without PKND, the board code may override the detected port count */
1353 if (!octeon_has_feature(OCTEON_FEATURE_PKND))
1356 nports = __cvmx_helper_board_interface_probe(xiface, nports);
1357 __cvmx_helper_init_interface(xiface, nports, has_fcs, padding);
1358 /* Make sure all global variables propagate to other cores */
/**
 * Setup backpressure for all interfaces on a node.
 *
 * @param node Node to configure
 *
 * @return Zero on success, negative on failure
 */
1370 static int __cvmx_helper_global_setup_backpressure(int node)
1372 cvmx_qos_proto_t qos_proto;
1373 cvmx_qos_pkt_mode_t qos_mode;
1375 unsigned int bpmask;
1376 int interface, xiface, ports;
1377 int num_interfaces = cvmx_helper_get_number_of_interfaces();
/* Pick the flow-control protocol: PAUSE unless backpressure is disabled */
1379 if (cvmx_rgmii_backpressure_dis) {
1380 qos_proto = CVMX_QOS_PROTO_NONE;
1381 qos_mode = CVMX_QOS_PKT_MODE_DROP;
1383 qos_proto = CVMX_QOS_PROTO_PAUSE;
1384 qos_mode = CVMX_QOS_PKT_MODE_HWONLY;
/* Apply the selected mode to every port of every interface */
1387 for (interface = 0; interface < num_interfaces; interface++) {
1388 xiface = cvmx_helper_node_interface_to_xiface(node, interface);
1389 ports = cvmx_helper_ports_on_interface(xiface);
1391 switch (cvmx_helper_interface_get_mode(xiface)) {
1392 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
1393 case CVMX_HELPER_INTERFACE_MODE_PCIE:
1394 case CVMX_HELPER_INTERFACE_MODE_SRIO:
1395 case CVMX_HELPER_INTERFACE_MODE_ILK:
1396 case CVMX_HELPER_INTERFACE_MODE_NPI:
1397 case CVMX_HELPER_INTERFACE_MODE_PICMG:
1399 case CVMX_HELPER_INTERFACE_MODE_LOOP:
1400 case CVMX_HELPER_INTERFACE_MODE_XAUI:
1401 case CVMX_HELPER_INTERFACE_MODE_RXAUI:
1402 case CVMX_HELPER_INTERFACE_MODE_XLAUI:
1403 case CVMX_HELPER_INTERFACE_MODE_XFI:
1404 case CVMX_HELPER_INTERFACE_MODE_10G_KR:
1405 case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
1406 bpmask = (cvmx_rgmii_backpressure_dis) ? 0xF : 0;
1407 if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
1408 for (port = 0; port < ports; port++) {
1409 xipdport = cvmx_helper_get_ipd_port(xiface, port);
1410 cvmx_bgx_set_flowctl_mode(xipdport, qos_proto, qos_mode);
1412 cvmx_bgx_set_backpressure_override(xiface, bpmask);
1415 case CVMX_HELPER_INTERFACE_MODE_RGMII:
1416 case CVMX_HELPER_INTERFACE_MODE_GMII:
1417 case CVMX_HELPER_INTERFACE_MODE_SPI:
1418 case CVMX_HELPER_INTERFACE_MODE_SGMII:
1419 case CVMX_HELPER_INTERFACE_MODE_QSGMII:
1420 case CVMX_HELPER_INTERFACE_MODE_MIXED:
1421 bpmask = (cvmx_rgmii_backpressure_dis) ? 0xF : 0;
1422 if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
1423 for (port = 0; port < ports; port++) {
1424 xipdport = cvmx_helper_get_ipd_port(xiface, port);
1425 cvmx_bgx_set_flowctl_mode(xipdport, qos_proto, qos_mode);
1427 cvmx_bgx_set_backpressure_override(xiface, bpmask);
/* NOTE(review): appears to be the non-BGX fallback path - confirm */
1429 cvmx_gmx_set_backpressure_override(interface, bpmask);
1432 case CVMX_HELPER_INTERFACE_MODE_AGL:
1433 bpmask = (cvmx_rgmii_backpressure_dis) ? 0x1 : 0;
1434 cvmx_agl_set_backpressure_override(interface, bpmask);
/**
 * Verify the per port IPD backpressure is aligned properly.
 *
 * @return Zero if working, non zero if misaligned
 */
1446 int __cvmx_helper_backpressure_is_misaligned(void)
/**
 * Enable packet input/output from the hardware. This function is
 * called after all internal setup is complete and IPD is enabled.
 * After this function completes, packets will be accepted from the
 * hardware ports. PKO should still be disabled to make sure packets
 * aren't sent out partially setup hardware.
 *
 * @param xiface Interface to enable
 *
 * @return Zero on success, negative on failure
 */
1463 int __cvmx_helper_packet_hardware_enable(int xiface)
1466 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
/* Interface-specific enable first, then board-level enable */
1468 if (iface_node_ops[xi.node][xi.interface]->enable)
1469 result = iface_node_ops[xi.node][xi.interface]->enable(xiface);
1470 result |= __cvmx_helper_board_hardware_enable(xiface);
/* Enable IPD and packet input on the local node */
1474 int cvmx_helper_ipd_and_packet_input_enable(void)
1476 return cvmx_helper_ipd_and_packet_input_enable_node(cvmx_get_node_num());
/**
 * Called after all internal packet IO paths are setup. This
 * function enables IPD/PIP and begins packet input and output.
 *
 * @param node Node to enable packet input on
 *
 * @return Zero on success, negative on failure
 */
1485 int cvmx_helper_ipd_and_packet_input_enable_node(int node)
/* Chips with the PKI block use the PKI enable path */
1491 if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
1492 cvmx_helper_pki_enable(node);
/*
 * Time to enable hardware ports packet input and output. Note
 * that at this point IPD/PIP must be fully functional and PKO
 * must be disabled.
 */
1503 num_interfaces = cvmx_helper_get_number_of_interfaces();
1504 for (interface = 0; interface < num_interfaces; interface++) {
1505 int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
1507 num_ports = cvmx_helper_ports_on_interface(xiface);
1509 __cvmx_helper_packet_hardware_enable(xiface);
1512 /* Finally enable PKO now that the entire path is up and running */
1514 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
1515 ; // cvmx_pko_enable_78xx(0); already enabled
/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * @param node Node to initialize packet I/O on
 *
 * @return Zero on success, non-zero on failure
 */
1531 int cvmx_helper_initialize_packet_io_node(unsigned int node)
1536 union cvmx_l2c_cfg l2c_cfg;
1537 union cvmx_smix_en smix_en;
1538 const int num_interfaces = cvmx_helper_get_number_of_interfaces();
/*
 * Tell L2 to give the IOB statically higher priority compared
 * to the cores. This avoids conditions where IO blocks might
 * be starved under very high L2 loads.
 */
1545 if (OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
1546 union cvmx_l2c_ctl l2c_ctl;
1548 l2c_ctl.u64 = csr_rd_node(node, CVMX_L2C_CTL);
1549 l2c_ctl.s.rsp_arb_mode = 1;
1550 l2c_ctl.s.xmc_arb_mode = 0;
1551 csr_wr_node(node, CVMX_L2C_CTL, l2c_ctl.u64);
1553 l2c_cfg.u64 = csr_rd(CVMX_L2C_CFG);
1554 l2c_cfg.s.lrf_arb_mode = 0;
1555 l2c_cfg.s.rfb_arb_mode = 0;
1556 csr_wr(CVMX_L2C_CFG, l2c_cfg.u64);
1562 /* Newer chips have more than one SMI/MDIO interface */
1563 if (OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CN78XX))
1565 else if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))
1570 for (i = 0; i < smi_inf; i++) {
1571 /* Make sure SMI/MDIO is enabled so we can query PHYs */
1572 smix_en.u64 = csr_rd_node(node, CVMX_SMIX_EN(i));
1573 if (!smix_en.s.en) {
1575 csr_wr_node(node, CVMX_SMIX_EN(i), smix_en.u64);
1579 //vinita_to_do ask it need to be modify for multinode
1580 __cvmx_helper_init_port_valid();
/* Probe every interface to discover modes and port counts */
1582 for (interface = 0; interface < num_interfaces; interface++) {
1583 xiface = cvmx_helper_node_interface_to_xiface(node, interface);
1584 result |= cvmx_helper_interface_probe(xiface);
1587 /* PKO3 init precedes that of interfaces */
1588 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1589 __cvmx_helper_init_port_config_data(node);
1590 result = cvmx_helper_pko3_init_global(node);
1592 result = cvmx_helper_pko_init();
1595 /* Errata SSO-29000, Disabling power saving SSO conditional clocking */
1596 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
1597 cvmx_sso_ws_cfg_t cfg;
1599 cfg.u64 = csr_rd_node(node, CVMX_SSO_WS_CFG);
1600 cfg.s.sso_cclk_dis = 1;
1601 csr_wr_node(node, CVMX_SSO_WS_CFG, cfg.u64);
/* Per-interface IPD/PKO setup */
1607 for (interface = 0; interface < num_interfaces; interface++) {
1608 xiface = cvmx_helper_node_interface_to_xiface(node, interface);
1609 /* Skip invalid/disabled interfaces */
1610 if (cvmx_helper_ports_on_interface(xiface) <= 0)
1612 printf("Node %d Interface %d has %d ports (%s)\n", node, interface,
1613 cvmx_helper_ports_on_interface(xiface),
1614 cvmx_helper_interface_mode_to_string(
1615 cvmx_helper_interface_get_mode(xiface)));
1617 result |= __cvmx_helper_ipd_setup_interface(xiface);
1618 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
1619 result |= cvmx_helper_pko3_init_interface(xiface);
1621 result |= __cvmx_helper_interface_setup_pko(interface);
1624 if (octeon_has_feature(OCTEON_FEATURE_PKI))
1625 result |= __cvmx_helper_pki_global_setup(node);
1627 result |= __cvmx_helper_ipd_global_setup();
1629 /* Enable any flow control and backpressure */
1630 result |= __cvmx_helper_global_setup_backpressure(node);
1632 /* export app config if set */
1633 if (cvmx_export_app_config)
1634 result |= (*cvmx_export_app_config)();
/* Optionally start packet input immediately when configured to do so */
1636 if (cvmx_ipd_cfg.ipd_enable && cvmx_pki_dflt_init[node])
1637 result |= cvmx_helper_ipd_and_packet_input_enable_node(node);
/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * Convenience wrapper that runs the node-level init on the local node.
 *
 * @return Zero on success, non-zero on failure
 */
1650 int cvmx_helper_initialize_packet_io_global(void)
1652 unsigned int node = cvmx_get_node_num();
1654 return cvmx_helper_initialize_packet_io_node(node);
/**
 * Does core local initialization for packet io
 *
 * @return Zero on success, non-zero on failure
 */
1662 int cvmx_helper_initialize_packet_io_local(void)
/* Only PKO3-style (CN78XX WQE) chips need per-core DQ table setup */
1664 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
1665 __cvmx_pko3_dq_table_setup();
/*
 * Singly-linked list node used to temporarily hold FPA pool-0 buffers
 * while IPD/PIP are reset during global packet-IO shutdown.
 */
1670 struct cvmx_buffer_list {
1671 struct cvmx_buffer_list *next;
/**
 * Disables the sending of flow control (pause) frames on the specified
 * GMX port(s).
 *
 * @param interface Which interface (0 or 1)
 * @param port_mask Mask (4bits) of which ports on the interface to disable
 *                  backpressure on.
 *                  1 => disable backpressure
 *                  0 => enable backpressure
 *
 * @return 0 on success
 */
1687 int cvmx_gmx_set_backpressure_override(u32 interface, uint32_t port_mask)
1689 union cvmx_gmxx_tx_ovr_bp gmxx_tx_ovr_bp;
1690 /* Check for valid arguments */
1691 if (port_mask & ~0xf || interface & ~0x1)
1693 if (interface >= CVMX_HELPER_MAX_GMX)
1696 gmxx_tx_ovr_bp.u64 = 0;
1697 gmxx_tx_ovr_bp.s.en = port_mask; /* Per port Enable back pressure override */
1698 gmxx_tx_ovr_bp.s.ign_full = port_mask; /* Ignore the RX FIFO full when computing BP */
1699 csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmxx_tx_ovr_bp.u64);
/**
 * Disables the sending of flow control (pause) frames on the specified
 * AGL (RGMII) port(s).
 *
 * @param interface Which interface (0 or 1)
 * @param port_mask Mask (4bits) of which ports on the interface to disable
 *                  backpressure on.
 *                  1 => disable backpressure
 *                  0 => enable backpressure
 *
 * @return 0 on success
 */
1716 int cvmx_agl_set_backpressure_override(u32 interface, uint32_t port_mask)
1718 union cvmx_agl_gmx_tx_ovr_bp agl_gmx_tx_ovr_bp;
/* Map the AGL interface number to its port number */
1719 int port = cvmx_helper_agl_get_port(interface);
1723 /* Check for valid arguments */
1724 agl_gmx_tx_ovr_bp.u64 = 0;
1725 /* Per port Enable back pressure override */
1726 agl_gmx_tx_ovr_bp.s.en = port_mask;
1727 /* Ignore the RX FIFO full when computing BP */
1728 agl_gmx_tx_ovr_bp.s.ign_full = port_mask;
/* NOTE(review): writes the GMX (not AGL) TX_OVR_BP CSR - confirm intended */
1729 csr_wr(CVMX_GMXX_TX_OVR_BP(port), agl_gmx_tx_ovr_bp.u64);
/**
 * Helper function for global packet IO shutdown on BGX-based (CN78XX
 * style) chips: stops ports, drains the SSO, and shuts down PKO3.
 *
 * @param node Node to shut down
 */
1736 int cvmx_helper_shutdown_packet_io_global_cn78xx(int node)
1738 int num_interfaces = cvmx_helper_get_number_of_interfaces();
1743 /* Shut down all interfaces and disable TX and RX on all ports */
1744 for (interface = 0; interface < num_interfaces; interface++) {
1745 int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
1747 int num_ports = cvmx_helper_ports_on_interface(xiface);
/* Drop any backpressure override before stopping the ports */
1752 cvmx_bgx_set_backpressure_override(xiface, 0);
1753 for (index = 0; index < num_ports; index++) {
1754 cvmx_helper_link_info_t link_info;
1756 if (!cvmx_helper_is_port_valid(xiface, index))
1759 cvmx_helper_bgx_shutdown_port(xiface, index);
1761 /* Turn off link LEDs */
1763 cvmx_helper_update_link_led(xiface, index, link_info);
1767 /* Stop input first */
1768 cvmx_helper_pki_shutdown(node);
1770 /* Retrieve all packets from the SSO and free them */
1772 while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
1773 cvmx_helper_free_pki_pkt_data(work);
1774 cvmx_wqe_pki_free(work);
1779 debug("%s: Purged %d packets from SSO\n", __func__, result);
/*
 * No need to wait for PKO queues to drain,
 * dq_close() drains the queues to NULL.
 */
1786 /* Shutdown PKO interfaces */
1787 for (interface = 0; interface < num_interfaces; interface++) {
1788 int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
1790 cvmx_helper_pko3_shut_interface(xiface);
1793 /* Disable MAC address filtering */
1794 for (interface = 0; interface < num_interfaces; interface++) {
1795 int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
1797 switch (cvmx_helper_interface_get_mode(xiface)) {
1798 case CVMX_HELPER_INTERFACE_MODE_XAUI:
1799 case CVMX_HELPER_INTERFACE_MODE_RXAUI:
1800 case CVMX_HELPER_INTERFACE_MODE_XLAUI:
1801 case CVMX_HELPER_INTERFACE_MODE_XFI:
1802 case CVMX_HELPER_INTERFACE_MODE_10G_KR:
1803 case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
1804 case CVMX_HELPER_INTERFACE_MODE_SGMII:
1805 case CVMX_HELPER_INTERFACE_MODE_MIXED: {
1807 int num_ports = cvmx_helper_ports_on_interface(xiface);
1809 for (index = 0; index < num_ports; index++) {
1810 if (!cvmx_helper_is_port_valid(xiface, index))
1813 /* Reset MAC filtering */
1814 cvmx_helper_bgx_rx_adr_ctl(node, interface, index, 0, 0, 0);
/* Turn off RX activity LEDs */
1823 for (interface = 0; interface < num_interfaces; interface++) {
1825 int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
1826 int num_ports = cvmx_helper_ports_on_interface(xiface);
1828 for (index = 0; index < num_ports; index++) {
1829 /* Doing this twice should clear it since no packets
1832 cvmx_update_rx_activity_led(xiface, index, false);
1833 cvmx_update_rx_activity_led(xiface, index, false);
1837 /* Shutdown the PKO unit */
1838 result = cvmx_helper_pko3_shutdown(node);
1840 /* Release interface structures */
1841 __cvmx_helper_shutdown_interfaces();
/**
 * Undo the initialization performed in
 * cvmx_helper_initialize_packet_io_global(). After calling this routine and the
 * local version on each core, packet IO for Octeon will be disabled and placed
 * in the initial reset state. It will then be safe to call the initialize
 * later on. Note that this routine does not empty the FPA pools. It frees all
 * buffers used by the packet IO hardware to the FPA so a function emptying the
 * FPA after shutdown should find all packet buffers in the FPA.
 *
 * @return Zero on success, negative on failure.
 */
1857 int cvmx_helper_shutdown_packet_io_global(void)
1859 const int timeout = 5; /* Wait up to 5 seconds for timeouts */
1861 int num_interfaces = cvmx_helper_get_number_of_interfaces();
1865 struct cvmx_buffer_list *pool0_buffers;
1866 struct cvmx_buffer_list *pool0_buffers_tail;
1868 union cvmx_ipd_ctl_status ipd_ctl_status;
1869 int wqe_pool = (int)cvmx_fpa_get_wqe_pool();
1870 int node = cvmx_get_node_num();
1871 cvmx_pcsx_mrx_control_reg_t control_reg;
/* BGX-based chips use the dedicated CN78XX-style shutdown path */
1873 if (octeon_has_feature(OCTEON_FEATURE_BGX))
1874 return cvmx_helper_shutdown_packet_io_global_cn78xx(node);
1876 /* Step 1: Disable all backpressure */
1877 for (interface = 0; interface < num_interfaces; interface++) {
1878 cvmx_helper_interface_mode_t mode =
1879 cvmx_helper_interface_get_mode(interface);
1881 if (mode == CVMX_HELPER_INTERFACE_MODE_AGL)
1882 cvmx_agl_set_backpressure_override(interface, 0x1);
1883 else if (mode != CVMX_HELPER_INTERFACE_MODE_DISABLED)
1884 cvmx_gmx_set_backpressure_override(interface, 0xf);
1887 /* Step 2: Wait for the PKO queues to drain */
1888 result = __cvmx_helper_pko_drain();
1890 debug("WARNING: %s: Failed to drain some PKO queues\n",
1894 /* Step 3: Disable TX and RX on all ports */
1895 for (interface = 0; interface < num_interfaces; interface++) {
1896 int xiface = cvmx_helper_node_interface_to_xiface(node,
1899 switch (cvmx_helper_interface_get_mode(interface)) {
1900 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
1901 case CVMX_HELPER_INTERFACE_MODE_PCIE:
1902 /* Not a packet interface */
1904 case CVMX_HELPER_INTERFACE_MODE_NPI:
1905 case CVMX_HELPER_INTERFACE_MODE_SRIO:
1906 case CVMX_HELPER_INTERFACE_MODE_ILK:
/*
 * We don't handle the NPI/NPEI/SRIO packet
 * engines. The caller must know these are
 * idle before shutdown.
 */
1913 case CVMX_HELPER_INTERFACE_MODE_LOOP:
/*
 * Nothing needed. Once PKO is idle, the
 * loopback devices must be idle.
 */
1919 case CVMX_HELPER_INTERFACE_MODE_SPI:
/*
 * SPI cannot be disabled from Octeon. It is
 * the responsibility of the caller to make
 * sure SPI is idle before doing shutdown.
 *
 * Fall through and do the same processing as
 * RGMII/GMII.
 */
1929 case CVMX_HELPER_INTERFACE_MODE_GMII:
1930 case CVMX_HELPER_INTERFACE_MODE_RGMII:
1931 /* Disable outermost RX at the ASX block */
1932 csr_wr(CVMX_ASXX_RX_PRT_EN(interface), 0);
1933 num_ports = cvmx_helper_ports_on_interface(xiface);
1936 for (index = 0; index < num_ports; index++) {
1937 union cvmx_gmxx_prtx_cfg gmx_cfg;
1939 if (!cvmx_helper_is_port_valid(interface, index))
1941 gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
1943 csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
1944 /* Poll the GMX state machine waiting for it to become idle */
1945 csr_wr(CVMX_NPI_DBG_SELECT,
1946 interface * 0x800 + index * 0x100 + 0x880);
1947 if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
1948 data & 7, ==, 0, timeout * 1000000)) {
1949 debug("GMX RX path timeout waiting for idle\n");
1952 if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
1953 data & 0xf, ==, 0, timeout * 1000000)) {
1954 debug("GMX TX path timeout waiting for idle\n");
1958 /* Disable outermost TX at the ASX block */
1959 csr_wr(CVMX_ASXX_TX_PRT_EN(interface), 0);
1960 /* Disable interrupts for interface */
1961 csr_wr(CVMX_ASXX_INT_EN(interface), 0);
1962 csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0);
1964 case CVMX_HELPER_INTERFACE_MODE_XAUI:
1965 case CVMX_HELPER_INTERFACE_MODE_RXAUI:
1966 case CVMX_HELPER_INTERFACE_MODE_SGMII:
1967 case CVMX_HELPER_INTERFACE_MODE_QSGMII:
1968 case CVMX_HELPER_INTERFACE_MODE_PICMG:
1969 num_ports = cvmx_helper_ports_on_interface(xiface);
1972 for (index = 0; index < num_ports; index++) {
1973 union cvmx_gmxx_prtx_cfg gmx_cfg;
1975 if (!cvmx_helper_is_port_valid(interface, index))
1977 gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
1979 csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
1980 if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
1981 union cvmx_gmxx_prtx_cfg, rx_idle, ==, 1,
1982 timeout * 1000000)) {
1983 debug("GMX RX path timeout waiting for idle\n");
1986 if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
1987 union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
1988 timeout * 1000000)) {
1989 debug("GMX TX path timeout waiting for idle\n");
/*
 * For SGMII some PHYs require that the PCS
 * interface be powered down and reset (i.e.
 * Atheros/Qualcomm PHYs).
 */
1996 if (cvmx_helper_interface_get_mode(interface) ==
1997 CVMX_HELPER_INTERFACE_MODE_SGMII) {
2000 reg = CVMX_PCSX_MRX_CONTROL_REG(index, interface);
2001 /* Power down the interface */
2002 control_reg.u64 = csr_rd(reg);
2003 control_reg.s.pwr_dn = 1;
2004 csr_wr(reg, control_reg.u64);
2009 case CVMX_HELPER_INTERFACE_MODE_AGL: {
2010 int port = cvmx_helper_agl_get_port(interface);
2011 union cvmx_agl_gmx_prtx_cfg agl_gmx_cfg;
2013 agl_gmx_cfg.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
2014 agl_gmx_cfg.s.en = 0;
2015 csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_cfg.u64);
2016 if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
2017 union cvmx_agl_gmx_prtx_cfg, rx_idle, ==, 1,
2018 timeout * 1000000)) {
2019 debug("AGL RX path timeout waiting for idle\n");
2022 if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
2023 union cvmx_agl_gmx_prtx_cfg, tx_idle, ==, 1,
2024 timeout * 1000000)) {
2025 debug("AGL TX path timeout waiting for idle\n");
2034 /* Step 4: Retrieve all packets from the POW and free them */
2035 while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
2036 cvmx_helper_free_packet_data(work);
2037 cvmx_fpa1_free(work, wqe_pool, 0);
/*
 * Step 6: Drain all prefetched buffers from IPD/PIP. Note that IPD/PIP
 * have not been reset yet
 */
2047 __cvmx_ipd_free_ptr();
2049 /* Step 7: Free the PKO command buffers and put PKO in reset */
2050 cvmx_pko_shutdown();
2052 /* Step 8: Disable MAC address filtering */
2053 for (interface = 0; interface < num_interfaces; interface++) {
2054 int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
2056 switch (cvmx_helper_interface_get_mode(interface)) {
2057 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
2058 case CVMX_HELPER_INTERFACE_MODE_PCIE:
2059 case CVMX_HELPER_INTERFACE_MODE_SRIO:
2060 case CVMX_HELPER_INTERFACE_MODE_ILK:
2061 case CVMX_HELPER_INTERFACE_MODE_NPI:
2062 case CVMX_HELPER_INTERFACE_MODE_LOOP:
2064 case CVMX_HELPER_INTERFACE_MODE_XAUI:
2065 case CVMX_HELPER_INTERFACE_MODE_RXAUI:
2066 case CVMX_HELPER_INTERFACE_MODE_GMII:
2067 case CVMX_HELPER_INTERFACE_MODE_RGMII:
2068 case CVMX_HELPER_INTERFACE_MODE_SPI:
2069 case CVMX_HELPER_INTERFACE_MODE_SGMII:
2070 case CVMX_HELPER_INTERFACE_MODE_QSGMII:
2071 case CVMX_HELPER_INTERFACE_MODE_PICMG:
2072 num_ports = cvmx_helper_ports_on_interface(xiface);
2075 for (index = 0; index < num_ports; index++) {
2076 if (!cvmx_helper_is_port_valid(interface, index))
/* Reset the address filter CAM and control registers */
2078 csr_wr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 1);
2079 csr_wr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
2080 csr_wr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 0);
2081 csr_wr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 0);
2082 csr_wr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 0);
2083 csr_wr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 0);
2084 csr_wr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 0);
2085 csr_wr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 0);
2088 case CVMX_HELPER_INTERFACE_MODE_AGL: {
2089 int port = cvmx_helper_agl_get_port(interface);
2091 csr_wr(CVMX_AGL_GMX_RXX_ADR_CTL(port), 1);
2092 csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
2093 csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), 0);
2094 csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), 0);
2095 csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), 0);
2096 csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), 0);
2097 csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), 0);
2098 csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), 0);
/*
 * Step 9: Drain all FPA buffers out of pool 0 before we reset
 * IPD/PIP. This is needed to keep IPD_QUE0_FREE_PAGE_CNT in
 * sync. We temporarily keep the buffers in the pool0_buffers
 * list.
 */
2111 pool0_buffers = NULL;
2112 pool0_buffers_tail = NULL;
2114 struct cvmx_buffer_list *buffer = cvmx_fpa1_alloc(0);
2117 buffer->next = NULL;
2120 pool0_buffers = buffer;
2122 pool0_buffers_tail->next = buffer;
2124 pool0_buffers_tail = buffer;
2130 /* Step 10: Reset IPD and PIP */
2131 ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
2132 ipd_ctl_status.s.reset = 1;
2133 csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
2135 /* Make sure IPD has finished reset. */
2136 if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
2137 if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS, union cvmx_ipd_ctl_status, rst_done,
2139 debug("IPD reset timeout waiting for idle\n");
2144 /* Step 11: Restore the FPA buffers into pool 0 */
2145 while (pool0_buffers) {
2146 struct cvmx_buffer_list *n = pool0_buffers->next;
2148 cvmx_fpa1_free(pool0_buffers, 0, 0);
2152 /* Step 12: Release interface structures */
2153 __cvmx_helper_shutdown_interfaces();
/**
 * Does core local shutdown of packet io
 *
 * @return Zero on success, non-zero on failure
 */
2163 int cvmx_helper_shutdown_packet_io_local(void)
/*
 * Currently there is nothing to do per core. This may change
 * in the future.
 */
/**
 * Auto configure an IPD/PKO port link state and speed. This
 * function basically does the equivalent of:
 * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
 *
 * @param xipd_port IPD/PKO port to auto configure
 *
 * @return Link state after configure
 */
2181 cvmx_helper_link_info_t cvmx_helper_link_autoconf(int xipd_port)
2183 cvmx_helper_link_info_t link_info;
2184 int xiface = cvmx_helper_get_interface_num(xipd_port);
2185 int index = cvmx_helper_get_interface_index_num(xipd_port);
2186 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2187 int interface = xi.interface;
/* Bail out on invalid interface or port index */
2189 if (interface == -1 || index == -1 || index >= cvmx_helper_ports_on_interface(xiface)) {
/* Nothing to do when the link state matches the cached state */
2194 link_info = cvmx_helper_link_get(xipd_port);
2195 if (link_info.u64 == (__cvmx_helper_get_link_info(xiface, index)).u64)
/* Silence ethernet error reporting while the link is down */
2198 if (!link_info.s.link_up)
2199 cvmx_error_disable_group(CVMX_ERROR_GROUP_ETHERNET, xipd_port);
2201 /* If we fail to set the link speed, port_link_info will not change */
2202 cvmx_helper_link_set(xipd_port, link_info);
2204 if (link_info.s.link_up)
2205 cvmx_error_enable_group(CVMX_ERROR_GROUP_ETHERNET, xipd_port);
/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param xipd_port IPD/PKO port to query
 *
 * @return Link state
 */
2220 cvmx_helper_link_info_t cvmx_helper_link_get(int xipd_port)
2222 cvmx_helper_link_info_t result;
2223 int xiface = cvmx_helper_get_interface_num(xipd_port);
2224 int index = cvmx_helper_get_interface_index_num(xipd_port);
2225 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2226 struct cvmx_fdt_sfp_info *sfp_info;
/*
 * The default result will be a down link unless the code
 * below determines otherwise; invalid ports return early.
 */
2234 if (__cvmx_helper_xiface_is_null(xiface) || index == -1 ||
2235 index >= cvmx_helper_ports_on_interface(xiface)) {
/* Delegate to the per-interface link_get method if present */
2239 if (iface_node_ops[xi.node][xi.interface]->link_get)
2240 result = iface_node_ops[xi.node][xi.interface]->link_get(xipd_port);
2242 if (xipd_port >= 0) {
2243 cvmx_helper_update_link_led(xiface, index, result);
/* Walk the SFP chain and re-check module absence when relevant */
2245 sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
2248 if ((!result.s.link_up || (result.s.link_up && sfp_info->last_mod_abs)))
2249 cvmx_sfp_check_mod_abs(sfp_info, sfp_info->mod_abs_data);
2250 sfp_info = sfp_info->next_iface_sfp;
/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param xipd_port IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
2269 int cvmx_helper_link_set(int xipd_port, cvmx_helper_link_info_t link_info)
2272 int xiface = cvmx_helper_get_interface_num(xipd_port);
2273 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2274 int index = cvmx_helper_get_interface_index_num(xipd_port);
/* Bail out on invalid interface or port index */
2276 if (__cvmx_helper_xiface_is_null(xiface) || index == -1 ||
2277 index >= cvmx_helper_ports_on_interface(xiface))
2280 if (iface_node_ops[xi.node][xi.interface]->link_set)
2281 result = iface_node_ops[xi.node][xi.interface]->link_set(xipd_port, link_info);
/*
 * Set the port_link_info here so that the link status is
 * updated no matter how cvmx_helper_link_set is called. We
 * don't change the value if link_set failed.
 */
2289 __cvmx_helper_set_link_info(xiface, index, link_info);
2294 * Configure a port for internal and/or external loopback. Internal loopback
2295 * causes packets sent by the port to be received by Octeon. External loopback
2296 * causes packets received from the wire to be sent out again.
2298 * @param xipd_port IPD/PKO port to loopback.
2299 * @param enable_internal
2300 * Non zero if you want internal loopback
2301 * @param enable_external
2302 * Non zero if you want external loopback
2304 * @return Zero on success, negative on failure.
/*
 * Configure internal and/or external loopback on an IPD/PKO port by
 * dispatching to the owning interface's loopback method (if provided).
 * NOTE(review): line-sampled excerpt — the opening brace, the 'result'
 * declaration, the early-return for the range check and the final
 * return statement are not visible here.
 */
2306 int cvmx_helper_configure_loopback(int xipd_port, int enable_internal, int enable_external)
2309 int xiface = cvmx_helper_get_interface_num(xipd_port);
2310 struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2311 int index = cvmx_helper_get_interface_index_num(xipd_port);
/* Out-of-range port index: bail out (return path not shown here). */
2313 if (index >= cvmx_helper_ports_on_interface(xiface))
/* Result appears unused on this line; presumably the mode feeds a
 * check on the sampled-out lines — verify against the full source. */
2316 cvmx_helper_interface_get_mode(xiface);
2317 if (iface_node_ops[xi.node][xi.interface]->loopback)
2318 result = iface_node_ops[xi.node][xi.interface]->loopback(xipd_port, enable_internal,
/*
 * Set reduced packet/PKO buffer counts for simulator runs.
 * On PKI-capable chips (OCTEON III) this sizes the default PKI pool
 * and aura; otherwise the legacy IPD packet/WQE pools and the PKO
 * command-queue pool are sized (the 'else' joining the two paths is
 * among the lines sampled out of this excerpt).
 */
2324 void cvmx_helper_setup_simulator_io_buffer_counts(int node, int num_packet_buffers, int pko_buffers)
2326 if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
2327 cvmx_helper_pki_set_dflt_pool_buffer(node, num_packet_buffers);
2328 cvmx_helper_pki_set_dflt_aura_buffer(node, num_packet_buffers);
/* Legacy (pre-PKI) path: */
2331 cvmx_ipd_set_packet_pool_buffer_count(num_packet_buffers);
2332 cvmx_ipd_set_wqe_pool_buffer_count(num_packet_buffers);
2333 cvmx_pko_set_cmd_queue_pool_buffer_count(pko_buffers);
/*
 * Allocate bootmem physical memory on the given node and return a
 * usable pointer (cvmx_phys_to_ptr). The range passed to the
 * allocator is clamped to the node's address window via
 * cvmx_addr_on_node(). Prints an error on failure.
 * NOTE(review): the 'paddr' declaration and the failure return (likely
 * NULL) are on lines sampled out of this excerpt.
 */
2337 void *cvmx_helper_mem_alloc(int node, uint64_t alloc_size, uint64_t align)
2341 paddr = cvmx_bootmem_phy_alloc_range(alloc_size, align, cvmx_addr_on_node(node, 0ull),
2342 cvmx_addr_on_node(node, 0xffffffffff));
2344 printf("ERROR: %s failed size %u\n", __func__, (unsigned int)alloc_size);
2347 return cvmx_phys_to_ptr(paddr);
/*
 * Free a buffer previously obtained from cvmx_helper_mem_alloc().
 * The caller must pass the same size used at allocation time; the
 * pointer is converted back to its physical address for the bootmem
 * free routine.
 */
2350 void cvmx_helper_mem_free(void *buffer, uint64_t size)
2352 __cvmx_bootmem_phy_free(cvmx_ptr_to_phys(buffer), size, 0);
/*
 * Initialize a QOS configuration structure with sane defaults for the
 * given flow-control protocol (e.g. PFC). Zeroes the structure first,
 * then fills scalar defaults and one entry per QOS channel (groups,
 * priorities and Aura percentage thresholds).
 * NOTE(review): the loop-index declaration, closing braces and return
 * value are on lines sampled out of this excerpt.
 */
2355 int cvmx_helper_qos_config_init(cvmx_qos_proto_t qos_proto, cvmx_qos_config_t *qos_cfg)
2359 memset(qos_cfg, 0, sizeof(cvmx_qos_config_t));
2360 qos_cfg->pkt_mode = CVMX_QOS_PKT_MODE_HWONLY; /* Process PAUSEs in hardware only.*/
2361 qos_cfg->pool_mode = CVMX_QOS_POOL_PER_PORT; /* One Pool per BGX:LMAC.*/
2362 qos_cfg->pktbuf_size = 2048; /* Fit WQE + MTU in one buffer.*/
2363 qos_cfg->aura_size = 1024; /* 1K buffers typically enough for any application.*/
2364 qos_cfg->pko_pfc_en = 1; /* Enable PKO layout for PFC feature. */
2365 qos_cfg->vlan_num = 1; /* For Stacked VLAN, use 2nd VLAN in the QPG algorithm.*/
2366 qos_cfg->qos_proto = qos_proto; /* Use PFC flow-control protocol.*/
2367 qos_cfg->qpg_base = -1; /* QPG Table index is undefined.*/
2368 qos_cfg->p_time = 0x60; /* PAUSE packets time window.*/
2369 qos_cfg->p_interval = 0x10; /* PAUSE packets interval.*/
/* Per-channel defaults; thresholds are percentages of the Aura size. */
2370 for (i = 0; i < CVMX_QOS_NUM; i++) {
2371 qos_cfg->groups[i] = i; /* SSO Groups = 0...7 */
2372 qos_cfg->group_prio[i] = i; /* SSO Group priority = QOS. */
2373 qos_cfg->drop_thresh[i] = 99; /* 99% of the Aura size.*/
2374 qos_cfg->red_thresh[i] = 90; /* 90% of the Aura size.*/
2375 qos_cfg->bp_thresh[i] = 70; /* 70% of the Aura size.*/
/*
 * Update the static PKO queue configuration for the interface owning
 * the given port so its send-queue layout supports PFC: read the
 * per-node config, set pfc_enable on the port's interface, write it
 * back.
 * NOTE(review): qos_cfg is not referenced on the visible lines; the
 * braces and return statement are sampled out of this excerpt.
 */
2380 int cvmx_helper_qos_port_config_update(int xipdport, cvmx_qos_config_t *qos_cfg)
2382 cvmx_user_static_pko_queue_config_t pkocfg;
2383 cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
2384 int xiface = cvmx_helper_get_interface_num(xipdport);
2385 cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
2387 /* Configure PKO port for PFC SQ layout: */
2388 cvmx_helper_pko_queue_config_get(xp.node, &pkocfg);
2389 pkocfg.pknd.pko_cfg_iface[xi.interface].pfc_enable = 1;
2390 cvmx_helper_pko_queue_config_set(xp.node, &pkocfg);
/*
 * Set up hardware QOS for one BGX port: allocate an FPA3 packet pool
 * (one pool per port in CVMX_QOS_POOL_PER_PORT mode), then per
 * channel an Aura with RED/drop/backpressure thresholds, a BPID
 * linking Aura and channel, and a QPG entry; configure a PKI style
 * keyed on VLAN priority, point the port's PKIND at it, program the
 * PAUSE/PFC packet parameters in the BGX, and un-override channel
 * backpressure for this LMAC.
 * NOTE(review): this excerpt is line-sampled — the 'name' and 'node'
 * declarations, the assignments to bpen/reden/dropen, several error
 * returns/closing braces, and the final return are not visible here.
 * Also 'channles' is a pre-existing spelling of "channels" used
 * consistently in this function.
 */
2394 int cvmx_helper_qos_port_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
2396 const int channles = CVMX_QOS_NUM;
2397 int bufsize = qos_cfg->pktbuf_size;
2398 int aura_size = qos_cfg->aura_size;
2399 cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
2401 int ipdport = xp.port;
2402 int port = cvmx_helper_get_interface_index_num(xp.port);
2403 int xiface = cvmx_helper_get_interface_num(xipdport);
2404 cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
2405 cvmx_fpa3_pool_t gpool;
2406 cvmx_fpa3_gaura_t gaura;
2407 cvmx_bgxx_cmr_rx_ovr_bp_t ovrbp;
2408 struct cvmx_pki_qpg_config qpgcfg;
2409 struct cvmx_pki_style_config stcfg, stcfg_dflt;
2410 struct cvmx_pki_pkind_config pkcfg;
2411 int chan, bpid, group, qpg;
2412 int bpen, reden, dropen, passthr, dropthr, bpthr;
2413 int nbufs, pkind, style;
2416 if (qos_cfg->pool_mode == CVMX_QOS_POOL_PER_PORT) {
2417 /* Allocate and setup packet Pool: */
2418 nbufs = aura_size * channles;
2419 sprintf(name, "QOS.P%d", ipdport);
2420 gpool = cvmx_fpa3_setup_fill_pool(node, -1 /*auto*/, name, bufsize, nbufs, NULL);
2421 if (!__cvmx_fpa3_pool_valid(gpool)) {
2422 printf("%s: Failed to setup FPA Pool\n", __func__);
/* All channels of this port share the one pool. */
2425 for (chan = 0; chan < channles; chan++)
2426 qos_cfg->gpools[chan] = gpool;
2428 printf("%s: Invalid pool_mode %d\n", __func__, qos_cfg->pool_mode);
2431 /* Allocate QPG entries: */
2432 qos_cfg->qpg_base = cvmx_pki_qpg_entry_alloc(node, -1 /*auto*/, channles);
2433 if (qos_cfg->qpg_base < 0) {
2434 printf("%s: Failed to allocate QPG entry\n", __func__);
2437 for (chan = 0; chan < channles; chan++) {
2438 /* Allocate and setup Aura, setup BP threshold: */
2439 gpool = qos_cfg->gpools[chan];
2440 sprintf(name, "QOS.A%d", ipdport + chan);
2441 gaura = cvmx_fpa3_set_aura_for_pool(gpool, -1 /*auto*/, name, bufsize, aura_size);
2442 if (!__cvmx_fpa3_aura_valid(gaura)) {
2443 printf("%s: Failed to setup FPA Aura for Channel %d\n", __func__, chan);
2446 qos_cfg->gauras[chan] = gaura;
/* Convert percentage thresholds to absolute buffer counts
 * (x% of aura_size; *10/1000 == /100 without float math). */
2450 dropthr = (qos_cfg->drop_thresh[chan] * 10 * aura_size) / 1000;
2451 passthr = (qos_cfg->red_thresh[chan] * 10 * aura_size) / 1000;
2452 bpthr = (qos_cfg->bp_thresh[chan] * 10 * aura_size) / 1000;
2453 cvmx_fpa3_setup_aura_qos(gaura, reden, passthr, dropthr, bpen, bpthr);
2454 cvmx_pki_enable_aura_qos(node, gaura.laura, reden, dropen, bpen);
2456 /* Allocate BPID, link Aura and Channel using BPID: */
2457 bpid = cvmx_pki_bpid_alloc(node, -1 /*auto*/);
2459 printf("%s: Failed to allocate BPID for channel %d\n",
2463 qos_cfg->bpids[chan] = bpid;
2464 cvmx_pki_write_aura_bpid(node, gaura.laura, bpid);
2465 cvmx_pki_write_channel_bpid(node, ipdport + chan, bpid);
2467 /* Setup QPG entries: */
2468 group = qos_cfg->groups[chan];
2469 qpg = qos_cfg->qpg_base + chan;
2470 cvmx_pki_read_qpg_entry(node, qpg, &qpgcfg);
2471 qpgcfg.port_add = chan;
2472 qpgcfg.aura_num = gaura.laura;
2473 qpgcfg.grp_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
2474 qpgcfg.grp_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
2475 qpgcfg.grptag_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
2476 qpgcfg.grptag_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
2477 cvmx_pki_write_qpg_entry(node, qpg, &qpgcfg);
2479 /* Allocate and setup STYLE: */
2480 cvmx_helper_pki_get_dflt_style(node, &stcfg_dflt);
2481 style = cvmx_pki_style_alloc(node, -1 /*auto*/);
2482 cvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
2483 stcfg.tag_cfg = stcfg_dflt.tag_cfg;
2484 stcfg.parm_cfg.tag_type = CVMX_POW_TAG_TYPE_ORDERED;
2485 stcfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_VLAN;
2486 stcfg.parm_cfg.qpg_base = qos_cfg->qpg_base;
2487 stcfg.parm_cfg.qpg_port_msb = 0;
2488 stcfg.parm_cfg.qpg_port_sh = 0;
2489 stcfg.parm_cfg.qpg_dis_grptag = 1;
2490 stcfg.parm_cfg.fcs_strip = 1;
2491 stcfg.parm_cfg.mbuff_size = bufsize - 64; /* Do not use 100% of the buffer. */
2492 stcfg.parm_cfg.force_drop = 0;
2493 stcfg.parm_cfg.nodrop = 0;
2494 stcfg.parm_cfg.rawdrp = 0;
2495 stcfg.parm_cfg.cache_mode = 2; /* 1st buffer in L2 */
2496 stcfg.parm_cfg.wqe_vs = qos_cfg->vlan_num;
2497 cvmx_pki_write_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
/* Point the port's PKIND at the new style: */
2500 pkind = cvmx_helper_get_pknd(xiface, port);
2501 cvmx_pki_read_pkind_config(node, pkind, &pkcfg);
2502 pkcfg.cluster_grp = 0; /* OCTEON3 has only one cluster group = 0 */
2503 pkcfg.initial_style = style;
2504 pkcfg.initial_parse_mode = CVMX_PKI_PARSE_LA_TO_LG;
2505 cvmx_pki_write_pkind_config(node, pkind, &pkcfg);
2507 /* Setup parameters of the QOS packet and enable QOS flow-control: */
/* 0x0180c2000001 = IEEE 802.3x PAUSE multicast DA, 0x8808 = MAC
 * control EtherType. */
2508 cvmx_bgx_set_pause_pkt_param(xipdport, 0, 0x0180c2000001, 0x8808, qos_cfg->p_time,
2509 qos_cfg->p_interval);
2510 cvmx_bgx_set_flowctl_mode(xipdport, qos_cfg->qos_proto, qos_cfg->pkt_mode);
2512 /* Enable PKI channel backpressure in the BGX: */
2513 ovrbp.u64 = csr_rd_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface));
2514 ovrbp.s.en &= ~(1 << port);
2515 ovrbp.s.ign_fifo_bp &= ~(1 << port);
2516 csr_wr_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface), ovrbp.u64);
/*
 * Program SSO group priorities for the QOS channels of a port: for
 * each channel, map channel -> QOS priority -> SSO group, then write
 * the configured priority into CVMX_SSO_GRPX_PRI for that group.
 * NOTE(review): the 'node' assignment (presumably xp.node), braces and
 * return statement are on lines sampled out of this excerpt.
 */
2520 int cvmx_helper_qos_sso_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
2522 const int channels = CVMX_QOS_NUM;
2523 cvmx_sso_grpx_pri_t grppri;
2524 int chan, qos, group;
2525 cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
2528 for (chan = 0; chan < channels; chan++) {
2529 qos = cvmx_helper_qos2prio(chan);
2530 group = qos_cfg->groups[qos];
/* Read-modify-write so other fields of the register are kept. */
2531 grppri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));
2532 grppri.s.pri = qos_cfg->group_prio[chan];
2533 csr_wr_node(node, CVMX_SSO_GRPX_PRI(group), grppri.u64);
/*
 * Render a human-readable name for a PKO3 channel-E number into
 * namebuf: LBK (0..63), DPI (0x100..), NQM (0x200), SRIO (0x240..),
 * ILK (0x400..) or BGX (0x800.. decoded as bgx:lmac:channel), with
 * "--" for anything unrecognized. Out-of-range channels are rejected
 * with an error message.
 * NOTE(review): the 'n'/'dpichans' declarations, the DPI channel count
 * chosen for CN78XX vs other models, and the return statement are on
 * lines sampled out of this excerpt; the truncation handling of the
 * snprintf result is likewise not visible here.
 */
2538 int cvmx_helper_get_chan_e_name(int chan, char *namebuf, int buflen)
/* Unsigned compare rejects negative values in one test. */
2542 if ((unsigned int)chan >= CVMX_PKO3_IPD_NUM_MAX) {
2543 printf("%s: Channel %d is out of range (0..4095)\n", __func__, chan);
2546 if (OCTEON_IS_MODEL(OCTEON_CN78XX))
2551 if (chan >= 0 && chan < 64)
2552 n = snprintf(namebuf, buflen, "LBK%d", chan);
2553 else if (chan >= 0x100 && chan < (0x100 + dpichans))
2554 n = snprintf(namebuf, buflen, "DPI%d", chan - 0x100);
2555 else if (chan == 0x200)
2556 n = snprintf(namebuf, buflen, "NQM");
2557 else if (chan >= 0x240 && chan < (0x240 + (1 << 1) + 2))
2558 n = snprintf(namebuf, buflen, "SRIO%d:%d", (chan - 0x240) >> 1,
2559 (chan - 0x240) & 0x1);
2560 else if (chan >= 0x400 && chan < (0x400 + (1 << 8) + 256))
2561 n = snprintf(namebuf, buflen, "ILK%d:%d", (chan - 0x400) >> 8,
2562 (chan - 0x400) & 0xFF);
2563 else if (chan >= 0x800 && chan < (0x800 + (5 << 8) + (3 << 4) + 16))
2564 n = snprintf(namebuf, buflen, "BGX%d:%d:%d", (chan - 0x800) >> 8,
2565 ((chan - 0x800) >> 4) & 0x3, (chan - 0x800) & 0xF);
2567 n = snprintf(namebuf, buflen, "--");
2571 #ifdef CVMX_DUMP_DIAGNOSTICS
2572 void cvmx_helper_dump_for_diagnostics(int node)
2574 if (!(OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))) {
2575 printf("Diagnostics are not implemented for this model\n");
2578 #ifdef CVMX_DUMP_GSER
2582 num_qlms = cvmx_qlm_get_num();
2583 for (qlm = 0; qlm < num_qlms; qlm++) {
2584 cvmx_dump_gser_config_node(node, qlm);
2585 cvmx_dump_gser_status_node(node, qlm);
2589 #ifdef CVMX_DUMP_BGX
2593 for (bgx = 0; bgx < CVMX_HELPER_MAX_GMX; bgx++) {
2594 cvmx_dump_bgx_config_node(node, bgx);
2595 cvmx_dump_bgx_status_node(node, bgx);
2599 #ifdef CVMX_DUMP_PKI
2600 cvmx_pki_config_dump(node);
2601 cvmx_pki_stats_dump(node);
2603 #ifdef CVMX_DUMP_PKO
2604 cvmx_helper_pko3_config_dump(node);
2605 cvmx_helper_pko3_stats_dump(node);
2607 #ifdef CVMX_DUMO_SSO
2608 cvmx_sso_config_dump(node);