/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * https://spdx.org/licenses
 *
 * Configuration and status register (CSR) address and type definitions for
 * CGX.
 *
 * This file is auto generated. Do not edit.
 */
/**
 * Enumeration cgx_bar_e
 *
 * CGX Base Address Register Enumeration Enumerates the base address
 * registers.
 */
#define CGX_BAR_E_CGXX_PF_BAR0(a) (0x87e0e0000000ll + 0x1000000ll * (a))
#define CGX_BAR_E_CGXX_PF_BAR0_SIZE 0x100000ull
#define CGX_BAR_E_CGXX_PF_BAR4(a) (0x87e0e0400000ll + 0x1000000ll * (a))
#define CGX_BAR_E_CGXX_PF_BAR4_SIZE 0x100000ull
/**
 * Enumeration cgx_int_vec_e
 *
 * CGX MSI-X Vector Enumeration Enumerates the MSI-X interrupt vectors.
 */
#define CGX_INT_VEC_E_CMRX_INT(a) (0 + 9 * (a))
#define CGX_INT_VEC_E_CMRX_SW(a) (0x26 + (a))
#define CGX_INT_VEC_E_CMR_MEM_INT (0x24)
#define CGX_INT_VEC_E_GMPX_GMI_RX_INT(a) (5 + 9 * (a))
#define CGX_INT_VEC_E_GMPX_GMI_TX_INT(a) (6 + 9 * (a))
#define CGX_INT_VEC_E_GMPX_GMI_WOL_INT(a) (7 + 9 * (a))
#define CGX_INT_VEC_E_GMPX_PCS_INT(a) (4 + 9 * (a))
#define CGX_INT_VEC_E_SMUX_RX_INT(a) (2 + 9 * (a))
#define CGX_INT_VEC_E_SMUX_RX_WOL_INT(a) (8 + 9 * (a))
#define CGX_INT_VEC_E_SMUX_TX_INT(a) (3 + 9 * (a))
#define CGX_INT_VEC_E_SPUX_INT(a) (1 + 9 * (a))
#define CGX_INT_VEC_E_SW (0x25)
/**
 * Enumeration cgx_lmac_types_e
 *
 * CGX LMAC Type Enumeration Enumerates the LMAC Types that CGX supports.
 */
#define CGX_LMAC_TYPES_E_FIFTYG_R (8)
#define CGX_LMAC_TYPES_E_FORTYG_R (4)
#define CGX_LMAC_TYPES_E_HUNDREDG_R (9)
#define CGX_LMAC_TYPES_E_QSGMII (6)
#define CGX_LMAC_TYPES_E_RGMII (5)
#define CGX_LMAC_TYPES_E_RXAUI (2)
#define CGX_LMAC_TYPES_E_SGMII (0)
#define CGX_LMAC_TYPES_E_TENG_R (3)
#define CGX_LMAC_TYPES_E_TWENTYFIVEG_R (7)
#define CGX_LMAC_TYPES_E_USXGMII (0xa)
#define CGX_LMAC_TYPES_E_XAUI (1)
/**
 * Enumeration cgx_opcode_e
 *
 * INTERNAL: CGX Error Opcode Enumeration Enumerates the error opcodes
 * created by CGX and presented to NCSI/NIX.
 */
#define CGX_OPCODE_E_RE_FCS (7)
#define CGX_OPCODE_E_RE_FCS_RCV (8)
#define CGX_OPCODE_E_RE_JABBER (2)
#define CGX_OPCODE_E_RE_NONE (0)
#define CGX_OPCODE_E_RE_PARTIAL (1)
#define CGX_OPCODE_E_RE_RX_CTL (0xb)
#define CGX_OPCODE_E_RE_SKIP (0xc)
#define CGX_OPCODE_E_RE_TERMINATE (9)
/**
 * Enumeration cgx_spu_br_train_cst_e
 *
 * INTERNAL: CGX Training Coefficient Status Enumeration 2-bit status
 * for each coefficient as defined in IEEE 802.3, Table 72-5.
 */
#define CGX_SPU_BR_TRAIN_CST_E_MAXIMUM (3)
#define CGX_SPU_BR_TRAIN_CST_E_MINIMUM (2)
#define CGX_SPU_BR_TRAIN_CST_E_NOT_UPDATED (0)
#define CGX_SPU_BR_TRAIN_CST_E_UPDATED (1)
/**
 * Enumeration cgx_spu_br_train_cup_e
 *
 * INTERNAL: CGX Training Coefficient Enumeration 2-bit command for each
 * coefficient as defined in IEEE 802.3, Table 72-4.
 */
#define CGX_SPU_BR_TRAIN_CUP_E_DECREMENT (1)
#define CGX_SPU_BR_TRAIN_CUP_E_HOLD (0)
#define CGX_SPU_BR_TRAIN_CUP_E_INCREMENT (2)
#define CGX_SPU_BR_TRAIN_CUP_E_RSV_CMD (3)
/**
 * Enumeration cgx_usxgmii_rate_e
 *
 * CGX USXGMII Rate Enumeration Enumerates the USXGMII sub-port type
 * rate, CGX()_SPU()_CONTROL1[USXGMII_RATE]. Selecting a rate higher
 * than the maximum allowed for a given port sub-type (specified by
 * CGX()_SPU()_CONTROL1[USXGMII_TYPE]), e.g., selecting ::RATE_2HG (2.5
 * Gbps) for CGX_USXGMII_TYPE_E::SXGMII_2G, will cause unpredictable
 * behavior. USXGMII hardware-based autonegotiation may change this
 * field.
 */
#define CGX_USXGMII_RATE_E_RATE_100M (1)
#define CGX_USXGMII_RATE_E_RATE_10G (5)
#define CGX_USXGMII_RATE_E_RATE_10M (0)
#define CGX_USXGMII_RATE_E_RATE_1G (2)
#define CGX_USXGMII_RATE_E_RATE_20G (6)
#define CGX_USXGMII_RATE_E_RATE_2HG (3)
#define CGX_USXGMII_RATE_E_RATE_5G (4)
#define CGX_USXGMII_RATE_E_RSV_RATE (7)
/**
 * Enumeration cgx_usxgmii_type_e
 *
 * CGX USXGMII Port Sub-Type Enumeration Enumerates the USXGMII sub-port
 * type, CGX()_SPU()_CONTROL1[USXGMII_TYPE]. The description indicates
 * the maximum rate and the maximum number of ports (LMACs) for each sub-
 * type. The minimum rate for any port is 10M. The rate selection for
 * each LMAC is made using CGX()_SPU()_CONTROL1[USXGMII_RATE] and the
 * number of active ports/LMACs is implicitly determined by the value
 * given to CGX()_CMR()_CONFIG[ENABLE] for each LMAC. Selecting a rate
 * higher than the maximum allowed for a given port sub-type or enabling
 * more LMACs than the maximum allowed for a given port sub-type will
 * cause unpredictable behavior.
 */
#define CGX_USXGMII_TYPE_E_DXGMII_10G (3)
#define CGX_USXGMII_TYPE_E_DXGMII_20G (5)
#define CGX_USXGMII_TYPE_E_DXGMII_5G (4)
#define CGX_USXGMII_TYPE_E_QXGMII_10G (7)
#define CGX_USXGMII_TYPE_E_QXGMII_20G (6)
#define CGX_USXGMII_TYPE_E_SXGMII_10G (0)
#define CGX_USXGMII_TYPE_E_SXGMII_2G (2)
#define CGX_USXGMII_TYPE_E_SXGMII_5G (1)
147 * Structure cgx_spu_br_lane_train_status_s
149 * INTERNAL:CGX Lane Training Status Structure This is the group of lane
150 * status bits for a single lane in the BASE-R PMD status register (MDIO
151 * address 1.151) as defined in IEEE 802.3ba-2010, Table 45-55.
153 union cgx_spu_br_lane_train_status_s {
155 struct cgx_spu_br_lane_train_status_s_s {
159 u32 training_failure : 1;
160 u32 reserved_4_31 : 28;
162 /* struct cgx_spu_br_lane_train_status_s_s cn; */
166 * Structure cgx_spu_br_train_cup_s
168 * INTERNAL:CGX Lane Training Coefficient Structure This is the
169 * coefficient update field of the BASE-R link training packet as defined
170 * in IEEE 802.3, Table 72-4.
172 union cgx_spu_br_train_cup_s {
174 struct cgx_spu_br_train_cup_s_s {
178 u32 reserved_6_11 : 6;
181 u32 reserved_14_31 : 18;
183 struct cgx_spu_br_train_cup_s_cn {
187 u32 reserved_6_11 : 6;
190 u32 reserved_14_15 : 2;
191 u32 reserved_16_31 : 16;
196 * Structure cgx_spu_br_train_rep_s
198 * INTERNAL:CGX Training Report Structure This is the status report
199 * field of the BASE-R link training packet as defined in IEEE 802.3,
202 union cgx_spu_br_train_rep_s {
204 struct cgx_spu_br_train_rep_s_s {
208 u32 reserved_6_14 : 9;
210 u32 reserved_16_31 : 16;
212 /* struct cgx_spu_br_train_rep_s_s cn; */
216 * Structure cgx_spu_sds_cu_s
218 * INTERNAL: CGX Training Coeffiecient Structure This structure is
219 * similar to CGX_SPU_BR_TRAIN_CUP_S format, but with reserved fields
220 * removed and [RCVR_READY] field added.
222 union cgx_spu_sds_cu_s {
224 struct cgx_spu_sds_cu_s_s {
231 u32 reserved_9_31 : 23;
233 /* struct cgx_spu_sds_cu_s_s cn; */
237 * Structure cgx_spu_sds_skew_status_s
239 * CGX Skew Status Structure Provides receive skew information detected
240 * for a physical SerDes lane when it is assigned to a multilane
241 * LMAC/LPCS. Contents are valid when RX deskew is done for the
242 * associated LMAC/LPCS.
244 union cgx_spu_sds_skew_status_s {
246 struct cgx_spu_sds_skew_status_s_s {
247 u32 am_timestamp : 12;
248 u32 reserved_12_15 : 4;
250 u32 reserved_21_22 : 2;
252 u32 reserved_30_31 : 2;
254 /* struct cgx_spu_sds_skew_status_s_s cn; */
258 * Structure cgx_spu_sds_sr_s
260 * INTERNAL: CGX Lane Training Coefficient Structure Similar to
261 * CGX_SPU_BR_TRAIN_REP_S format, but with reserved and RX ready fields
264 union cgx_spu_sds_sr_s {
266 struct cgx_spu_sds_sr_s_s {
270 u32 reserved_6_31 : 26;
272 /* struct cgx_spu_sds_sr_s_s cn; */
276 * Register (RSL) cgx#_active_pc
278 * CGX ACTIVE PC Register This register counts the conditional clocks for
281 union cgxx_active_pc {
283 struct cgxx_active_pc_s {
286 /* struct cgxx_active_pc_s cn; */
289 static inline u64 CGXX_ACTIVE_PC(void)
290 __attribute__ ((pure, always_inline));
291 static inline u64 CGXX_ACTIVE_PC(void)
297 * Register (RSL) cgx#_cmr#_activity
299 * CGX CMR Activity Registers
301 union cgxx_cmrx_activity {
303 struct cgxx_cmrx_activity_s {
310 u64 reserved_6_63 : 58;
312 /* struct cgxx_cmrx_activity_s cn; */
315 static inline u64 CGXX_CMRX_ACTIVITY(u64 a)
316 __attribute__ ((pure, always_inline));
317 static inline u64 CGXX_CMRX_ACTIVITY(u64 a)
319 return 0x5f8 + 0x40000 * a;
323 * Register (RSL) cgx#_cmr#_config
325 * CGX CMR Configuration Registers Logical MAC/PCS configuration
326 * registers; one per LMAC. The maximum number of LMACs (and maximum LMAC
327 * ID) that can be enabled by these registers is limited by
328 * CGX()_CMR_RX_LMACS[LMACS] and CGX()_CMR_TX_LMACS[LMACS]. Internal:
329 * \<pre\> Example configurations: ------------------------------------
330 * --------------------------------------- Configuration
331 * LMACS Register [ENABLE] [LMAC_TYPE] ----------------
332 * -----------------------------------------------------------
333 * 1x50G+1x25G+1xSGMII 4 CGXn_CMR0_CONFIG 1 8
334 * CGXn_CMR1_CONFIG 0 --
335 * CGXn_CMR2_CONFIG 1 7
336 * CGXn_CMR3_CONFIG 1 0 ---------------------------------
337 * ------------------------------------------ USXGMII
338 * 1-4 CGXn_CMR0_CONFIG 1 a
339 * CGXn_CMR1_CONFIG 1 a
340 * CGXn_CMR2_CONFIG 1 a
341 * CGXn_CMR3_CONFIG 1 a ---------------------------------
342 * ------------------------------------------ 1x100GBASE-R4 1
343 * CGXn_CMR0_CONFIG 1 9
344 * CGXn_CMR1_CONFIG 0 --
345 * CGXn_CMR2_CONFIG 0 --
346 * CGXn_CMR3_CONFIG 0 -- --------------------------------
347 * ------------------------------------------- 2x50GBASE-R2
348 * 2 CGXn_CMR0_CONFIG 1 8
349 * CGXn_CMR1_CONFIG 1 8
350 * CGXn_CMR2_CONFIG 0 --
351 * CGXn_CMR3_CONFIG 0 -- --------------------------------
352 * ------------------------------------------- 4x25GBASE-R
353 * 4 CGXn_CMR0_CONFIG 1 7
354 * CGXn_CMR1_CONFIG 1 7
355 * CGXn_CMR2_CONFIG 1 7
356 * CGXn_CMR3_CONFIG 1 7 ---------------------------------
357 * ------------------------------------------ QSGMII 4
358 * CGXn_CMR0_CONFIG 1 6
359 * CGXn_CMR1_CONFIG 1 6
360 * CGXn_CMR2_CONFIG 1 6
361 * CGXn_CMR3_CONFIG 1 6 ---------------------------------
362 * ------------------------------------------ 1x40GBASE-R4 1
363 * CGXn_CMR0_CONFIG 1 4
364 * CGXn_CMR1_CONFIG 0 --
365 * CGXn_CMR2_CONFIG 0 --
366 * CGXn_CMR3_CONFIG 0 -- --------------------------------
367 * ------------------------------------------- 4x10GBASE-R
368 * 4 CGXn_CMR0_CONFIG 1 3
369 * CGXn_CMR1_CONFIG 1 3
370 * CGXn_CMR2_CONFIG 1 3
371 * CGXn_CMR3_CONFIG 1 3 ---------------------------------
372 * ------------------------------------------ 2xRXAUI 2
373 * CGXn_CMR0_CONFIG 1 2
374 * CGXn_CMR1_CONFIG 1 2
375 * CGXn_CMR2_CONFIG 0 --
376 * CGXn_CMR3_CONFIG 0 -- --------------------------------
377 * ------------------------------------------- 1x10GBASE-X/XAUI/DXAUI
378 * 1 CGXn_CMR0_CONFIG 1 1
379 * CGXn_CMR1_CONFIG 0 --
380 * CGXn_CMR2_CONFIG 0 --
381 * CGXn_CMR3_CONFIG 0 -- --------------------------------
382 * ------------------------------------------- 4xSGMII/1000BASE-X
383 * 4 CGXn_CMR0_CONFIG 1 0
384 * CGXn_CMR1_CONFIG 1 0
385 * CGXn_CMR2_CONFIG 1 0
386 * CGXn_CMR3_CONFIG 1 0 ---------------------------------
387 * ------------------------------------------ \</pre\>
389 union cgxx_cmrx_config {
391 struct cgxx_cmrx_config_s {
393 u64 reserved_8_39 : 32;
396 u64 int_beat_gen : 1;
397 u64 data_pkt_tx_en : 1;
398 u64 data_pkt_rx_en : 1;
402 u64 reserved_62_63 : 2;
404 /* struct cgxx_cmrx_config_s cn; */
407 static inline u64 CGXX_CMRX_CONFIG(u64 a)
408 __attribute__ ((pure, always_inline));
409 static inline u64 CGXX_CMRX_CONFIG(u64 a)
411 return 0 + 0x40000 * a;
415 * Register (RSL) cgx#_cmr#_int
417 * CGX CMR Interrupt Register
419 union cgxx_cmrx_int {
421 struct cgxx_cmrx_int_s {
429 u64 reserved_7_63 : 57;
431 /* struct cgxx_cmrx_int_s cn; */
434 static inline u64 CGXX_CMRX_INT(u64 a)
435 __attribute__ ((pure, always_inline));
436 static inline u64 CGXX_CMRX_INT(u64 a)
438 return 0x40 + 0x40000 * a;
442 * Register (RSL) cgx#_cmr#_int_ena_w1c
444 * CGX CMR Interrupt Enable Clear Register This register clears interrupt
447 union cgxx_cmrx_int_ena_w1c {
449 struct cgxx_cmrx_int_ena_w1c_s {
457 u64 reserved_7_63 : 57;
459 /* struct cgxx_cmrx_int_ena_w1c_s cn; */
462 static inline u64 CGXX_CMRX_INT_ENA_W1C(u64 a)
463 __attribute__ ((pure, always_inline));
464 static inline u64 CGXX_CMRX_INT_ENA_W1C(u64 a)
466 return 0x50 + 0x40000 * a;
470 * Register (RSL) cgx#_cmr#_int_ena_w1s
472 * CGX CMR Interrupt Enable Set Register This register sets interrupt
475 union cgxx_cmrx_int_ena_w1s {
477 struct cgxx_cmrx_int_ena_w1s_s {
485 u64 reserved_7_63 : 57;
487 /* struct cgxx_cmrx_int_ena_w1s_s cn; */
490 static inline u64 CGXX_CMRX_INT_ENA_W1S(u64 a)
491 __attribute__ ((pure, always_inline));
492 static inline u64 CGXX_CMRX_INT_ENA_W1S(u64 a)
494 return 0x58 + 0x40000 * a;
498 * Register (RSL) cgx#_cmr#_int_w1s
500 * CGX CMR Interrupt Set Register This register sets interrupt bits.
502 union cgxx_cmrx_int_w1s {
504 struct cgxx_cmrx_int_w1s_s {
512 u64 reserved_7_63 : 57;
514 /* struct cgxx_cmrx_int_w1s_s cn; */
517 static inline u64 CGXX_CMRX_INT_W1S(u64 a)
518 __attribute__ ((pure, always_inline));
519 static inline u64 CGXX_CMRX_INT_W1S(u64 a)
521 return 0x48 + 0x40000 * a;
525 * Register (RSL) cgx#_cmr#_led_timing
527 * CGX MAC LED Activity Timing Registers
529 union cgxx_cmrx_led_timing {
531 struct cgxx_cmrx_led_timing_s {
533 u64 reserved_8_63 : 56;
535 /* struct cgxx_cmrx_led_timing_s cn; */
538 static inline u64 CGXX_CMRX_LED_TIMING(u64 a)
539 __attribute__ ((pure, always_inline));
540 static inline u64 CGXX_CMRX_LED_TIMING(u64 a)
542 return 0x5f0 + 0x40000 * a;
546 * Register (RSL) cgx#_cmr#_prt_cbfc_ctl
548 * CGX CMR LMAC PFC Control Registers See CGX()_CMR()_RX_LOGL_XOFF[XOFF].
550 union cgxx_cmrx_prt_cbfc_ctl {
552 struct cgxx_cmrx_prt_cbfc_ctl_s {
553 u64 reserved_0_15 : 16;
555 u64 reserved_32_63 : 32;
557 /* struct cgxx_cmrx_prt_cbfc_ctl_s cn; */
560 static inline u64 CGXX_CMRX_PRT_CBFC_CTL(u64 a)
561 __attribute__ ((pure, always_inline));
562 static inline u64 CGXX_CMRX_PRT_CBFC_CTL(u64 a)
564 return 0x608 + 0x40000 * a;
568 * Register (RSL) cgx#_cmr#_rx_bp_drop
570 * CGX Receive Backpressure Drop Register
572 union cgxx_cmrx_rx_bp_drop {
574 struct cgxx_cmrx_rx_bp_drop_s {
576 u64 reserved_7_63 : 57;
578 /* struct cgxx_cmrx_rx_bp_drop_s cn; */
581 static inline u64 CGXX_CMRX_RX_BP_DROP(u64 a)
582 __attribute__ ((pure, always_inline));
583 static inline u64 CGXX_CMRX_RX_BP_DROP(u64 a)
585 return 0xd8 + 0x40000 * a;
589 * Register (RSL) cgx#_cmr#_rx_bp_off
591 * CGX Receive Backpressure Off Register
593 union cgxx_cmrx_rx_bp_off {
595 struct cgxx_cmrx_rx_bp_off_s {
597 u64 reserved_7_63 : 57;
599 /* struct cgxx_cmrx_rx_bp_off_s cn; */
602 static inline u64 CGXX_CMRX_RX_BP_OFF(u64 a)
603 __attribute__ ((pure, always_inline));
604 static inline u64 CGXX_CMRX_RX_BP_OFF(u64 a)
606 return 0xe8 + 0x40000 * a;
610 * Register (RSL) cgx#_cmr#_rx_bp_on
612 * CGX Receive Backpressure On Register
614 union cgxx_cmrx_rx_bp_on {
616 struct cgxx_cmrx_rx_bp_on_s {
618 u64 reserved_13_63 : 51;
620 /* struct cgxx_cmrx_rx_bp_on_s cn; */
623 static inline u64 CGXX_CMRX_RX_BP_ON(u64 a)
624 __attribute__ ((pure, always_inline));
625 static inline u64 CGXX_CMRX_RX_BP_ON(u64 a)
627 return 0xe0 + 0x40000 * a;
631 * Register (RSL) cgx#_cmr#_rx_bp_status
633 * CGX CMR Receive Backpressure Status Registers
635 union cgxx_cmrx_rx_bp_status {
637 struct cgxx_cmrx_rx_bp_status_s {
639 u64 reserved_1_63 : 63;
641 /* struct cgxx_cmrx_rx_bp_status_s cn; */
644 static inline u64 CGXX_CMRX_RX_BP_STATUS(u64 a)
645 __attribute__ ((pure, always_inline));
646 static inline u64 CGXX_CMRX_RX_BP_STATUS(u64 a)
648 return 0xf0 + 0x40000 * a;
652 * Register (RSL) cgx#_cmr#_rx_dmac_ctl0
654 * CGX CMR Receive DMAC Address-Control0 Register DMAC CAM control
655 * register for use by X2P/NIX bound traffic. Received packets are only
656 * passed to X2P/NIX when the DMAC0 filter result is ACCEPT and STEERING0
657 * filter result is PASS. See also CGX()_CMR_RX_DMAC()_CAM0 and
658 * CGX()_CMR_RX_STEERING0(). Internal: "* ALGORITHM Here is some pseudo
659 * code that represents the address filter behavior. \<pre\>
660 * dmac_addr_filter(uint8 prt, uint48 dmac) { for (lmac=0, lmac\<4,
661 * lmac++) { if (is_bcst(dmac)) //
662 * broadcast accept return (CGX()_CMR(lmac)_RX_DMAC_CTL0[BCST_ACCEPT]
663 * ? ACCEPT : REJECT); if (is_mcst(dmac) &&
664 * CGX()_CMR(lmac)_RX_DMAC_CTL0[MCST_MODE] == 0) // multicast reject
665 * return REJECT; if (is_mcst(dmac) &&
666 * CGX()_CMR(lmac)_RX_DMAC_CTL0[MCST_MODE] == 1) // multicast accept
667 * return ACCEPT; else // DMAC CAM filter cam_hit = 0; for
668 * (i=0; i\<32; i++) { cam = CGX()_CMR_RX_DMAC(i)_CAM0; if
669 * (cam[EN] && cam[ID] == lmac && cam[ADR] == dmac) { cam_hit = 1;
670 * break; } } if (cam_hit) { return
671 * (CGX()_CMR(lmac)_RX_DMAC_CTL0[CAM_ACCEPT] ? ACCEPT : REJECT); else
672 * return (CGX()_CMR(lmac)_RX_DMAC_CTL0[CAM_ACCEPT] ? REJECT : ACCEPT);
675 union cgxx_cmrx_rx_dmac_ctl0 {
677 struct cgxx_cmrx_rx_dmac_ctl0_s {
681 u64 reserved_4_63 : 60;
683 /* struct cgxx_cmrx_rx_dmac_ctl0_s cn; */
686 static inline u64 CGXX_CMRX_RX_DMAC_CTL0(u64 a)
687 __attribute__ ((pure, always_inline));
688 static inline u64 CGXX_CMRX_RX_DMAC_CTL0(u64 a)
690 return 0x1f8 + 0x40000 * a;
694 * Register (RSL) cgx#_cmr#_rx_dmac_ctl1
696 * CGX CMR Receive DMAC Address-Control1 Register DMAC CAM control
697 * register for use by NCSI bound traffic. Received packets are only
698 * passed to NCSI when the DMAC1 filter result is ACCEPT and STEERING1
699 * filter result is PASS. See also CGX()_CMR_RX_DMAC()_CAM1 and
700 * CGX()_CMR_RX_STEERING1(). For use with the LMAC associated with NCSI;
701 * see CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID]. Internal: ALGORITHM: See
702 * CGX()_CMR()_RX_DMAC_CTL0.
704 union cgxx_cmrx_rx_dmac_ctl1 {
706 struct cgxx_cmrx_rx_dmac_ctl1_s {
710 u64 reserved_4_63 : 60;
712 /* struct cgxx_cmrx_rx_dmac_ctl1_s cn; */
715 static inline u64 CGXX_CMRX_RX_DMAC_CTL1(u64 a)
716 __attribute__ ((pure, always_inline));
717 static inline u64 CGXX_CMRX_RX_DMAC_CTL1(u64 a)
719 return 0x3f8 + 0x40000 * a;
723 * Register (RSL) cgx#_cmr#_rx_fifo_len
725 * CGX CMR Receive Fifo Length Registers
727 union cgxx_cmrx_rx_fifo_len {
729 struct cgxx_cmrx_rx_fifo_len_s {
734 u64 reserved_30_63 : 34;
736 /* struct cgxx_cmrx_rx_fifo_len_s cn; */
739 static inline u64 CGXX_CMRX_RX_FIFO_LEN(u64 a)
740 __attribute__ ((pure, always_inline));
741 static inline u64 CGXX_CMRX_RX_FIFO_LEN(u64 a)
743 return 0x108 + 0x40000 * a;
747 * Register (RSL) cgx#_cmr#_rx_id_map
749 * CGX CMR Receive ID Map Register These registers set the RX LMAC ID
750 * mapping for X2P/NIX.
752 union cgxx_cmrx_rx_id_map {
754 struct cgxx_cmrx_rx_id_map_s {
758 u64 reserved_15_63 : 49;
760 /* struct cgxx_cmrx_rx_id_map_s cn; */
763 static inline u64 CGXX_CMRX_RX_ID_MAP(u64 a)
764 __attribute__ ((pure, always_inline));
765 static inline u64 CGXX_CMRX_RX_ID_MAP(u64 a)
767 return 0x60 + 0x40000 * a;
771 * Register (RSL) cgx#_cmr#_rx_logl_xoff
773 * CGX CMR Receive Logical XOFF Registers
775 union cgxx_cmrx_rx_logl_xoff {
777 struct cgxx_cmrx_rx_logl_xoff_s {
779 u64 reserved_16_63 : 48;
781 /* struct cgxx_cmrx_rx_logl_xoff_s cn; */
784 static inline u64 CGXX_CMRX_RX_LOGL_XOFF(u64 a)
785 __attribute__ ((pure, always_inline));
786 static inline u64 CGXX_CMRX_RX_LOGL_XOFF(u64 a)
788 return 0xf8 + 0x40000 * a;
792 * Register (RSL) cgx#_cmr#_rx_logl_xon
794 * CGX CMR Receive Logical XON Registers
796 union cgxx_cmrx_rx_logl_xon {
798 struct cgxx_cmrx_rx_logl_xon_s {
800 u64 reserved_16_63 : 48;
802 /* struct cgxx_cmrx_rx_logl_xon_s cn; */
805 static inline u64 CGXX_CMRX_RX_LOGL_XON(u64 a)
806 __attribute__ ((pure, always_inline));
807 static inline u64 CGXX_CMRX_RX_LOGL_XON(u64 a)
809 return 0x100 + 0x40000 * a;
813 * Register (RSL) cgx#_cmr#_rx_merge_stat0
815 * CGX RX Preemption Status Register 0
817 union cgxx_cmrx_rx_merge_stat0 {
819 struct cgxx_cmrx_rx_merge_stat0_s {
821 u64 reserved_48_63 : 16;
823 /* struct cgxx_cmrx_rx_merge_stat0_s cn; */
826 static inline u64 CGXX_CMRX_RX_MERGE_STAT0(u64 a)
827 __attribute__ ((pure, always_inline));
828 static inline u64 CGXX_CMRX_RX_MERGE_STAT0(u64 a)
830 return 0x138 + 0x40000 * a;
834 * Register (RSL) cgx#_cmr#_rx_merge_stat1
836 * CGX RX Preemption Status Register 1
838 union cgxx_cmrx_rx_merge_stat1 {
840 struct cgxx_cmrx_rx_merge_stat1_s {
842 u64 reserved_48_63 : 16;
844 /* struct cgxx_cmrx_rx_merge_stat1_s cn; */
847 static inline u64 CGXX_CMRX_RX_MERGE_STAT1(u64 a)
848 __attribute__ ((pure, always_inline));
849 static inline u64 CGXX_CMRX_RX_MERGE_STAT1(u64 a)
851 return 0x140 + 0x40000 * a;
855 * Register (RSL) cgx#_cmr#_rx_merge_stat2
857 * CGX RX Preemption Status Register 2
859 union cgxx_cmrx_rx_merge_stat2 {
861 struct cgxx_cmrx_rx_merge_stat2_s {
863 u64 reserved_48_63 : 16;
865 /* struct cgxx_cmrx_rx_merge_stat2_s cn; */
868 static inline u64 CGXX_CMRX_RX_MERGE_STAT2(u64 a)
869 __attribute__ ((pure, always_inline));
870 static inline u64 CGXX_CMRX_RX_MERGE_STAT2(u64 a)
872 return 0x148 + 0x40000 * a;
876 * Register (RSL) cgx#_cmr#_rx_merge_stat3
878 * CGX RX Preemption Status Register 3
880 union cgxx_cmrx_rx_merge_stat3 {
882 struct cgxx_cmrx_rx_merge_stat3_s {
884 u64 reserved_48_63 : 16;
886 /* struct cgxx_cmrx_rx_merge_stat3_s cn; */
889 static inline u64 CGXX_CMRX_RX_MERGE_STAT3(u64 a)
890 __attribute__ ((pure, always_inline));
891 static inline u64 CGXX_CMRX_RX_MERGE_STAT3(u64 a)
893 return 0x150 + 0x40000 * a;
897 * Register (RSL) cgx#_cmr#_rx_merge_stat4
899 * CGX RX Preemption Status Register 4
901 union cgxx_cmrx_rx_merge_stat4 {
903 struct cgxx_cmrx_rx_merge_stat4_s {
905 u64 reserved_48_63 : 16;
907 /* struct cgxx_cmrx_rx_merge_stat4_s cn; */
910 static inline u64 CGXX_CMRX_RX_MERGE_STAT4(u64 a)
911 __attribute__ ((pure, always_inline));
912 static inline u64 CGXX_CMRX_RX_MERGE_STAT4(u64 a)
914 return 0x158 + 0x40000 * a;
918 * Register (RSL) cgx#_cmr#_rx_pause_drop_time
920 * CGX CMR Receive Pause Drop-Time Register
922 union cgxx_cmrx_rx_pause_drop_time {
924 struct cgxx_cmrx_rx_pause_drop_time_s {
926 u64 pause_time_e : 16;
927 u64 reserved_32_63 : 32;
929 /* struct cgxx_cmrx_rx_pause_drop_time_s cn; */
932 static inline u64 CGXX_CMRX_RX_PAUSE_DROP_TIME(u64 a)
933 __attribute__ ((pure, always_inline));
934 static inline u64 CGXX_CMRX_RX_PAUSE_DROP_TIME(u64 a)
936 return 0x68 + 0x40000 * a;
940 * Register (RSL) cgx#_cmr#_rx_stat0
942 * CGX Receive Status Register 0 These registers provide a count of
943 * received packets that meet the following conditions: * are not
944 * recognized as ERROR packets(any OPCODE). * are not recognized as PAUSE
945 * packets. * are not dropped due FIFO full status. * are not dropped due
946 * DMAC0 or STEERING0 filtering. Internal: "This pseudo code represents
947 * the RX STAT0 through STAT8 accounting: \<pre\> If (errored) incr
948 * RX_STAT8 else if (ctrl packet, i.e. Pause/PFC) incr RX_STAT2,3 else
949 * if (fifo full drop) incr RX_STAT6,7 else if (DMAC0/VLAN0 filter
950 * drop) incr RX_STAT4,5 if not a filter+decision else incr
951 * RX_STAT0,1 end \</pre\>"
953 union cgxx_cmrx_rx_stat0 {
955 struct cgxx_cmrx_rx_stat0_s {
957 u64 reserved_48_63 : 16;
959 /* struct cgxx_cmrx_rx_stat0_s cn; */
962 static inline u64 CGXX_CMRX_RX_STAT0(u64 a)
963 __attribute__ ((pure, always_inline));
964 static inline u64 CGXX_CMRX_RX_STAT0(u64 a)
966 return 0x70 + 0x40000 * a;
970 * Register (RSL) cgx#_cmr#_rx_stat1
972 * CGX Receive Status Register 1 These registers provide a count of
973 * octets of received packets.
975 union cgxx_cmrx_rx_stat1 {
977 struct cgxx_cmrx_rx_stat1_s {
979 u64 reserved_48_63 : 16;
981 /* struct cgxx_cmrx_rx_stat1_s cn; */
984 static inline u64 CGXX_CMRX_RX_STAT1(u64 a)
985 __attribute__ ((pure, always_inline));
986 static inline u64 CGXX_CMRX_RX_STAT1(u64 a)
988 return 0x78 + 0x40000 * a;
992 * Register (RSL) cgx#_cmr#_rx_stat2
994 * CGX Receive Status Register 2 These registers provide a count of
995 * received packets that meet the following conditions: * are not
996 * recognized as ERROR packets(any OPCODE). * are recognized as PAUSE
997 * packets. Pause packets can be optionally dropped or forwarded based
999 * CGX()_SMU()_RX_FRM_CTL[CTL_DRP]/CGX()_GMP_GMI_RX()_FRM_CTL[CTL_DRP].
1000 * This count increments regardless of whether the packet is dropped.
1002 union cgxx_cmrx_rx_stat2 {
1004 struct cgxx_cmrx_rx_stat2_s {
1006 u64 reserved_48_63 : 16;
1008 /* struct cgxx_cmrx_rx_stat2_s cn; */
1011 static inline u64 CGXX_CMRX_RX_STAT2(u64 a)
1012 __attribute__ ((pure, always_inline));
1013 static inline u64 CGXX_CMRX_RX_STAT2(u64 a)
1015 return 0x80 + 0x40000 * a;
1019 * Register (RSL) cgx#_cmr#_rx_stat3
1021 * CGX Receive Status Register 3 These registers provide a count of
1022 * octets of received PAUSE and control packets.
1024 union cgxx_cmrx_rx_stat3 {
1026 struct cgxx_cmrx_rx_stat3_s {
1028 u64 reserved_48_63 : 16;
1030 /* struct cgxx_cmrx_rx_stat3_s cn; */
1033 static inline u64 CGXX_CMRX_RX_STAT3(u64 a)
1034 __attribute__ ((pure, always_inline));
1035 static inline u64 CGXX_CMRX_RX_STAT3(u64 a)
1037 return 0x88 + 0x40000 * a;
1041 * Register (RSL) cgx#_cmr#_rx_stat4
1043 * CGX Receive Status Register 4 These registers provide a count of
1044 * received packets that meet the following conditions: * are not
1045 * recognized as ERROR packets(any OPCODE). * are not recognized as PAUSE
1046 * packets. * are not dropped due FIFO full status. * are dropped due
1047 * DMAC0 or STEERING0 filtering. 16B packets or smaller (20B in case of
1048 * FCS strip) as the result of truncation or other means are not dropped
1049 * by CGX (unless filter and decision is also asserted) and will never
1050 * appear in this count. Should the MAC signal to the CMR that the packet
1051 * be filtered upon decision before the end of packet, then STAT4 and
1052 * STAT5 will not be updated.
1054 union cgxx_cmrx_rx_stat4 {
1056 struct cgxx_cmrx_rx_stat4_s {
1058 u64 reserved_48_63 : 16;
1060 /* struct cgxx_cmrx_rx_stat4_s cn; */
1063 static inline u64 CGXX_CMRX_RX_STAT4(u64 a)
1064 __attribute__ ((pure, always_inline));
1065 static inline u64 CGXX_CMRX_RX_STAT4(u64 a)
1067 return 0x90 + 0x40000 * a;
1071 * Register (RSL) cgx#_cmr#_rx_stat5
1073 * CGX Receive Status Register 5 These registers provide a count of
1074 * octets of filtered DMAC0 or VLAN STEERING0 packets.
1076 union cgxx_cmrx_rx_stat5 {
1078 struct cgxx_cmrx_rx_stat5_s {
1080 u64 reserved_48_63 : 16;
1082 /* struct cgxx_cmrx_rx_stat5_s cn; */
1085 static inline u64 CGXX_CMRX_RX_STAT5(u64 a)
1086 __attribute__ ((pure, always_inline));
1087 static inline u64 CGXX_CMRX_RX_STAT5(u64 a)
1089 return 0x98 + 0x40000 * a;
1093 * Register (RSL) cgx#_cmr#_rx_stat6
1095 * CGX Receive Status Register 6 These registers provide a count of
1096 * received packets that meet the following conditions: * are not
1097 * recognized as ERROR packets(any OPCODE). * are not recognized as PAUSE
1098 * packets. * are dropped due FIFO full status. They do not count any
1099 * packet that is truncated at the point of overflow and sent on to the
1100 * NIX. The truncated packet will be marked with error and increment
1101 * STAT8. These registers count all entire packets dropped by the FIFO
1104 union cgxx_cmrx_rx_stat6 {
1106 struct cgxx_cmrx_rx_stat6_s {
1108 u64 reserved_48_63 : 16;
1110 /* struct cgxx_cmrx_rx_stat6_s cn; */
1113 static inline u64 CGXX_CMRX_RX_STAT6(u64 a)
1114 __attribute__ ((pure, always_inline));
1115 static inline u64 CGXX_CMRX_RX_STAT6(u64 a)
1117 return 0xa0 + 0x40000 * a;
1121 * Register (RSL) cgx#_cmr#_rx_stat7
1123 * CGX Receive Status Register 7 These registers provide a count of
1124 * octets of received packets that were dropped due to a full receive
1127 union cgxx_cmrx_rx_stat7 {
1129 struct cgxx_cmrx_rx_stat7_s {
1131 u64 reserved_48_63 : 16;
1133 /* struct cgxx_cmrx_rx_stat7_s cn; */
1136 static inline u64 CGXX_CMRX_RX_STAT7(u64 a)
1137 __attribute__ ((pure, always_inline));
1138 static inline u64 CGXX_CMRX_RX_STAT7(u64 a)
1140 return 0xa8 + 0x40000 * a;
1144 * Register (RSL) cgx#_cmr#_rx_stat8
1146 * CGX Receive Status Register 8 These registers provide a count of
1147 * received packets that meet the following conditions: * are recognized
1148 * as ERROR packets(any OPCODE).
1150 union cgxx_cmrx_rx_stat8 {
1152 struct cgxx_cmrx_rx_stat8_s {
1154 u64 reserved_48_63 : 16;
1156 /* struct cgxx_cmrx_rx_stat8_s cn; */
1159 static inline u64 CGXX_CMRX_RX_STAT8(u64 a)
1160 __attribute__ ((pure, always_inline));
1161 static inline u64 CGXX_CMRX_RX_STAT8(u64 a)
1163 return 0xb0 + 0x40000 * a;
1167 * Register (RSL) cgx#_cmr#_rx_stat_pri#_xoff
1169 * CGX CMR RX XON to XOFF transition Registers
1171 union cgxx_cmrx_rx_stat_prix_xoff {
1173 struct cgxx_cmrx_rx_stat_prix_xoff_s {
1175 u64 reserved_48_63 : 16;
1177 /* struct cgxx_cmrx_rx_stat_prix_xoff_s cn; */
1180 static inline u64 CGXX_CMRX_RX_STAT_PRIX_XOFF(u64 a, u64 b)
1181 __attribute__ ((pure, always_inline));
1182 static inline u64 CGXX_CMRX_RX_STAT_PRIX_XOFF(u64 a, u64 b)
1184 return 0x7c0 + 0x40000 * a + 8 * b;
1188 * Register (RSL) cgx#_cmr#_scratch#
1190 * CGX CMR Scratch Registers
1192 union cgxx_cmrx_scratchx {
1194 struct cgxx_cmrx_scratchx_s {
1197 /* struct cgxx_cmrx_scratchx_s cn; */
1200 static inline u64 CGXX_CMRX_SCRATCHX(u64 a, u64 b)
1201 __attribute__ ((pure, always_inline));
1202 static inline u64 CGXX_CMRX_SCRATCHX(u64 a, u64 b)
1204 return 0x1050 + 0x40000 * a + 8 * b;
1208 * Register (RSL) cgx#_cmr#_sw_int
1210 * CGX CMR Interrupt Register
1212 union cgxx_cmrx_sw_int {
1214 struct cgxx_cmrx_sw_int_s {
1216 u64 reserved_1_63 : 63;
1218 /* struct cgxx_cmrx_sw_int_s cn; */
1221 static inline u64 CGXX_CMRX_SW_INT(u64 a)
1222 __attribute__ ((pure, always_inline));
1223 static inline u64 CGXX_CMRX_SW_INT(u64 a)
1225 return 0x180 + 0x40000 * a;
1229 * Register (RSL) cgx#_cmr#_sw_int_ena_w1c
1231 * CGX CMR Interrupt Enable Clear Register This register clears interrupt
1234 union cgxx_cmrx_sw_int_ena_w1c {
1236 struct cgxx_cmrx_sw_int_ena_w1c_s {
1238 u64 reserved_1_63 : 63;
1240 /* struct cgxx_cmrx_sw_int_ena_w1c_s cn; */
1243 static inline u64 CGXX_CMRX_SW_INT_ENA_W1C(u64 a)
1244 __attribute__ ((pure, always_inline));
1245 static inline u64 CGXX_CMRX_SW_INT_ENA_W1C(u64 a)
1247 return 0x190 + 0x40000 * a;
1251 * Register (RSL) cgx#_cmr#_sw_int_ena_w1s
1253 * CGX CMR Interrupt Enable Set Register This register sets interrupt
1256 union cgxx_cmrx_sw_int_ena_w1s {
1258 struct cgxx_cmrx_sw_int_ena_w1s_s {
1260 u64 reserved_1_63 : 63;
1262 /* struct cgxx_cmrx_sw_int_ena_w1s_s cn; */
1265 static inline u64 CGXX_CMRX_SW_INT_ENA_W1S(u64 a)
1266 __attribute__ ((pure, always_inline));
1267 static inline u64 CGXX_CMRX_SW_INT_ENA_W1S(u64 a)
1269 return 0x198 + 0x40000 * a;
1273 * Register (RSL) cgx#_cmr#_sw_int_w1s
1275 * CGX CMR Interrupt Set Register This register sets interrupt bits.
1277 union cgxx_cmrx_sw_int_w1s {
1279 struct cgxx_cmrx_sw_int_w1s_s {
1281 u64 reserved_1_63 : 63;
1283 /* struct cgxx_cmrx_sw_int_w1s_s cn; */
1286 static inline u64 CGXX_CMRX_SW_INT_W1S(u64 a)
1287 __attribute__ ((pure, always_inline));
1288 static inline u64 CGXX_CMRX_SW_INT_W1S(u64 a)
1290 return 0x188 + 0x40000 * a;
1294 * Register (RSL) cgx#_cmr#_tx_channel
1296 * CGX CMR Transmit-Channels Registers
1298 union cgxx_cmrx_tx_channel {
1300 struct cgxx_cmrx_tx_channel_s {
1302 u64 reserved_16_63 : 48;
1304 /* struct cgxx_cmrx_tx_channel_s cn; */
1307 static inline u64 CGXX_CMRX_TX_CHANNEL(u64 a)
1308 __attribute__ ((pure, always_inline));
1309 static inline u64 CGXX_CMRX_TX_CHANNEL(u64 a)
1311 return 0x600 + 0x40000 * a;
1315 * Register (RSL) cgx#_cmr#_tx_fifo_len
1317 * CGX CMR Transmit Fifo Length Registers
1319 union cgxx_cmrx_tx_fifo_len {
1321 struct cgxx_cmrx_tx_fifo_len_s {
1324 u64 fifo_e_len : 14;
1325 u64 lmac_e_idle : 1;
1326 u64 reserved_30_63 : 34;
1328 /* struct cgxx_cmrx_tx_fifo_len_s cn; */
1331 static inline u64 CGXX_CMRX_TX_FIFO_LEN(u64 a)
1332 __attribute__ ((pure, always_inline));
1333 static inline u64 CGXX_CMRX_TX_FIFO_LEN(u64 a)
1335 return 0x618 + 0x40000 * a;
1339 * Register (RSL) cgx#_cmr#_tx_hg2_status
1341 * CGX CMR Transmit HiGig2 Status Registers
1343 union cgxx_cmrx_tx_hg2_status {
1345 struct cgxx_cmrx_tx_hg2_status_s {
1348 u64 reserved_32_63 : 32;
1350 /* struct cgxx_cmrx_tx_hg2_status_s cn; */
1353 static inline u64 CGXX_CMRX_TX_HG2_STATUS(u64 a)
1354 __attribute__ ((pure, always_inline));
1355 static inline u64 CGXX_CMRX_TX_HG2_STATUS(u64 a)
1357 return 0x610 + 0x40000 * a;
1361 * Register (RSL) cgx#_cmr#_tx_merge_stat0
1363 * CGX TX Preemption Status Register 0
1365 union cgxx_cmrx_tx_merge_stat0 {
1367 struct cgxx_cmrx_tx_merge_stat0_s {
1369 u64 reserved_48_63 : 16;
1371 /* struct cgxx_cmrx_tx_merge_stat0_s cn; */
1374 static inline u64 CGXX_CMRX_TX_MERGE_STAT0(u64 a)
1375 __attribute__ ((pure, always_inline));
1376 static inline u64 CGXX_CMRX_TX_MERGE_STAT0(u64 a)
1378 return 0x160 + 0x40000 * a;
1382 * Register (RSL) cgx#_cmr#_tx_ovr_bp
1384 * CGX CMR Transmit-Channels Backpressure Override Registers
1386 union cgxx_cmrx_tx_ovr_bp {
1388 struct cgxx_cmrx_tx_ovr_bp_s {
1389 u64 tx_chan_bp : 16;
1390 u64 reserved_16_63 : 48;
1392 /* struct cgxx_cmrx_tx_ovr_bp_s cn; */
1395 static inline u64 CGXX_CMRX_TX_OVR_BP(u64 a)
1396 __attribute__ ((pure, always_inline));
1397 static inline u64 CGXX_CMRX_TX_OVR_BP(u64 a)
1399 return 0x620 + 0x40000 * a;
1403 * Register (RSL) cgx#_cmr#_tx_stat0
1405 * CGX CMR Transmit Statistics Registers 0
1407 union cgxx_cmrx_tx_stat0 {
1409 struct cgxx_cmrx_tx_stat0_s {
1411 u64 reserved_48_63 : 16;
1413 /* struct cgxx_cmrx_tx_stat0_s cn; */
1416 static inline u64 CGXX_CMRX_TX_STAT0(u64 a)
1417 __attribute__ ((pure, always_inline));
1418 static inline u64 CGXX_CMRX_TX_STAT0(u64 a)
1420 return 0x700 + 0x40000 * a;
1424 * Register (RSL) cgx#_cmr#_tx_stat1
1426 * CGX CMR Transmit Statistics Registers 1
1428 union cgxx_cmrx_tx_stat1 {
1430 struct cgxx_cmrx_tx_stat1_s {
1432 u64 reserved_48_63 : 16;
1434 /* struct cgxx_cmrx_tx_stat1_s cn; */
1437 static inline u64 CGXX_CMRX_TX_STAT1(u64 a)
1438 __attribute__ ((pure, always_inline));
1439 static inline u64 CGXX_CMRX_TX_STAT1(u64 a)
1441 return 0x708 + 0x40000 * a;
1445 * Register (RSL) cgx#_cmr#_tx_stat10
1447 * CGX CMR Transmit Statistics Registers 10
1449 union cgxx_cmrx_tx_stat10 {
1451 struct cgxx_cmrx_tx_stat10_s {
1453 u64 reserved_48_63 : 16;
1455 /* struct cgxx_cmrx_tx_stat10_s cn; */
1458 static inline u64 CGXX_CMRX_TX_STAT10(u64 a)
1459 __attribute__ ((pure, always_inline));
1460 static inline u64 CGXX_CMRX_TX_STAT10(u64 a)
1462 return 0x750 + 0x40000 * a;
1466 * Register (RSL) cgx#_cmr#_tx_stat11
1468 * CGX CMR Transmit Statistics Registers 11
1470 union cgxx_cmrx_tx_stat11 {
1472 struct cgxx_cmrx_tx_stat11_s {
1474 u64 reserved_48_63 : 16;
1476 /* struct cgxx_cmrx_tx_stat11_s cn; */
1479 static inline u64 CGXX_CMRX_TX_STAT11(u64 a)
1480 __attribute__ ((pure, always_inline));
1481 static inline u64 CGXX_CMRX_TX_STAT11(u64 a)
1483 return 0x758 + 0x40000 * a;
1487 * Register (RSL) cgx#_cmr#_tx_stat12
1489 * CGX CMR Transmit Statistics Registers 12
1491 union cgxx_cmrx_tx_stat12 {
1493 struct cgxx_cmrx_tx_stat12_s {
1495 u64 reserved_48_63 : 16;
1497 /* struct cgxx_cmrx_tx_stat12_s cn; */
1500 static inline u64 CGXX_CMRX_TX_STAT12(u64 a)
1501 __attribute__ ((pure, always_inline));
1502 static inline u64 CGXX_CMRX_TX_STAT12(u64 a)
1504 return 0x760 + 0x40000 * a;
1508 * Register (RSL) cgx#_cmr#_tx_stat13
1510 * CGX CMR Transmit Statistics Registers 13
1512 union cgxx_cmrx_tx_stat13 {
1514 struct cgxx_cmrx_tx_stat13_s {
1516 u64 reserved_48_63 : 16;
1518 /* struct cgxx_cmrx_tx_stat13_s cn; */
1521 static inline u64 CGXX_CMRX_TX_STAT13(u64 a)
1522 __attribute__ ((pure, always_inline));
1523 static inline u64 CGXX_CMRX_TX_STAT13(u64 a)
1525 return 0x768 + 0x40000 * a;
1529 * Register (RSL) cgx#_cmr#_tx_stat14
1531 * CGX CMR Transmit Statistics Registers 14
1533 union cgxx_cmrx_tx_stat14 {
1535 struct cgxx_cmrx_tx_stat14_s {
1537 u64 reserved_48_63 : 16;
1539 /* struct cgxx_cmrx_tx_stat14_s cn; */
1542 static inline u64 CGXX_CMRX_TX_STAT14(u64 a)
1543 __attribute__ ((pure, always_inline));
1544 static inline u64 CGXX_CMRX_TX_STAT14(u64 a)
1546 return 0x770 + 0x40000 * a;
1550 * Register (RSL) cgx#_cmr#_tx_stat15
1552 * CGX CMR Transmit Statistics Registers 15
1554 union cgxx_cmrx_tx_stat15 {
1556 struct cgxx_cmrx_tx_stat15_s {
1558 u64 reserved_48_63 : 16;
1560 /* struct cgxx_cmrx_tx_stat15_s cn; */
1563 static inline u64 CGXX_CMRX_TX_STAT15(u64 a)
1564 __attribute__ ((pure, always_inline));
1565 static inline u64 CGXX_CMRX_TX_STAT15(u64 a)
1567 return 0x778 + 0x40000 * a;
1571 * Register (RSL) cgx#_cmr#_tx_stat16
1573 * CGX CMR Transmit Statistics Registers 16
1575 union cgxx_cmrx_tx_stat16 {
1577 struct cgxx_cmrx_tx_stat16_s {
1579 u64 reserved_48_63 : 16;
1581 /* struct cgxx_cmrx_tx_stat16_s cn; */
1584 static inline u64 CGXX_CMRX_TX_STAT16(u64 a)
1585 __attribute__ ((pure, always_inline));
1586 static inline u64 CGXX_CMRX_TX_STAT16(u64 a)
1588 return 0x780 + 0x40000 * a;
1592 * Register (RSL) cgx#_cmr#_tx_stat17
1594 * CGX CMR Transmit Statistics Registers 17
1596 union cgxx_cmrx_tx_stat17 {
1598 struct cgxx_cmrx_tx_stat17_s {
1600 u64 reserved_48_63 : 16;
1602 /* struct cgxx_cmrx_tx_stat17_s cn; */
1605 static inline u64 CGXX_CMRX_TX_STAT17(u64 a)
1606 __attribute__ ((pure, always_inline));
1607 static inline u64 CGXX_CMRX_TX_STAT17(u64 a)
1609 return 0x788 + 0x40000 * a;
1613 * Register (RSL) cgx#_cmr#_tx_stat2
1615 * CGX CMR Transmit Statistics Registers 2
1617 union cgxx_cmrx_tx_stat2 {
1619 struct cgxx_cmrx_tx_stat2_s {
1621 u64 reserved_48_63 : 16;
1623 /* struct cgxx_cmrx_tx_stat2_s cn; */
1626 static inline u64 CGXX_CMRX_TX_STAT2(u64 a)
1627 __attribute__ ((pure, always_inline));
1628 static inline u64 CGXX_CMRX_TX_STAT2(u64 a)
1630 return 0x710 + 0x40000 * a;
1634 * Register (RSL) cgx#_cmr#_tx_stat3
1636 * CGX CMR Transmit Statistics Registers 3
1638 union cgxx_cmrx_tx_stat3 {
1640 struct cgxx_cmrx_tx_stat3_s {
1642 u64 reserved_48_63 : 16;
1644 /* struct cgxx_cmrx_tx_stat3_s cn; */
1647 static inline u64 CGXX_CMRX_TX_STAT3(u64 a)
1648 __attribute__ ((pure, always_inline));
1649 static inline u64 CGXX_CMRX_TX_STAT3(u64 a)
1651 return 0x718 + 0x40000 * a;
1655 * Register (RSL) cgx#_cmr#_tx_stat4
1657 * CGX CMR Transmit Statistics Registers 4
1659 union cgxx_cmrx_tx_stat4 {
1661 struct cgxx_cmrx_tx_stat4_s {
1663 u64 reserved_48_63 : 16;
1665 /* struct cgxx_cmrx_tx_stat4_s cn; */
1668 static inline u64 CGXX_CMRX_TX_STAT4(u64 a)
1669 __attribute__ ((pure, always_inline));
1670 static inline u64 CGXX_CMRX_TX_STAT4(u64 a)
1672 return 0x720 + 0x40000 * a;
1676 * Register (RSL) cgx#_cmr#_tx_stat5
1678 * CGX CMR Transmit Statistics Registers 5
1680 union cgxx_cmrx_tx_stat5 {
1682 struct cgxx_cmrx_tx_stat5_s {
1684 u64 reserved_48_63 : 16;
1686 /* struct cgxx_cmrx_tx_stat5_s cn; */
1689 static inline u64 CGXX_CMRX_TX_STAT5(u64 a)
1690 __attribute__ ((pure, always_inline));
1691 static inline u64 CGXX_CMRX_TX_STAT5(u64 a)
1693 return 0x728 + 0x40000 * a;
1697 * Register (RSL) cgx#_cmr#_tx_stat6
1699 * CGX CMR Transmit Statistics Registers 6
1701 union cgxx_cmrx_tx_stat6 {
1703 struct cgxx_cmrx_tx_stat6_s {
1705 u64 reserved_48_63 : 16;
1707 /* struct cgxx_cmrx_tx_stat6_s cn; */
1710 static inline u64 CGXX_CMRX_TX_STAT6(u64 a)
1711 __attribute__ ((pure, always_inline));
1712 static inline u64 CGXX_CMRX_TX_STAT6(u64 a)
1714 return 0x730 + 0x40000 * a;
1718 * Register (RSL) cgx#_cmr#_tx_stat7
1720 * CGX CMR Transmit Statistics Registers 7
1722 union cgxx_cmrx_tx_stat7 {
1724 struct cgxx_cmrx_tx_stat7_s {
1726 u64 reserved_48_63 : 16;
1728 /* struct cgxx_cmrx_tx_stat7_s cn; */
1731 static inline u64 CGXX_CMRX_TX_STAT7(u64 a)
1732 __attribute__ ((pure, always_inline));
1733 static inline u64 CGXX_CMRX_TX_STAT7(u64 a)
1735 return 0x738 + 0x40000 * a;
1739 * Register (RSL) cgx#_cmr#_tx_stat8
1741 * CGX CMR Transmit Statistics Registers 8
1743 union cgxx_cmrx_tx_stat8 {
1745 struct cgxx_cmrx_tx_stat8_s {
1747 u64 reserved_48_63 : 16;
1749 /* struct cgxx_cmrx_tx_stat8_s cn; */
1752 static inline u64 CGXX_CMRX_TX_STAT8(u64 a)
1753 __attribute__ ((pure, always_inline));
1754 static inline u64 CGXX_CMRX_TX_STAT8(u64 a)
1756 return 0x740 + 0x40000 * a;
1760 * Register (RSL) cgx#_cmr#_tx_stat9
1762 * CGX CMR Transmit Statistics Registers 9
1764 union cgxx_cmrx_tx_stat9 {
1766 struct cgxx_cmrx_tx_stat9_s {
1768 u64 reserved_48_63 : 16;
1770 /* struct cgxx_cmrx_tx_stat9_s cn; */
1773 static inline u64 CGXX_CMRX_TX_STAT9(u64 a)
1774 __attribute__ ((pure, always_inline));
1775 static inline u64 CGXX_CMRX_TX_STAT9(u64 a)
1777 return 0x748 + 0x40000 * a;
1781 * Register (RSL) cgx#_cmr#_tx_stat_pri#_xoff
1783 * CGX CMR TX XON to XOFF transition Registers
1785 union cgxx_cmrx_tx_stat_prix_xoff {
1787 struct cgxx_cmrx_tx_stat_prix_xoff_s {
1789 u64 reserved_48_63 : 16;
1791 /* struct cgxx_cmrx_tx_stat_prix_xoff_s cn; */
1794 static inline u64 CGXX_CMRX_TX_STAT_PRIX_XOFF(u64 a, u64 b)
1795 __attribute__ ((pure, always_inline));
1796 static inline u64 CGXX_CMRX_TX_STAT_PRIX_XOFF(u64 a, u64 b)
1798 return 0x800 + 0x40000 * a + 8 * b;
1802 * Register (RSL) cgx#_cmr_bad
1804 * CGX CMR Bad Registers
1806 union cgxx_cmr_bad {
1808 struct cgxx_cmr_bad_s {
1810 u64 reserved_1_63 : 63;
1812 /* struct cgxx_cmr_bad_s cn; */
1815 static inline u64 CGXX_CMR_BAD(void)
1816 __attribute__ ((pure, always_inline));
1817 static inline u64 CGXX_CMR_BAD(void)
1823 * Register (RSL) cgx#_cmr_chan_msk_and
1825 * CGX CMR Backpressure Channel Mask AND Registers
1827 union cgxx_cmr_chan_msk_and {
1829 struct cgxx_cmr_chan_msk_and_s {
1832 /* struct cgxx_cmr_chan_msk_and_s cn; */
1835 static inline u64 CGXX_CMR_CHAN_MSK_AND(void)
1836 __attribute__ ((pure, always_inline));
1837 static inline u64 CGXX_CMR_CHAN_MSK_AND(void)
1843 * Register (RSL) cgx#_cmr_chan_msk_or
1845 * CGX Backpressure Channel Mask OR Registers
1847 union cgxx_cmr_chan_msk_or {
1849 struct cgxx_cmr_chan_msk_or_s {
1852 /* struct cgxx_cmr_chan_msk_or_s cn; */
1855 static inline u64 CGXX_CMR_CHAN_MSK_OR(void)
1856 __attribute__ ((pure, always_inline));
1857 static inline u64 CGXX_CMR_CHAN_MSK_OR(void)
1863 * Register (RSL) cgx#_cmr_eco
1865 * INTERNAL: CGX ECO Registers
1867 union cgxx_cmr_eco {
1869 struct cgxx_cmr_eco_s {
1873 /* struct cgxx_cmr_eco_s cn; */
1876 static inline u64 CGXX_CMR_ECO(void)
1877 __attribute__ ((pure, always_inline));
1878 static inline u64 CGXX_CMR_ECO(void)
1884 * Register (RSL) cgx#_cmr_global_config
1886 * CGX CMR Global Configuration Register These registers configure the
1887 * global CMR, PCS, and MAC.
1889 union cgxx_cmr_global_config {
1891 struct cgxx_cmr_global_config_s {
1892 u64 pmux_sds_sel : 1;
1893 u64 cgx_clk_enable : 1;
1894 u64 cmr_x2p_reset : 3;
1895 u64 interleave_mode : 1;
1897 u64 ncsi_lmac_id : 2;
1898 u64 cmr_ncsi_drop : 1;
1899 u64 cmr_ncsi_reset : 1;
1900 u64 cmr_ncsi_tag_cnt : 13;
1901 u64 cmr_clken_ovrd : 1;
1902 u64 reserved_25_63 : 39;
1904 /* struct cgxx_cmr_global_config_s cn; */
1907 static inline u64 CGXX_CMR_GLOBAL_CONFIG(void)
1908 __attribute__ ((pure, always_inline));
1909 static inline u64 CGXX_CMR_GLOBAL_CONFIG(void)
1915 * Register (RSL) cgx#_cmr_mem_int
1917 * CGX CMR Memory Interrupt Register
1919 union cgxx_cmr_mem_int {
1921 struct cgxx_cmr_mem_int_s {
1922 u64 gmp_in_overfl : 1;
1923 u64 smu_in_overfl : 1;
1924 u64 reserved_2_63 : 62;
1926 /* struct cgxx_cmr_mem_int_s cn; */
1929 static inline u64 CGXX_CMR_MEM_INT(void)
1930 __attribute__ ((pure, always_inline));
1931 static inline u64 CGXX_CMR_MEM_INT(void)
1937 * Register (RSL) cgx#_cmr_mem_int_ena_w1c
1939 * CGX CMR Memory Interrupt Enable Clear Register This register clears
1940 * interrupt enable bits.
1942 union cgxx_cmr_mem_int_ena_w1c {
1944 struct cgxx_cmr_mem_int_ena_w1c_s {
1945 u64 gmp_in_overfl : 1;
1946 u64 smu_in_overfl : 1;
1947 u64 reserved_2_63 : 62;
1949 /* struct cgxx_cmr_mem_int_ena_w1c_s cn; */
1952 static inline u64 CGXX_CMR_MEM_INT_ENA_W1C(void)
1953 __attribute__ ((pure, always_inline));
1954 static inline u64 CGXX_CMR_MEM_INT_ENA_W1C(void)
1960 * Register (RSL) cgx#_cmr_mem_int_ena_w1s
1962 * CGX CMR Memory Interrupt Enable Set Register This register sets
1963 * interrupt enable bits.
1965 union cgxx_cmr_mem_int_ena_w1s {
1967 struct cgxx_cmr_mem_int_ena_w1s_s {
1968 u64 gmp_in_overfl : 1;
1969 u64 smu_in_overfl : 1;
1970 u64 reserved_2_63 : 62;
1972 /* struct cgxx_cmr_mem_int_ena_w1s_s cn; */
1975 static inline u64 CGXX_CMR_MEM_INT_ENA_W1S(void)
1976 __attribute__ ((pure, always_inline));
1977 static inline u64 CGXX_CMR_MEM_INT_ENA_W1S(void)
1983 * Register (RSL) cgx#_cmr_mem_int_w1s
1985 * CGX CMR Memory Interrupt Set Register This register sets interrupt
1988 union cgxx_cmr_mem_int_w1s {
1990 struct cgxx_cmr_mem_int_w1s_s {
1991 u64 gmp_in_overfl : 1;
1992 u64 smu_in_overfl : 1;
1993 u64 reserved_2_63 : 62;
1995 /* struct cgxx_cmr_mem_int_w1s_s cn; */
1998 static inline u64 CGXX_CMR_MEM_INT_W1S(void)
1999 __attribute__ ((pure, always_inline));
2000 static inline u64 CGXX_CMR_MEM_INT_W1S(void)
2006 * Register (RSL) cgx#_cmr_nic_nxc_adr
2008 * CGX CMR NIC NXC Exception Registers
2010 union cgxx_cmr_nic_nxc_adr {
2012 struct cgxx_cmr_nic_nxc_adr_s {
2015 u64 reserved_16_63 : 48;
2017 /* struct cgxx_cmr_nic_nxc_adr_s cn; */
2020 static inline u64 CGXX_CMR_NIC_NXC_ADR(void)
2021 __attribute__ ((pure, always_inline));
2022 static inline u64 CGXX_CMR_NIC_NXC_ADR(void)
2028 * Register (RSL) cgx#_cmr_nix0_nxc_adr
2030 * CGX CMR NIX0 NXC Exception Registers
2032 union cgxx_cmr_nix0_nxc_adr {
2034 struct cgxx_cmr_nix0_nxc_adr_s {
2039 u64 reserved_32_63 : 32;
2041 /* struct cgxx_cmr_nix0_nxc_adr_s cn; */
2044 static inline u64 CGXX_CMR_NIX0_NXC_ADR(void)
2045 __attribute__ ((pure, always_inline));
2046 static inline u64 CGXX_CMR_NIX0_NXC_ADR(void)
2052 * Register (RSL) cgx#_cmr_nix1_nxc_adr
2054 * CGX CMR NIX1 NXC Exception Registers
2056 union cgxx_cmr_nix1_nxc_adr {
2058 struct cgxx_cmr_nix1_nxc_adr_s {
2063 u64 reserved_32_63 : 32;
2065 /* struct cgxx_cmr_nix1_nxc_adr_s cn; */
2068 static inline u64 CGXX_CMR_NIX1_NXC_ADR(void)
2069 __attribute__ ((pure, always_inline));
2070 static inline u64 CGXX_CMR_NIX1_NXC_ADR(void)
2076 * Register (RSL) cgx#_cmr_p2x#_count
2078 * CGX P2X Activity Register
2080 union cgxx_cmr_p2xx_count {
2082 struct cgxx_cmr_p2xx_count_s {
2085 /* struct cgxx_cmr_p2xx_count_s cn; */
2088 static inline u64 CGXX_CMR_P2XX_COUNT(u64 a)
2089 __attribute__ ((pure, always_inline));
2090 static inline u64 CGXX_CMR_P2XX_COUNT(u64 a)
2092 return 0x168 + 0x1000 * a;
2096 * Register (RSL) cgx#_cmr_rx_dmac#_cam0
2098 * CGX CMR Receive CAM Registers These registers provide access to the 32
2099 * DMAC CAM0 entries in CGX, for use by X2P/NIX bound traffic.
2101 union cgxx_cmr_rx_dmacx_cam0 {
2103 struct cgxx_cmr_rx_dmacx_cam0_s {
2107 u64 reserved_51_63 : 13;
2109 /* struct cgxx_cmr_rx_dmacx_cam0_s cn; */
2112 static inline u64 CGXX_CMR_RX_DMACX_CAM0(u64 a)
2113 __attribute__ ((pure, always_inline));
2114 static inline u64 CGXX_CMR_RX_DMACX_CAM0(u64 a)
2116 return 0x200 + 8 * a;
2120 * Register (RSL) cgx#_cmr_rx_dmac#_cam1
2122 * CGX CMR Receive CAM Registers These registers provide access to the 32
2123 * DMAC CAM entries in CGX for use by NCSI bound traffic. See
2124 * CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID] and CGX()_CMR_RX_STEERING1()
2127 union cgxx_cmr_rx_dmacx_cam1 {
2129 struct cgxx_cmr_rx_dmacx_cam1_s {
2133 u64 reserved_51_63 : 13;
2135 /* struct cgxx_cmr_rx_dmacx_cam1_s cn; */
2138 static inline u64 CGXX_CMR_RX_DMACX_CAM1(u64 a)
2139 __attribute__ ((pure, always_inline));
2140 static inline u64 CGXX_CMR_RX_DMACX_CAM1(u64 a)
2142 return 0x400 + 8 * a;
2146 * Register (RSL) cgx#_cmr_rx_lmacs
2148 * CGX CMR Receive Logical MACs Registers
2150 union cgxx_cmr_rx_lmacs {
2152 struct cgxx_cmr_rx_lmacs_s {
2154 u64 reserved_3_63 : 61;
2156 /* struct cgxx_cmr_rx_lmacs_s cn; */
2159 static inline u64 CGXX_CMR_RX_LMACS(void)
2160 __attribute__ ((pure, always_inline));
2161 static inline u64 CGXX_CMR_RX_LMACS(void)
2167 * Register (RSL) cgx#_cmr_rx_ovr_bp
2169 * CGX CMR Receive-Ports Backpressure Override Registers Per-LMAC
2170 * backpressure override register. For SMU, CGX()_CMR_RX_OVR_BP[EN]\<0\>
2171 * must be set to one and CGX()_CMR_RX_OVR_BP[BP]\<0\> must be cleared to
2172 * zero (to forcibly disable hardware-automatic 802.3 PAUSE packet
2173 * generation) with the HiGig2 Protocol when
2174 * CGX()_SMU()_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated
2175 * by CGX()_SMU()_TX_CTL[HG_EN]=1 and CGX()_SMU()_RX_UDD_SKP[LEN]=16).
2176 * Hardware can only auto-generate backpressure through HiGig2 messages
2177 * (optionally, when CGX()_SMU()_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2
2180 union cgxx_cmr_rx_ovr_bp {
2182 struct cgxx_cmr_rx_ovr_bp_s {
2183 u64 ign_fifo_bp : 4;
2186 u64 reserved_12_63 : 52;
2188 /* struct cgxx_cmr_rx_ovr_bp_s cn; */
2191 static inline u64 CGXX_CMR_RX_OVR_BP(void)
2192 __attribute__ ((pure, always_inline));
2193 static inline u64 CGXX_CMR_RX_OVR_BP(void)
2199 * Register (RSL) cgx#_cmr_rx_stat10
2201 * CGX Receive Status Register 10 These registers provide a count of
2202 * octets of filtered DMAC1 or VLAN STEERING1 packets.
2204 union cgxx_cmr_rx_stat10 {
2206 struct cgxx_cmr_rx_stat10_s {
2208 u64 reserved_48_63 : 16;
2210 /* struct cgxx_cmr_rx_stat10_s cn; */
2213 static inline u64 CGXX_CMR_RX_STAT10(void)
2214 __attribute__ ((pure, always_inline));
2215 static inline u64 CGXX_CMR_RX_STAT10(void)
2221 * Register (RSL) cgx#_cmr_rx_stat11
2223 * CGX Receive Status Register 11 This registers provides a count of
2224 * packets dropped at the NCSI interface. This includes drops due to
2225 * CGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_DROP] or NCSI FIFO full. The count of
2226 * dropped NCSI packets is not accounted for in any other stats
2229 union cgxx_cmr_rx_stat11 {
2231 struct cgxx_cmr_rx_stat11_s {
2233 u64 reserved_48_63 : 16;
2235 /* struct cgxx_cmr_rx_stat11_s cn; */
2238 static inline u64 CGXX_CMR_RX_STAT11(void)
2239 __attribute__ ((pure, always_inline));
2240 static inline u64 CGXX_CMR_RX_STAT11(void)
2246 * Register (RSL) cgx#_cmr_rx_stat12
2248 * CGX Receive Status Register 12 This register provide a count of octets
2249 * of dropped at the NCSI interface.
2251 union cgxx_cmr_rx_stat12 {
2253 struct cgxx_cmr_rx_stat12_s {
2255 u64 reserved_48_63 : 16;
2257 /* struct cgxx_cmr_rx_stat12_s cn; */
2260 static inline u64 CGXX_CMR_RX_STAT12(void)
2261 __attribute__ ((pure, always_inline));
2262 static inline u64 CGXX_CMR_RX_STAT12(void)
2268 * Register (RSL) cgx#_cmr_rx_stat9
2270 * CGX Receive Status Register 9 These registers provide a count of all
2271 * received packets that were dropped by the DMAC1 or VLAN STEERING1
2272 * filter. Packets that are dropped by the DMAC1 or VLAN STEERING1
2273 * filters are counted here regardless of whether they were ERR packets,
2274 * but does not include those reported in CGX()_CMR()_RX_STAT6. 16B
2275 * packets or smaller (20B in case of FCS strip) as the result of
2276 * truncation or other means are not dropped by CGX (unless filter and
2277 * decision is also asserted) and will never appear in this count. Should
2278 * the MAC signal to the CMR that the packet be filtered upon decision
2279 * before the end of packet, then STAT9 and STAT10 will not be updated.
2281 union cgxx_cmr_rx_stat9 {
2283 struct cgxx_cmr_rx_stat9_s {
2285 u64 reserved_48_63 : 16;
2287 /* struct cgxx_cmr_rx_stat9_s cn; */
2290 static inline u64 CGXX_CMR_RX_STAT9(void)
2291 __attribute__ ((pure, always_inline));
2292 static inline u64 CGXX_CMR_RX_STAT9(void)
2298 * Register (RSL) cgx#_cmr_rx_steering0#
2300 * CGX CMR Receive Steering0 Registers These registers, along with
2301 * CGX()_CMR_RX_STEERING_VETYPE0(), provide eight filters for identifying
2302 * and steering receive traffic to X2P/NIX. Received packets are only
2303 * passed to X2P/NIX when the DMAC0 filter result is ACCEPT and STEERING0
2304 * filter result is PASS. See also CGX()_CMR()_RX_DMAC_CTL0. Internal:
2305 * "* ALGORITHM \<pre\> rx_steering(uint48 pkt_dmac, uint16 pkt_etype,
2306 * uint16 pkt_vlan_id) { for (int i = 0; i \< 8; i++) { steer =
2307 * CGX()_CMR_RX_STEERING0(i); vetype =
2308 * CGX()_CMR_RX_STEERING_VETYPE0(i); if (steer[MCST_EN] ||
2309 * steer[DMAC_EN] || vetype[VLAN_EN] || vetype[VLAN_TAG_EN]) {
2310 * // Filter is enabled. if ( (!steer[MCST_EN] ||
2311 * is_mcst(pkt_dmac)) && (!steer[DMAC_EN] || pkt_dmac ==
2312 * steer[DMAC]) && (!vetype[VLAN_EN] || pkt_vlan_id ==
2313 * vetype[VLAN_ID]) && (!vetype[VLAN_TAG_EN] || pkt_etype ==
2314 * vetype[VLAN_ETYPE]) ) { // Filter match (all
2315 * enabled matching criteria are met). return steer[PASS];
2316 * } } } return CGX()_CMR_RX_STEERING_DEFAULT0[PASS]; // No
2319 union cgxx_cmr_rx_steering0x {
2321 struct cgxx_cmr_rx_steering0x_s {
2326 u64 reserved_51_63 : 13;
2328 /* struct cgxx_cmr_rx_steering0x_s cn; */
2331 static inline u64 CGXX_CMR_RX_STEERING0X(u64 a)
2332 __attribute__ ((pure, always_inline));
2333 static inline u64 CGXX_CMR_RX_STEERING0X(u64 a)
2335 return 0x300 + 8 * a;
2339 * Register (RSL) cgx#_cmr_rx_steering1#
2341 * CGX CMR Receive Steering1 Registers These registers, along with
2342 * CGX()_CMR_RX_STEERING_VETYPE1(), provide eight filters for identifying
2343 * and steering NCSI receive traffic. Received packets are only passed to
2344 * NCSI when the DMAC1 filter result is ACCEPT and STEERING1 filter
2345 * result is PASS. See also CGX()_CMR_RX_DMAC()_CAM1 and
2346 * CGX()_CMR_RX_STEERING1(). For use with the LMAC associated with NCSI.
2347 * See CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID]. Internal: ALGORITHM: See
2348 * CGX()_CMR_RX_STEERING0().
2350 union cgxx_cmr_rx_steering1x {
2352 struct cgxx_cmr_rx_steering1x_s {
2357 u64 reserved_51_63 : 13;
2359 /* struct cgxx_cmr_rx_steering1x_s cn; */
2362 static inline u64 CGXX_CMR_RX_STEERING1X(u64 a)
2363 __attribute__ ((pure, always_inline));
2364 static inline u64 CGXX_CMR_RX_STEERING1X(u64 a)
2366 return 0x500 + 8 * a;
2370 * Register (RSL) cgx#_cmr_rx_steering_default0
2372 * CGX CMR Receive Steering Default0 Destination Register For determining
2373 * destination of traffic that does not meet matching algorithm described
2374 * in registers CGX()_CMR_RX_STEERING0() and
2375 * CGX()_CMR_RX_STEERING_VETYPE0(). All 16B packets or smaller (20B in
2376 * case of FCS strip) as the result of truncation will steer to default
2379 union cgxx_cmr_rx_steering_default0 {
2381 struct cgxx_cmr_rx_steering_default0_s {
2383 u64 reserved_1_63 : 63;
2385 /* struct cgxx_cmr_rx_steering_default0_s cn; */
2388 static inline u64 CGXX_CMR_RX_STEERING_DEFAULT0(void)
2389 __attribute__ ((pure, always_inline));
2390 static inline u64 CGXX_CMR_RX_STEERING_DEFAULT0(void)
2396 * Register (RSL) cgx#_cmr_rx_steering_default1
2398 * CGX CMR Receive Steering Default1 Destination Register For use with
2399 * the lmac_id associated with NCSI. See
2400 * CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID]. For determining destination of
2401 * traffic that does not meet matching algorithm described in registers
2402 * CGX()_CMR_RX_STEERING1() and CGX()_CMR_RX_STEERING_VETYPE1(). All 16B
2403 * packets or smaller (20B in case of FCS strip) as the result of
2404 * truncation will steer to default destination
2406 union cgxx_cmr_rx_steering_default1 {
2408 struct cgxx_cmr_rx_steering_default1_s {
2410 u64 reserved_1_63 : 63;
2412 /* struct cgxx_cmr_rx_steering_default1_s cn; */
2415 static inline u64 CGXX_CMR_RX_STEERING_DEFAULT1(void)
2416 __attribute__ ((pure, always_inline));
2417 static inline u64 CGXX_CMR_RX_STEERING_DEFAULT1(void)
2423 * Register (RSL) cgx#_cmr_rx_steering_vetype0#
2425 * CGX CMR Receive VLAN Ethertype1 Register These registers, along with
2426 * CGX()_CMR_RX_STEERING0(), provide eight filters for identifying and
2427 * steering X2P/NIX receive traffic.
2429 union cgxx_cmr_rx_steering_vetype0x {
2431 struct cgxx_cmr_rx_steering_vetype0x_s {
2432 u64 vlan_etype : 16;
2433 u64 vlan_tag_en : 1;
2436 u64 reserved_30_63 : 34;
2438 /* struct cgxx_cmr_rx_steering_vetype0x_s cn; */
2441 static inline u64 CGXX_CMR_RX_STEERING_VETYPE0X(u64 a)
2442 __attribute__ ((pure, always_inline));
2443 static inline u64 CGXX_CMR_RX_STEERING_VETYPE0X(u64 a)
2445 return 0x380 + 8 * a;
2449 * Register (RSL) cgx#_cmr_rx_steering_vetype1#
2451 * CGX CMR Receive VLAN Ethertype1 Register For use with the lmac_id
2452 * associated with NCSI. See CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID]. These
2453 * registers, along with CGX()_CMR_RX_STEERING1(), provide eight filters
2454 * for identifying and steering NCSI receive traffic.
2456 union cgxx_cmr_rx_steering_vetype1x {
2458 struct cgxx_cmr_rx_steering_vetype1x_s {
2459 u64 vlan_etype : 16;
2460 u64 vlan_tag_en : 1;
2463 u64 reserved_30_63 : 34;
2465 /* struct cgxx_cmr_rx_steering_vetype1x_s cn; */
2468 static inline u64 CGXX_CMR_RX_STEERING_VETYPE1X(u64 a)
2469 __attribute__ ((pure, always_inline));
2470 static inline u64 CGXX_CMR_RX_STEERING_VETYPE1X(u64 a)
2472 return 0x580 + 8 * a;
2476 * Register (RSL) cgx#_cmr_tx_lmacs
2478 * CGX CMR Transmit Logical MACs Registers This register sets the number
2479 * of LMACs allowed on the TX interface. The value is important for
2480 * defining the partitioning of the transmit FIFO.
2482 union cgxx_cmr_tx_lmacs {
2484 struct cgxx_cmr_tx_lmacs_s {
2486 u64 reserved_3_63 : 61;
2488 /* struct cgxx_cmr_tx_lmacs_s cn; */
2491 static inline u64 CGXX_CMR_TX_LMACS(void)
2492 __attribute__ ((pure, always_inline));
2493 static inline u64 CGXX_CMR_TX_LMACS(void)
2499 * Register (RSL) cgx#_cmr_x2p#_count
2501 * CGX X2P Activity Register
2503 union cgxx_cmr_x2px_count {
2505 struct cgxx_cmr_x2px_count_s {
2508 /* struct cgxx_cmr_x2px_count_s cn; */
2511 static inline u64 CGXX_CMR_X2PX_COUNT(u64 a)
2512 __attribute__ ((pure, always_inline));
2513 static inline u64 CGXX_CMR_X2PX_COUNT(u64 a)
2515 return 0x170 + 0x1000 * a;
2519 * Register (RSL) cgx#_const
2521 * CGX CONST Registers This register contains constants for software
2526 struct cgxx_const_s {
2530 u64 reserved_56_63 : 8;
2532 /* struct cgxx_const_s cn; */
2535 static inline u64 CGXX_CONST(void)
2536 __attribute__ ((pure, always_inline));
2537 static inline u64 CGXX_CONST(void)
2543 * Register (RSL) cgx#_const1
2545 * CGX CONST1 Registers This register contains constants for software
2550 struct cgxx_const1_s {
2553 u64 reserved_32_63 : 32;
2555 /* struct cgxx_const1_s cn; */
2558 static inline u64 CGXX_CONST1(void)
2559 __attribute__ ((pure, always_inline));
2560 static inline u64 CGXX_CONST1(void)
2566 * Register (RSL) cgx#_gmp_gmi#_rx_wol_ctrl0
2568 * CGX GMP GMI RX Wake-on-LAN Control 0 Registers
2570 union cgxx_gmp_gmix_rx_wol_ctrl0 {
2572 struct cgxx_gmp_gmix_rx_wol_ctrl0_s {
2575 u64 reserved_52_63 : 12;
2577 /* struct cgxx_gmp_gmix_rx_wol_ctrl0_s cn; */
2580 static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL0(u64 a)
2581 __attribute__ ((pure, always_inline));
2582 static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL0(u64 a)
2584 return 0x38a00 + 0x40000 * a;
2588 * Register (RSL) cgx#_gmp_gmi#_rx_wol_ctrl1
2590 * CGX GMP GMI RX Wake-on-LAN Control 1 Registers
2592 union cgxx_gmp_gmix_rx_wol_ctrl1 {
2594 struct cgxx_gmp_gmix_rx_wol_ctrl1_s {
2597 /* struct cgxx_gmp_gmix_rx_wol_ctrl1_s cn; */
2600 static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL1(u64 a)
2601 __attribute__ ((pure, always_inline));
2602 static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL1(u64 a)
2604 return 0x38a08 + 0x40000 * a;
2608 * Register (RSL) cgx#_gmp_gmi#_tx_eee
2610 * INTERNAL: CGX GMP GMI TX EEE Configure Registers Reserved. Internal:
2611 * These registers control when GMP GMI TX requests to enter or exist
2612 * LPI. Those registers take effect only when EEE is supported and
2613 * enabled for a given LMAC.
2615 union cgxx_gmp_gmix_tx_eee {
2617 struct cgxx_gmp_gmix_tx_eee_s {
2618 u64 idle_thresh : 28;
2619 u64 reserved_28 : 1;
2625 u64 tx_lpi_wait : 1;
2626 u64 sync_status_lpi_enable : 1;
2627 u64 reserved_63 : 1;
2629 /* struct cgxx_gmp_gmix_tx_eee_s cn; */
2632 static inline u64 CGXX_GMP_GMIX_TX_EEE(u64 a)
2633 __attribute__ ((pure, always_inline));
2634 static inline u64 CGXX_GMP_GMIX_TX_EEE(u64 a)
2636 return 0x38800 + 0x40000 * a;
2640 * Register (RSL) cgx#_gmp_gmi#_tx_eee_cfg1
2642 * INTERNAL: CGX GMP GMI TX EEE Configure More Configuration Registers
2643 * Reserved. Internal: Controls the GMP exiting of LPI and starting to
2646 union cgxx_gmp_gmix_tx_eee_cfg1 {
2648 struct cgxx_gmp_gmix_tx_eee_cfg1_s {
2649 u64 wake2data_time : 24;
2650 u64 reserved_24_35 : 12;
2651 u64 tx_eee_enable : 1;
2652 u64 reserved_37_39 : 3;
2653 u64 sync2lpi_time : 21;
2654 u64 reserved_61_63 : 3;
2656 struct cgxx_gmp_gmix_tx_eee_cfg1_cn {
2657 u64 wake2data_time : 24;
2658 u64 reserved_24_31 : 8;
2659 u64 reserved_32_35 : 4;
2660 u64 tx_eee_enable : 1;
2661 u64 reserved_37_39 : 3;
2662 u64 sync2lpi_time : 21;
2663 u64 reserved_61_63 : 3;
2667 static inline u64 CGXX_GMP_GMIX_TX_EEE_CFG1(u64 a)
2668 __attribute__ ((pure, always_inline));
2669 static inline u64 CGXX_GMP_GMIX_TX_EEE_CFG1(u64 a)
2671 return 0x38808 + 0x40000 * a;
2675 * Register (RSL) cgx#_gmp_gmi#_wol_int
2677 * CGX GMP GMI RX WOL Interrupt Registers These registers allow WOL
2678 * interrupts to be sent to the control processor.
2680 union cgxx_gmp_gmix_wol_int {
2682 struct cgxx_gmp_gmix_wol_int_s {
2684 u64 reserved_1_63 : 63;
2686 /* struct cgxx_gmp_gmix_wol_int_s cn; */
2689 static inline u64 CGXX_GMP_GMIX_WOL_INT(u64 a)
2690 __attribute__ ((pure, always_inline));
2691 static inline u64 CGXX_GMP_GMIX_WOL_INT(u64 a)
2693 return 0x38a80 + 0x40000 * a;
2697 * Register (RSL) cgx#_gmp_gmi#_wol_int_ena_w1c
2699 * CGX GMP GMI RX WOL Interrupt Enable Clear Registers This register
2700 * clears interrupt enable bits.
2702 union cgxx_gmp_gmix_wol_int_ena_w1c {
2704 struct cgxx_gmp_gmix_wol_int_ena_w1c_s {
2706 u64 reserved_1_63 : 63;
2708 /* struct cgxx_gmp_gmix_wol_int_ena_w1c_s cn; */
2711 static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1C(u64 a)
2712 __attribute__ ((pure, always_inline));
2713 static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1C(u64 a)
2715 return 0x38a90 + 0x40000 * a;
2719 * Register (RSL) cgx#_gmp_gmi#_wol_int_ena_w1s
2721 * CGX GMP GMI RX WOL Interrupt Enable Set Registers This register sets
2722 * interrupt enable bits.
2724 union cgxx_gmp_gmix_wol_int_ena_w1s {
2726 struct cgxx_gmp_gmix_wol_int_ena_w1s_s {
2728 u64 reserved_1_63 : 63;
2730 /* struct cgxx_gmp_gmix_wol_int_ena_w1s_s cn; */
2733 static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1S(u64 a)
2734 __attribute__ ((pure, always_inline));
2735 static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1S(u64 a)
2737 return 0x38a98 + 0x40000 * a;
2741 * Register (RSL) cgx#_gmp_gmi#_wol_int_w1s
2743 * CGX GMP GMI RX WOL Interrupt Set Registers This register sets
2746 union cgxx_gmp_gmix_wol_int_w1s {
2748 struct cgxx_gmp_gmix_wol_int_w1s_s {
2750 u64 reserved_1_63 : 63;
2752 /* struct cgxx_gmp_gmix_wol_int_w1s_s cn; */
2755 static inline u64 CGXX_GMP_GMIX_WOL_INT_W1S(u64 a)
2756 __attribute__ ((pure, always_inline));
2757 static inline u64 CGXX_GMP_GMIX_WOL_INT_W1S(u64 a)
2759 return 0x38a88 + 0x40000 * a;
2763 * Register (RSL) cgx#_gmp_gmi_prt#_cfg
2765 * CGX GMP GMI LMAC Configuration Registers This register controls the
2766 * configuration of the LMAC.
2768 union cgxx_gmp_gmi_prtx_cfg {
2770 struct cgxx_gmp_gmi_prtx_cfg_s {
2775 u64 reserved_4_7 : 4;
2777 u64 reserved_9_11 : 3;
2780 u64 reserved_14_63 : 50;
2782 /* struct cgxx_gmp_gmi_prtx_cfg_s cn; */
2785 static inline u64 CGXX_GMP_GMI_PRTX_CFG(u64 a)
2786 __attribute__ ((pure, always_inline));
2787 static inline u64 CGXX_GMP_GMI_PRTX_CFG(u64 a)
2789 return 0x38020 + 0x40000 * a;
2793 * Register (RSL) cgx#_gmp_gmi_rx#_decision
2795 * CGX GMP Packet-Decision Registers This register specifies the byte
2796 * count used to determine when to accept or to filter a packet. As each
2797 * byte in a packet is received by GMI, the L2 byte count is compared
2798 * against [CNT]. In normal operation, the L2 header begins after the
2799 * PREAMBLE + SFD (CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] = 1) and any
2800 * optional UDD skip data (CGX()_GMP_GMI_RX()_UDD_SKP[LEN]). Internal:
2801 * Notes: As each byte in a packet is received by GMI, the L2 byte count
2802 * is compared against the [CNT]. The L2 byte count is the number of
2803 * bytes from the beginning of the L2 header (DMAC). In normal
2804 * operation, the L2 header begins after the PREAMBLE+SFD
2805 * (CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK]=1) and any optional UDD skip data
2806 * (CGX()_GMP_GMI_RX()_UDD_SKP[LEN]). When
2807 * CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are
2808 * prepended to the packet and would require UDD skip length to account
2809 * for them. Full Duplex: _ L2 Size \< [CNT] - Accept packet. No
2810 * filtering is applied. _ L2 Size \>= [CNT] - Apply filter. Accept
2811 * packet based on PAUSE packet filter. Half Duplex: _ L2 Size \<
2812 * [CNT] - Drop packet. Packet is unconditionally dropped. _ L2 Size
2813 * \>= [CNT] - Accept packet. where L2_size = MAX(0, total_packet_size -
2814 * CGX()_GMP_GMI_RX()_UDD_SKP[LEN] -
2815 * ((CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK]==1)*8)).
2817 union cgxx_gmp_gmi_rxx_decision {
2819 struct cgxx_gmp_gmi_rxx_decision_s {
2821 u64 reserved_5_63 : 59;
2823 /* struct cgxx_gmp_gmi_rxx_decision_s cn; */
2826 static inline u64 CGXX_GMP_GMI_RXX_DECISION(u64 a)
2827 __attribute__ ((pure, always_inline));
2828 static inline u64 CGXX_GMP_GMI_RXX_DECISION(u64 a)
2830 return 0x38040 + 0x40000 * a;
2834 * Register (RSL) cgx#_gmp_gmi_rx#_frm_chk
2836 * CGX GMP Frame Check Registers
2838 union cgxx_gmp_gmi_rxx_frm_chk {
2840 struct cgxx_gmp_gmi_rxx_frm_chk_s {
2846 u64 reserved_5_6 : 2;
2849 u64 reserved_9_63 : 55;
2851 /* struct cgxx_gmp_gmi_rxx_frm_chk_s cn; */
2854 static inline u64 CGXX_GMP_GMI_RXX_FRM_CHK(u64 a)
2855 __attribute__ ((pure, always_inline));
2856 static inline u64 CGXX_GMP_GMI_RXX_FRM_CHK(u64 a)
2858 return 0x38030 + 0x40000 * a;
2862 * Register (RSL) cgx#_gmp_gmi_rx#_frm_ctl
2864 * CGX GMP Frame Control Registers This register controls the handling of
2865 * the frames. The [CTL_BCK] and [CTL_DRP] bits control how the hardware
2866 * handles incoming PAUSE packets. The most common modes of operation: _
2867 * [CTL_BCK] = 1, [CTL_DRP] = 1: hardware handles everything. _ [CTL_BCK]
2868 * = 0, [CTL_DRP] = 0: software sees all PAUSE frames. _ [CTL_BCK] = 0,
2869 * [CTL_DRP] = 1: all PAUSE frames are completely ignored. These control
2870 * bits should be set to [CTL_BCK] = 0, [CTL_DRP] = 0 in half-duplex
2871 * mode. Since PAUSE packets only apply to full duplex operation, any
2872 * PAUSE packet would constitute an exception which should be handled by
2873 * the processing cores. PAUSE packets should not be forwarded.
2874 * Internal: Notes: [PRE_STRP]: When [PRE_CHK] is set (indicating that
2875 * the PREAMBLE will be sent), [PRE_STRP] determines if the PREAMBLE+SFD
2876 * bytes are thrown away or sent to the Octane core as part of the
2877 * packet. In either mode, the PREAMBLE+SFD bytes are not counted toward
2878 * the packet size when checking against the MIN and MAX bounds.
2879 * Furthermore, the bytes are skipped when locating the start of the L2
2880 * header for DMAC and Control frame recognition.
2882 union cgxx_gmp_gmi_rxx_frm_ctl {
2884 struct cgxx_gmp_gmi_rxx_frm_ctl_s {
2892 u64 reserved_7_8 : 2;
2895 u64 reserved_11 : 1;
2898 u64 reserved_14_63 : 50;
2900 struct cgxx_gmp_gmi_rxx_frm_ctl_cn {
2912 u64 reserved_11 : 1;
2915 u64 reserved_14_63 : 50;
2919 static inline u64 CGXX_GMP_GMI_RXX_FRM_CTL(u64 a)
2920 __attribute__ ((pure, always_inline));
2921 static inline u64 CGXX_GMP_GMI_RXX_FRM_CTL(u64 a)
2923 return 0x38028 + 0x40000 * a;
2927 * Register (RSL) cgx#_gmp_gmi_rx#_ifg
2929 * CGX GMI Minimum Interframe-Gap Cycles Registers This register
2930 * specifies the minimum number of interframe-gap (IFG) cycles between
2933 union cgxx_gmp_gmi_rxx_ifg {
2935 struct cgxx_gmp_gmi_rxx_ifg_s {
2937 u64 reserved_4_63 : 60;
2939 /* struct cgxx_gmp_gmi_rxx_ifg_s cn; */
2942 static inline u64 CGXX_GMP_GMI_RXX_IFG(u64 a)
2943 __attribute__ ((pure, always_inline));
2944 static inline u64 CGXX_GMP_GMI_RXX_IFG(u64 a)
2946 return 0x38058 + 0x40000 * a;
2950 * Register (RSL) cgx#_gmp_gmi_rx#_int
2952 * CGX GMP GMI RX Interrupt Registers These registers allow interrupts to
2953 * be sent to the control processor. * Exception conditions \<10:0\> can
2954 * also set the rcv/opcode in the received packet's work-queue entry.
2955 * CGX()_GMP_GMI_RX()_FRM_CHK provides a bit mask for configuring which
2956 * conditions set the error. In half duplex operation, the expectation is
2957 * that collisions will appear as either MINERR or CAREXT errors.
2958 * Internal: Notes: (1) exception conditions 10:0 can also set the
2959 * rcv/opcode in the received packet's workQ entry. The
2960 * CGX()_GMP_GMI_RX()_FRM_CHK register provides a bit mask for
2961 * configuring which conditions set the error. (2) in half duplex
2962 * operation, the expectation is that collisions will appear as either
2963 * MINERR or CAREXT errors. (3) JABBER An RX jabber error indicates
2964 * that a packet was received which is longer than the maximum allowed
2965 * packet as defined by the system. GMI will truncate the packet at the
2966 * JABBER count. Failure to do so could lead to system instabilty. (4)
2967 * NIBERR This error is illegal at 1000Mbs speeds
2968 * (CGX()_GMP_GMI_PRT()_CFG[SPEED]==0) and will never assert. (5) MINERR
2969 * total frame DA+SA+TL+DATA+PAD+FCS \< 64 (6) ALNERR Indicates that the
2970 * packet received was not an integer number of bytes. If FCS checking
2971 * is enabled, ALNERR will only assert if the FCS is bad. If FCS
2972 * checking is disabled, ALNERR will assert in all non-integer frame
2973 * cases. (7) Collisions Collisions can only occur in half-duplex mode.
2974 * A collision is assumed by the receiver when the slottime
2975 * (CGX()_GMP_GMI_PRT()_CFG[SLOTTIME]) is not satisfied. In 10/100 mode,
2976 * this will result in a frame \< SLOTTIME. In 1000 mode, it could
2977 * result either in frame \< SLOTTIME or a carrier extend error with the
2978 * SLOTTIME. These conditions are visible by... . transfer ended before
2979 * slottime COLDET . carrier extend error CAREXT (A) LENERR
2980 * Length errors occur when the received packet does not match the length
2981 * field. LENERR is only checked for packets between 64 and 1500 bytes.
2982 * For untagged frames, the length must match exactly. For tagged frames
2983 * the length or length+4 must match. (B) PCTERR checks that the frame
2984 * begins with a valid PREAMBLE sequence. Does not check the number of
2985 * PREAMBLE cycles. (C) OVRERR *DON'T PUT IN HRM* OVRERR is an
2986 * architectural assertion check internal to GMI to make sure no
2987 * assumption was violated. In a correctly operating system, this
2988 * interrupt can never fire. GMI has an internal arbiter which selects
2989 * which of four ports to buffer in the main RX FIFO. If we normally
2990 * buffer eight bytes, then each port will typically push a tick every
2991 * eight cycles if the packet interface is going as fast as possible. If
2992 * there are four ports, they push every two cycles. So that's the
2993 * assumption. That the inbound module will always be able to consume
2994 * the tick before another is produced. If that doesn't happen that's
2995 * when OVRERR will assert."
2997 union cgxx_gmp_gmi_rxx_int {
2999 struct cgxx_gmp_gmi_rxx_int_s {
3012 u64 reserved_12_63 : 52;
3014 struct cgxx_gmp_gmi_rxx_int_cn {
3027 u64 reserved_12_15 : 4;
3028 u64 reserved_16_63 : 48;
3032 static inline u64 CGXX_GMP_GMI_RXX_INT(u64 a)
3033 __attribute__ ((pure, always_inline));
3034 static inline u64 CGXX_GMP_GMI_RXX_INT(u64 a)
3036 return 0x38000 + 0x40000 * a;
3040 * Register (RSL) cgx#_gmp_gmi_rx#_int_ena_w1c
3042 * CGX GMP GMI RX Interrupt Enable Clear Registers This register clears
3043 * interrupt enable bits.
3045 union cgxx_gmp_gmi_rxx_int_ena_w1c {
3047 struct cgxx_gmp_gmi_rxx_int_ena_w1c_s {
3060 u64 reserved_12_63 : 52;
3062 struct cgxx_gmp_gmi_rxx_int_ena_w1c_cn {
3075 u64 reserved_12_15 : 4;
3076 u64 reserved_16_63 : 48;
3080 static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1C(u64 a)
3081 __attribute__ ((pure, always_inline));
3082 static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1C(u64 a)
3084 return 0x38010 + 0x40000 * a;
3088 * Register (RSL) cgx#_gmp_gmi_rx#_int_ena_w1s
3090 * CGX GMP GMI RX Interrupt Enable Set Registers This register sets
3091 * interrupt enable bits.
3093 union cgxx_gmp_gmi_rxx_int_ena_w1s {
3095 struct cgxx_gmp_gmi_rxx_int_ena_w1s_s {
3108 u64 reserved_12_63 : 52;
3110 struct cgxx_gmp_gmi_rxx_int_ena_w1s_cn {
3123 u64 reserved_12_15 : 4;
3124 u64 reserved_16_63 : 48;
3128 static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1S(u64 a)
3129 __attribute__ ((pure, always_inline));
3130 static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1S(u64 a)
3132 return 0x38018 + 0x40000 * a;
3136 * Register (RSL) cgx#_gmp_gmi_rx#_int_w1s
3138 * CGX GMP GMI RX Interrupt Set Registers This register sets interrupt
3141 union cgxx_gmp_gmi_rxx_int_w1s {
3143 struct cgxx_gmp_gmi_rxx_int_w1s_s {
3156 u64 reserved_12_63 : 52;
3158 struct cgxx_gmp_gmi_rxx_int_w1s_cn {
3171 u64 reserved_12_15 : 4;
3172 u64 reserved_16_63 : 48;
3176 static inline u64 CGXX_GMP_GMI_RXX_INT_W1S(u64 a)
3177 __attribute__ ((pure, always_inline));
3178 static inline u64 CGXX_GMP_GMI_RXX_INT_W1S(u64 a)
3180 return 0x38008 + 0x40000 * a;
3184 * Register (RSL) cgx#_gmp_gmi_rx#_jabber
3186 * CGX GMP Maximum Packet-Size Registers This register specifies the
3187 * maximum size for packets, beyond which the GMI truncates.
3189 union cgxx_gmp_gmi_rxx_jabber {
3191 struct cgxx_gmp_gmi_rxx_jabber_s {
3193 u64 reserved_16_63 : 48;
3195 /* struct cgxx_gmp_gmi_rxx_jabber_s cn; */
3198 static inline u64 CGXX_GMP_GMI_RXX_JABBER(u64 a)
3199 __attribute__ ((pure, always_inline));
3200 static inline u64 CGXX_GMP_GMI_RXX_JABBER(u64 a)
3202 return 0x38038 + 0x40000 * a;
3206 * Register (RSL) cgx#_gmp_gmi_rx#_udd_skp
3208 * CGX GMP GMI User-Defined Data Skip Registers This register specifies
3209 * the amount of user-defined data (UDD) added before the start of the
3210 * L2C data. Internal: Notes: (1) The skip bytes are part of the packet
3211 * and will be handled by NIX. (2) The system can determine if the UDD
3212 * bytes are included in the FCS check by using the FCSSEL field - if the
3213 * FCS check is enabled. (3) Assume that the preamble/sfd is always at
3214 * the start of the frame - even before UDD bytes. In most cases, there
3215 * will be no preamble in these cases since it will be packet interface
3216 * in direct communication to another packet interface (MAC to MAC)
3217 * without a PHY involved. (4) We can still do address filtering and
3218 * control packet filtering if the user desires. (5)
3219 * CGX()_GMP_GMI_RX()_UDD_SKP[LEN] must be 0 in half-duplex operation
3220 * unless CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear. If
3221 * CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear, then
3222 * CGX()_GMP_GMI_RX()_UDD_SKP[LEN] will normally be 8. (6) In all cases,
3223 * the UDD bytes will be sent down the packet interface as part of the
3224 * packet. The UDD bytes are never stripped from the actual packet.
3226 union cgxx_gmp_gmi_rxx_udd_skp {
3228 struct cgxx_gmp_gmi_rxx_udd_skp_s {
3232 u64 reserved_9_63 : 55;
3234 /* struct cgxx_gmp_gmi_rxx_udd_skp_s cn; */
3237 static inline u64 CGXX_GMP_GMI_RXX_UDD_SKP(u64 a)
3238 __attribute__ ((pure, always_inline));
3239 static inline u64 CGXX_GMP_GMI_RXX_UDD_SKP(u64 a)
3241 return 0x38048 + 0x40000 * a;
3245 * Register (RSL) cgx#_gmp_gmi_smac#
3247 * CGX GMI SMAC Registers
3249 union cgxx_gmp_gmi_smacx {
3251 struct cgxx_gmp_gmi_smacx_s {
3253 u64 reserved_48_63 : 16;
3255 /* struct cgxx_gmp_gmi_smacx_s cn; */
3258 static inline u64 CGXX_GMP_GMI_SMACX(u64 a)
3259 __attribute__ ((pure, always_inline));
3260 static inline u64 CGXX_GMP_GMI_SMACX(u64 a)
3262 return 0x38230 + 0x40000 * a;
3266 * Register (RSL) cgx#_gmp_gmi_tx#_append
3268 * CGX GMI TX Append Control Registers
3270 union cgxx_gmp_gmi_txx_append {
3272 struct cgxx_gmp_gmi_txx_append_s {
3277 u64 reserved_4_63 : 60;
3279 /* struct cgxx_gmp_gmi_txx_append_s cn; */
3282 static inline u64 CGXX_GMP_GMI_TXX_APPEND(u64 a)
3283 __attribute__ ((pure, always_inline));
3284 static inline u64 CGXX_GMP_GMI_TXX_APPEND(u64 a)
3286 return 0x38218 + 0x40000 * a;
3290 * Register (RSL) cgx#_gmp_gmi_tx#_burst
3292 * CGX GMI TX Burst-Counter Registers
3294 union cgxx_gmp_gmi_txx_burst {
3296 struct cgxx_gmp_gmi_txx_burst_s {
3298 u64 reserved_16_63 : 48;
3300 /* struct cgxx_gmp_gmi_txx_burst_s cn; */
3303 static inline u64 CGXX_GMP_GMI_TXX_BURST(u64 a)
3304 __attribute__ ((pure, always_inline));
3305 static inline u64 CGXX_GMP_GMI_TXX_BURST(u64 a)
3307 return 0x38228 + 0x40000 * a;
3311 * Register (RSL) cgx#_gmp_gmi_tx#_ctl
3313 * CGX GMI Transmit Control Registers
3315 union cgxx_gmp_gmi_txx_ctl {
3317 struct cgxx_gmp_gmi_txx_ctl_s {
3322 u64 reserved_4_63 : 60;
3324 /* struct cgxx_gmp_gmi_txx_ctl_s cn; */
3327 static inline u64 CGXX_GMP_GMI_TXX_CTL(u64 a)
3328 __attribute__ ((pure, always_inline));
3329 static inline u64 CGXX_GMP_GMI_TXX_CTL(u64 a)
3331 return 0x38270 + 0x40000 * a;
3335 * Register (RSL) cgx#_gmp_gmi_tx#_int
3337 * CGX GMI TX Interrupt Registers
3339 union cgxx_gmp_gmi_txx_int {
3341 struct cgxx_gmp_gmi_txx_int_s {
3347 u64 reserved_5_63 : 59;
3349 struct cgxx_gmp_gmi_txx_int_cn {
3355 u64 reserved_5_7 : 3;
3357 u64 reserved_9_63 : 55;
3361 static inline u64 CGXX_GMP_GMI_TXX_INT(u64 a)
3362 __attribute__ ((pure, always_inline));
3363 static inline u64 CGXX_GMP_GMI_TXX_INT(u64 a)
3365 return 0x38500 + 0x40000 * a;
3369 * Register (RSL) cgx#_gmp_gmi_tx#_int_ena_w1c
3371 * CGX GMI TX Interrupt Enable Clear Registers This register clears
3372 * interrupt enable bits.
3374 union cgxx_gmp_gmi_txx_int_ena_w1c {
3376 struct cgxx_gmp_gmi_txx_int_ena_w1c_s {
3382 u64 reserved_5_63 : 59;
3384 struct cgxx_gmp_gmi_txx_int_ena_w1c_cn {
3390 u64 reserved_5_7 : 3;
3392 u64 reserved_9_63 : 55;
3396 static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1C(u64 a)
3397 __attribute__ ((pure, always_inline));
3398 static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1C(u64 a)
3400 return 0x38510 + 0x40000 * a;
3404 * Register (RSL) cgx#_gmp_gmi_tx#_int_ena_w1s
3406 * CGX GMI TX Interrupt Enable Set Registers This register sets interrupt
3409 union cgxx_gmp_gmi_txx_int_ena_w1s {
3411 struct cgxx_gmp_gmi_txx_int_ena_w1s_s {
3417 u64 reserved_5_63 : 59;
3419 struct cgxx_gmp_gmi_txx_int_ena_w1s_cn {
3425 u64 reserved_5_7 : 3;
3427 u64 reserved_9_63 : 55;
3431 static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1S(u64 a)
3432 __attribute__ ((pure, always_inline));
3433 static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1S(u64 a)
3435 return 0x38518 + 0x40000 * a;
3439 * Register (RSL) cgx#_gmp_gmi_tx#_int_w1s
3441 * CGX GMI TX Interrupt Set Registers This register sets interrupt bits.
3443 union cgxx_gmp_gmi_txx_int_w1s {
3445 struct cgxx_gmp_gmi_txx_int_w1s_s {
3451 u64 reserved_5_63 : 59;
3453 struct cgxx_gmp_gmi_txx_int_w1s_cn {
3459 u64 reserved_5_7 : 3;
3461 u64 reserved_9_63 : 55;
3465 static inline u64 CGXX_GMP_GMI_TXX_INT_W1S(u64 a)
3466 __attribute__ ((pure, always_inline));
3467 static inline u64 CGXX_GMP_GMI_TXX_INT_W1S(u64 a)
3469 return 0x38508 + 0x40000 * a;
3473 * Register (RSL) cgx#_gmp_gmi_tx#_min_pkt
3475 * CGX GMI TX Minimum-Size-Packet Registers
3477 union cgxx_gmp_gmi_txx_min_pkt {
3479 struct cgxx_gmp_gmi_txx_min_pkt_s {
3481 u64 reserved_8_63 : 56;
3483 /* struct cgxx_gmp_gmi_txx_min_pkt_s cn; */
3486 static inline u64 CGXX_GMP_GMI_TXX_MIN_PKT(u64 a)
3487 __attribute__ ((pure, always_inline));
3488 static inline u64 CGXX_GMP_GMI_TXX_MIN_PKT(u64 a)
3490 return 0x38240 + 0x40000 * a;
3494 * Register (RSL) cgx#_gmp_gmi_tx#_pause_pkt_interval
3496 * CGX GMI TX PAUSE-Packet Transmission-Interval Registers This register
3497 * specifies how often PAUSE packets are sent. Internal: Notes: Choosing
3498 * proper values of CGX()_GMP_GMI_TX()_PAUSE_PKT_TIME[PTIME] and
3499 * CGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to
3500 * the system designer. It is suggested that TIME be much greater than
3501 * INTERVAL and CGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND] be set. This allows
3502 * a periodic refresh of the PAUSE count and then when the backpressure
3503 * condition is lifted, a PAUSE packet with TIME==0 will be sent
3504 * indicating that Octane is ready for additional data. If the system
3505 * chooses to not set CGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND], then it is
3506 * suggested that TIME and INTERVAL are programmed such that they
3507 * satisfy the following rule: _ INTERVAL \<= TIME - (largest_pkt_size
3508 * + IFG + pause_pkt_size) where largest_pkt_size is the largest packet
3509 * that the system can send (normally 1518B), IFG is the interframe gap
3510 * and pause_pkt_size is the size of the PAUSE packet (normally 64B).
3512 union cgxx_gmp_gmi_txx_pause_pkt_interval {
3514 struct cgxx_gmp_gmi_txx_pause_pkt_interval_s {
3516 u64 reserved_16_63 : 48;
3518 /* struct cgxx_gmp_gmi_txx_pause_pkt_interval_s cn; */
3521 static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(u64 a)
3522 __attribute__ ((pure, always_inline));
3523 static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(u64 a)
3525 return 0x38248 + 0x40000 * a;
3529 * Register (RSL) cgx#_gmp_gmi_tx#_pause_pkt_time
3531 * CGX GMI TX PAUSE Packet PAUSE-Time Registers
3533 union cgxx_gmp_gmi_txx_pause_pkt_time {
3535 struct cgxx_gmp_gmi_txx_pause_pkt_time_s {
3537 u64 reserved_16_63 : 48;
3539 /* struct cgxx_gmp_gmi_txx_pause_pkt_time_s cn; */
3542 static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(u64 a)
3543 __attribute__ ((pure, always_inline));
3544 static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(u64 a)
3546 return 0x38238 + 0x40000 * a;
3550 * Register (RSL) cgx#_gmp_gmi_tx#_pause_togo
3552 * CGX GMI TX Time-to-Backpressure Registers
3554 union cgxx_gmp_gmi_txx_pause_togo {
3556 struct cgxx_gmp_gmi_txx_pause_togo_s {
3558 u64 reserved_16_63 : 48;
3560 /* struct cgxx_gmp_gmi_txx_pause_togo_s cn; */
3563 static inline u64 CGXX_GMP_GMI_TXX_PAUSE_TOGO(u64 a)
3564 __attribute__ ((pure, always_inline));
3565 static inline u64 CGXX_GMP_GMI_TXX_PAUSE_TOGO(u64 a)
3567 return 0x38258 + 0x40000 * a;
3571 * Register (RSL) cgx#_gmp_gmi_tx#_pause_zero
3573 * CGX GMI TX PAUSE-Zero-Enable Registers
3575 union cgxx_gmp_gmi_txx_pause_zero {
3577 struct cgxx_gmp_gmi_txx_pause_zero_s {
3579 u64 reserved_1_63 : 63;
3581 /* struct cgxx_gmp_gmi_txx_pause_zero_s cn; */
3584 static inline u64 CGXX_GMP_GMI_TXX_PAUSE_ZERO(u64 a)
3585 __attribute__ ((pure, always_inline));
3586 static inline u64 CGXX_GMP_GMI_TXX_PAUSE_ZERO(u64 a)
3588 return 0x38260 + 0x40000 * a;
3592 * Register (RSL) cgx#_gmp_gmi_tx#_sgmii_ctl
3594 * CGX SGMII Control Registers
3596 union cgxx_gmp_gmi_txx_sgmii_ctl {
3598 struct cgxx_gmp_gmi_txx_sgmii_ctl_s {
3600 u64 reserved_1_63 : 63;
3602 /* struct cgxx_gmp_gmi_txx_sgmii_ctl_s cn; */
3605 static inline u64 CGXX_GMP_GMI_TXX_SGMII_CTL(u64 a)
3606 __attribute__ ((pure, always_inline));
3607 static inline u64 CGXX_GMP_GMI_TXX_SGMII_CTL(u64 a)
3609 return 0x38300 + 0x40000 * a;
3613 * Register (RSL) cgx#_gmp_gmi_tx#_slot
3615 * CGX GMI TX Slottime Counter Registers
3617 union cgxx_gmp_gmi_txx_slot {
3619 struct cgxx_gmp_gmi_txx_slot_s {
3621 u64 reserved_10_63 : 54;
3623 /* struct cgxx_gmp_gmi_txx_slot_s cn; */
3626 static inline u64 CGXX_GMP_GMI_TXX_SLOT(u64 a)
3627 __attribute__ ((pure, always_inline));
3628 static inline u64 CGXX_GMP_GMI_TXX_SLOT(u64 a)
3630 return 0x38220 + 0x40000 * a;
3634 * Register (RSL) cgx#_gmp_gmi_tx#_soft_pause
3636 * CGX GMI TX Software PAUSE Registers
3638 union cgxx_gmp_gmi_txx_soft_pause {
3640 struct cgxx_gmp_gmi_txx_soft_pause_s {
3642 u64 reserved_16_63 : 48;
3644 /* struct cgxx_gmp_gmi_txx_soft_pause_s cn; */
3647 static inline u64 CGXX_GMP_GMI_TXX_SOFT_PAUSE(u64 a)
3648 __attribute__ ((pure, always_inline));
3649 static inline u64 CGXX_GMP_GMI_TXX_SOFT_PAUSE(u64 a)
3651 return 0x38250 + 0x40000 * a;
3655 * Register (RSL) cgx#_gmp_gmi_tx#_thresh
3657 * CGX GMI TX Threshold Registers
3659 union cgxx_gmp_gmi_txx_thresh {
3661 struct cgxx_gmp_gmi_txx_thresh_s {
3663 u64 reserved_11_63 : 53;
3665 /* struct cgxx_gmp_gmi_txx_thresh_s cn; */
3668 static inline u64 CGXX_GMP_GMI_TXX_THRESH(u64 a)
3669 __attribute__ ((pure, always_inline));
3670 static inline u64 CGXX_GMP_GMI_TXX_THRESH(u64 a)
3672 return 0x38210 + 0x40000 * a;
3676 * Register (RSL) cgx#_gmp_gmi_tx_col_attempt
3678 * CGX TX Collision Attempts Before Dropping Frame Registers
3680 union cgxx_gmp_gmi_tx_col_attempt {
3682 struct cgxx_gmp_gmi_tx_col_attempt_s {
3684 u64 reserved_5_63 : 59;
3686 /* struct cgxx_gmp_gmi_tx_col_attempt_s cn; */
3689 static inline u64 CGXX_GMP_GMI_TX_COL_ATTEMPT(void)
3690 __attribute__ ((pure, always_inline));
3691 static inline u64 CGXX_GMP_GMI_TX_COL_ATTEMPT(void)
3697 * Register (RSL) cgx#_gmp_gmi_tx_ifg
3699 * CGX GMI TX Interframe-Gap Cycles Registers Consider the following when
3700 * programming IFG1 and IFG2: * For 10/100/1000 Mb/s half-duplex systems
3701 * that require IEEE 802.3 compatibility, IFG1 must be in the range of
3702 * 1-8, [IFG2] must be in the range of 4-12, and the [IFG1] + [IFG2] sum
3703 * must be 12. * For 10/100/1000 Mb/s full-duplex systems that require
3704 * IEEE 802.3 compatibility, IFG1 must be in the range of 1-11, [IFG2]
3705 * must be in the range of 1-11, and the [IFG1] + [IFG2] sum must be 12.
3706 * For all other systems, IFG1 and IFG2 can be any value in the range of
3707 * 1-15, allowing for a total possible IFG sum of 2-30.
3709 union cgxx_gmp_gmi_tx_ifg {
3711 struct cgxx_gmp_gmi_tx_ifg_s {
3714 u64 reserved_8_63 : 56;
3716 /* struct cgxx_gmp_gmi_tx_ifg_s cn; */
3719 static inline u64 CGXX_GMP_GMI_TX_IFG(void)
3720 __attribute__ ((pure, always_inline));
3721 static inline u64 CGXX_GMP_GMI_TX_IFG(void)
3727 * Register (RSL) cgx#_gmp_gmi_tx_jam
3729 * CGX GMI TX JAM Pattern Registers This register provides the pattern
3730 * used in JAM bytes.
3732 union cgxx_gmp_gmi_tx_jam {
3734 struct cgxx_gmp_gmi_tx_jam_s {
3736 u64 reserved_8_63 : 56;
3738 /* struct cgxx_gmp_gmi_tx_jam_s cn; */
3741 static inline u64 CGXX_GMP_GMI_TX_JAM(void)
3742 __attribute__ ((pure, always_inline));
3743 static inline u64 CGXX_GMP_GMI_TX_JAM(void)
3749 * Register (RSL) cgx#_gmp_gmi_tx_lfsr
3751 * CGX GMI TX LFSR Registers This register shows the contents of the
3752 * linear feedback shift register (LFSR), which is used to implement
3753 * truncated binary exponential backoff.
3755 union cgxx_gmp_gmi_tx_lfsr {
3757 struct cgxx_gmp_gmi_tx_lfsr_s {
3759 u64 reserved_16_63 : 48;
3761 /* struct cgxx_gmp_gmi_tx_lfsr_s cn; */
3764 static inline u64 CGXX_GMP_GMI_TX_LFSR(void)
3765 __attribute__ ((pure, always_inline));
3766 static inline u64 CGXX_GMP_GMI_TX_LFSR(void)
3772 * Register (RSL) cgx#_gmp_gmi_tx_pause_pkt_dmac
3774 * CGX TX PAUSE-Packet DMAC-Field Registers
3776 union cgxx_gmp_gmi_tx_pause_pkt_dmac {
3778 struct cgxx_gmp_gmi_tx_pause_pkt_dmac_s {
3780 u64 reserved_48_63 : 16;
3782 /* struct cgxx_gmp_gmi_tx_pause_pkt_dmac_s cn; */
3785 static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(void)
3786 __attribute__ ((pure, always_inline));
3787 static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(void)
3793 * Register (RSL) cgx#_gmp_gmi_tx_pause_pkt_type
3795 * CGX GMI TX PAUSE-Packet-PTYPE Field Registers This register provides
3796 * the PTYPE field that is placed in outbound PAUSE packets.
3798 union cgxx_gmp_gmi_tx_pause_pkt_type {
3800 struct cgxx_gmp_gmi_tx_pause_pkt_type_s {
3802 u64 reserved_16_63 : 48;
3804 /* struct cgxx_gmp_gmi_tx_pause_pkt_type_s cn; */
3807 static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(void)
3808 __attribute__ ((pure, always_inline));
3809 static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(void)
3815 * Register (RSL) cgx#_gmp_misc#_cfg
3817 * CGX GMP PCS Miscellaneous Control Registers This register contains
3818 * general configuration that should not need to be changed from reset
3819 * settings. Internal: Per lmac diagnostic and chicken bits.
3821 union cgxx_gmp_miscx_cfg {
3823 struct cgxx_gmp_miscx_cfg_s {
3824 u64 tx_eee_quiet_credit_mode : 1;
3825 u64 tx_eee_wait_gmi_fast_idle : 1;
3826 u64 tx_qsgmii_port0_init : 1;
3827 u64 tx_eee_rx_sync_status_enable : 1;
3829 u64 reserved_5_7 : 3;
3830 u64 rx_pcs_sync_signal_detect : 1;
3831 u64 rx_pcs_sync_timeout : 1;
3832 u64 rx_pcs_eee_mode_enable : 1;
3833 u64 rx_pcs_lpi_enable : 1;
3834 u64 rx_pcs_802_rx_k : 1;
3835 u64 rx_pcs_alt_qlb2i : 1;
3836 u64 reserved_14_15 : 2;
3837 u64 rx_cgp_gser_throttle : 1;
3838 u64 rx_cgp_edet_filter : 1;
3839 u64 rx_cgp_edet_qlm_val : 1;
3840 u64 reserved_19_63 : 45;
3842 /* struct cgxx_gmp_miscx_cfg_s cn; */
3845 static inline u64 CGXX_GMP_MISCX_CFG(u64 a)
3846 __attribute__ ((pure, always_inline));
3847 static inline u64 CGXX_GMP_MISCX_CFG(u64 a)
3849 return 0x34000 + 0x40000 * a;
3853 * Register (RSL) cgx#_gmp_pcs#_an_expansion
3855 * CGX GMP PCS AN Expansion register Register 6 AN status
3857 union cgxx_gmp_pcsx_an_expansion {
3859 struct cgxx_gmp_pcsx_an_expansion_s {
3861 u64 page_received : 1;
3862 u64 next_page_able : 1;
3863 u64 reserved_3_63 : 61;
3865 /* struct cgxx_gmp_pcsx_an_expansion_s cn; */
3868 static inline u64 CGXX_GMP_PCSX_AN_EXPANSION(u64 a)
3869 __attribute__ ((pure, always_inline));
3870 static inline u64 CGXX_GMP_PCSX_AN_EXPANSION(u64 a)
3872 return 0x30a60 + 0x40000 * a;
3876 * Register (RSL) cgx#_gmp_pcs#_an_lp_abil_np
3878 * CGX GMP PCS AN Link Partner Ability Next Page Register 8 This register
3879 * contains the advertised ability of the link partners Next Page. The
3880 * definition for this register is provided in 32.5.4.2 for changes to
3883 union cgxx_gmp_pcsx_an_lp_abil_np {
3885 struct cgxx_gmp_pcsx_an_lp_abil_np_s {
3892 u64 reserved_16_63 : 48;
3894 /* struct cgxx_gmp_pcsx_an_lp_abil_np_s cn; */
3897 static inline u64 CGXX_GMP_PCSX_AN_LP_ABIL_NP(u64 a)
3898 __attribute__ ((pure, always_inline));
3899 static inline u64 CGXX_GMP_PCSX_AN_LP_ABIL_NP(u64 a)
3901 return 0x30a80 + 0x40000 * a;
3905 * Register (RSL) cgx#_gmp_pcs#_an_np_tx
3907 * CGX GMP PCS AN Next Page Transmit Register 7 Software programs this
3908 * register with the contents of the AN message next page or unformatted
3909 * next page link code word to be transmitted during autonegotiation.
3910 * Next page exchange occurs after the base link code words have been
3911 * exchanged if either end of the link segment sets the NP bit to 1,
3912 * indicating that it has at least one next page to send. Once initiated,
3913 * next page exchange continues until both ends of the link segment set
3914 * their NP bits to 0. Both sides must be NP capable to use NP exchanges.
3916 union cgxx_gmp_pcsx_an_np_tx {
3918 struct cgxx_gmp_pcsx_an_np_tx_s {
3925 u64 reserved_16_63 : 48;
3927 /* struct cgxx_gmp_pcsx_an_np_tx_s cn; */
3930 static inline u64 CGXX_GMP_PCSX_AN_NP_TX(u64 a)
3931 __attribute__ ((pure, always_inline));
3932 static inline u64 CGXX_GMP_PCSX_AN_NP_TX(u64 a)
3934 return 0x30a70 + 0x40000 * a;
3938 * Register (RSL) cgx#_gmp_pcs#_dbg_control
3940 * CGX PCS Debug Control Registers
3942 union cgxx_gmp_pcsx_dbg_control {
3944 struct cgxx_gmp_pcsx_dbg_control_s {
3945 u64 us_clk_period : 7;
3946 u64 reserved_7_63 : 57;
3948 /* struct cgxx_gmp_pcsx_dbg_control_s cn; */
3951 static inline u64 CGXX_GMP_PCSX_DBG_CONTROL(u64 a)
3952 __attribute__ ((pure, always_inline));
3953 static inline u64 CGXX_GMP_PCSX_DBG_CONTROL(u64 a)
3955 return 0x31000 + 0x40000 * a;
3959 * Register (RSL) cgx#_gmp_pcs#_rx_eee_wake
3961 * INTERNAL: CGX GMP PCS RX EEE Wake Error Counter Registers Reserved.
3962 * Internal: This register is used by PHY types that support EEE to count
3963 * wake time faults where the PHY fails to complete its normal wake
3964 * sequence within the time required for the specific PHY type. The
3965 * definition of the fault event to be counted is defined for each PHY
3966 * and may occur during a refresh or a wake-up as defined by the PHY.
3967 * This 16-bit counter shall be reset to all zeros upon execution of the
3968 * PCS reset. This counter shall be held at all ones in the case of
3971 union cgxx_gmp_pcsx_rx_eee_wake {
3973 struct cgxx_gmp_pcsx_rx_eee_wake_s {
3974 u64 error_counter : 16;
3975 u64 reserved_16_63 : 48;
3977 /* struct cgxx_gmp_pcsx_rx_eee_wake_s cn; */
3980 static inline u64 CGXX_GMP_PCSX_RX_EEE_WAKE(u64 a)
3981 __attribute__ ((pure, always_inline));
3982 static inline u64 CGXX_GMP_PCSX_RX_EEE_WAKE(u64 a)
3984 return 0x30910 + 0x40000 * a;
3988 * Register (RSL) cgx#_gmp_pcs#_rx_lpi_timing
3990 * INTERNAL: CGX GMP PCS RX EEE LPI Timing Parameters Registers
3991 * Reserved. Internal: Receiver LPI timing parameters Tqr, Twr and Twtf.
3993 union cgxx_gmp_pcsx_rx_lpi_timing {
3995 struct cgxx_gmp_pcsx_rx_lpi_timing_s {
3997 u64 reserved_18_19 : 2;
4000 u64 reserved_52_63 : 12;
4002 /* struct cgxx_gmp_pcsx_rx_lpi_timing_s cn; */
4005 static inline u64 CGXX_GMP_PCSX_RX_LPI_TIMING(u64 a)
4006 __attribute__ ((pure, always_inline));
4007 static inline u64 CGXX_GMP_PCSX_RX_LPI_TIMING(u64 a)
4009 return 0x30900 + 0x40000 * a;
4013 * Register (RSL) cgx#_gmp_pcs#_status1
4015 * CGX GMP PCS Status 1 Register PCS LPI Status, Link OK. Register 3.1
4017 union cgxx_gmp_pcsx_status1 {
4019 struct cgxx_gmp_pcsx_status1_s {
4020 u64 reserved_0_1 : 2;
4021 u64 receive_link_status : 1;
4022 u64 reserved_3_7 : 5;
4023 u64 rx_lpi_indication : 1;
4024 u64 tx_lpi_indication : 1;
4025 u64 rx_lpi_received : 1;
4026 u64 tx_lpi_received : 1;
4027 u64 reserved_12_63 : 52;
4029 /* struct cgxx_gmp_pcsx_status1_s cn; */
4032 static inline u64 CGXX_GMP_PCSX_STATUS1(u64 a)
4033 __attribute__ ((pure, always_inline));
4034 static inline u64 CGXX_GMP_PCSX_STATUS1(u64 a)
4036 return 0x30880 + 0x40000 * a;
4040 * Register (RSL) cgx#_gmp_pcs#_tx_lpi_timing
4042 * INTERNAL: CGX GMP GMI TX EEE LPI Timing Parameters Registers
4043 * Reserved. Internal: Transmitter LPI timing parameters Tsl, Tql and
4046 union cgxx_gmp_pcsx_tx_lpi_timing {
4048 struct cgxx_gmp_pcsx_tx_lpi_timing_s {
4050 u64 reserved_19_31 : 13;
4052 u64 reserved_44_47 : 4;
4054 u64 reserved_60_63 : 4;
4056 /* struct cgxx_gmp_pcsx_tx_lpi_timing_s cn; */
4059 static inline u64 CGXX_GMP_PCSX_TX_LPI_TIMING(u64 a)
4060 __attribute__ ((pure, always_inline));
4061 static inline u64 CGXX_GMP_PCSX_TX_LPI_TIMING(u64 a)
4063 return 0x30800 + 0x40000 * a;
4067 * Register (RSL) cgx#_gmp_pcs_an#_adv
4069 * CGX GMP PCS Autonegotiation Advertisement Registers
4071 union cgxx_gmp_pcs_anx_adv {
4073 struct cgxx_gmp_pcs_anx_adv_s {
4074 u64 reserved_0_4 : 5;
4078 u64 reserved_9_11 : 3;
4080 u64 reserved_14 : 1;
4082 u64 reserved_16_63 : 48;
4084 /* struct cgxx_gmp_pcs_anx_adv_s cn; */
4087 static inline u64 CGXX_GMP_PCS_ANX_ADV(u64 a)
4088 __attribute__ ((pure, always_inline));
4089 static inline u64 CGXX_GMP_PCS_ANX_ADV(u64 a)
4091 return 0x30010 + 0x40000 * a;
4095 * Register (RSL) cgx#_gmp_pcs_an#_ext_st
4097 * CGX GMP PCS Autonegotiation Extended Status Registers
4099 union cgxx_gmp_pcs_anx_ext_st {
4101 struct cgxx_gmp_pcs_anx_ext_st_s {
4102 u64 reserved_0_11 : 12;
4107 u64 reserved_16_63 : 48;
4109 /* struct cgxx_gmp_pcs_anx_ext_st_s cn; */
4112 static inline u64 CGXX_GMP_PCS_ANX_EXT_ST(u64 a)
4113 __attribute__ ((pure, always_inline));
4114 static inline u64 CGXX_GMP_PCS_ANX_EXT_ST(u64 a)
4116 return 0x30028 + 0x40000 * a;
4120 * Register (RSL) cgx#_gmp_pcs_an#_lp_abil
4122 * CGX GMP PCS Autonegotiation Link Partner Ability Registers This is the
4123 * autonegotiation link partner ability register 5 as per IEEE 802.3,
4126 union cgxx_gmp_pcs_anx_lp_abil {
4128 struct cgxx_gmp_pcs_anx_lp_abil_s {
4129 u64 reserved_0_4 : 5;
4133 u64 reserved_9_11 : 3;
4137 u64 reserved_16_63 : 48;
4139 /* struct cgxx_gmp_pcs_anx_lp_abil_s cn; */
4142 static inline u64 CGXX_GMP_PCS_ANX_LP_ABIL(u64 a)
4143 __attribute__ ((pure, always_inline));
4144 static inline u64 CGXX_GMP_PCS_ANX_LP_ABIL(u64 a)
4146 return 0x30018 + 0x40000 * a;
4150 * Register (RSL) cgx#_gmp_pcs_an#_results
4152 * CGX GMP PCS Autonegotiation Results Registers This register is not
4153 * valid when CGX()_GMP_PCS_MISC()_CTL[AN_OVRD] is set to 1. If
4154 * CGX()_GMP_PCS_MISC()_CTL[AN_OVRD] is set to 0 and
4155 * CGX()_GMP_PCS_AN()_RESULTS[AN_CPT] is set to 1, this register is
4158 union cgxx_gmp_pcs_anx_results {
4160 struct cgxx_gmp_pcs_anx_results_s {
4166 u64 reserved_7_63 : 57;
4168 /* struct cgxx_gmp_pcs_anx_results_s cn; */
4171 static inline u64 CGXX_GMP_PCS_ANX_RESULTS(u64 a)
4172 __attribute__ ((pure, always_inline));
4173 static inline u64 CGXX_GMP_PCS_ANX_RESULTS(u64 a)
4175 return 0x30020 + 0x40000 * a;
4179 * Register (RSL) cgx#_gmp_pcs_int#
4181 * CGX GMP PCS Interrupt Registers
4183 union cgxx_gmp_pcs_intx {
4185 struct cgxx_gmp_pcs_intx_s {
4199 u64 reserved_13_15 : 3;
4200 u64 an_page_received : 1;
4201 u64 an_complete : 1;
4202 u64 reserved_18_19 : 2;
4203 u64 eee_tx_change : 1;
4204 u64 eee_rx_change : 1;
4205 u64 eee_rx_link_fail : 1;
4206 u64 reserved_23_63 : 41;
4208 /* struct cgxx_gmp_pcs_intx_s cn; */
4211 static inline u64 CGXX_GMP_PCS_INTX(u64 a)
4212 __attribute__ ((pure, always_inline));
4213 static inline u64 CGXX_GMP_PCS_INTX(u64 a)
4215 return 0x30080 + 0x40000 * a;
4219 * Register (RSL) cgx#_gmp_pcs_int#_ena_w1c
4221 * CGX GMP PCS Interrupt Enable Clear Registers This register clears
4222 * interrupt enable bits.
4224 union cgxx_gmp_pcs_intx_ena_w1c {
4226 struct cgxx_gmp_pcs_intx_ena_w1c_s {
4240 u64 reserved_13_15 : 3;
4241 u64 an_page_received : 1;
4242 u64 an_complete : 1;
4243 u64 reserved_18_19 : 2;
4244 u64 eee_tx_change : 1;
4245 u64 eee_rx_change : 1;
4246 u64 eee_rx_link_fail : 1;
4247 u64 reserved_23_63 : 41;
4249 /* struct cgxx_gmp_pcs_intx_ena_w1c_s cn; */
4252 static inline u64 CGXX_GMP_PCS_INTX_ENA_W1C(u64 a)
4253 __attribute__ ((pure, always_inline));
4254 static inline u64 CGXX_GMP_PCS_INTX_ENA_W1C(u64 a)
4256 return 0x30090 + 0x40000 * a;
4260 * Register (RSL) cgx#_gmp_pcs_int#_ena_w1s
4262 * CGX GMP PCS Interrupt Enable Set Registers This register sets
4263 * interrupt enable bits.
4265 union cgxx_gmp_pcs_intx_ena_w1s {
4267 struct cgxx_gmp_pcs_intx_ena_w1s_s {
4281 u64 reserved_13_15 : 3;
4282 u64 an_page_received : 1;
4283 u64 an_complete : 1;
4284 u64 reserved_18_19 : 2;
4285 u64 eee_tx_change : 1;
4286 u64 eee_rx_change : 1;
4287 u64 eee_rx_link_fail : 1;
4288 u64 reserved_23_63 : 41;
4290 /* struct cgxx_gmp_pcs_intx_ena_w1s_s cn; */
4293 static inline u64 CGXX_GMP_PCS_INTX_ENA_W1S(u64 a)
4294 __attribute__ ((pure, always_inline));
4295 static inline u64 CGXX_GMP_PCS_INTX_ENA_W1S(u64 a)
4297 return 0x30098 + 0x40000 * a;
4301 * Register (RSL) cgx#_gmp_pcs_int#_w1s
4303 * CGX GMP PCS Interrupt Set Registers This register sets interrupt bits.
4305 union cgxx_gmp_pcs_intx_w1s {
4307 struct cgxx_gmp_pcs_intx_w1s_s {
4321 u64 reserved_13_15 : 3;
4322 u64 an_page_received : 1;
4323 u64 an_complete : 1;
4324 u64 reserved_18_19 : 2;
4325 u64 eee_tx_change : 1;
4326 u64 eee_rx_change : 1;
4327 u64 eee_rx_link_fail : 1;
4328 u64 reserved_23_63 : 41;
4330 /* struct cgxx_gmp_pcs_intx_w1s_s cn; */
4333 static inline u64 CGXX_GMP_PCS_INTX_W1S(u64 a)
4334 __attribute__ ((pure, always_inline));
4335 static inline u64 CGXX_GMP_PCS_INTX_W1S(u64 a)
4337 return 0x30088 + 0x40000 * a;
4341 * Register (RSL) cgx#_gmp_pcs_link#_timer
4343 * CGX GMP PCS Link Timer Registers This is the 1.6 ms nominal link timer
4346 union cgxx_gmp_pcs_linkx_timer {
4348 struct cgxx_gmp_pcs_linkx_timer_s {
4350 u64 reserved_16_63 : 48;
4352 /* struct cgxx_gmp_pcs_linkx_timer_s cn; */
4355 static inline u64 CGXX_GMP_PCS_LINKX_TIMER(u64 a)
4356 __attribute__ ((pure, always_inline));
4357 static inline u64 CGXX_GMP_PCS_LINKX_TIMER(u64 a)
4359 return 0x30040 + 0x40000 * a;
4363 * Register (RSL) cgx#_gmp_pcs_misc#_ctl
4365 * CGX GMP SGMII Miscellaneous Control Registers Internal: SGMII bit [12]
4366 * is really a misnomer, it is a decode of pi_qlm_cfg pins to indicate
4367 * SGMII or 1000Base-X modes. Note: The SGMII AN Advertisement Register
4368 * above will be sent during Auto Negotiation if [MAC_PHY] is set (1=PHY
4369 * mode). If the bit is not set (0=MAC mode), the tx_Config_Reg\<14\>
4370 * becomes ACK bit and tx_Config_Reg\<0\> is always 1. All other bits in
4371 * tx_Config_Reg sent will be 0. The PHY dictates the Auto Negotiation
4374 union cgxx_gmp_pcs_miscx_ctl {
4376 struct cgxx_gmp_pcs_miscx_ctl_s {
4383 u64 reserved_12 : 1;
4385 u64 reserved_14_15 : 2;
4386 u64 qsgmii_comma_wd : 16;
4387 u64 qsgmii_comma_wd_en : 1;
4388 u64 reserved_33_63 : 31;
4390 struct cgxx_gmp_pcs_miscx_ctl_cn {
4397 u64 reserved_12 : 1;
4399 u64 reserved_14_15 : 2;
4400 u64 qsgmii_comma_wd : 16;
4401 u64 qsgmii_comma_wd_en : 1;
4402 u64 reserved_33_35 : 3;
4403 u64 reserved_36_63 : 28;
4407 static inline u64 CGXX_GMP_PCS_MISCX_CTL(u64 a)
4408 __attribute__ ((pure, always_inline));
4409 static inline u64 CGXX_GMP_PCS_MISCX_CTL(u64 a)
4411 return 0x30078 + 0x40000 * a;
4415 * Register (RSL) cgx#_gmp_pcs_mr#_control
4417 * CGX GMP PCS Control Registers
4419 union cgxx_gmp_pcs_mrx_control {
4421 struct cgxx_gmp_pcs_mrx_control_s {
4422 u64 reserved_0_4 : 5;
4428 u64 reserved_10 : 1;
4434 u64 reserved_16_63 : 48;
4436 /* struct cgxx_gmp_pcs_mrx_control_s cn; */
4439 static inline u64 CGXX_GMP_PCS_MRX_CONTROL(u64 a)
4440 __attribute__ ((pure, always_inline));
4441 static inline u64 CGXX_GMP_PCS_MRX_CONTROL(u64 a)
4443 return 0x30000 + 0x40000 * a;
4447 * Register (RSL) cgx#_gmp_pcs_mr#_status
4449 * CGX GMP PCS Status Registers Bits \<15:9\> in this register indicate
4450 * the ability to operate when CGX()_GMP_PCS_MISC()_CTL[MAC_PHY] is set
4451 * to MAC mode. Bits \<15:9\> are always read as 0, indicating that the
4452 * chip cannot operate in the corresponding modes. The field [RM_FLT] is
4453 * a 'don't care' when the selected mode is SGMII/QSGMII.
4455 union cgxx_gmp_pcs_mrx_status {
4457 struct cgxx_gmp_pcs_mrx_status_s {
4474 u64 reserved_16_63 : 48;
4476 /* struct cgxx_gmp_pcs_mrx_status_s cn; */
4479 static inline u64 CGXX_GMP_PCS_MRX_STATUS(u64 a)
4480 __attribute__ ((pure, always_inline));
4481 static inline u64 CGXX_GMP_PCS_MRX_STATUS(u64 a)
4483 return 0x30008 + 0x40000 * a;
4487 * Register (RSL) cgx#_gmp_pcs_rx#_states
4489 * CGX GMP PCS RX State-Machines States Registers
4491 union cgxx_gmp_pcs_rxx_states {
4493 struct cgxx_gmp_pcs_rxx_states_s {
4500 u64 reserved_16_63 : 48;
4502 /* struct cgxx_gmp_pcs_rxx_states_s cn; */
4505 static inline u64 CGXX_GMP_PCS_RXX_STATES(u64 a)
4506 __attribute__ ((pure, always_inline));
4507 static inline u64 CGXX_GMP_PCS_RXX_STATES(u64 a)
4509 return 0x30058 + 0x40000 * a;
4513 * Register (RSL) cgx#_gmp_pcs_rx#_sync
4515 * CGX GMP PCS Code Group Synchronization Registers
4517 union cgxx_gmp_pcs_rxx_sync {
4519 struct cgxx_gmp_pcs_rxx_sync_s {
4522 u64 reserved_2_63 : 62;
4524 /* struct cgxx_gmp_pcs_rxx_sync_s cn; */
4527 static inline u64 CGXX_GMP_PCS_RXX_SYNC(u64 a)
4528 __attribute__ ((pure, always_inline));
4529 static inline u64 CGXX_GMP_PCS_RXX_SYNC(u64 a)
4531 return 0x30050 + 0x40000 * a;
4535 * Register (RSL) cgx#_gmp_pcs_sgm#_an_adv
4537 * CGX GMP PCS SGMII Autonegotiation Advertisement Registers This is the
4538 * SGMII autonegotiation advertisement register (sent out as
4539 * tx_Config_Reg\<15:0\> as defined in IEEE 802.3 clause 37). This
4540 * register is sent during autonegotiation if
4541 * CGX()_GMP_PCS_MISC()_CTL[MAC_PHY] is set (1 = PHY mode). If the bit is
4542 * not set (0 = MAC mode), then tx_Config_Reg\<14\> becomes ACK bit and
4543 * tx_Config_Reg\<0\> is always 1. All other bits in tx_Config_Reg sent
4544 * will be 0. The PHY dictates the autonegotiation results.
4546 union cgxx_gmp_pcs_sgmx_an_adv {
4548 struct cgxx_gmp_pcs_sgmx_an_adv_s {
4550 u64 reserved_1_9 : 9;
4553 u64 reserved_13 : 1;
4556 u64 reserved_16_63 : 48;
4558 /* struct cgxx_gmp_pcs_sgmx_an_adv_s cn; */
4561 static inline u64 CGXX_GMP_PCS_SGMX_AN_ADV(u64 a)
4562 __attribute__ ((pure, always_inline));
4563 static inline u64 CGXX_GMP_PCS_SGMX_AN_ADV(u64 a)
4565 return 0x30068 + 0x40000 * a;
4569 * Register (RSL) cgx#_gmp_pcs_sgm#_lp_adv
4571 * CGX GMP PCS SGMII Link-Partner-Advertisement Registers This is the
4572 * SGMII link partner advertisement register (received as
4573 * rx_Config_Reg\<15:0\> as defined in IEEE 802.3 clause 37).
4575 union cgxx_gmp_pcs_sgmx_lp_adv {
4577 struct cgxx_gmp_pcs_sgmx_lp_adv_s {
4579 u64 reserved_1_9 : 9;
4582 u64 reserved_13_14 : 2;
4584 u64 reserved_16_63 : 48;
4586 struct cgxx_gmp_pcs_sgmx_lp_adv_cn {
4588 u64 reserved_1_9 : 9;
4591 u64 reserved_13 : 1;
4592 u64 reserved_14 : 1;
4594 u64 reserved_16_63 : 48;
4598 static inline u64 CGXX_GMP_PCS_SGMX_LP_ADV(u64 a)
4599 __attribute__ ((pure, always_inline));
4600 static inline u64 CGXX_GMP_PCS_SGMX_LP_ADV(u64 a)
4602 return 0x30070 + 0x40000 * a;
4606 * Register (RSL) cgx#_gmp_pcs_tx#_states
4608 * CGX GMP PCS TX State-Machines States Registers
4610 union cgxx_gmp_pcs_txx_states {
4612 struct cgxx_gmp_pcs_txx_states_s {
4616 u64 reserved_7_63 : 57;
4618 /* struct cgxx_gmp_pcs_txx_states_s cn; */
4621 static inline u64 CGXX_GMP_PCS_TXX_STATES(u64 a)
4622 __attribute__ ((pure, always_inline));
4623 static inline u64 CGXX_GMP_PCS_TXX_STATES(u64 a)
4625 return 0x30060 + 0x40000 * a;
4629 * Register (RSL) cgx#_gmp_pcs_tx_rx#_polarity
4631 * CGX GMP PCS TX/RX Polarity Registers
4632 * CGX()_GMP_PCS_TX_RX()_POLARITY[AUTORXPL] shows correct polarity needed
4633 * on the link receive path after code group synchronization is achieved.
4634 * When LMAC_TYPE=QSGMII, only lane 0 polarity data and settings are
4635 * relevant and settings for lanes 1, 2 and 3 are unused.
4637 union cgxx_gmp_pcs_tx_rxx_polarity {
4639 struct cgxx_gmp_pcs_tx_rxx_polarity_s {
4644 u64 reserved_4_63 : 60;
4646 /* struct cgxx_gmp_pcs_tx_rxx_polarity_s cn; */
4649 static inline u64 CGXX_GMP_PCS_TX_RXX_POLARITY(u64 a)
4650 __attribute__ ((pure, always_inline));
4651 static inline u64 CGXX_GMP_PCS_TX_RXX_POLARITY(u64 a)
4653 return 0x30048 + 0x40000 * a;
4657 * Register (RSL) cgx#_msix_pba#
4659 * CGX MSI-X Pending Bit Array Registers This register is the MSI-X PBA
4660 * table, the bit number is indexed by the CGX_INT_VEC_E enumeration.
4662 union cgxx_msix_pbax {
4664 struct cgxx_msix_pbax_s {
4667 /* struct cgxx_msix_pbax_s cn; */
4670 static inline u64 CGXX_MSIX_PBAX(u64 a)
4671 __attribute__ ((pure, always_inline));
4672 static inline u64 CGXX_MSIX_PBAX(u64 a)
4674 return 0xf0000 + 8 * a;
4678 * Register (RSL) cgx#_msix_vec#_addr
4680 * CGX MSI-X Vector Table Address Registers This register is the MSI-X
4681 * vector table, indexed by the CGX_INT_VEC_E enumeration.
4683 union cgxx_msix_vecx_addr {
4685 struct cgxx_msix_vecx_addr_s {
4689 u64 reserved_53_63 : 11;
4691 /* struct cgxx_msix_vecx_addr_s cn; */
4694 static inline u64 CGXX_MSIX_VECX_ADDR(u64 a)
4695 __attribute__ ((pure, always_inline));
4696 static inline u64 CGXX_MSIX_VECX_ADDR(u64 a)
4698 return 0 + 0x10 * a;
4702 * Register (RSL) cgx#_msix_vec#_ctl
4704 * CGX MSI-X Vector Table Control and Data Registers This register is the
4705 * MSI-X vector table, indexed by the CGX_INT_VEC_E enumeration.
4707 union cgxx_msix_vecx_ctl {
4709 struct cgxx_msix_vecx_ctl_s {
4712 u64 reserved_33_63 : 31;
4714 /* struct cgxx_msix_vecx_ctl_s cn; */
4717 static inline u64 CGXX_MSIX_VECX_CTL(u64 a)
4718 __attribute__ ((pure, always_inline));
4719 static inline u64 CGXX_MSIX_VECX_CTL(u64 a)
4721 return 8 + 0x10 * a;
4725 * Register (RSL) cgx#_smu#_bp_test
4727 * INTERNAL: CGX SMU TX Backpressure Test Registers
4729 union cgxx_smux_bp_test {
4731 struct cgxx_smux_bp_test_s {
4733 u64 reserved_12_15 : 4;
4735 u64 reserved_24_47 : 24;
4737 u64 reserved_52_63 : 12;
4739 /* struct cgxx_smux_bp_test_s cn; */
4742 static inline u64 CGXX_SMUX_BP_TEST(u64 a)
4743 __attribute__ ((pure, always_inline));
4744 static inline u64 CGXX_SMUX_BP_TEST(u64 a)
4746 return 0x20230 + 0x40000 * a;
4750 * Register (RSL) cgx#_smu#_cbfc_ctl
4752 * CGX SMU PFC Control Registers Internal: INTERNAL: XOFF for a specific
4753 * class/channel \<i\> is XOFF\<i\> = ([PHYS_EN]\<i\> & cmr_rx_phys_bp) |
4754 * ([LOGL_EN]\<i\> & cmr_rx_logl_xoff\<i\>).
4756 union cgxx_smux_cbfc_ctl {
4758 struct cgxx_smux_cbfc_ctl_s {
4763 u64 reserved_4_31 : 28;
4767 /* struct cgxx_smux_cbfc_ctl_s cn; */
4770 static inline u64 CGXX_SMUX_CBFC_CTL(u64 a)
4771 __attribute__ ((pure, always_inline));
4772 static inline u64 CGXX_SMUX_CBFC_CTL(u64 a)
4774 return 0x20218 + 0x40000 * a;
4778 * Register (RSL) cgx#_smu#_ctrl
4780 * CGX SMU Control Registers
4782 union cgxx_smux_ctrl {
4784 struct cgxx_smux_ctrl_s {
4787 u64 reserved_2_63 : 62;
4789 /* struct cgxx_smux_ctrl_s cn; */
4792 static inline u64 CGXX_SMUX_CTRL(u64 a)
4793 __attribute__ ((pure, always_inline));
4794 static inline u64 CGXX_SMUX_CTRL(u64 a)
4796 return 0x20200 + 0x40000 * a;
4800 * Register (RSL) cgx#_smu#_ext_loopback
4802 * CGX SMU External Loopback Registers In loopback mode, the IFG1+IFG2 of
4803 * local and remote parties must match exactly; otherwise loopback FIFO
4804 * will overrun: CGX()_SMU()_TX_INT[LB_OVRFLW].
4806 union cgxx_smux_ext_loopback {
4808 struct cgxx_smux_ext_loopback_s {
4810 u64 reserved_6_7 : 2;
4812 u64 reserved_14_15 : 2;
4814 u64 reserved_17_63 : 47;
4816 /* struct cgxx_smux_ext_loopback_s cn; */
4819 static inline u64 CGXX_SMUX_EXT_LOOPBACK(u64 a)
4820 __attribute__ ((pure, always_inline));
4821 static inline u64 CGXX_SMUX_EXT_LOOPBACK(u64 a)
4823 return 0x20208 + 0x40000 * a;
4827 * Register (RSL) cgx#_smu#_hg2_control
4829 * CGX SMU HiGig2 Control Registers HiGig2 TX- and RX-enable are normally
4830 * set together for HiGig2 messaging. Setting just the TX or RX bit
4831 * results in only the HG2 message transmit or receive capability.
4832 * Setting [PHYS_EN] and [LOGL_EN] to 1 allows link PAUSE or backpressure
4833 * to NIX as per the received HiGig2 message. Setting these fields to 0
4834 * disables link PAUSE and backpressure to NIX in response to received
4835 * messages. CGX()_SMU()_TX_CTL[HG_EN] must be set (to enable HiGig)
4836 * whenever either [HG2TX_EN] or [HG2RX_EN] are set.
4837 * CGX()_SMU()_RX_UDD_SKP[LEN] must be set to 16 (to select HiGig2)
4838 * whenever either [HG2TX_EN] or [HG2RX_EN] are set.
4839 * CGX()_CMR_RX_OVR_BP[EN]\<0\> must be set and
4840 * CGX()_CMR_RX_OVR_BP[BP]\<0\> must be cleared to 0 (to forcibly disable
4841 * hardware-automatic 802.3 PAUSE packet generation) with the HiGig2
4842 * Protocol when [HG2TX_EN] = 0. (The HiGig2 protocol is indicated by
4843 * CGX()_SMU()_TX_CTL[HG_EN] = 1 and CGX()_SMU()_RX_UDD_SKP[LEN]=16.)
4844 * Hardware can only autogenerate backpressure via HiGig2 messages
4845 * (optionally, when [HG2TX_EN] = 1) with the HiGig2 protocol.
4847 union cgxx_smux_hg2_control {
4849 struct cgxx_smux_hg2_control_s {
4854 u64 reserved_19_63 : 45;
4856 /* struct cgxx_smux_hg2_control_s cn; */
4859 static inline u64 CGXX_SMUX_HG2_CONTROL(u64 a)
4860 __attribute__ ((pure, always_inline));
4861 static inline u64 CGXX_SMUX_HG2_CONTROL(u64 a)
4863 return 0x20210 + 0x40000 * a;
4867 * Register (RSL) cgx#_smu#_mmsi_ctl_sta
4869 * CGX SMU MAC Merge Service Interface (MMSI) Control/Status Registers
4870 * MMSI control and status registers for frame preemption mode. Refer to
4871 * IEEE 802.3br, Clause 99.
4873 union cgxx_smux_mmsi_ctl_sta {
4875 struct cgxx_smux_mmsi_ctl_sta_s {
4881 u64 reserved_8_31 : 24;
4883 u64 reserved_56_63 : 8;
4885 /* struct cgxx_smux_mmsi_ctl_sta_s cn; */
4888 static inline u64 CGXX_SMUX_MMSI_CTL_STA(u64 a)
4889 __attribute__ ((pure, always_inline));
4890 static inline u64 CGXX_SMUX_MMSI_CTL_STA(u64 a)
4892 return 0x20220 + 0x40000 * a;
4896 * Register (RSL) cgx#_smu#_rx_bad_col_ctrl
4898 * CGX SMU RX Bad Column Control Registers
4900 union cgxx_smux_rx_bad_col_ctrl {
4902 struct cgxx_smux_rx_bad_col_ctrl_s {
4906 u64 reserved_20_63 : 44;
4908 /* struct cgxx_smux_rx_bad_col_ctrl_s cn; */
4911 static inline u64 CGXX_SMUX_RX_BAD_COL_CTRL(u64 a)
4912 __attribute__ ((pure, always_inline));
4913 static inline u64 CGXX_SMUX_RX_BAD_COL_CTRL(u64 a)
4915 return 0x20060 + 0x40000 * a;
4919 * Register (RSL) cgx#_smu#_rx_bad_col_data_hi
4921 * CGX SMU RX Bad Column High Registers
4923 union cgxx_smux_rx_bad_col_data_hi {
4925 struct cgxx_smux_rx_bad_col_data_hi_s {
4928 /* struct cgxx_smux_rx_bad_col_data_hi_s cn; */
4931 static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_HI(u64 a)
4932 __attribute__ ((pure, always_inline));
4933 static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_HI(u64 a)
4935 return 0x20058 + 0x40000 * a;
4939 * Register (RSL) cgx#_smu#_rx_bad_col_data_lo
4941 * CGX SMU RX Bad Column Low Registers
4943 union cgxx_smux_rx_bad_col_data_lo {
4945 struct cgxx_smux_rx_bad_col_data_lo_s {
4948 /* struct cgxx_smux_rx_bad_col_data_lo_s cn; */
4951 static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_LO(u64 a)
4952 __attribute__ ((pure, always_inline));
4953 static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_LO(u64 a)
4955 return 0x20050 + 0x40000 * a;
4959 * Register (RSL) cgx#_smu#_rx_ctl
4961 * CGX SMU RX Control Registers
4963 union cgxx_smux_rx_ctl {
4965 struct cgxx_smux_rx_ctl_s {
4967 u64 reserved_2_63 : 62;
4969 /* struct cgxx_smux_rx_ctl_s cn; */
4972 static inline u64 CGXX_SMUX_RX_CTL(u64 a)
4973 __attribute__ ((pure, always_inline));
4974 static inline u64 CGXX_SMUX_RX_CTL(u64 a)
4976 return 0x20048 + 0x40000 * a;
4980 * Register (RSL) cgx#_smu#_rx_decision
4982 * CGX SMU Packet Decision Registers This register specifies the byte
4983 * count used to determine when to accept or to filter a packet. As each
4984 * byte in a packet is received by CGX, the L2 byte count (i.e. the
4985 * number of bytes from the beginning of the L2 header (DMAC)) is
4986 * compared against CNT. In normal operation, the L2 header begins after
4987 * the PREAMBLE + SFD (CGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 1) and any
4988 * optional UDD skip data (CGX()_SMU()_RX_UDD_SKP[LEN]).
4990 union cgxx_smux_rx_decision {
4992 struct cgxx_smux_rx_decision_s {
4994 u64 reserved_5_63 : 59;
4996 /* struct cgxx_smux_rx_decision_s cn; */
4999 static inline u64 CGXX_SMUX_RX_DECISION(u64 a)
5000 __attribute__ ((pure, always_inline));
5001 static inline u64 CGXX_SMUX_RX_DECISION(u64 a)
5003 return 0x20038 + 0x40000 * a;
5007 * Register (RSL) cgx#_smu#_rx_frm_chk
5009 * CGX SMU RX Frame Check Registers The CSRs provide the enable bits for
5010 * a subset of errors passed to CMR encoded.
5012 union cgxx_smux_rx_frm_chk {
5014 struct cgxx_smux_rx_frm_chk_s {
5015 u64 reserved_0_2 : 3;
5022 u64 reserved_9_63 : 55;
5024 /* struct cgxx_smux_rx_frm_chk_s cn; */
5027 static inline u64 CGXX_SMUX_RX_FRM_CHK(u64 a)
5028 __attribute__ ((pure, always_inline));
5029 static inline u64 CGXX_SMUX_RX_FRM_CHK(u64 a)
5031 return 0x20028 + 0x40000 * a;
5035 * Register (RSL) cgx#_smu#_rx_frm_ctl
5037 * CGX SMU RX Frame Control Registers This register controls the handling
5038 * of the frames. The [CTL_BCK] and [CTL_DRP] bits control how the
5039 * hardware handles incoming PAUSE packets. The most common modes of
5040 * operation: _ [CTL_BCK] = 1, [CTL_DRP] = 1: hardware handles everything
5041 * _ [CTL_BCK] = 0, [CTL_DRP] = 0: software sees all PAUSE frames _
5042 * [CTL_BCK] = 0, [CTL_DRP] = 1: all PAUSE frames are completely ignored
5043 * These control bits should be set to [CTL_BCK] = 0, [CTL_DRP] = 0 in
5044 * half-duplex mode. Since PAUSE packets only apply to full duplex
5045 * operation, any PAUSE packet would constitute an exception which should
5046 * be handled by the processing cores. PAUSE packets should not be
5049 union cgxx_smux_rx_frm_ctl {
5051 struct cgxx_smux_rx_frm_ctl_s {
5058 u64 reserved_6_11 : 6;
5060 u64 reserved_13_63 : 51;
5062 /* struct cgxx_smux_rx_frm_ctl_s cn; */
5065 static inline u64 CGXX_SMUX_RX_FRM_CTL(u64 a)
5066 __attribute__ ((pure, always_inline));
5067 static inline u64 CGXX_SMUX_RX_FRM_CTL(u64 a)
5069 return 0x20020 + 0x40000 * a;
5073 * Register (RSL) cgx#_smu#_rx_int
5075 * CGX SMU Receive Interrupt Registers SMU Interrupt Register. Internal:
5076 * Exception conditions \<9\> and \<4:0\> can also set the rcv/opcode in
5077 * the received packet's work queue entry. CGX()_SMU()_RX_FRM_CHK
5078 * provides a bit mask for configuring which conditions set the error.
5080 union cgxx_smux_rx_int {
5082 struct cgxx_smux_rx_int_s {
5097 u64 reserved_14_63 : 50;
5099 /* struct cgxx_smux_rx_int_s cn; */
5102 static inline u64 CGXX_SMUX_RX_INT(u64 a)
5103 __attribute__ ((pure, always_inline));
5104 static inline u64 CGXX_SMUX_RX_INT(u64 a)
5106 return 0x20000 + 0x40000 * a;
5110 * Register (RSL) cgx#_smu#_rx_int_ena_w1c
5112 * CGX SMU Receive Interrupt Enable Clear Registers This register clears
5113 * interrupt enable bits.
5115 union cgxx_smux_rx_int_ena_w1c {
5117 struct cgxx_smux_rx_int_ena_w1c_s {
5132 u64 reserved_14_63 : 50;
5134 /* struct cgxx_smux_rx_int_ena_w1c_s cn; */
5137 static inline u64 CGXX_SMUX_RX_INT_ENA_W1C(u64 a)
5138 __attribute__ ((pure, always_inline));
5139 static inline u64 CGXX_SMUX_RX_INT_ENA_W1C(u64 a)
5141 return 0x20010 + 0x40000 * a;
5145 * Register (RSL) cgx#_smu#_rx_int_ena_w1s
5147 * CGX SMU Receive Interrupt Enable Set Registers This register sets
5148 * interrupt enable bits.
5150 union cgxx_smux_rx_int_ena_w1s {
5152 struct cgxx_smux_rx_int_ena_w1s_s {
5167 u64 reserved_14_63 : 50;
5169 /* struct cgxx_smux_rx_int_ena_w1s_s cn; */
5172 static inline u64 CGXX_SMUX_RX_INT_ENA_W1S(u64 a)
5173 __attribute__ ((pure, always_inline));
5174 static inline u64 CGXX_SMUX_RX_INT_ENA_W1S(u64 a)
5176 return 0x20018 + 0x40000 * a;
5180 * Register (RSL) cgx#_smu#_rx_int_w1s
5182 * CGX SMU Receive Interrupt Set Registers This register sets interrupt
5185 union cgxx_smux_rx_int_w1s {
5187 struct cgxx_smux_rx_int_w1s_s {
5202 u64 reserved_14_63 : 50;
5204 /* struct cgxx_smux_rx_int_w1s_s cn; */
5207 static inline u64 CGXX_SMUX_RX_INT_W1S(u64 a)
5208 __attribute__ ((pure, always_inline));
5209 static inline u64 CGXX_SMUX_RX_INT_W1S(u64 a)
5211 return 0x20008 + 0x40000 * a;
5215 * Register (RSL) cgx#_smu#_rx_jabber
5217 * CGX SMU Maximum Packet-Size Registers This register specifies the
5218 * maximum size for packets, beyond which the SMU truncates. Internal:
5219 * JABBER[CNT] is checked against the packet that arrives from SPU. The
5220 * checking is performed before preamble is stripped or PTP is inserted.
5221 * If present, preamble is counted as eight bytes of the incoming packet.
5223 union cgxx_smux_rx_jabber {
5225 struct cgxx_smux_rx_jabber_s {
5227 u64 reserved_16_63 : 48;
5229 /* struct cgxx_smux_rx_jabber_s cn; */
5232 static inline u64 CGXX_SMUX_RX_JABBER(u64 a)
5233 __attribute__ ((pure, always_inline));
5234 static inline u64 CGXX_SMUX_RX_JABBER(u64 a)
5236 return 0x20030 + 0x40000 * a;
5240 * Register (RSL) cgx#_smu#_rx_udd_skp
5242 * CGX SMU User-Defined Data Skip Registers Internal: (1) The skip bytes
5243 * are part of the packet and will be sent down the NCB packet interface
5244 * and will be handled by NIX. (2) The system can determine if the UDD
5245 * bytes are included in the FCS check by using the FCSSEL field if the
5246 * FCS check is enabled. (3) Assume that the preamble/sfd is always at
5247 * the start of the frame even before UDD bytes. In most cases, there
5248 * will be no preamble in these cases since it will be packet interface
5249 * in direct communication to another packet interface (MAC to MAC)
5250 * without a PHY involved. (4) We can still do address filtering and
5251 * control packet filtering if the user desires. (5) In all cases, the
5252 * UDD bytes will be sent down the packet interface as part of the
5253 * packet. The UDD bytes are never stripped from the actual packet.
5255 union cgxx_smux_rx_udd_skp {
5257 struct cgxx_smux_rx_udd_skp_s {
5261 u64 reserved_9_63 : 55;
5263 /* struct cgxx_smux_rx_udd_skp_s cn; */
5266 static inline u64 CGXX_SMUX_RX_UDD_SKP(u64 a)
5267 __attribute__ ((pure, always_inline));
5268 static inline u64 CGXX_SMUX_RX_UDD_SKP(u64 a)
5270 return 0x20040 + 0x40000 * a;
5274 * Register (RSL) cgx#_smu#_rx_wol_ctrl0
5276 * CGX SMU RX Wake-on-LAN Control 0 Registers
5278 union cgxx_smux_rx_wol_ctrl0 {
5280 struct cgxx_smux_rx_wol_ctrl0_s {
5283 u64 reserved_52_63 : 12;
5285 /* struct cgxx_smux_rx_wol_ctrl0_s cn; */
5288 static inline u64 CGXX_SMUX_RX_WOL_CTRL0(u64 a)
5289 __attribute__ ((pure, always_inline));
5290 static inline u64 CGXX_SMUX_RX_WOL_CTRL0(u64 a)
5292 return 0x20068 + 0x40000 * a;
5296 * Register (RSL) cgx#_smu#_rx_wol_ctrl1
5298 * CGX SMU RX Wake-on-LAN Control 1 Registers
5300 union cgxx_smux_rx_wol_ctrl1 {
5302 struct cgxx_smux_rx_wol_ctrl1_s {
5305 /* struct cgxx_smux_rx_wol_ctrl1_s cn; */
5308 static inline u64 CGXX_SMUX_RX_WOL_CTRL1(u64 a)
5309 __attribute__ ((pure, always_inline));
5310 static inline u64 CGXX_SMUX_RX_WOL_CTRL1(u64 a)
5312 return 0x20070 + 0x40000 * a;
5316 * Register (RSL) cgx#_smu#_rx_wol_int
5318 * CGX SMU RX WOL Interrupt Registers These registers allow WOL
5319 * interrupts to be sent to the control processor.
5321 union cgxx_smux_rx_wol_int {
5323 struct cgxx_smux_rx_wol_int_s {
5325 u64 reserved_1_63 : 63;
5327 /* struct cgxx_smux_rx_wol_int_s cn; */
5330 static inline u64 CGXX_SMUX_RX_WOL_INT(u64 a)
5331 __attribute__ ((pure, always_inline));
5332 static inline u64 CGXX_SMUX_RX_WOL_INT(u64 a)
5334 return 0x20078 + 0x40000 * a;
5338 * Register (RSL) cgx#_smu#_rx_wol_int_ena_w1c
5340 * CGX SMU RX WOL Interrupt Enable Clear Registers This register clears
5341 * interrupt enable bits.
5343 union cgxx_smux_rx_wol_int_ena_w1c {
5345 struct cgxx_smux_rx_wol_int_ena_w1c_s {
5347 u64 reserved_1_63 : 63;
5349 /* struct cgxx_smux_rx_wol_int_ena_w1c_s cn; */
5352 static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1C(u64 a)
5353 __attribute__ ((pure, always_inline));
5354 static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1C(u64 a)
5356 return 0x20088 + 0x40000 * a;
5360 * Register (RSL) cgx#_smu#_rx_wol_int_ena_w1s
5362 * CGX SMU RX WOL Interrupt Enable Set Registers This register sets
5363 * interrupt enable bits.
5365 union cgxx_smux_rx_wol_int_ena_w1s {
5367 struct cgxx_smux_rx_wol_int_ena_w1s_s {
5369 u64 reserved_1_63 : 63;
5371 /* struct cgxx_smux_rx_wol_int_ena_w1s_s cn; */
5374 static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1S(u64 a)
5375 __attribute__ ((pure, always_inline));
5376 static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1S(u64 a)
5378 return 0x20090 + 0x40000 * a;
5382 * Register (RSL) cgx#_smu#_rx_wol_int_w1s
5384 * CGX SMU RX WOL Interrupt Set Registers This register sets interrupt
5387 union cgxx_smux_rx_wol_int_w1s {
5389 struct cgxx_smux_rx_wol_int_w1s_s {
5391 u64 reserved_1_63 : 63;
5393 /* struct cgxx_smux_rx_wol_int_w1s_s cn; */
5396 static inline u64 CGXX_SMUX_RX_WOL_INT_W1S(u64 a)
5397 __attribute__ ((pure, always_inline));
5398 static inline u64 CGXX_SMUX_RX_WOL_INT_W1S(u64 a)
5400 return 0x20080 + 0x40000 * a;
5404 * Register (RSL) cgx#_smu#_smac
5406 * CGX SMU SMAC Registers
5408 union cgxx_smux_smac {
5410 struct cgxx_smux_smac_s {
5412 u64 reserved_48_63 : 16;
5414 /* struct cgxx_smux_smac_s cn; */
5417 static inline u64 CGXX_SMUX_SMAC(u64 a)
5418 __attribute__ ((pure, always_inline));
5419 static inline u64 CGXX_SMUX_SMAC(u64 a)
5421 return 0x20108 + 0x40000 * a;
5425 * Register (RSL) cgx#_smu#_tx_append
5427 * CGX SMU TX Append Control Registers For more details on the
5428 * interactions between FCS and PAD, see also the description of
5429 * CGX()_SMU()_TX_MIN_PKT[MIN_SIZE].
5431 union cgxx_smux_tx_append {
5433 struct cgxx_smux_tx_append_s {
5438 u64 reserved_4_63 : 60;
5440 /* struct cgxx_smux_tx_append_s cn; */
5443 static inline u64 CGXX_SMUX_TX_APPEND(u64 a)
5444 __attribute__ ((pure, always_inline));
5445 static inline u64 CGXX_SMUX_TX_APPEND(u64 a)
5447 return 0x20100 + 0x40000 * a;
5451 * Register (RSL) cgx#_smu#_tx_ctl
5453 * CGX SMU Transmit Control Registers
5455 union cgxx_smux_tx_ctl {
5457 struct cgxx_smux_tx_ctl_s {
5464 u64 l2p_bp_conv : 1;
5466 u64 hg_pause_hgi : 2;
5467 u64 reserved_11_63 : 53;
5469 /* struct cgxx_smux_tx_ctl_s cn; */
5472 static inline u64 CGXX_SMUX_TX_CTL(u64 a)
5473 __attribute__ ((pure, always_inline));
5474 static inline u64 CGXX_SMUX_TX_CTL(u64 a)
5476 return 0x20178 + 0x40000 * a;
5480 * Register (RSL) cgx#_smu#_tx_dack
5482 * CGX SMU TX Drop Counters Registers
5484 union cgxx_smux_tx_dack {
5486 struct cgxx_smux_tx_dack_s {
5487 u64 dpi_sdrop_ack : 16;
5488 u64 reserved_16_63 : 48;
5490 /* struct cgxx_smux_tx_dack_s cn; */
5493 static inline u64 CGXX_SMUX_TX_DACK(u64 a)
5494 __attribute__ ((pure, always_inline));
5495 static inline u64 CGXX_SMUX_TX_DACK(u64 a)
5497 return 0x201b0 + 0x40000 * a;
5501 * Register (RSL) cgx#_smu#_tx_dcnt
5503 * CGX SMU TX Drop Counters Registers
5505 union cgxx_smux_tx_dcnt {
5507 struct cgxx_smux_tx_dcnt_s {
5508 u64 dpi_sdrop_cnt : 16;
5509 u64 reserved_16_63 : 48;
5511 /* struct cgxx_smux_tx_dcnt_s cn; */
5514 static inline u64 CGXX_SMUX_TX_DCNT(u64 a)
5515 __attribute__ ((pure, always_inline));
5516 static inline u64 CGXX_SMUX_TX_DCNT(u64 a)
5518 return 0x201a8 + 0x40000 * a;
5522 * Register (RSL) cgx#_smu#_tx_eee
5524 * INTERNAL: CGX SMU TX EEE Configure Registers Reserved. Internal:
5525 * These registers control when SMU TX requests to enter or exit LPI.
5526 * Those registers take effect only when EEE is supported and enabled for
5529 union cgxx_smux_tx_eee {
5531 struct cgxx_smux_tx_eee_s {
5532 u64 idle_thresh : 28;
5533 u64 reserved_28 : 1;
5538 u64 reserved_60_61 : 2;
5539 u64 tx_lpi_wake : 1;
5542 /* struct cgxx_smux_tx_eee_s cn; */
5545 static inline u64 CGXX_SMUX_TX_EEE(u64 a)
5546 __attribute__ ((pure, always_inline));
5547 static inline u64 CGXX_SMUX_TX_EEE(u64 a)
5549 return 0x20190 + 0x40000 * a;
5553 * Register (RSL) cgx#_smu#_tx_eee_timer_status
5555 * INTERNAL: CGX SMU TX EEE TIMER STATUS Registers Reserved. Internal:
5556 * These registers configure SMU TX EEE timing parameters.
5558 union cgxx_smux_tx_eee_timer_status {
5560 struct cgxx_smux_tx_eee_timer_status_s {
5561 u64 lpi_wake_cnt : 16;
5562 u64 reserved_16_30 : 15;
5563 u64 wake_timer_done : 1;
5564 u64 link_ok_cnt : 30;
5565 u64 reserved_62 : 1;
5566 u64 link_timer_done : 1;
5568 /* struct cgxx_smux_tx_eee_timer_status_s cn; */
5571 static inline u64 CGXX_SMUX_TX_EEE_TIMER_STATUS(u64 a)
5572 __attribute__ ((pure, always_inline));
5573 static inline u64 CGXX_SMUX_TX_EEE_TIMER_STATUS(u64 a)
5575 return 0x201a0 + 0x40000 * a;
5579 * Register (RSL) cgx#_smu#_tx_eee_timing
5581 * INTERNAL: CGX SMU TX EEE TIMING Parameter Registers Reserved.
5582 * Internal: These registers configure SMU TX EEE timing parameters.
5584 union cgxx_smux_tx_eee_timing {
5586 struct cgxx_smux_tx_eee_timing_s {
5587 u64 w_sys_tx_min : 16;
5588 u64 reserved_16_31 : 16;
5589 u64 link_ok_min : 30;
5590 u64 reserved_62_63 : 2;
5592 /* struct cgxx_smux_tx_eee_timing_s cn; */
5595 static inline u64 CGXX_SMUX_TX_EEE_TIMING(u64 a)
5596 __attribute__ ((pure, always_inline));
5597 static inline u64 CGXX_SMUX_TX_EEE_TIMING(u64 a)
5599 return 0x20198 + 0x40000 * a;
5603 * Register (RSL) cgx#_smu#_tx_ifg
5605 * CGX SMU TX Interframe-Gap Cycles Registers Programming IFG1 and IFG2:
5606 * * For XAUI/RXAUI/10G/25G/40G/50G/100G systems that require IEEE 802.3
5607 * compatibility, the [IFG1]+[IFG2] sum must be 12. * In loopback mode,
5608 * the [IFG1]+[IFG2] of local and remote parties must match exactly;
5609 * otherwise loopback FIFO will overrun: CGX()_SMU()_TX_INT[LB_OVRFLW]. *
5610 * When CGX()_SMU()_TX_CTL[DIC_EN] is set, [IFG1]+[IFG2] sum must be at
5611 * least 8. The behavior of smaller values is un-determined. * When
5612 * CGX()_SMU()_TX_CTL[DIC_EN] is cleared, the minimum value of
5613 * [IFG1]+[IFG2] is 1 for 40G/50G/100G LMAC_TYPE configurations and 5 for
5614 * all other values. The behavior of smaller values is un-determined.
5615 * Internal: When CGX()_SMU()_TX_CTL[DIC_EN] is set, SMU TX treats
5616 * ([IFG1]+[IFG2]) \< 8 as 8 for 40G/50G/100G MACs and ([IFG1]+[IFG2]) \<
5617 * 8 as 8 for other MACs. When CGX()_SMU()_TX_CTL[DIC_EN] is cleared, SMU
5618 * TX can work correctly with any IFG1 and IFG2.
5620 union cgxx_smux_tx_ifg {
5622 struct cgxx_smux_tx_ifg_s {
5626 u64 reserved_10_15 : 6;
5628 u64 reserved_24_63 : 40;
5630 /* struct cgxx_smux_tx_ifg_s cn; */
5633 static inline u64 CGXX_SMUX_TX_IFG(u64 a)
5634 __attribute__ ((pure, always_inline));
5635 static inline u64 CGXX_SMUX_TX_IFG(u64 a)
5637 return 0x20160 + 0x40000 * a;
5641 * Register (RSL) cgx#_smu#_tx_int
5643 * CGX SMU TX Interrupt Registers
5645 union cgxx_smux_tx_int {
5647 struct cgxx_smux_tx_int_s {
5650 u64 fake_commit : 1;
5654 u64 reserved_6_63 : 58;
5656 /* struct cgxx_smux_tx_int_s cn; */
5659 static inline u64 CGXX_SMUX_TX_INT(u64 a)
5660 __attribute__ ((pure, always_inline));
5661 static inline u64 CGXX_SMUX_TX_INT(u64 a)
5663 return 0x20140 + 0x40000 * a;
5667 * Register (RSL) cgx#_smu#_tx_int_ena_w1c
5669 * CGX SMU TX Interrupt Enable Clear Registers This register clears
5670 * interrupt enable bits.
5672 union cgxx_smux_tx_int_ena_w1c {
5674 struct cgxx_smux_tx_int_ena_w1c_s {
5677 u64 fake_commit : 1;
5681 u64 reserved_6_63 : 58;
5683 /* struct cgxx_smux_tx_int_ena_w1c_s cn; */
5686 static inline u64 CGXX_SMUX_TX_INT_ENA_W1C(u64 a)
5687 __attribute__ ((pure, always_inline));
5688 static inline u64 CGXX_SMUX_TX_INT_ENA_W1C(u64 a)
5690 return 0x20150 + 0x40000 * a;
5694 * Register (RSL) cgx#_smu#_tx_int_ena_w1s
5696 * CGX SMU TX Interrupt Enable Set Registers This register sets interrupt
5699 union cgxx_smux_tx_int_ena_w1s {
5701 struct cgxx_smux_tx_int_ena_w1s_s {
5704 u64 fake_commit : 1;
5708 u64 reserved_6_63 : 58;
5710 /* struct cgxx_smux_tx_int_ena_w1s_s cn; */
5713 static inline u64 CGXX_SMUX_TX_INT_ENA_W1S(u64 a)
5714 __attribute__ ((pure, always_inline));
5715 static inline u64 CGXX_SMUX_TX_INT_ENA_W1S(u64 a)
5717 return 0x20158 + 0x40000 * a;
5721 * Register (RSL) cgx#_smu#_tx_int_w1s
5723 * CGX SMU TX Interrupt Set Registers This register sets interrupt bits.
5725 union cgxx_smux_tx_int_w1s {
5727 struct cgxx_smux_tx_int_w1s_s {
5730 u64 fake_commit : 1;
5734 u64 reserved_6_63 : 58;
5736 /* struct cgxx_smux_tx_int_w1s_s cn; */
5739 static inline u64 CGXX_SMUX_TX_INT_W1S(u64 a)
5740 __attribute__ ((pure, always_inline));
5741 static inline u64 CGXX_SMUX_TX_INT_W1S(u64 a)
5743 return 0x20148 + 0x40000 * a;
5747 * Register (RSL) cgx#_smu#_tx_min_pkt
5749 * CGX SMU TX Minimum-Size-Packet Registers Internal: [MIN_SIZE] less
5750 * than 16 will be ignored by hardware which will use 16 instead.
5752 union cgxx_smux_tx_min_pkt {
5754 struct cgxx_smux_tx_min_pkt_s {
5756 u64 reserved_8_63 : 56;
5758 /* struct cgxx_smux_tx_min_pkt_s cn; */
5761 static inline u64 CGXX_SMUX_TX_MIN_PKT(u64 a)
5762 __attribute__ ((pure, always_inline));
5763 static inline u64 CGXX_SMUX_TX_MIN_PKT(u64 a)
5765 return 0x20118 + 0x40000 * a;
5769 * Register (RSL) cgx#_smu#_tx_pause_pkt_dmac
5771 * CGX SMU TX PAUSE-Packet DMAC-Field Registers This register provides
5772 * the DMAC value that is placed in outbound PAUSE packets.
5774 union cgxx_smux_tx_pause_pkt_dmac {
5776 struct cgxx_smux_tx_pause_pkt_dmac_s {
5778 u64 reserved_48_63 : 16;
5780 /* struct cgxx_smux_tx_pause_pkt_dmac_s cn; */
5783 static inline u64 CGXX_SMUX_TX_PAUSE_PKT_DMAC(u64 a)
5784 __attribute__ ((pure, always_inline));
5785 static inline u64 CGXX_SMUX_TX_PAUSE_PKT_DMAC(u64 a)
5787 return 0x20168 + 0x40000 * a;
5791 * Register (RSL) cgx#_smu#_tx_pause_pkt_interval
5793 * CGX SMU TX PAUSE-Packet Transmission-Interval Registers This register
5794 * specifies how often PAUSE packets are sent.
5796 union cgxx_smux_tx_pause_pkt_interval {
5798 struct cgxx_smux_tx_pause_pkt_interval_s {
5800 u64 hg2_intra_interval : 16;
5801 u64 hg2_intra_en : 1;
5802 u64 reserved_33_63 : 31;
5804 /* struct cgxx_smux_tx_pause_pkt_interval_s cn; */
5807 static inline u64 CGXX_SMUX_TX_PAUSE_PKT_INTERVAL(u64 a)
5808 __attribute__ ((pure, always_inline));
5809 static inline u64 CGXX_SMUX_TX_PAUSE_PKT_INTERVAL(u64 a)
5811 return 0x20120 + 0x40000 * a;
5815 * Register (RSL) cgx#_smu#_tx_pause_pkt_time
5817 * CGX SMU TX PAUSE Packet Time Registers
5819 union cgxx_smux_tx_pause_pkt_time {
5821 struct cgxx_smux_tx_pause_pkt_time_s {
5823 u64 reserved_16_63 : 48;
5825 /* struct cgxx_smux_tx_pause_pkt_time_s cn; */
5828 static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TIME(u64 a)
5829 __attribute__ ((pure, always_inline));
5830 static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TIME(u64 a)
5832 return 0x20110 + 0x40000 * a;
5836 * Register (RSL) cgx#_smu#_tx_pause_pkt_type
5838 * CGX SMU TX PAUSE-Packet P_TYPE-Field Registers This register provides
5839 * the P_TYPE field that is placed in outbound PAUSE packets.
5841 union cgxx_smux_tx_pause_pkt_type {
5843 struct cgxx_smux_tx_pause_pkt_type_s {
5845 u64 reserved_16_63 : 48;
5847 /* struct cgxx_smux_tx_pause_pkt_type_s cn; */
5850 static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TYPE(u64 a)
5851 __attribute__ ((pure, always_inline));
5852 static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TYPE(u64 a)
5854 return 0x20170 + 0x40000 * a;
5858 * Register (RSL) cgx#_smu#_tx_pause_togo
5860 * CGX SMU TX Time-to-Backpressure Registers
5862 union cgxx_smux_tx_pause_togo {
5864 struct cgxx_smux_tx_pause_togo_s {
5867 u64 reserved_32_63 : 32;
5869 /* struct cgxx_smux_tx_pause_togo_s cn; */
5872 static inline u64 CGXX_SMUX_TX_PAUSE_TOGO(u64 a)
5873 __attribute__ ((pure, always_inline));
5874 static inline u64 CGXX_SMUX_TX_PAUSE_TOGO(u64 a)
5876 return 0x20130 + 0x40000 * a;
5880 * Register (RSL) cgx#_smu#_tx_pause_zero
5882 * CGX SMU TX PAUSE Zero Registers
5884 union cgxx_smux_tx_pause_zero {
5886 struct cgxx_smux_tx_pause_zero_s {
5888 u64 reserved_1_63 : 63;
5890 /* struct cgxx_smux_tx_pause_zero_s cn; */
5893 static inline u64 CGXX_SMUX_TX_PAUSE_ZERO(u64 a)
5894 __attribute__ ((pure, always_inline));
5895 static inline u64 CGXX_SMUX_TX_PAUSE_ZERO(u64 a)
5897 return 0x20138 + 0x40000 * a;
5901 * Register (RSL) cgx#_smu#_tx_soft_pause
5903 * CGX SMU TX Soft PAUSE Registers
5905 union cgxx_smux_tx_soft_pause {
5907 struct cgxx_smux_tx_soft_pause_s {
5909 u64 reserved_16_63 : 48;
5911 /* struct cgxx_smux_tx_soft_pause_s cn; */
5914 static inline u64 CGXX_SMUX_TX_SOFT_PAUSE(u64 a)
5915 __attribute__ ((pure, always_inline));
5916 static inline u64 CGXX_SMUX_TX_SOFT_PAUSE(u64 a)
5918 return 0x20128 + 0x40000 * a;
5922 * Register (RSL) cgx#_smu#_tx_thresh
5924 * CGX SMU TX Threshold Registers
5926 union cgxx_smux_tx_thresh {
5928 struct cgxx_smux_tx_thresh_s {
5930 u64 reserved_12_15 : 4;
5932 u64 reserved_21_23 : 3;
5934 u64 reserved_29_31 : 3;
5936 u64 reserved_44_63 : 20;
5938 /* struct cgxx_smux_tx_thresh_s cn; */
5941 static inline u64 CGXX_SMUX_TX_THRESH(u64 a)
5942 __attribute__ ((pure, always_inline));
5943 static inline u64 CGXX_SMUX_TX_THRESH(u64 a)
5945 return 0x20180 + 0x40000 * a;
5949 * Register (RSL) cgx#_spu#_an_adv
5951 * CGX SPU Autonegotiation Advertisement Registers Software programs this
5952 * register with the contents of the AN-link code word base page to be
5953 * transmitted during autonegotiation. (See IEEE 802.3 section 73.6 for
5954 * details.) Any write operations to this register prior to completion of
5955 * autonegotiation, as indicated by CGX()_SPU()_AN_STATUS[AN_COMPLETE],
5956 * should be followed by a renegotiation in order for the new values to
5957 * take effect. Renegotiation is initiated by setting
5958 * CGX()_SPU()_AN_CONTROL[AN_RESTART]. Once autonegotiation has
5959 * completed, software can examine this register along with
5960 * CGX()_SPU()_AN_LP_BASE to determine the highest common denominator
5963 union cgxx_spux_an_adv {
5965 struct cgxx_spux_an_adv_s {
5984 u64 a25g_krs_crs : 1;
5987 u64 a25g_rs_fec_req : 1;
5988 u64 a25g_br_fec_req : 1;
5991 u64 reserved_48_63 : 16;
5993 /* struct cgxx_spux_an_adv_s cn; */
5996 static inline u64 CGXX_SPUX_AN_ADV(u64 a)
5997 __attribute__ ((pure, always_inline));
5998 static inline u64 CGXX_SPUX_AN_ADV(u64 a)
6000 return 0x10198 + 0x40000 * a;
6004 * Register (RSL) cgx#_spu#_an_bp_status
6006 * CGX SPU Autonegotiation Backplane Ethernet & BASE-R Copper Status
6007 * Registers The contents of this register are updated during
6008 * autonegotiation and are valid when CGX()_SPU()_AN_STATUS[AN_COMPLETE]
6009 * is set. At that time, one of the port type bits will be set depending
6010 * on the AN priority resolution. The port types are listed in order of
6011 * decreasing priority. If a BASE-R type is negotiated then [FEC] or
6012 * [RS_FEC] will be set to indicate whether/which FEC operation has been
6013 * negotiated and will be clear otherwise.
6015 union cgxx_spux_an_bp_status {
6017 struct cgxx_spux_an_bp_status_s {
6024 u64 n25g_krs_crs : 1;
6036 u64 reserved_18_63 : 46;
6038 /* struct cgxx_spux_an_bp_status_s cn; */
6041 static inline u64 CGXX_SPUX_AN_BP_STATUS(u64 a)
6042 __attribute__ ((pure, always_inline));
6043 static inline u64 CGXX_SPUX_AN_BP_STATUS(u64 a)
6045 return 0x101b8 + 0x40000 * a;
6049 * Register (RSL) cgx#_spu#_an_control
6051 * CGX SPU Autonegotiation Control Registers
6053 union cgxx_spux_an_control {
6055 struct cgxx_spux_an_control_s {
6056 u64 reserved_0_8 : 9;
6058 u64 reserved_10_11 : 2;
6061 u64 reserved_14 : 1;
6063 u64 an_arb_link_chk_en : 1;
6064 u64 usx_an_arb_link_chk_en : 1;
6065 u64 reserved_18_63 : 46;
6067 /* struct cgxx_spux_an_control_s cn; */
6070 static inline u64 CGXX_SPUX_AN_CONTROL(u64 a)
6071 __attribute__ ((pure, always_inline));
6072 static inline u64 CGXX_SPUX_AN_CONTROL(u64 a)
6074 return 0x10188 + 0x40000 * a;
6078 * Register (RSL) cgx#_spu#_an_lp_base
6080 * CGX SPU Autonegotiation Link-Partner Base-Page Ability Registers This
6081 * register captures the contents of the latest AN link code word base
6082 * page received from the link partner during autonegotiation. (See IEEE
6083 * 802.3 section 73.6 for details.) CGX()_SPU()_AN_STATUS[PAGE_RX] is set
6084 * when this register is updated by hardware.
6086 union cgxx_spux_an_lp_base {
6088 struct cgxx_spux_an_lp_base_s {
6107 u64 a25g_krs_crs : 1;
6110 u64 a25g_rs_fec_req : 1;
6111 u64 a25g_br_fec_req : 1;
6114 u64 reserved_48_63 : 16;
6116 /* struct cgxx_spux_an_lp_base_s cn; */
6119 static inline u64 CGXX_SPUX_AN_LP_BASE(u64 a)
6120 __attribute__ ((pure, always_inline));
6121 static inline u64 CGXX_SPUX_AN_LP_BASE(u64 a)
6123 return 0x101a0 + 0x40000 * a;
6127 * Register (RSL) cgx#_spu#_an_lp_xnp
6129 * CGX SPU Autonegotiation Link Partner Extended Next Page Ability
6130 * Registers This register captures the contents of the latest next page
6131 * code word received from the link partner during autonegotiation, if
6132 * any. See IEEE 802.3 section 73.7.7 for details.
6134 union cgxx_spux_an_lp_xnp {
6136 struct cgxx_spux_an_lp_xnp_s {
6144 u64 reserved_48_63 : 16;
6146 /* struct cgxx_spux_an_lp_xnp_s cn; */
6149 static inline u64 CGXX_SPUX_AN_LP_XNP(u64 a)
6150 __attribute__ ((pure, always_inline));
6151 static inline u64 CGXX_SPUX_AN_LP_XNP(u64 a)
6153 return 0x101b0 + 0x40000 * a;
6157 * Register (RSL) cgx#_spu#_an_status
6159 * CGX SPU Autonegotiation Status Registers
6161 union cgxx_spux_an_status {
6163 struct cgxx_spux_an_status_s {
6166 u64 link_status : 1;
6169 u64 an_complete : 1;
6174 u64 reserved_10_63 : 54;
6176 /* struct cgxx_spux_an_status_s cn; */
6179 static inline u64 CGXX_SPUX_AN_STATUS(u64 a)
6180 __attribute__ ((pure, always_inline));
6181 static inline u64 CGXX_SPUX_AN_STATUS(u64 a)
6183 return 0x10190 + 0x40000 * a;
6187 * Register (RSL) cgx#_spu#_an_xnp_tx
6189 * CGX SPU Autonegotiation Extended Next Page Transmit Registers Software
6190 * programs this register with the contents of the AN message next page
6191 * or unformatted next page link code word to be transmitted during
6192 * autonegotiation. Next page exchange occurs after the base link code
6193 * words have been exchanged if either end of the link segment sets the
6194 * NP bit to 1, indicating that it has at least one next page to send.
6195 * Once initiated, next page exchange continues until both ends of the
6196 * link segment set their NP bits to 0. See IEEE 802.3 section 73.7.7 for
6199 union cgxx_spux_an_xnp_tx {
6201 struct cgxx_spux_an_xnp_tx_s {
6209 u64 reserved_48_63 : 16;
6211 /* struct cgxx_spux_an_xnp_tx_s cn; */
6214 static inline u64 CGXX_SPUX_AN_XNP_TX(u64 a)
6215 __attribute__ ((pure, always_inline));
6216 static inline u64 CGXX_SPUX_AN_XNP_TX(u64 a)
6218 return 0x101a8 + 0x40000 * a;
6222 * Register (RSL) cgx#_spu#_br_algn_status
6224 * CGX SPU Multilane BASE-R PCS Alignment-Status Registers This register
6225 * implements the IEEE 802.3 multilane BASE-R PCS alignment status 1-4
6226 * registers (3.50-3.53). It is valid only when the LPCS type is
6227 * 40GBASE-R, 50GBASE-R, 100GBASE-R, (CGX()_CMR()_CONFIG[LMAC_TYPE] =
6228 * CGX_LMAC_TYPES_E::FORTYG_R,FIFTYG_R,HUNDREDG_R), and always returns
6229 * 0x0 for all other LPCS types. Service interfaces (lanes) 19-0 (100G)
6230 * and 3-0 (all others) are mapped to PCS lanes 19-0 or 3-0 via
6231 * CGX()_SPU()_BR_LANE_MAP()[LN_MAPPING]. For 100G, logical lane 0 fans
6232 * out to service interfaces 0-4, logical lane 1 fans out to service
6233 * interfaces 5-9, ... etc. For all other modes, logical lanes and
6234 * service interfaces are identical. Logical interfaces (lanes) map to
6235 * SerDes lanes via CGX()_CMR()_CONFIG[LANE_TO_SDS] (programmable).
6237 union cgxx_spux_br_algn_status {
6239 struct cgxx_spux_br_algn_status_s {
6240 u64 block_lock : 20;
6241 u64 reserved_20_29 : 10;
6243 u64 reserved_31_40 : 10;
6244 u64 marker_lock : 20;
6245 u64 reserved_61_63 : 3;
6247 /* struct cgxx_spux_br_algn_status_s cn; */
6250 static inline u64 CGXX_SPUX_BR_ALGN_STATUS(u64 a)
6251 __attribute__ ((pure, always_inline));
6252 static inline u64 CGXX_SPUX_BR_ALGN_STATUS(u64 a)
6254 return 0x10050 + 0x40000 * a;
6258 * Register (RSL) cgx#_spu#_br_lane_map#
6260 * CGX SPU 40,50,100GBASE-R Lane-Mapping Registers This register
6261 * implements the IEEE 802.3 lane 0-19 mapping registers (3.400-3.403).
6262 * It is valid only when the LPCS type is 40GBASE-R, 50GBASE-R,
6263 * 100GBASE-R, USXGMII (CGX()_CMR()_CONFIG[LMAC_TYPE]), and always
6264 * returns 0x0 for all other LPCS types. The LNx_MAPPING field for each
6265 * programmed PCS lane (called service interface in 802.3) is valid when
6266 * that lane has achieved alignment marker lock on the receive side (i.e.
6267 * the associated CGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), and is
6268 * invalid otherwise. When valid, it returns the actual detected receive
6269 * PCS lane number based on the received alignment marker contents
6270 * received on that service interface. In RS-FEC mode the LNx_MAPPING
6271 * field is valid when that lane has achieved alignment marker lock on
6272 * the receive side (i.e. the associated
6273 * CGX()_SPU()_RSFEC_STATUS[AMPS_LOCK] = 1), and is invalid otherwise.
6274 * When valid, it returns the actual detected receive FEC lane number
6275 * based on the received alignment marker contents received on that
 * logical lane; therefore, for RS-FEC, expect that LNx_MAPPING = x. The
6277 * mapping is flexible because IEEE 802.3 allows multilane BASE-R receive
6278 * lanes to be re-ordered. Note that for the transmit side, each logical
6279 * lane is mapped to a physical SerDes lane based on the programming of
6280 * CGX()_CMR()_CONFIG[LANE_TO_SDS]. For the receive side,
6281 * CGX()_CMR()_CONFIG[LANE_TO_SDS] specifies the logical lane to physical
6282 * SerDes lane mapping, and this register specifies the service interface
6283 * (or lane) to PCS lane mapping.
6285 union cgxx_spux_br_lane_mapx {
6287 struct cgxx_spux_br_lane_mapx_s {
6289 u64 reserved_6_63 : 58;
6291 /* struct cgxx_spux_br_lane_mapx_s cn; */
6294 static inline u64 CGXX_SPUX_BR_LANE_MAPX(u64 a, u64 b)
6295 __attribute__ ((pure, always_inline));
6296 static inline u64 CGXX_SPUX_BR_LANE_MAPX(u64 a, u64 b)
6298 return 0x10600 + 0x40000 * a + 8 * b;
6302 * Register (RSL) cgx#_spu#_br_pmd_control
6304 * CGX SPU BASE-R PMD Control Registers
6306 union cgxx_spux_br_pmd_control {
6308 struct cgxx_spux_br_pmd_control_s {
6309 u64 train_restart : 1;
6311 u64 use_lane_poly : 1;
6312 u64 max_wait_disable : 1;
6313 u64 reserved_4_63 : 60;
6315 struct cgxx_spux_br_pmd_control_cn96xx {
6316 u64 train_restart : 1;
6318 u64 use_lane_poly : 1;
6319 u64 reserved_3_63 : 61;
6321 /* struct cgxx_spux_br_pmd_control_s cnf95xxp1; */
6322 /* struct cgxx_spux_br_pmd_control_cn96xx cnf95xxp2; */
6325 static inline u64 CGXX_SPUX_BR_PMD_CONTROL(u64 a)
6326 __attribute__ ((pure, always_inline));
6327 static inline u64 CGXX_SPUX_BR_PMD_CONTROL(u64 a)
6329 return 0x100a8 + 0x40000 * a;
6333 * Register (RSL) cgx#_spu#_br_pmd_ld_cup
6335 * INTERNAL:CGX SPU BASE-R PMD Local Device Coefficient Update Registers
6336 * This register implements MDIO register 1.154 of 802.3-2012 Section 5
 * CL45 for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note
6338 * that for 10G, 25G LN0_ only is used. It implements MDIO registers
6339 * 1.1300-1.1303 for all other BASE-R modes (40G, 50G, 100G) per
6340 * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used. The
6341 * fields in this register are read/write even though they are specified
6342 * as read-only in 802.3. The register is automatically cleared at the
6343 * start of training. When link training is in progress, each field
6344 * reflects the contents of the coefficient update field in the
6345 * associated lane's outgoing training frame. If
6346 * CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is set, then this register
6347 * must be updated by software during link training and hardware updates
6348 * are disabled. If CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is clear,
6349 * this register is automatically updated by hardware, and it should not
6350 * be written by software. The lane fields in this register are indexed
6351 * by logical PCS lane ID.
6353 union cgxx_spux_br_pmd_ld_cup {
6355 struct cgxx_spux_br_pmd_ld_cup_s {
6361 /* struct cgxx_spux_br_pmd_ld_cup_s cn; */
6364 static inline u64 CGXX_SPUX_BR_PMD_LD_CUP(u64 a)
6365 __attribute__ ((pure, always_inline));
6366 static inline u64 CGXX_SPUX_BR_PMD_LD_CUP(u64 a)
6368 return 0x100c8 + 0x40000 * a;
6372 * Register (RSL) cgx#_spu#_br_pmd_ld_rep
6374 * INTERNAL:CGX SPU BASE-R PMD Local Device Status Report Registers This
6375 * register implements MDIO register 1.155 of 802.3-2012 Section 5 CL45
 * for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note that
6377 * for 10G, 25G LN0_ only is used. It implements MDIO registers
6378 * 1.1400-1.1403 for all other BASE-R modes (40G, 50G, 100G) per
6379 * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used. The
6380 * fields in this register are read/write even though they are specified
6381 * as read-only in 802.3. The register is automatically cleared at the
6382 * start of training. Each field reflects the contents of the status
6383 * report field in the associated lane's outgoing training frame. If
6384 * CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is set, then this register
6385 * must be updated by software during link training and hardware updates
6386 * are disabled. If CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is clear,
6387 * this register is automatically updated by hardware, and it should not
6388 * be written by software. The lane fields in this register are indexed
6389 * by logical PCS lane ID.
6391 union cgxx_spux_br_pmd_ld_rep {
6393 struct cgxx_spux_br_pmd_ld_rep_s {
6399 /* struct cgxx_spux_br_pmd_ld_rep_s cn; */
6402 static inline u64 CGXX_SPUX_BR_PMD_LD_REP(u64 a)
6403 __attribute__ ((pure, always_inline));
6404 static inline u64 CGXX_SPUX_BR_PMD_LD_REP(u64 a)
6406 return 0x100d0 + 0x40000 * a;
6410 * Register (RSL) cgx#_spu#_br_pmd_lp_cup
6412 * INTERNAL:CGX SPU BASE-R PMD Link Partner Coefficient Update Registers
6413 * This register implements MDIO register 1.152 of 802.3-2012 Section 5
 * CL45 for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note
6415 * that for 10G, 25G LN0_ only is used. It implements MDIO registers
6416 * 1.1100-1.1103 for all other BASE-R modes (40G, 50G, 100G) per
6417 * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used. The
6418 * register is automatically cleared at the start of training. Each field
6419 * reflects the contents of the coefficient update field in the lane's
6420 * most recently received training frame. This register should not be
6421 * written when link training is enabled, i.e. when
6422 * CGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is set. The lane fields in this
6423 * register are indexed by logical PCS lane ID.
6425 union cgxx_spux_br_pmd_lp_cup {
6427 struct cgxx_spux_br_pmd_lp_cup_s {
6433 /* struct cgxx_spux_br_pmd_lp_cup_s cn; */
6436 static inline u64 CGXX_SPUX_BR_PMD_LP_CUP(u64 a)
6437 __attribute__ ((pure, always_inline));
6438 static inline u64 CGXX_SPUX_BR_PMD_LP_CUP(u64 a)
6440 return 0x100b8 + 0x40000 * a;
6444 * Register (RSL) cgx#_spu#_br_pmd_lp_rep
6446 * INTERNAL:CGX SPU BASE-R PMD Link Partner Status Report Registers This
6447 * register implements MDIO register 1.153 of 802.3-2012 Section 5 CL45
 * for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note that
6449 * for 10G, 25G LN0_ only is used. It implements MDIO registers
6450 * 1.1200-1.1203 for all other BASE-R modes (40G, 50G, 100G) per
6451 * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used. The
6452 * register is automatically cleared at the start of training. Each field
6453 * reflects the contents of the coefficient update field in the lane's
6454 * most recently received training frame. This register should not be
6455 * written when link training is enabled, i.e. when
6456 * CGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is set. The lane fields in this
6457 * register are indexed by logical PCS lane ID.
6459 union cgxx_spux_br_pmd_lp_rep {
6461 struct cgxx_spux_br_pmd_lp_rep_s {
6467 /* struct cgxx_spux_br_pmd_lp_rep_s cn; */
6470 static inline u64 CGXX_SPUX_BR_PMD_LP_REP(u64 a)
6471 __attribute__ ((pure, always_inline));
6472 static inline u64 CGXX_SPUX_BR_PMD_LP_REP(u64 a)
6474 return 0x100c0 + 0x40000 * a;
6478 * Register (RSL) cgx#_spu#_br_pmd_status
6480 * INTERNAL:CGX SPU BASE-R PMD Status Registers The lane fields in this
6481 * register are indexed by logical PCS lane ID. The lane 0 field (LN0_*)
6482 * is valid for 10GBASE-R, 25GBASE-R, 40GBASE-R, 50GBASE-R and
6483 * 100GBASE-R. The lane 1 field (LN1_*) is valid for 40GBASE-R, 50GBASE-R
6484 * and 100GBASE-R. The remaining fields (LN2_*, LN3_*) are only valid for
6485 * 40GBASE-R and 100GBASE-R.
6487 union cgxx_spux_br_pmd_status {
6489 struct cgxx_spux_br_pmd_status_s {
6490 u64 ln0_train_status : 4;
6491 u64 ln1_train_status : 4;
6492 u64 ln2_train_status : 4;
6493 u64 ln3_train_status : 4;
6494 u64 reserved_16_63 : 48;
6496 /* struct cgxx_spux_br_pmd_status_s cn; */
6499 static inline u64 CGXX_SPUX_BR_PMD_STATUS(u64 a)
6500 __attribute__ ((pure, always_inline));
6501 static inline u64 CGXX_SPUX_BR_PMD_STATUS(u64 a)
6503 return 0x100b0 + 0x40000 * a;
6507 * Register (RSL) cgx#_spu#_br_status1
6509 * CGX SPU BASE-R Status 1 Registers
6511 union cgxx_spux_br_status1 {
6513 struct cgxx_spux_br_status1_s {
6518 u64 reserved_4_11 : 8;
6520 u64 reserved_13_63 : 51;
6522 /* struct cgxx_spux_br_status1_s cn; */
6525 static inline u64 CGXX_SPUX_BR_STATUS1(u64 a)
6526 __attribute__ ((pure, always_inline));
6527 static inline u64 CGXX_SPUX_BR_STATUS1(u64 a)
6529 return 0x10030 + 0x40000 * a;
6533 * Register (RSL) cgx#_spu#_br_status2
6535 * CGX SPU BASE-R Status 2 Registers This register implements a
6536 * combination of the following IEEE 802.3 registers: * BASE-R PCS status
6537 * 2 (MDIO address 3.33). * BASE-R BER high-order counter (MDIO address
6538 * 3.44). * Errored-blocks high-order counter (MDIO address 3.45). Note
6539 * that the relative locations of some fields have been moved from IEEE
6540 * 802.3 in order to make the register layout more software friendly: the
6541 * BER counter high-order and low-order bits from sections 3.44 and 3.33
6542 * have been combined into the contiguous, 22-bit [BER_CNT] field;
6543 * likewise, the errored-blocks counter high-order and low-order bits
6544 * from section 3.45 have been combined into the contiguous, 22-bit
6547 union cgxx_spux_br_status2 {
6549 struct cgxx_spux_br_status2_s {
6550 u64 reserved_0_13 : 14;
6551 u64 latched_ber : 1;
6552 u64 latched_lock : 1;
6554 u64 reserved_38_39 : 2;
6556 u64 reserved_62_63 : 2;
6558 /* struct cgxx_spux_br_status2_s cn; */
6561 static inline u64 CGXX_SPUX_BR_STATUS2(u64 a)
6562 __attribute__ ((pure, always_inline));
6563 static inline u64 CGXX_SPUX_BR_STATUS2(u64 a)
6565 return 0x10038 + 0x40000 * a;
6569 * Register (RSL) cgx#_spu#_br_tp_control
6571 * CGX SPU BASE-R Test-Pattern Control Registers Refer to the test
6572 * pattern methodology described in 802.3 sections 49.2.8 and 82.2.10.
6574 union cgxx_spux_br_tp_control {
6576 struct cgxx_spux_br_tp_control_s {
6584 u64 scramble_tp : 2;
6585 u64 pr_tp_data_type : 1;
6586 u64 reserved_10_63 : 54;
6588 /* struct cgxx_spux_br_tp_control_s cn; */
6591 static inline u64 CGXX_SPUX_BR_TP_CONTROL(u64 a)
6592 __attribute__ ((pure, always_inline));
6593 static inline u64 CGXX_SPUX_BR_TP_CONTROL(u64 a)
6595 return 0x10040 + 0x40000 * a;
6599 * Register (RSL) cgx#_spu#_br_tp_err_cnt
6601 * CGX SPU BASE-R Test-Pattern Error-Count Registers This register
6602 * provides the BASE-R PCS test-pattern error counter.
6604 union cgxx_spux_br_tp_err_cnt {
6606 struct cgxx_spux_br_tp_err_cnt_s {
6608 u64 reserved_16_63 : 48;
6610 /* struct cgxx_spux_br_tp_err_cnt_s cn; */
6613 static inline u64 CGXX_SPUX_BR_TP_ERR_CNT(u64 a)
6614 __attribute__ ((pure, always_inline));
6615 static inline u64 CGXX_SPUX_BR_TP_ERR_CNT(u64 a)
6617 return 0x10048 + 0x40000 * a;
6621 * Register (RSL) cgx#_spu#_br_tp_seed_a
6623 * CGX SPU BASE-R Test-Pattern Seed A Registers Refer to the test pattern
6624 * methodology described in 802.3 sections 49.2.8 and 82.2.10.
6626 union cgxx_spux_br_tp_seed_a {
6628 struct cgxx_spux_br_tp_seed_a_s {
6630 u64 reserved_58_63 : 6;
6632 /* struct cgxx_spux_br_tp_seed_a_s cn; */
6635 static inline u64 CGXX_SPUX_BR_TP_SEED_A(u64 a)
6636 __attribute__ ((pure, always_inline));
6637 static inline u64 CGXX_SPUX_BR_TP_SEED_A(u64 a)
6639 return 0x10060 + 0x40000 * a;
6643 * Register (RSL) cgx#_spu#_br_tp_seed_b
6645 * CGX SPU BASE-R Test-Pattern Seed B Registers Refer to the test pattern
6646 * methodology described in 802.3 sections 49.2.8 and 82.2.10.
6648 union cgxx_spux_br_tp_seed_b {
6650 struct cgxx_spux_br_tp_seed_b_s {
6652 u64 reserved_58_63 : 6;
6654 /* struct cgxx_spux_br_tp_seed_b_s cn; */
6657 static inline u64 CGXX_SPUX_BR_TP_SEED_B(u64 a)
6658 __attribute__ ((pure, always_inline));
6659 static inline u64 CGXX_SPUX_BR_TP_SEED_B(u64 a)
6661 return 0x10068 + 0x40000 * a;
6665 * Register (RSL) cgx#_spu#_bx_status
6667 * CGX SPU BASE-X Status Registers
6669 union cgxx_spux_bx_status {
6671 struct cgxx_spux_bx_status_s {
6673 u64 reserved_4_10 : 7;
6676 u64 reserved_13_63 : 51;
6678 /* struct cgxx_spux_bx_status_s cn; */
6681 static inline u64 CGXX_SPUX_BX_STATUS(u64 a)
6682 __attribute__ ((pure, always_inline));
6683 static inline u64 CGXX_SPUX_BX_STATUS(u64 a)
6685 return 0x10028 + 0x40000 * a;
6689 * Register (RSL) cgx#_spu#_control1
6691 * CGX SPU Control 1 Registers
6693 union cgxx_spux_control1 {
6695 struct cgxx_spux_control1_s {
6696 u64 reserved_0_1 : 2;
6699 u64 reserved_7_10 : 4;
6701 u64 reserved_12 : 1;
6705 u64 usxgmii_type : 3;
6706 u64 usxgmii_rate : 3;
6708 u64 reserved_23_63 : 41;
6710 struct cgxx_spux_control1_cn96xxp1 {
6711 u64 reserved_0_1 : 2;
6714 u64 reserved_7_10 : 4;
6716 u64 reserved_12 : 1;
6720 u64 usxgmii_type : 3;
6721 u64 usxgmii_rate : 3;
6722 u64 reserved_22_63 : 42;
6724 /* struct cgxx_spux_control1_s cn96xxp3; */
6725 /* struct cgxx_spux_control1_cn96xxp1 cnf95xxp1; */
6726 struct cgxx_spux_control1_cnf95xxp2 {
6727 u64 reserved_0_1 : 2;
6730 u64 reserved_7_10 : 4;
6732 u64 reserved_12 : 1;
6736 u64 usxgmii_type : 3;
6737 u64 usxgmii_rate : 3;
6738 u64 reserved_22 : 1;
6739 u64 reserved_23_63 : 41;
6743 static inline u64 CGXX_SPUX_CONTROL1(u64 a)
6744 __attribute__ ((pure, always_inline));
6745 static inline u64 CGXX_SPUX_CONTROL1(u64 a)
6747 return 0x10000 + 0x40000 * a;
6751 * Register (RSL) cgx#_spu#_control2
6753 * CGX SPU Control 2 Registers
6755 union cgxx_spux_control2 {
6757 struct cgxx_spux_control2_s {
6759 u64 reserved_4_63 : 60;
6761 /* struct cgxx_spux_control2_s cn; */
6764 static inline u64 CGXX_SPUX_CONTROL2(u64 a)
6765 __attribute__ ((pure, always_inline));
6766 static inline u64 CGXX_SPUX_CONTROL2(u64 a)
6768 return 0x10018 + 0x40000 * a;
6772 * Register (RSL) cgx#_spu#_fec_abil
6774 * CGX SPU Forward Error Correction Ability Registers
6776 union cgxx_spux_fec_abil {
6778 struct cgxx_spux_fec_abil_s {
6781 u64 reserved_2_63 : 62;
6783 /* struct cgxx_spux_fec_abil_s cn; */
6786 static inline u64 CGXX_SPUX_FEC_ABIL(u64 a)
6787 __attribute__ ((pure, always_inline));
6788 static inline u64 CGXX_SPUX_FEC_ABIL(u64 a)
6790 return 0x100d8 + 0x40000 * a;
6794 * Register (RSL) cgx#_spu#_fec_control
6796 * CGX SPU Forward Error Correction Control Registers
6798 union cgxx_spux_fec_control {
6800 struct cgxx_spux_fec_control_s {
6803 u64 fec_byp_ind_en : 1;
6804 u64 fec_byp_cor_en : 1;
6805 u64 reserved_5_63 : 59;
6807 /* struct cgxx_spux_fec_control_s cn; */
6810 static inline u64 CGXX_SPUX_FEC_CONTROL(u64 a)
6811 __attribute__ ((pure, always_inline));
6812 static inline u64 CGXX_SPUX_FEC_CONTROL(u64 a)
6814 return 0x100e0 + 0x40000 * a;
6818 * Register (RSL) cgx#_spu#_fec_ln#_rsfec_err
6820 * CGX SPU Reed-Solomon FEC Symbol Error Counter for FEC Lanes 0-3
6821 * Registers This register is valid only when Reed-Solomon FEC is
6822 * enabled. The symbol error counters are defined in 802.3 section
6823 * 91.6.11 (for 100G and extended to 50G) and 802.3by-2016 section
6824 * 108.6.9 (for 25G and extended to USXGMII). The counter is reset to all
6825 * zeros when the register is read, and held at all ones in case of
6826 * overflow. The reset operation takes precedence over the increment
6827 * operation; if the register is read on the same clock cycle as an
6828 * increment operation, the counter is reset to all zeros and the
6829 * increment operation is lost. The counters are writable for test
6830 * purposes, rather than read-only as specified in IEEE 802.3.
6832 union cgxx_spux_fec_lnx_rsfec_err {
6834 struct cgxx_spux_fec_lnx_rsfec_err_s {
6835 u64 symb_err_cnt : 32;
6836 u64 reserved_32_63 : 32;
6838 /* struct cgxx_spux_fec_lnx_rsfec_err_s cn; */
6841 static inline u64 CGXX_SPUX_FEC_LNX_RSFEC_ERR(u64 a, u64 b)
6842 __attribute__ ((pure, always_inline));
6843 static inline u64 CGXX_SPUX_FEC_LNX_RSFEC_ERR(u64 a, u64 b)
6845 return 0x10900 + 0x40000 * a + 8 * b;
6849 * Register (RSL) cgx#_spu#_int
6851 * CGX SPU Interrupt Registers
6853 union cgxx_spux_int {
6855 struct cgxx_spux_int_s {
6857 u64 rx_link_down : 1;
6867 u64 an_link_good : 1;
6868 u64 an_complete : 1;
6869 u64 training_done : 1;
6870 u64 training_failure : 1;
6871 u64 fec_align_status : 1;
6873 u64 rsfec_uncorr : 1;
6875 u64 usx_an_lnk_st : 1;
6877 u64 reserved_21_63 : 43;
6879 /* struct cgxx_spux_int_s cn; */
6882 static inline u64 CGXX_SPUX_INT(u64 a)
6883 __attribute__ ((pure, always_inline));
6884 static inline u64 CGXX_SPUX_INT(u64 a)
6886 return 0x10220 + 0x40000 * a;
6890 * Register (RSL) cgx#_spu#_int_ena_w1c
6892 * CGX SPU Interrupt Enable Clear Registers This register clears
6893 * interrupt enable bits.
6895 union cgxx_spux_int_ena_w1c {
6897 struct cgxx_spux_int_ena_w1c_s {
6899 u64 rx_link_down : 1;
6909 u64 an_link_good : 1;
6910 u64 an_complete : 1;
6911 u64 training_done : 1;
6912 u64 training_failure : 1;
6913 u64 fec_align_status : 1;
6915 u64 rsfec_uncorr : 1;
6917 u64 usx_an_lnk_st : 1;
6919 u64 reserved_21_63 : 43;
6921 /* struct cgxx_spux_int_ena_w1c_s cn; */
6924 static inline u64 CGXX_SPUX_INT_ENA_W1C(u64 a)
6925 __attribute__ ((pure, always_inline));
6926 static inline u64 CGXX_SPUX_INT_ENA_W1C(u64 a)
6928 return 0x10230 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_int_ena_w1s
 *
 * CGX SPU Interrupt Enable Set Registers This register sets interrupt
 * enable bits.
 */
/* NOTE(review): extraction dropped lines of this definition ("u64 u;",
 * several 1-bit members — [RESERVED_21_63] implies bits 0..20 are all
 * named — and the closing "} s;" / "};"); incomplete as shown.
 */
union cgxx_spux_int_ena_w1s {
	struct cgxx_spux_int_ena_w1s_s {
		u64 rx_link_down : 1;
		u64 an_link_good : 1;
		u64 an_complete : 1;
		u64 training_done : 1;
		u64 training_failure : 1;
		u64 fec_align_status : 1;
		u64 rsfec_uncorr : 1;
		u64 usx_an_lnk_st : 1;
		u64 reserved_21_63 : 43;
	/* struct cgxx_spux_int_ena_w1s_s cn; */

/* CSR address: 0x10238 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_INT_ENA_W1S(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_INT_ENA_W1S(u64 a)
return 0x10238 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_int_w1s
 *
 * CGX SPU Interrupt Set Registers This register sets interrupt bits.
 */
/* NOTE(review): extraction dropped lines of this definition ("u64 u;",
 * several 1-bit members — [RESERVED_21_63] implies bits 0..20 are all
 * named — and the closing "} s;" / "};"); incomplete as shown.
 */
union cgxx_spux_int_w1s {
	struct cgxx_spux_int_w1s_s {
		u64 rx_link_down : 1;
		u64 an_link_good : 1;
		u64 an_complete : 1;
		u64 training_done : 1;
		u64 training_failure : 1;
		u64 fec_align_status : 1;
		u64 rsfec_uncorr : 1;
		u64 usx_an_lnk_st : 1;
		u64 reserved_21_63 : 43;
	/* struct cgxx_spux_int_w1s_s cn; */

/* CSR address: 0x10228 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_INT_W1S(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_INT_W1S(u64 a)
return 0x10228 + 0x40000 * a;
7015 * Register (RSL) cgx#_spu#_ln#_br_bip_err_cnt
7017 * CGX SPU 40,50,100GBASE-R BIP Error-Counter Registers This register
7018 * implements the IEEE 802.3 BIP error-counter registers for PCS lanes
7019 * 0-19 (3.200-3.203). It is valid only when the LPCS type is 40GBASE-R,
7020 * 50GBASE-R, 100GBASE-R, (CGX()_CMR()_CONFIG[LMAC_TYPE]), and always
7021 * returns 0x0 for all other LPCS types. The counters are indexed by the
7022 * RX PCS lane number based on the alignment marker detected on each lane
7023 * and captured in CGX()_SPU()_BR_LANE_MAP(). Each counter counts the BIP
7024 * errors for its PCS lane, and is held at all ones in case of overflow.
7025 * The counters are reset to all zeros when this register is read by
7026 * software. The reset operation takes precedence over the increment
7027 * operation; if the register is read on the same clock cycle as an
7028 * increment operation, the counter is reset to all zeros and the
7029 * increment operation is lost. The counters are writable for test
7030 * purposes, rather than read-only as specified in IEEE 802.3.
7032 union cgxx_spux_lnx_br_bip_err_cnt {
7034 struct cgxx_spux_lnx_br_bip_err_cnt_s {
7035 u64 bip_err_cnt : 16;
7036 u64 reserved_16_63 : 48;
7038 /* struct cgxx_spux_lnx_br_bip_err_cnt_s cn; */
7041 static inline u64 CGXX_SPUX_LNX_BR_BIP_ERR_CNT(u64 a, u64 b)
7042 __attribute__ ((pure, always_inline));
7043 static inline u64 CGXX_SPUX_LNX_BR_BIP_ERR_CNT(u64 a, u64 b)
7045 return 0x10500 + 0x40000 * a + 8 * b;
7049 * Register (RSL) cgx#_spu#_ln#_fec_corr_blks
7051 * CGX SPU FEC Corrected-Blocks Counters 0-19 Registers This register is
7052 * valid only when the LPCS type is BASE-R
7053 * (CGX()_CMR()_CONFIG[LMAC_TYPE]) and applies to BASE-R FEC and Reed-
7054 * Solomon FEC (RS-FEC). When BASE-R FEC is enabled, the FEC corrected-
7055 * block counters are defined in IEEE 802.3 section 74.8.4.1. Each
7056 * corrected-blocks counter increments by one for a corrected FEC block,
7057 * i.e. an FEC block that has been received with invalid parity on the
7058 * associated PCS lane and has been corrected by the FEC decoder. The
7059 * counter is reset to all zeros when the register is read, and held at
7060 * all ones in case of overflow. The reset operation takes precedence
7061 * over the increment operation; if the register is read on the same
7062 * clock cycle as an increment operation, the counter is reset to all
7063 * zeros and the increment operation is lost. The counters are writable
7064 * for test purposes, rather than read-only as specified in IEEE 802.3.
7066 union cgxx_spux_lnx_fec_corr_blks {
7068 struct cgxx_spux_lnx_fec_corr_blks_s {
7069 u64 ln_corr_blks : 32;
7070 u64 reserved_32_63 : 32;
7072 /* struct cgxx_spux_lnx_fec_corr_blks_s cn; */
7075 static inline u64 CGXX_SPUX_LNX_FEC_CORR_BLKS(u64 a, u64 b)
7076 __attribute__ ((pure, always_inline));
7077 static inline u64 CGXX_SPUX_LNX_FEC_CORR_BLKS(u64 a, u64 b)
7079 return 0x10700 + 0x40000 * a + 8 * b;
7083 * Register (RSL) cgx#_spu#_ln#_fec_uncorr_blks
7085 * CGX SPU FEC Uncorrected-Blocks Counters 0-19 Registers This register
7086 * is valid only when the LPCS type is BASE-R
7087 * (CGX()_CMR()_CONFIG[LMAC_TYPE]) and applies to BASE-R FEC and Reed-
7088 * Solomon FEC (RS-FEC). When BASE-R FEC is enabled, the FEC corrected-
7089 * block counters are defined in IEEE 802.3 section 74.8.4.2. Each
7090 * uncorrected-blocks counter increments by one for an uncorrected FEC
7091 * block, i.e. an FEC block that has been received with invalid parity on
7092 * the associated PCS lane and has not been corrected by the FEC decoder.
7093 * The counter is reset to all zeros when the register is read, and held
7094 * at all ones in case of overflow. The reset operation takes precedence
7095 * over the increment operation; if the register is read on the same
7096 * clock cycle as an increment operation, the counter is reset to all
7097 * zeros and the increment operation is lost. The counters are writable
7098 * for test purposes, rather than read-only as specified in IEEE 802.3.
7100 union cgxx_spux_lnx_fec_uncorr_blks {
7102 struct cgxx_spux_lnx_fec_uncorr_blks_s {
7103 u64 ln_uncorr_blks : 32;
7104 u64 reserved_32_63 : 32;
7106 /* struct cgxx_spux_lnx_fec_uncorr_blks_s cn; */
7109 static inline u64 CGXX_SPUX_LNX_FEC_UNCORR_BLKS(u64 a, u64 b)
7110 __attribute__ ((pure, always_inline));
7111 static inline u64 CGXX_SPUX_LNX_FEC_UNCORR_BLKS(u64 a, u64 b)
7113 return 0x10800 + 0x40000 * a + 8 * b;
/**
 * Register (RSL) cgx#_spu#_lpcs_states
 *
 * CGX SPU BASE-X Transmit/Receive States Registers
 */
/* NOTE(review): extraction dropped member lines of this definition
 * (fields below bit 5(?), bits 25(?), 28..30, plus "u64 u;" and the
 * closing "} s;" / "};"); layout below is incomplete.
 */
union cgxx_spux_lpcs_states {
	struct cgxx_spux_lpcs_states_s {
		u64 deskew_am_found : 20;
		u64 reserved_26_27 : 2;
		u64 reserved_31_63 : 33;
	/* struct cgxx_spux_lpcs_states_s cn; */

/* CSR address: 0x10208 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_LPCS_STATES(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_LPCS_STATES(u64 a)
return 0x10208 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_misc_control
 *
 * CGX SPU Miscellaneous Control Registers "* RX logical PCS lane
 * polarity vector \<3:0\> = [XOR_RXPLRT]\<3:0\> ^ {4{[RXPLRT]}}. * TX
 * logical PCS lane polarity vector \<3:0\> = [XOR_TXPLRT]\<3:0\> ^
 * {4{[TXPLRT]}}. In short, keep [RXPLRT] and [TXPLRT] cleared, and use
 * [XOR_RXPLRT] and [XOR_TXPLRT] fields to define the polarity per
 * logical PCS lane. Only bit 0 of vector is used for 10GBASE-R, and only
 * bits 1:0 of vector are used for RXAUI."
 */
/* NOTE(review): extraction dropped member lines covering bits 0..9 —
 * per the text above these include [TXPLRT], [RXPLRT], [XOR_TXPLRT],
 * [XOR_RXPLRT] — plus "u64 u;" and the closing "} s;" / "};".
 */
union cgxx_spux_misc_control {
	struct cgxx_spux_misc_control_s {
		u64 intlv_rdisp : 1;
		u64 skip_after_term : 1;
		u64 rx_packet_dis : 1;
		u64 rx_edet_signal_ok : 1;
		u64 reserved_14_63 : 50;
	/* struct cgxx_spux_misc_control_s cn; */

/* CSR address: 0x10218 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_MISC_CONTROL(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_MISC_CONTROL(u64 a)
return 0x10218 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_rsfec_corr
 *
 * CGX SPU Reed-Solomon FEC Corrected Codeword Counter Register This
 * register implements the IEEE 802.3 RS-FEC corrected codewords counter
 * described in 802.3 section 91.6.8 (for 100G and extended to 50G) and
 * 802.3by-2016 section 108.6.7 (for 25G and extended to USXGMII).
 */
/* NOTE(review): extraction dropped the counter member covering bits
 * 0..31 (implied by [RESERVED_32_63]), plus "u64 u;" and the closing
 * "} s;" / "};".
 */
union cgxx_spux_rsfec_corr {
	struct cgxx_spux_rsfec_corr_s {
		u64 reserved_32_63 : 32;
	/* struct cgxx_spux_rsfec_corr_s cn; */

/* CSR address: 0x10088 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_RSFEC_CORR(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_RSFEC_CORR(u64 a)
return 0x10088 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_rsfec_status
 *
 * CGX SPU Reed-Solomon FEC Status Registers This register implements the
 * IEEE 802.3 RS-FEC status and lane mapping registers as described in
 * 802.3 section 91.6 (for 100G and extended to 50G) and 802.3by-2016
 * section 108-6 (for 25G and extended to USXGMII).
 */
/* NOTE(review): extraction dropped two member lines covering bits 9..13
 * (between FEC_ALIGN_STATUS and FEC_BYP_IND_ABIL), plus "u64 u;" and
 * the closing "} s;" / "};".
 */
union cgxx_spux_rsfec_status {
	struct cgxx_spux_rsfec_status_s {
		u64 fec_lane_mapping : 8;
		u64 fec_align_status : 1;
		u64 fec_byp_ind_abil : 1;
		u64 fec_byp_cor_abil : 1;
		u64 reserved_16_63 : 48;
	/* struct cgxx_spux_rsfec_status_s cn; */

/* CSR address: 0x10080 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_RSFEC_STATUS(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_RSFEC_STATUS(u64 a)
return 0x10080 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_rsfec_uncorr
 *
 * CGX SPU Reed-Solomon FEC Uncorrected Codeword Counter Register This
 * register implements the IEEE 802.3 RS-FEC uncorrected codewords
 * counter described in 802.3 section 91.6.9 (for 100G and extended to
 * 50G) and 802.3by-2016 section 108.6.8 (for 25G and extended to
 * USXGMII).
 */
/* NOTE(review): extraction dropped the counter member covering bits
 * 0..31 (implied by [RESERVED_32_63]), plus "u64 u;" and the closing
 * "} s;" / "};".
 */
union cgxx_spux_rsfec_uncorr {
	struct cgxx_spux_rsfec_uncorr_s {
		u64 reserved_32_63 : 32;
	/* struct cgxx_spux_rsfec_uncorr_s cn; */

/* CSR address: 0x10090 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_RSFEC_UNCORR(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_RSFEC_UNCORR(u64 a)
return 0x10090 + 0x40000 * a;
7255 * Register (RSL) cgx#_spu#_rx_eee_wake
7257 * INTERNAL: CGX SPU RX EEE Wake Error Counter Registers Reserved.
7258 * Internal: A counter that is incremented each time that the LPI receive
7259 * state diagram enters the RX_WTF state indicating that a wake time
7260 * fault has been detected.
7262 union cgxx_spux_rx_eee_wake {
7264 struct cgxx_spux_rx_eee_wake_s {
7265 u64 wtf_error_counter : 16;
7266 u64 reserved_16_63 : 48;
7268 /* struct cgxx_spux_rx_eee_wake_s cn; */
7271 static inline u64 CGXX_SPUX_RX_EEE_WAKE(u64 a)
7272 __attribute__ ((pure, always_inline));
7273 static inline u64 CGXX_SPUX_RX_EEE_WAKE(u64 a)
7275 return 0x103e0 + 8 * a;
/**
 * Register (RSL) cgx#_spu#_rx_lpi_timing
 *
 * INTERNAL: CGX SPU RX EEE LPI Timing Parameters Registers Reserved.
 * Internal: This register specifies receiver LPI timing parameters Tqr,
 * [... remainder of this sentence not visible in this chunk ...]
 */
/* NOTE(review): extraction dropped the timing-parameter members (bits
 * 0..59 and 62..63 — only [RESERVED_60_61] is visible), plus "u64 u;"
 * and the closing "} s;" / "};".
 */
union cgxx_spux_rx_lpi_timing {
	struct cgxx_spux_rx_lpi_timing_s {
		u64 reserved_60_61 : 2;
	/* struct cgxx_spux_rx_lpi_timing_s cn; */

/* CSR address: 0x103c0 + 8 * a.
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_RX_LPI_TIMING(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_RX_LPI_TIMING(u64 a)
return 0x103c0 + 8 * a;
7306 * Register (RSL) cgx#_spu#_rx_lpi_timing2
7308 * INTERNAL: CGX SPU RX EEE LPI Timing2 Parameters Registers Reserved.
7309 * Internal: This register specifies receiver LPI timing parameters
7312 union cgxx_spux_rx_lpi_timing2 {
7314 struct cgxx_spux_rx_lpi_timing2_s {
7315 u64 hold_off_timer : 20;
7316 u64 reserved_20_63 : 44;
7318 /* struct cgxx_spux_rx_lpi_timing2_s cn; */
7321 static inline u64 CGXX_SPUX_RX_LPI_TIMING2(u64 a)
7322 __attribute__ ((pure, always_inline));
7323 static inline u64 CGXX_SPUX_RX_LPI_TIMING2(u64 a)
7325 return 0x10420 + 8 * a;
/**
 * Register (RSL) cgx#_spu#_rx_mrk_cnt
 *
 * CGX SPU Receiver Marker Interval Count Control Registers
 */
/* NOTE(review): extraction dropped the member covering bits 0..19
 * (implied by [RESERVED_20_43]), plus "u64 u;" and the closing
 * "} s;" / "};".
 */
union cgxx_spux_rx_mrk_cnt {
	struct cgxx_spux_rx_mrk_cnt_s {
		u64 reserved_20_43 : 24;
		u64 by_mrk_100g : 1;
		u64 reserved_45_47 : 3;
		u64 ram_mrk_cnt : 8;
		u64 reserved_56_63 : 8;
	/* struct cgxx_spux_rx_mrk_cnt_s cn; */

/* CSR address: 0x103a0 + 8 * a.
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_RX_MRK_CNT(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_RX_MRK_CNT(u64 a)
return 0x103a0 + 8 * a;
/**
 * Register (RSL) cgx#_spu#_spd_abil
 *
 * CGX SPU PCS Speed Ability Registers
 */
/* NOTE(review): extraction dropped the other speed-ability members
 * (bits 0..6 apart from TWENTYFIVEGB; [RESERVED_7_63] implies bits 0..6
 * are all named), plus "u64 u;" and the closing "} s;" / "};".
 */
union cgxx_spux_spd_abil {
	struct cgxx_spux_spd_abil_s {
		u64 twentyfivegb : 1;
		u64 reserved_7_63 : 57;
	/* struct cgxx_spux_spd_abil_s cn; */

/* CSR address: 0x10010 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_SPD_ABIL(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_SPD_ABIL(u64 a)
return 0x10010 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_status1
 *
 * CGX SPU Status 1 Registers
 */
/* NOTE(review): extraction dropped member lines covering bits 0..2 and
 * bit 7, plus "u64 u;" and the closing "} s;" / "};".
 */
union cgxx_spux_status1 {
	struct cgxx_spux_status1_s {
		u64 reserved_3_6 : 4;
		u64 rx_lpi_indication : 1;
		u64 tx_lpi_indication : 1;
		u64 rx_lpi_received : 1;
		u64 tx_lpi_received : 1;
		u64 reserved_12_63 : 52;
	/* struct cgxx_spux_status1_s cn; */

/* CSR address: 0x10008 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_STATUS1(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_STATUS1(u64 a)
return 0x10008 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_status2
 *
 * CGX SPU Status 2 Registers
 */
/* NOTE(review): extraction dropped several per-LPCS-type ability
 * members (bits 0..4, 6..7(?), 10..11(?), 14..15(?)) plus "u64 u;" and
 * the closing "} s;" / "};"; layout below is incomplete.
 */
union cgxx_spux_status2 {
	struct cgxx_spux_status2_s {
		u64 twentyfivegb_r : 1;
		u64 hundredgb_r : 1;
		u64 reserved_12_13 : 2;
		u64 reserved_16_63 : 48;
	/* struct cgxx_spux_status2_s cn; */

/* CSR address: 0x10020 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_STATUS2(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_STATUS2(u64 a)
return 0x10020 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_tx_lpi_timing
 *
 * INTERNAL: CGX SPU TX EEE LPI Timing Parameters Registers Reserved.
 * Internal: Transmit LPI timing parameters Tsl, Tql and Tul
 */
/* NOTE(review): extraction dropped the timing-parameter members (bits
 * 0..18, 32..43, 48..59 and 62..63), plus "u64 u;" and the closing
 * "} s;" / "};".
 */
union cgxx_spux_tx_lpi_timing {
	struct cgxx_spux_tx_lpi_timing_s {
		u64 reserved_19_31 : 13;
		u64 reserved_44_47 : 4;
		u64 reserved_60 : 1;
		u64 tx_lpi_ignore_twl : 1;
	/* struct cgxx_spux_tx_lpi_timing_s cn; */

/* CSR address: 0x10400 + 8 * a.
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_TX_LPI_TIMING(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_TX_LPI_TIMING(u64 a)
return 0x10400 + 8 * a;
/**
 * Register (RSL) cgx#_spu#_tx_lpi_timing2
 *
 * INTERNAL: CGX SPU TX EEE LPI Timing2 Parameters Registers Reserved.
 * Internal: This register specifies transmit LPI timer parameters.
 */
/* NOTE(review): extraction dropped the timer members (bits 0..7,
 * 12..23, 32..43 and 48..59 — only the reserved fields are visible),
 * plus "u64 u;" and the closing "} s;" / "};".
 */
union cgxx_spux_tx_lpi_timing2 {
	struct cgxx_spux_tx_lpi_timing2_s {
		u64 reserved_8_11 : 4;
		u64 reserved_24_31 : 8;
		u64 reserved_44_47 : 4;
		u64 reserved_60_63 : 4;
	/* struct cgxx_spux_tx_lpi_timing2_s cn; */

/* CSR address: 0x10440 + 8 * a.
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_TX_LPI_TIMING2(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_TX_LPI_TIMING2(u64 a)
return 0x10440 + 8 * a;
/**
 * Register (RSL) cgx#_spu#_tx_mrk_cnt
 *
 * CGX SPU Transmitter Marker Interval Count Control Registers
 */
/* NOTE(review): extraction dropped the member covering bits 0..19
 * (implied by [RESERVED_20_43]), plus "u64 u;" and the closing
 * "} s;" / "};".
 */
union cgxx_spux_tx_mrk_cnt {
	struct cgxx_spux_tx_mrk_cnt_s {
		u64 reserved_20_43 : 24;
		u64 by_mrk_100g : 1;
		u64 reserved_45_47 : 3;
		u64 ram_mrk_cnt : 8;
		u64 reserved_56_63 : 8;
	/* struct cgxx_spux_tx_mrk_cnt_s cn; */

/* CSR address: 0x10380 + 8 * a.
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_TX_MRK_CNT(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_TX_MRK_CNT(u64 a)
return 0x10380 + 8 * a;
/**
 * Register (RSL) cgx#_spu#_usx_an_adv
 *
 * CGX SPU USXGMII Autonegotiation Advertisement Registers Software
 * programs this register with the contents of the AN-link code word base
 * page to be transmitted during autonegotiation. Any write operations to
 * this register prior to completion of autonegotiation should be
 * followed by a renegotiation in order for the new values to take
 * effect. Once autonegotiation has completed, software can examine this
 * register along with CGX()_SPU()_USX_AN_ADV to determine the highest
 * common denominator technology. The format for this register is from
 * USXGMII Multiport specification section 1.1.2 Table 2.
 */
/* NOTE(review): extraction dropped member lines covering bit 0,
 * bits 8..12 and bit 15, plus "u64 u;" and the closing "} s;" / "};".
 */
union cgxx_spux_usx_an_adv {
	struct cgxx_spux_usx_an_adv_s {
		u64 reserved_1_6 : 6;
		u64 eee_clk_stop_abil : 1;
		u64 reserved_13_14 : 2;
		u64 reserved_16_63 : 48;
	/* struct cgxx_spux_usx_an_adv_s cn; */

/* CSR address: 0x101d0 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_USX_AN_ADV(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_USX_AN_ADV(u64 a)
return 0x101d0 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_usx_an_control
 *
 * CGX SPU USXGMII Autonegotiation Control Register
 */
/* NOTE(review): extraction dropped the control members at bits 9, 12
 * and 15 (only the reserved fields are visible), plus "u64 u;" and the
 * closing "} s;" / "};".
 */
union cgxx_spux_usx_an_control {
	struct cgxx_spux_usx_an_control_s {
		u64 reserved_0_8 : 9;
		u64 reserved_10_11 : 2;
		u64 reserved_13_14 : 2;
		u64 reserved_16_63 : 48;
	/* struct cgxx_spux_usx_an_control_s cn; */

/* CSR address: 0x101c0 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_USX_AN_CONTROL(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_USX_AN_CONTROL(u64 a)
return 0x101c0 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_usx_an_expansion
 *
 * CGX SPU USXGMII Autonegotiation Expansion Register This register is
 * only used to signal page reception.
 */
/* NOTE(review): extraction dropped the bit-0 member (AN_PAGE_RECEIVED
 * starts at bit 1; [RESERVED_3_63] confirms bits 0..2 are defined),
 * plus "u64 u;" and the closing "} s;" / "};".
 */
union cgxx_spux_usx_an_expansion {
	struct cgxx_spux_usx_an_expansion_s {
		u64 an_page_received : 1;
		u64 next_page_able : 1;
		u64 reserved_3_63 : 61;
	/* struct cgxx_spux_usx_an_expansion_s cn; */

/* CSR address: 0x101e0 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_USX_AN_EXPANSION(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_USX_AN_EXPANSION(u64 a)
return 0x101e0 + 0x40000 * a;
7612 * Register (RSL) cgx#_spu#_usx_an_flow_ctrl
7614 * CGX SPU USXGMII Flow Control Registers This register is used by
7615 * software to affect USXGMII AN hardware behavior.
7617 union cgxx_spux_usx_an_flow_ctrl {
7619 struct cgxx_spux_usx_an_flow_ctrl_s {
7620 u64 start_idle_detect : 1;
7621 u64 reserved_1_63 : 63;
7623 /* struct cgxx_spux_usx_an_flow_ctrl_s cn; */
7626 static inline u64 CGXX_SPUX_USX_AN_FLOW_CTRL(u64 a)
7627 __attribute__ ((pure, always_inline));
7628 static inline u64 CGXX_SPUX_USX_AN_FLOW_CTRL(u64 a)
7630 return 0x101e8 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_usx_an_link_timer
 *
 * CGX SPU USXGMII Link Timer Registers This is the link timer register.
 */
/* NOTE(review): extraction dropped the timer-count member covering bits
 * 0..15 (implied by [RESERVED_16_63]), plus "u64 u;" and the closing
 * "} s;" / "};".
 */
union cgxx_spux_usx_an_link_timer {
	struct cgxx_spux_usx_an_link_timer_s {
		u64 reserved_16_63 : 48;
	/* struct cgxx_spux_usx_an_link_timer_s cn; */

/* CSR address: 0x101f0 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_USX_AN_LINK_TIMER(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_USX_AN_LINK_TIMER(u64 a)
return 0x101f0 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_usx_an_lp_abil
 *
 * CGX SPU USXGMII Autonegotiation Link-Partner Advertisement Registers
 * This register captures the contents of the latest AN link code word
 * base page received from the link partner during autonegotiation. This
 * is register 5 per IEEE 802.3, Clause 37.
 * CGX()_SPU()_USX_AN_EXPANSION[AN_PAGE_RECEIVED] is set when this
 * register is updated by hardware.
 */
/* NOTE(review): extraction dropped member lines covering bit 0,
 * bits 8..12 and bit 15 (mirrors CGX()_SPU()_USX_AN_ADV), plus
 * "u64 u;" and the closing "} s;" / "};".
 */
union cgxx_spux_usx_an_lp_abil {
	struct cgxx_spux_usx_an_lp_abil_s {
		u64 reserved_1_6 : 6;
		u64 eee_clk_stop_abil : 1;
		u64 reserved_13_14 : 2;
		u64 reserved_16_63 : 48;
	/* struct cgxx_spux_usx_an_lp_abil_s cn; */

/* CSR address: 0x101d8 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_USX_AN_LP_ABIL(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_USX_AN_LP_ABIL(u64 a)
return 0x101d8 + 0x40000 * a;
/**
 * Register (RSL) cgx#_spu#_usx_an_status
 *
 * CGX SPU USXGMII Autonegotiation Status Register
 */
/* NOTE(review): extraction dropped all status members (bits 0..5,
 * implied by [RESERVED_6_63]), plus "u64 u;" and the closing
 * "} s;" / "};".
 */
union cgxx_spux_usx_an_status {
	struct cgxx_spux_usx_an_status_s {
		u64 reserved_6_63 : 58;
	/* struct cgxx_spux_usx_an_status_s cn; */

/* CSR address: 0x101c8 + 0x40000 * a (a presumably the LMAC index).
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPUX_USX_AN_STATUS(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPUX_USX_AN_STATUS(u64 a)
return 0x101c8 + 0x40000 * a;
7714 * Register (RSL) cgx#_spu_dbg_control
7716 * CGX SPU Debug Control Registers
7718 union cgxx_spu_dbg_control {
7720 struct cgxx_spu_dbg_control_s {
7721 u64 marker_rxp : 15;
7722 u64 reserved_15 : 1;
7723 u64 scramble_dis : 1;
7724 u64 reserved_17_18 : 2;
7725 u64 br_pmd_train_soft_en : 1;
7726 u64 reserved_20_27 : 8;
7727 u64 timestamp_norm_dis : 1;
7728 u64 an_nonce_match_dis : 1;
7729 u64 br_ber_mon_dis : 1;
7730 u64 rf_cw_mon_erly_restart_dis : 1;
7731 u64 us_clk_period : 12;
7732 u64 ms_clk_period : 12;
7733 u64 reserved_56_63 : 8;
7735 struct cgxx_spu_dbg_control_cn96xxp1 {
7736 u64 marker_rxp : 15;
7737 u64 reserved_15 : 1;
7738 u64 scramble_dis : 1;
7739 u64 reserved_17_18 : 2;
7740 u64 br_pmd_train_soft_en : 1;
7741 u64 reserved_20_27 : 8;
7742 u64 timestamp_norm_dis : 1;
7743 u64 an_nonce_match_dis : 1;
7744 u64 br_ber_mon_dis : 1;
7745 u64 reserved_31 : 1;
7746 u64 us_clk_period : 12;
7747 u64 ms_clk_period : 12;
7748 u64 reserved_56_63 : 8;
7750 /* struct cgxx_spu_dbg_control_s cn96xxp3; */
7751 /* struct cgxx_spu_dbg_control_cn96xxp1 cnf95xxp1; */
7752 /* struct cgxx_spu_dbg_control_s cnf95xxp2; */
/* Address accessor for the single per-CGX SPU debug-control CSR.
 * NOTE(review): the function body (braces and the fixed-offset return)
 * is not visible in this chunk — restore it from the generated original.
 */
static inline u64 CGXX_SPU_DBG_CONTROL(void)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPU_DBG_CONTROL(void)
7763 * Register (RSL) cgx#_spu_sds#_skew_status
7765 * CGX SPU SerDes Lane Skew Status Registers This register provides
7766 * SerDes lane skew status. One register per physical SerDes lane.
7768 union cgxx_spu_sdsx_skew_status {
7770 struct cgxx_spu_sdsx_skew_status_s {
7771 u64 skew_status : 32;
7772 u64 reserved_32_63 : 32;
7774 /* struct cgxx_spu_sdsx_skew_status_s cn; */
7777 static inline u64 CGXX_SPU_SDSX_SKEW_STATUS(u64 a)
7778 __attribute__ ((pure, always_inline));
7779 static inline u64 CGXX_SPU_SDSX_SKEW_STATUS(u64 a)
7781 return 0x10340 + 8 * a;
/**
 * Register (RSL) cgx#_spu_sds#_states
 *
 * CGX SPU SerDes States Registers This register provides SerDes lane
 * states. One register per physical SerDes lane.
 */
/* NOTE(review): extraction dropped member lines (low bits before
 * BR_BLOCK_LOCK, bits 30..34(?), bit 44, and bits 48..49(?)), plus
 * "u64 u;" and the closing "} s;" / "};"; layout below is incomplete.
 */
union cgxx_spu_sdsx_states {
	struct cgxx_spu_sdsx_states_s {
		u64 br_block_lock : 1;
		u64 br_sh_invld_cnt : 7;
		u64 reserved_23 : 1;
		u64 fec_sync_cnt : 4;
		u64 fec_block_sync : 1;
		u64 reserved_29 : 1;
		u64 reserved_35 : 1;
		u64 train_lock_bad_markers : 3;
		u64 train_lock_found_1st_marker : 1;
		u64 train_frame_lock : 1;
		u64 train_code_viol : 1;
		u64 reserved_45_47 : 3;
		u64 am_lock_invld_cnt : 2;
		u64 reserved_52_63 : 12;
	/* struct cgxx_spu_sdsx_states_s cn; */

/* CSR address for physical SerDes lane a: 0x10360 + 8 * a.
 * NOTE(review): function-body braces missing in this chunk.
 */
static inline u64 CGXX_SPU_SDSX_STATES(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPU_SDSX_STATES(u64 a)
return 0x10360 + 8 * a;
/**
 * Register (RSL) cgx#_spu_usxgmii_control
 *
 * CGX SPU Common USXGMII Control Register This register is the common
 * control register that enables USXGMII Mode. The fields in this
 * register are preserved across any LMAC soft-resets. For an LMAC in
 * soft- reset state in USXGMII mode, the CGX will transmit Remote Fault
 * [... remainder of this description not visible in this chunk ...]
 */
/* NOTE(review): extraction dropped member lines at bit 0(?) and bits
 * 4..5(?) around USXGMII_TYPE, plus "u64 u;" and the closing
 * "} s;" / "};".
 */
union cgxx_spu_usxgmii_control {
	struct cgxx_spu_usxgmii_control_s {
		u64 usxgmii_type : 3;
		u64 reserved_6_63 : 58;
	/* struct cgxx_spu_usxgmii_control_s cn; */

/* Address accessor for the single per-CGX USXGMII control CSR.
 * NOTE(review): the function body (braces and the fixed-offset return)
 * is not visible in this chunk — restore it from the generated original.
 */
static inline u64 CGXX_SPU_USXGMII_CONTROL(void)
	__attribute__ ((pure, always_inline));
static inline u64 CGXX_SPU_USXGMII_CONTROL(void)
7851 #endif /* __CSRS_CGX_H__ */