/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 */

#ifndef __CVMX_LMCX_DEFS_H__
#define __CVMX_LMCX_DEFS_H__
/*
 * LMC (local memory controller / DDR interface) CSR byte-offset macros.
 *
 * Each macro yields the register offset for LMC interface 'offs'.
 * Two stride patterns appear:
 *   - ((offs) & 1) * 0x60000000 : registers present on older two-interface
 *     parts (NOTE(review): stride inferred from the macro text — confirm
 *     against the Octeon HRM).
 *   - ((offs) & 3) * 0x1000000  : registers on newer four-interface parts.
 * The DIMMX_* macros additionally take a DIMM index 'id'.
 *
 * (Stray line-number residue from a broken extraction has been removed;
 * the offsets themselves are unchanged.)
 */
#define CVMX_LMCX_BANK_CONFLICT1(offs) \
	((0x000360ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_BANK_CONFLICT2(offs) \
	((0x000368ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_BIST_RESULT(offs) \
	((0x0000F8ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_CHAR_CTL(offs) \
	((0x000220ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_CHAR_DQ_ERR_COUNT(offs) \
	((0x000040ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_CHAR_MASK0(offs) \
	((0x000228ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_CHAR_MASK1(offs) \
	((0x000230ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_CHAR_MASK2(offs) \
	((0x000238ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_CHAR_MASK3(offs) \
	((0x000240ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_CHAR_MASK4(offs) \
	((0x000318ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_COMP_CTL(offs) \
	((0x000028ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_COMP_CTL2(offs) \
	((0x0001B8ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_CONFIG(offs) \
	((0x000188ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_CONTROL(offs) \
	((0x000190ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_CTL(offs) \
	((0x000010ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_CTL1(offs) \
	((0x000090ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_DBTRAIN_CTL(offs) \
	((0x0003F8ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_DCLK_CNT(offs) \
	((0x0001E0ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_DCLK_CNT_HI(offs) \
	((0x000070ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_DCLK_CNT_LO(offs) \
	((0x000068ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_DCLK_CTL(offs) \
	((0x0000B8ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_DDR2_CTL(offs) \
	((0x000018ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_DDR4_DIMM_CTL(offs) \
	((0x0003F0ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_DDR_PLL_CTL(offs) \
	((0x000258ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_DELAY_CFG(offs) \
	((0x000088ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_DIMMX_DDR4_PARAMS0(offs, id) \
	((0x0000D0ull) + (((offs) & 1) + ((id) & 3) * 0x200000ull) * 8)
#define CVMX_LMCX_DIMMX_DDR4_PARAMS1(offs, id) \
	((0x000140ull) + (((offs) & 1) + ((id) & 3) * 0x200000ull) * 8)
#define CVMX_LMCX_DIMMX_PARAMS(offs, id) \
	((0x000270ull) + (((offs) & 1) + ((id) & 3) * 0x200000ull) * 8)
#define CVMX_LMCX_DIMM_CTL(offs) \
	((0x000310ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_DLL_CTL(offs) \
	((0x0000C0ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_DLL_CTL2(offs) \
	((0x0001C8ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_DLL_CTL3(offs) \
	((0x000218ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_ECC_PARITY_TEST(offs) \
	((0x000108ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_EXT_CONFIG(offs) \
	((0x000030ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_EXT_CONFIG2(offs) \
	((0x000090ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_GENERAL_PURPOSE0(offs) \
	((0x000340ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_GENERAL_PURPOSE1(offs) \
	((0x000348ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_GENERAL_PURPOSE2(offs) \
	((0x000350ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_IFB_CNT(offs) \
	((0x0001D0ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_IFB_CNT_HI(offs) \
	((0x000050ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_IFB_CNT_LO(offs) \
	((0x000048ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_INT(offs) \
	((0x0001F0ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_INT_EN(offs) \
	((0x0001E8ull) + ((offs) & 3) * 0x1000000ull)
/*
 * Per-lane CRC swizzle control CSR offset for lane 'offs' (0-15) on
 * interface 'id'.
 * BUG FIX: the parameter was declared as 'x' while the body referenced
 * 'offs', so every expansion failed to compile ('offs' undeclared).
 * Rename the parameter to 'offs' to match the body.
 */
#define CVMX_LMCX_LANEX_CRC_SWIZ(offs, id) \
	((0x000380ull) + (((offs) & 15) + ((id) & 3) * 0x200000ull) * 8)
/*
 * LMC CSR byte-offset macros, continued (MEM_CFG0 .. NS_CTL).
 * Same stride conventions as the block above.  Stray line-number
 * residue from the broken extraction removed; offsets unchanged.
 */
#define CVMX_LMCX_MEM_CFG0(offs) \
	((0x000000ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_MEM_CFG1(offs) \
	((0x000008ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_MODEREG_PARAMS0(offs) \
	((0x0001A8ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_MODEREG_PARAMS1(offs) \
	((0x000260ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_MODEREG_PARAMS2(offs) \
	((0x000050ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_MODEREG_PARAMS3(offs) \
	((0x000058ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_MPR_DATA0(offs) \
	((0x000070ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_MPR_DATA1(offs) \
	((0x000078ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_MPR_DATA2(offs) \
	((0x000080ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_MR_MPR_CTL(offs) \
	((0x000068ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_NS_CTL(offs) \
	((0x000178ull) + ((offs) & 3) * 0x1000000ull)
120 static inline uint64_t CVMX_LMCX_NXM(unsigned long offs)
122 switch (cvmx_get_octeon_family()) {
123 case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
124 case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
125 case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
126 case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
127 case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
128 return (0x0000C8ull) + (offs) * 0x60000000ull;
129 case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
130 case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
131 return (0x0000C8ull) + (offs) * 0x1000000ull;
132 case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
133 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
134 return (0x0000C8ull) + (offs) * 0x1000000ull;
135 if (OCTEON_IS_MODEL(OCTEON_CN78XX))
136 return (0x0000C8ull) + (offs) * 0x1000000ull;
137 case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
138 return (0x0000C8ull) + (offs) * 0x1000000ull;
140 return (0x0000C8ull) + (offs) * 0x1000000ull;
/*
 * LMC CSR byte-offset macros, continued (NXM_FADR .. WLEVEL_DBG).
 * Same stride conventions as the earlier macro blocks.  Extraction
 * residue (fused line numbers) removed; offsets unchanged.
 */
#define CVMX_LMCX_NXM_FADR(offs) \
	((0x000028ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_OPS_CNT(offs) \
	((0x0001D8ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_OPS_CNT_HI(offs) \
	((0x000060ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_OPS_CNT_LO(offs) \
	((0x000058ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_PHY_CTL(offs) \
	((0x000210ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_PHY_CTL2(offs) \
	((0x000250ull) + ((offs) & 3) * 0x1000000ull)
/*
 * BUG FIX: the body line of PLL_BWCTL was missing, leaving a dangling
 * '\' continuation that swallowed the PLL_CTL definition below and
 * broke compilation.  Restored to the fixed 0x40 offset used by the
 * single-LMC chips that implement this register.
 * NOTE(review): offset reconstructed from the corresponding Linux
 * cvmx-lmcx-defs.h definition - confirm against the HRM.
 */
#define CVMX_LMCX_PLL_BWCTL(offs) \
	((0x000040ull))
#define CVMX_LMCX_PLL_CTL(offs) \
	((0x0000A8ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_PLL_STATUS(offs) \
	((0x0000B0ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_PPR_CTL(offs) \
	((0x0003E0ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_READ_LEVEL_CTL(offs) \
	((0x000140ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_READ_LEVEL_DBG(offs) \
	((0x000148ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_READ_LEVEL_RANKX(offs, id) \
	((0x000100ull) + (((offs) & 3) + ((id) & 1) * 0xC000000ull) * 8)
#define CVMX_LMCX_REF_STATUS(offs) \
	((0x0000A0ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_RESET_CTL(offs) \
	((0x000180ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_RETRY_CONFIG(offs) \
	((0x000110ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_RETRY_STATUS(offs) \
	((0x000118ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_RLEVEL_CTL(offs) \
	((0x0002A0ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_RLEVEL_DBG(offs) \
	((0x0002A8ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_RLEVEL_RANKX(offs, id) \
	((0x000280ull) + (((offs) & 3) + ((id) & 3) * 0x200000ull) * 8)
#define CVMX_LMCX_RODT_COMP_CTL(offs) \
	((0x0000A0ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_RODT_CTL(offs) \
	((0x000078ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_RODT_MASK(offs) \
	((0x000268ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_SCRAMBLED_FADR(offs) \
	((0x000330ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_SCRAMBLE_CFG0(offs) \
	((0x000320ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_SCRAMBLE_CFG1(offs) \
	((0x000328ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_SCRAMBLE_CFG2(offs) \
	((0x000338ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_SEQ_CTL(offs) \
	((0x000048ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_SLOT_CTL0(offs) \
	((0x0001F8ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_SLOT_CTL1(offs) \
	((0x000200ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_SLOT_CTL2(offs) \
	((0x000208ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_SLOT_CTL3(offs) \
	((0x000248ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_TIMING_PARAMS0(offs) \
	((0x000198ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_TIMING_PARAMS1(offs) \
	((0x0001A0ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_TIMING_PARAMS2(offs) \
	((0x000060ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_TRO_CTL(offs) \
	((0x000248ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_TRO_STAT(offs) \
	((0x000250ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_WLEVEL_CTL(offs) \
	((0x000300ull) + ((offs) & 3) * 0x1000000ull)
#define CVMX_LMCX_WLEVEL_DBG(offs) \
	((0x000308ull) + ((offs) & 3) * 0x1000000ull)
222 static inline uint64_t CVMX_LMCX_WLEVEL_RANKX(unsigned long offs,
225 switch (cvmx_get_octeon_family()) {
226 case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
227 return (0x0002C0ull) + ((offs) + (id) * 0x200000ull) * 8;
228 case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
229 case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
230 return (0x0002C0ull) + ((offs) + (id) * 0x200000ull) * 8;
231 case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
232 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
233 return (0x0002C0ull) + ((offs) +
234 (id) * 0x200000ull) * 8;
235 if (OCTEON_IS_MODEL(OCTEON_CN78XX))
236 return (0x0002C0ull) + ((offs) +
237 (id) * 0x200000ull) * 8;
239 case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
240 case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
241 return (0x0002B0ull) + ((offs) + (id) * 0x0ull) * 8;
242 case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
243 case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
244 return (0x0002B0ull) + ((offs) + (id) * 0x200000ull) * 8;
245 case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
246 return (0x0002B0ull) + ((offs) + (id) * 0x200000ull) * 8;
248 return (0x0002C0ull) + ((offs) + (id) * 0x200000ull) * 8;
/*
 * LMC write-ODT control/mask CSR offsets.  Same stride conventions as
 * the earlier macro blocks; extraction residue removed, offsets
 * unchanged.
 */
#define CVMX_LMCX_WODT_CTL0(offs) \
	((0x000030ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_WODT_CTL1(offs) \
	((0x000080ull) + ((offs) & 1) * 0x60000000ull)
#define CVMX_LMCX_WODT_MASK(offs) \
	((0x0001B0ull) + ((offs) & 3) * 0x1000000ull)
261 * This register provides an assortment of various control fields needed
262 * to characterize the DDR3 interface.
264 union cvmx_lmcx_char_ctl {
266 struct cvmx_lmcx_char_ctl_s {
267 uint64_t reserved_54_63:10;
268 uint64_t dq_char_byte_check:1;
269 uint64_t dq_char_check_lock:1;
270 uint64_t dq_char_check_enable:1;
271 uint64_t dq_char_bit_sel:3;
272 uint64_t dq_char_byte_sel:4;
280 struct cvmx_lmcx_char_ctl_cn61xx {
281 uint64_t reserved_44_63:20;
289 struct cvmx_lmcx_char_ctl_cn63xx {
290 uint64_t reserved_42_63:22;
296 struct cvmx_lmcx_char_ctl_cn63xx cn63xxp1;
297 struct cvmx_lmcx_char_ctl_cn61xx cn66xx;
298 struct cvmx_lmcx_char_ctl_cn61xx cn68xx;
299 struct cvmx_lmcx_char_ctl_cn63xx cn68xxp1;
300 struct cvmx_lmcx_char_ctl_cn70xx {
301 uint64_t reserved_53_63:11;
302 uint64_t dq_char_check_lock:1;
303 uint64_t dq_char_check_enable:1;
304 uint64_t dq_char_bit_sel:3;
305 uint64_t dq_char_byte_sel:4;
313 struct cvmx_lmcx_char_ctl_cn70xx cn70xxp1;
314 struct cvmx_lmcx_char_ctl_s cn73xx;
315 struct cvmx_lmcx_char_ctl_s cn78xx;
316 struct cvmx_lmcx_char_ctl_s cn78xxp1;
317 struct cvmx_lmcx_char_ctl_cn61xx cnf71xx;
318 struct cvmx_lmcx_char_ctl_s cnf75xx;
322 * cvmx_lmc#_comp_ctl2
324 * LMC_COMP_CTL2 = LMC Compensation control
327 union cvmx_lmcx_comp_ctl2 {
329 struct cvmx_lmcx_comp_ctl2_s {
330 uint64_t reserved_51_63:13;
331 uint64_t rclk_char_mode:1;
332 uint64_t reserved_40_49:10;
333 uint64_t ptune_offset:4;
334 uint64_t reserved_12_35:24;
339 struct cvmx_lmcx_comp_ctl2_cn61xx {
340 uint64_t reserved_34_63:30;
341 uint64_t ddr__ptune:4;
342 uint64_t ddr__ntune:4;
352 struct cvmx_lmcx_comp_ctl2_cn61xx cn63xx;
353 struct cvmx_lmcx_comp_ctl2_cn61xx cn63xxp1;
354 struct cvmx_lmcx_comp_ctl2_cn61xx cn66xx;
355 struct cvmx_lmcx_comp_ctl2_cn61xx cn68xx;
356 struct cvmx_lmcx_comp_ctl2_cn61xx cn68xxp1;
357 struct cvmx_lmcx_comp_ctl2_cn70xx {
358 uint64_t reserved_51_63:13;
359 uint64_t rclk_char_mode:1;
360 uint64_t ddr__ptune:5;
361 uint64_t ddr__ntune:5;
362 uint64_t ptune_offset:4;
363 uint64_t ntune_offset:4;
369 uint64_t control_ctl:4;
374 struct cvmx_lmcx_comp_ctl2_cn70xx cn70xxp1;
375 struct cvmx_lmcx_comp_ctl2_cn70xx cn73xx;
376 struct cvmx_lmcx_comp_ctl2_cn70xx cn78xx;
377 struct cvmx_lmcx_comp_ctl2_cn70xx cn78xxp1;
378 struct cvmx_lmcx_comp_ctl2_cn61xx cnf71xx;
379 struct cvmx_lmcx_comp_ctl2_cn70xx cnf75xx;
385 * This register controls certain parameters required for memory configuration.
386 * Note the following:
387 * * Priority order for hardware write operations to
388 * LMC()_CONFIG/LMC()_FADR/LMC()_ECC_SYND: DED error > SEC error.
389 * * The self-refresh entry sequence(s) power the DLL up/down (depending on
390 * LMC()_MODEREG_PARAMS0[DLL]) when LMC()_CONFIG[SREF_WITH_DLL] is set.
391 * * Prior to the self-refresh exit sequence, LMC()_MODEREG_PARAMS0 should
393 * (if needed) to the appropriate values.
395 * See LMC initialization sequence for the LMC bringup sequence.
397 union cvmx_lmcx_config {
399 struct cvmx_lmcx_config_s {
400 uint64_t lrdimm_ena:1;
401 uint64_t bg2_enable:1;
402 uint64_t mode_x4dev:1;
405 uint64_t early_unload_d1_r1:1;
406 uint64_t early_unload_d1_r0:1;
407 uint64_t early_unload_d0_r1:1;
408 uint64_t early_unload_d0_r0:1;
409 uint64_t init_status:4;
413 uint64_t sref_with_dll:1;
414 uint64_t early_dqx:1;
415 uint64_t reserved_18_39:22;
418 uint64_t forcewrite:4;
419 uint64_t idlepower:3;
420 uint64_t pbank_lsb:4;
423 uint64_t init_start:1;
425 struct cvmx_lmcx_config_cn61xx {
426 uint64_t reserved_61_63:3;
429 uint64_t early_unload_d1_r1:1;
430 uint64_t early_unload_d1_r0:1;
431 uint64_t early_unload_d0_r1:1;
432 uint64_t early_unload_d0_r0:1;
433 uint64_t init_status:4;
437 uint64_t sref_with_dll:1;
438 uint64_t early_dqx:1;
440 uint64_t ref_zqcs_int:19;
443 uint64_t forcewrite:4;
444 uint64_t idlepower:3;
445 uint64_t pbank_lsb:4;
448 uint64_t init_start:1;
450 struct cvmx_lmcx_config_cn63xx {
451 uint64_t reserved_59_63:5;
452 uint64_t early_unload_d1_r1:1;
453 uint64_t early_unload_d1_r0:1;
454 uint64_t early_unload_d0_r1:1;
455 uint64_t early_unload_d0_r0:1;
456 uint64_t init_status:4;
460 uint64_t sref_with_dll:1;
461 uint64_t early_dqx:1;
463 uint64_t ref_zqcs_int:19;
466 uint64_t forcewrite:4;
467 uint64_t idlepower:3;
468 uint64_t pbank_lsb:4;
471 uint64_t init_start:1;
473 struct cvmx_lmcx_config_cn63xxp1 {
474 uint64_t reserved_55_63:9;
475 uint64_t init_status:4;
479 uint64_t sref_with_dll:1;
480 uint64_t early_dqx:1;
482 uint64_t ref_zqcs_int:19;
485 uint64_t forcewrite:4;
486 uint64_t idlepower:3;
487 uint64_t pbank_lsb:4;
490 uint64_t init_start:1;
492 struct cvmx_lmcx_config_cn66xx {
493 uint64_t reserved_60_63:4;
495 uint64_t early_unload_d1_r1:1;
496 uint64_t early_unload_d1_r0:1;
497 uint64_t early_unload_d0_r1:1;
498 uint64_t early_unload_d0_r0:1;
499 uint64_t init_status:4;
503 uint64_t sref_with_dll:1;
504 uint64_t early_dqx:1;
506 uint64_t ref_zqcs_int:19;
509 uint64_t forcewrite:4;
510 uint64_t idlepower:3;
511 uint64_t pbank_lsb:4;
514 uint64_t init_start:1;
516 struct cvmx_lmcx_config_cn63xx cn68xx;
517 struct cvmx_lmcx_config_cn63xx cn68xxp1;
518 struct cvmx_lmcx_config_cn70xx {
519 uint64_t reserved_63_63:1;
520 uint64_t bg2_enable:1;
521 uint64_t mode_x4dev:1;
524 uint64_t early_unload_d1_r1:1;
525 uint64_t early_unload_d1_r0:1;
526 uint64_t early_unload_d0_r1:1;
527 uint64_t early_unload_d0_r0:1;
528 uint64_t init_status:4;
532 uint64_t sref_with_dll:1;
533 uint64_t early_dqx:1;
534 uint64_t ref_zqcs_int:22;
537 uint64_t forcewrite:4;
538 uint64_t idlepower:3;
539 uint64_t pbank_lsb:4;
542 uint64_t reserved_0_0:1;
544 struct cvmx_lmcx_config_cn70xx cn70xxp1;
545 struct cvmx_lmcx_config_cn73xx {
546 uint64_t lrdimm_ena:1;
547 uint64_t bg2_enable:1;
548 uint64_t mode_x4dev:1;
551 uint64_t early_unload_d1_r1:1;
552 uint64_t early_unload_d1_r0:1;
553 uint64_t early_unload_d0_r1:1;
554 uint64_t early_unload_d0_r0:1;
555 uint64_t init_status:4;
559 uint64_t sref_with_dll:1;
560 uint64_t early_dqx:1;
561 uint64_t ref_zqcs_int:22;
564 uint64_t forcewrite:4;
565 uint64_t idlepower:3;
566 uint64_t pbank_lsb:4;
569 uint64_t reserved_0_0:1;
571 struct cvmx_lmcx_config_cn73xx cn78xx;
572 struct cvmx_lmcx_config_cn73xx cn78xxp1;
573 struct cvmx_lmcx_config_cn61xx cnf71xx;
574 struct cvmx_lmcx_config_cn73xx cnf75xx;
580 * LMC_CONTROL = LMC Control
581 * This register is an assortment of various control fields needed by the
584 union cvmx_lmcx_control {
586 struct cvmx_lmcx_control_s {
587 uint64_t scramble_ena:1;
594 uint64_t rodt_bprch:1;
595 uint64_t wodt_bprch:1;
597 uint64_t ext_zqcs_dis:1;
598 uint64_t int_zqcs_dis:1;
599 uint64_t auto_dclkdis:1;
601 uint64_t max_write_batch:4;
602 uint64_t nxm_write_en:1;
603 uint64_t elev_prio_dis:1;
604 uint64_t inorder_wr:1;
605 uint64_t inorder_rd:1;
606 uint64_t throttle_wr:1;
607 uint64_t throttle_rd:1;
612 uint64_t rdimm_ena:1;
614 struct cvmx_lmcx_control_s cn61xx;
615 struct cvmx_lmcx_control_cn63xx {
616 uint64_t reserved_24_63:40;
617 uint64_t rodt_bprch:1;
618 uint64_t wodt_bprch:1;
620 uint64_t ext_zqcs_dis:1;
621 uint64_t int_zqcs_dis:1;
622 uint64_t auto_dclkdis:1;
624 uint64_t max_write_batch:4;
625 uint64_t nxm_write_en:1;
626 uint64_t elev_prio_dis:1;
627 uint64_t inorder_wr:1;
628 uint64_t inorder_rd:1;
629 uint64_t throttle_wr:1;
630 uint64_t throttle_rd:1;
635 uint64_t rdimm_ena:1;
637 struct cvmx_lmcx_control_cn63xx cn63xxp1;
638 struct cvmx_lmcx_control_cn66xx {
639 uint64_t scramble_ena:1;
640 uint64_t reserved_24_62:39;
641 uint64_t rodt_bprch:1;
642 uint64_t wodt_bprch:1;
644 uint64_t ext_zqcs_dis:1;
645 uint64_t int_zqcs_dis:1;
646 uint64_t auto_dclkdis:1;
648 uint64_t max_write_batch:4;
649 uint64_t nxm_write_en:1;
650 uint64_t elev_prio_dis:1;
651 uint64_t inorder_wr:1;
652 uint64_t inorder_rd:1;
653 uint64_t throttle_wr:1;
654 uint64_t throttle_rd:1;
659 uint64_t rdimm_ena:1;
661 struct cvmx_lmcx_control_cn68xx {
662 uint64_t reserved_63_63:1;
669 uint64_t rodt_bprch:1;
670 uint64_t wodt_bprch:1;
672 uint64_t ext_zqcs_dis:1;
673 uint64_t int_zqcs_dis:1;
674 uint64_t auto_dclkdis:1;
676 uint64_t max_write_batch:4;
677 uint64_t nxm_write_en:1;
678 uint64_t elev_prio_dis:1;
679 uint64_t inorder_wr:1;
680 uint64_t inorder_rd:1;
681 uint64_t throttle_wr:1;
682 uint64_t throttle_rd:1;
687 uint64_t rdimm_ena:1;
689 struct cvmx_lmcx_control_cn68xx cn68xxp1;
690 struct cvmx_lmcx_control_s cn70xx;
691 struct cvmx_lmcx_control_s cn70xxp1;
692 struct cvmx_lmcx_control_s cn73xx;
693 struct cvmx_lmcx_control_s cn78xx;
694 struct cvmx_lmcx_control_s cn78xxp1;
695 struct cvmx_lmcx_control_cn66xx cnf71xx;
696 struct cvmx_lmcx_control_s cnf75xx;
702 * LMC_CTL = LMC Control
703 * This register is an assortment of various control fields needed by the
706 union cvmx_lmcx_ctl {
708 struct cvmx_lmcx_ctl_s {
709 uint64_t reserved_32_63:32;
710 uint64_t ddr__nctl:4;
711 uint64_t ddr__pctl:4;
714 uint64_t max_write_batch:4;
716 uint64_t pll_bypass:1;
717 uint64_t rdimm_ena:1;
719 uint64_t inorder_mwf:1;
720 uint64_t inorder_mrf:1;
721 uint64_t reserved_10_11:2;
729 struct cvmx_lmcx_ctl_cn30xx {
730 uint64_t reserved_32_63:32;
731 uint64_t ddr__nctl:4;
732 uint64_t ddr__pctl:4;
735 uint64_t max_write_batch:4;
737 uint64_t pll_bypass:1;
738 uint64_t rdimm_ena:1;
740 uint64_t inorder_mwf:1;
741 uint64_t inorder_mrf:1;
751 struct cvmx_lmcx_ctl_cn30xx cn31xx;
752 struct cvmx_lmcx_ctl_cn38xx {
753 uint64_t reserved_32_63:32;
754 uint64_t ddr__nctl:4;
755 uint64_t ddr__pctl:4;
758 uint64_t max_write_batch:4;
759 uint64_t reserved_16_17:2;
760 uint64_t rdimm_ena:1;
762 uint64_t inorder_mwf:1;
763 uint64_t inorder_mrf:1;
773 struct cvmx_lmcx_ctl_cn38xx cn38xxp2;
774 struct cvmx_lmcx_ctl_cn50xx {
775 uint64_t reserved_32_63:32;
776 uint64_t ddr__nctl:4;
777 uint64_t ddr__pctl:4;
780 uint64_t max_write_batch:4;
781 uint64_t reserved_17_17:1;
782 uint64_t pll_bypass:1;
783 uint64_t rdimm_ena:1;
785 uint64_t inorder_mwf:1;
786 uint64_t inorder_mrf:1;
796 struct cvmx_lmcx_ctl_cn52xx {
797 uint64_t reserved_32_63:32;
798 uint64_t ddr__nctl:4;
799 uint64_t ddr__pctl:4;
802 uint64_t max_write_batch:4;
803 uint64_t reserved_16_17:2;
804 uint64_t rdimm_ena:1;
806 uint64_t inorder_mwf:1;
807 uint64_t inorder_mrf:1;
817 struct cvmx_lmcx_ctl_cn52xx cn52xxp1;
818 struct cvmx_lmcx_ctl_cn52xx cn56xx;
819 struct cvmx_lmcx_ctl_cn52xx cn56xxp1;
820 struct cvmx_lmcx_ctl_cn58xx {
821 uint64_t reserved_32_63:32;
822 uint64_t ddr__nctl:4;
823 uint64_t ddr__pctl:4;
826 uint64_t max_write_batch:4;
827 uint64_t reserved_16_17:2;
828 uint64_t rdimm_ena:1;
830 uint64_t inorder_mwf:1;
831 uint64_t inorder_mrf:1;
841 struct cvmx_lmcx_ctl_cn58xx cn58xxp1;
847 * LMC_CTL1 = LMC Control1
848 * This register is an assortment of various control fields needed by the
851 union cvmx_lmcx_ctl1 {
853 struct cvmx_lmcx_ctl1_s {
854 uint64_t reserved_21_63:43;
856 uint64_t forcewrite:4;
857 uint64_t idlepower:3;
860 uint64_t dcc_enable:1;
861 uint64_t reserved_2_7:6;
862 uint64_t data_layout:2;
864 struct cvmx_lmcx_ctl1_cn30xx {
865 uint64_t reserved_2_63:62;
866 uint64_t data_layout:2;
868 struct cvmx_lmcx_ctl1_cn50xx {
869 uint64_t reserved_10_63:54;
871 uint64_t dcc_enable:1;
872 uint64_t reserved_2_7:6;
873 uint64_t data_layout:2;
875 struct cvmx_lmcx_ctl1_cn52xx {
876 uint64_t reserved_21_63:43;
878 uint64_t forcewrite:4;
879 uint64_t idlepower:3;
882 uint64_t dcc_enable:1;
883 uint64_t reserved_0_7:8;
885 struct cvmx_lmcx_ctl1_cn52xx cn52xxp1;
886 struct cvmx_lmcx_ctl1_cn52xx cn56xx;
887 struct cvmx_lmcx_ctl1_cn52xx cn56xxp1;
888 struct cvmx_lmcx_ctl1_cn58xx {
889 uint64_t reserved_10_63:54;
891 uint64_t dcc_enable:1;
892 uint64_t reserved_0_7:8;
894 struct cvmx_lmcx_ctl1_cn58xx cn58xxp1;
898 * cvmx_lmc#_dbtrain_ctl
903 union cvmx_lmcx_dbtrain_ctl {
905 struct cvmx_lmcx_dbtrain_ctl_s {
906 uint64_t reserved_63_63:1;
907 uint64_t lfsr_pattern_sel:1;
908 uint64_t cmd_count_ext:2;
909 uint64_t db_output_impedance:3;
913 uint64_t read_dq_count:7;
914 uint64_t read_cmd_count:5;
915 uint64_t write_ena:1;
922 uint64_t column_a:13;
924 struct cvmx_lmcx_dbtrain_ctl_cn73xx {
925 uint64_t reserved_60_63:4;
926 uint64_t db_output_impedance:3;
930 uint64_t read_dq_count:7;
931 uint64_t read_cmd_count:5;
932 uint64_t write_ena:1;
939 uint64_t column_a:13;
941 struct cvmx_lmcx_dbtrain_ctl_s cn78xx;
942 struct cvmx_lmcx_dbtrain_ctl_cnf75xx {
943 uint64_t reserved_62_63:2;
944 uint64_t cmd_count_ext:2;
945 uint64_t db_output_impedance:3;
949 uint64_t read_dq_count:7;
950 uint64_t read_cmd_count:5;
951 uint64_t write_ena:1;
958 uint64_t column_a:13;
965 * LMC_DCLK_CNT = Performance Counters
968 union cvmx_lmcx_dclk_cnt {
970 struct cvmx_lmcx_dclk_cnt_s {
973 struct cvmx_lmcx_dclk_cnt_s cn61xx;
974 struct cvmx_lmcx_dclk_cnt_s cn63xx;
975 struct cvmx_lmcx_dclk_cnt_s cn63xxp1;
976 struct cvmx_lmcx_dclk_cnt_s cn66xx;
977 struct cvmx_lmcx_dclk_cnt_s cn68xx;
978 struct cvmx_lmcx_dclk_cnt_s cn68xxp1;
979 struct cvmx_lmcx_dclk_cnt_s cn70xx;
980 struct cvmx_lmcx_dclk_cnt_s cn70xxp1;
981 struct cvmx_lmcx_dclk_cnt_s cn73xx;
982 struct cvmx_lmcx_dclk_cnt_s cn78xx;
983 struct cvmx_lmcx_dclk_cnt_s cn78xxp1;
984 struct cvmx_lmcx_dclk_cnt_s cnf71xx;
985 struct cvmx_lmcx_dclk_cnt_s cnf75xx;
989 * cvmx_lmc#_dclk_cnt_hi
991 * LMC_DCLK_CNT_HI = Performance Counters
994 union cvmx_lmcx_dclk_cnt_hi {
996 struct cvmx_lmcx_dclk_cnt_hi_s {
997 uint64_t reserved_32_63:32;
998 uint64_t dclkcnt_hi:32;
1000 struct cvmx_lmcx_dclk_cnt_hi_s cn30xx;
1001 struct cvmx_lmcx_dclk_cnt_hi_s cn31xx;
1002 struct cvmx_lmcx_dclk_cnt_hi_s cn38xx;
1003 struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2;
1004 struct cvmx_lmcx_dclk_cnt_hi_s cn50xx;
1005 struct cvmx_lmcx_dclk_cnt_hi_s cn52xx;
1006 struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1;
1007 struct cvmx_lmcx_dclk_cnt_hi_s cn56xx;
1008 struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1;
1009 struct cvmx_lmcx_dclk_cnt_hi_s cn58xx;
1010 struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1;
1014 * cvmx_lmc#_dclk_cnt_lo
1016 * LMC_DCLK_CNT_LO = Performance Counters
1019 union cvmx_lmcx_dclk_cnt_lo {
1021 struct cvmx_lmcx_dclk_cnt_lo_s {
1022 uint64_t reserved_32_63:32;
1023 uint64_t dclkcnt_lo:32;
1025 struct cvmx_lmcx_dclk_cnt_lo_s cn30xx;
1026 struct cvmx_lmcx_dclk_cnt_lo_s cn31xx;
1027 struct cvmx_lmcx_dclk_cnt_lo_s cn38xx;
1028 struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2;
1029 struct cvmx_lmcx_dclk_cnt_lo_s cn50xx;
1030 struct cvmx_lmcx_dclk_cnt_lo_s cn52xx;
1031 struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1;
1032 struct cvmx_lmcx_dclk_cnt_lo_s cn56xx;
1033 struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1;
1034 struct cvmx_lmcx_dclk_cnt_lo_s cn58xx;
1035 struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1;
1039 * cvmx_lmc#_dclk_ctl
1041 * LMC_DCLK_CTL = LMC DCLK generation control
1045 * This CSR is only relevant for LMC1. LMC0_DCLK_CTL is not used.
1048 union cvmx_lmcx_dclk_ctl {
1050 struct cvmx_lmcx_dclk_ctl_s {
1051 uint64_t reserved_8_63:56;
1052 uint64_t off90_ena:1;
1053 uint64_t dclk90_byp:1;
1054 uint64_t dclk90_ld:1;
1055 uint64_t dclk90_vlu:5;
1057 struct cvmx_lmcx_dclk_ctl_s cn56xx;
1058 struct cvmx_lmcx_dclk_ctl_s cn56xxp1;
1062 * cvmx_lmc#_ddr2_ctl
1064 * LMC_DDR2_CTL = LMC DDR2 & DLL Control Register
1067 union cvmx_lmcx_ddr2_ctl {
1069 struct cvmx_lmcx_ddr2_ctl_s {
1070 uint64_t reserved_32_63:32;
1080 uint64_t crip_mode:1;
1083 uint64_t qdll_ena:1;
1084 uint64_t dll90_vlu:5;
1085 uint64_t dll90_byp:1;
1089 struct cvmx_lmcx_ddr2_ctl_cn30xx {
1090 uint64_t reserved_32_63:32;
1100 uint64_t crip_mode:1;
1103 uint64_t qdll_ena:1;
1104 uint64_t dll90_vlu:5;
1105 uint64_t dll90_byp:1;
1106 uint64_t reserved_1_1:1;
1109 struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx;
1110 struct cvmx_lmcx_ddr2_ctl_s cn38xx;
1111 struct cvmx_lmcx_ddr2_ctl_s cn38xxp2;
1112 struct cvmx_lmcx_ddr2_ctl_s cn50xx;
1113 struct cvmx_lmcx_ddr2_ctl_s cn52xx;
1114 struct cvmx_lmcx_ddr2_ctl_s cn52xxp1;
1115 struct cvmx_lmcx_ddr2_ctl_s cn56xx;
1116 struct cvmx_lmcx_ddr2_ctl_s cn56xxp1;
1117 struct cvmx_lmcx_ddr2_ctl_s cn58xx;
1118 struct cvmx_lmcx_ddr2_ctl_s cn58xxp1;
1122 * cvmx_lmc#_ddr4_dimm_ctl
1124 * Bits 0-21 of this register are used only when LMC()_CONTROL[RDIMM_ENA] = 1.
1126 * During an RCW initialization sequence, bits 0-21 control LMC's write
1127 * operations to the extended DDR4 control words in the JEDEC standard
1128 * registering clock driver on an RDIMM.
1130 union cvmx_lmcx_ddr4_dimm_ctl {
1132 struct cvmx_lmcx_ddr4_dimm_ctl_s {
1133 uint64_t reserved_28_63:36;
1134 uint64_t rank_timing_enable:1;
1135 uint64_t bodt_trans_mode:1;
1136 uint64_t trans_mode_ena:1;
1137 uint64_t read_preamble_mode:1;
1138 uint64_t buff_config_da3:1;
1139 uint64_t mpr_over_ena:1;
1140 uint64_t ddr4_dimm1_wmask:11;
1141 uint64_t ddr4_dimm0_wmask:11;
1143 struct cvmx_lmcx_ddr4_dimm_ctl_cn70xx {
1144 uint64_t reserved_22_63:42;
1145 uint64_t ddr4_dimm1_wmask:11;
1146 uint64_t ddr4_dimm0_wmask:11;
1148 struct cvmx_lmcx_ddr4_dimm_ctl_cn70xx cn70xxp1;
1149 struct cvmx_lmcx_ddr4_dimm_ctl_s cn73xx;
1150 struct cvmx_lmcx_ddr4_dimm_ctl_s cn78xx;
1151 struct cvmx_lmcx_ddr4_dimm_ctl_s cn78xxp1;
1152 struct cvmx_lmcx_ddr4_dimm_ctl_s cnf75xx;
1156 * cvmx_lmc#_ddr_pll_ctl
1158 * This register controls the DDR_CK frequency. For details, refer to CK
1159 * speed programming. See LMC initialization sequence for the initialization
1161 * DDR PLL bringup sequence:
1163 * 1. Write [CLKF], [CLKR], [DDR_PS_EN].
1165 * 2. Wait 128 ref clock cycles (7680 core-clock cycles).
1167 * 3. Write 1 to [RESET_N].
1169 * 4. Wait 1152 ref clocks (1152*16 core-clock cycles).
1171 * 5. Write 0 to [DDR_DIV_RESET].
1173 * 6. Wait 10 ref clock cycles (160 core-clock cycles) before bringing up
1174 * the DDR interface.
1176 union cvmx_lmcx_ddr_pll_ctl {
1178 struct cvmx_lmcx_ddr_pll_ctl_s {
1179 uint64_t reserved_45_63:19;
1180 uint64_t dclk_alt_refclk_sel:1;
1182 uint64_t dclk_invert:1;
1183 uint64_t phy_dcok:1;
1184 uint64_t ddr4_mode:1;
1185 uint64_t pll_fbslip:1;
1186 uint64_t pll_lock:1;
1187 uint64_t reserved_18_26:9;
1190 uint64_t reserved_8_10:3;
1194 struct cvmx_lmcx_ddr_pll_ctl_cn61xx {
1195 uint64_t reserved_27_63:37;
1196 uint64_t jtg_test_mode:1;
1197 uint64_t dfm_div_reset:1;
1198 uint64_t dfm_ps_en:3;
1199 uint64_t ddr_div_reset:1;
1200 uint64_t ddr_ps_en:3;
1207 struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn63xx;
1208 struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn63xxp1;
1209 struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn66xx;
1210 struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn68xx;
1211 struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn68xxp1;
1212 struct cvmx_lmcx_ddr_pll_ctl_cn70xx {
1213 uint64_t reserved_31_63:33;
1214 uint64_t phy_dcok:1;
1215 uint64_t ddr4_mode:1;
1216 uint64_t pll_fbslip:1;
1217 uint64_t pll_lock:1;
1218 uint64_t pll_rfslip:1;
1220 uint64_t jtg_test_mode:1;
1221 uint64_t ddr_div_reset:1;
1222 uint64_t ddr_ps_en:4;
1223 uint64_t reserved_8_17:10;
1227 struct cvmx_lmcx_ddr_pll_ctl_cn70xx cn70xxp1;
1228 struct cvmx_lmcx_ddr_pll_ctl_cn73xx {
1229 uint64_t reserved_45_63:19;
1230 uint64_t dclk_alt_refclk_sel:1;
1232 uint64_t dclk_invert:1;
1233 uint64_t phy_dcok:1;
1234 uint64_t ddr4_mode:1;
1235 uint64_t pll_fbslip:1;
1236 uint64_t pll_lock:1;
1237 uint64_t pll_rfslip:1;
1239 uint64_t jtg_test_mode:1;
1240 uint64_t ddr_div_reset:1;
1241 uint64_t ddr_ps_en:4;
1242 uint64_t reserved_9_17:9;
1243 uint64_t clkf_ext:1;
1247 struct cvmx_lmcx_ddr_pll_ctl_cn73xx cn78xx;
1248 struct cvmx_lmcx_ddr_pll_ctl_cn73xx cn78xxp1;
1249 struct cvmx_lmcx_ddr_pll_ctl_cn61xx cnf71xx;
1250 struct cvmx_lmcx_ddr_pll_ctl_cn73xx cnf75xx;
1254 * cvmx_lmc#_delay_cfg
1256 * LMC_DELAY_CFG = Open-loop delay line settings
1260 * The DQ bits add OUTGOING delay only to dq, dqs_[p,n], cb, cbs_[p,n], dqm.
1261 * Delay is approximately 50-80ps per setting depending on process/voltage.
1262 * There is no need to add incoming delay since by default all strobe bits
1263 * are delayed internally by 90 degrees (as was always the case in previous
1264 * passes and past chips.
1266 * The CMD add delay to all command bits DDR_RAS, DDR_CAS, DDR_A<15:0>,
1267 * DDR_BA<2:0>, DDR_n_CS<1:0>_L, DDR_WE, DDR_CKE and DDR_ODT_<7:0>.
1268 * Again, delay is 50-80ps per tap.
1270 * The CLK bits add delay to all clock signals DDR_CK_<5:0>_P and
1271 * DDR_CK_<5:0>_N. Again, delay is 50-80ps per tap.
1273 * The usage scenario is the following: There is too much delay on command
1274 * signals and setup on command is not met. The user can then delay the
1275 * clock until setup is met.
1277 * At the same time though, dq/dqs should be delayed because there is also
1278 * a DDR spec tying dqs with clock. If clock is too much delayed with
1279 * respect to dqs, writes will start to fail.
1281 * This scheme should eliminate the board need of adding routing delay to
1282 * clock signals to make high frequencies work.
1284 union cvmx_lmcx_delay_cfg {
1286 struct cvmx_lmcx_delay_cfg_s {
1287 uint64_t reserved_15_63:49;
1292 struct cvmx_lmcx_delay_cfg_s cn30xx;
1293 struct cvmx_lmcx_delay_cfg_cn38xx {
1294 uint64_t reserved_14_63:50;
1296 uint64_t reserved_9_9:1;
1298 uint64_t reserved_4_4:1;
1301 struct cvmx_lmcx_delay_cfg_cn38xx cn50xx;
1302 struct cvmx_lmcx_delay_cfg_cn38xx cn52xx;
1303 struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1;
1304 struct cvmx_lmcx_delay_cfg_cn38xx cn56xx;
1305 struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1;
1306 struct cvmx_lmcx_delay_cfg_cn38xx cn58xx;
1307 struct cvmx_lmcx_delay_cfg_cn38xx cn58xxp1;
1311 * cvmx_lmc#_dimm#_ddr4_params0
1313 * This register contains values to be programmed into the extra DDR4 control
1314 * words in the corresponding (registered) DIMM. These are control words
1315 * RC1x through RC8x.
1317 union cvmx_lmcx_dimmx_ddr4_params0 {
1319 struct cvmx_lmcx_dimmx_ddr4_params0_s {
1329 struct cvmx_lmcx_dimmx_ddr4_params0_s cn70xx;
1330 struct cvmx_lmcx_dimmx_ddr4_params0_s cn70xxp1;
1331 struct cvmx_lmcx_dimmx_ddr4_params0_s cn73xx;
1332 struct cvmx_lmcx_dimmx_ddr4_params0_s cn78xx;
1333 struct cvmx_lmcx_dimmx_ddr4_params0_s cn78xxp1;
1334 struct cvmx_lmcx_dimmx_ddr4_params0_s cnf75xx;
1338 * cvmx_lmc#_dimm#_ddr4_params1
1340 * This register contains values to be programmed into the extra DDR4 control
1341 * words in the corresponding (registered) DIMM. These are control words
1342 * RC9x through RCBx.
1344 union cvmx_lmcx_dimmx_ddr4_params1 {
1346 struct cvmx_lmcx_dimmx_ddr4_params1_s {
1347 uint64_t reserved_24_63:40;
1352 struct cvmx_lmcx_dimmx_ddr4_params1_s cn70xx;
1353 struct cvmx_lmcx_dimmx_ddr4_params1_s cn70xxp1;
1354 struct cvmx_lmcx_dimmx_ddr4_params1_s cn73xx;
1355 struct cvmx_lmcx_dimmx_ddr4_params1_s cn78xx;
1356 struct cvmx_lmcx_dimmx_ddr4_params1_s cn78xxp1;
1357 struct cvmx_lmcx_dimmx_ddr4_params1_s cnf75xx;
1361 * cvmx_lmc#_dimm#_params
1363 * This register contains values to be programmed into each control word in
1364 * the corresponding (registered) DIMM. The control words allow optimization
1365 * of the device properties for different raw card designs. Note that LMC
1366 * only uses this CSR when LMC()_CONTROL[RDIMM_ENA]=1. During a power-up/init
1367 * sequence, LMC writes these fields into the control words in the JEDEC
1368 * standard DDR3 SSTE32882 registering clock driver or DDR4 Register
1369 * DDR4RCD01 on an RDIMM when corresponding LMC()_DIMM_CTL[DIMM*_WMASK]
1372 union cvmx_lmcx_dimmx_params {
1374 struct cvmx_lmcx_dimmx_params_s {
1392 struct cvmx_lmcx_dimmx_params_s cn61xx;
1393 struct cvmx_lmcx_dimmx_params_s cn63xx;
1394 struct cvmx_lmcx_dimmx_params_s cn63xxp1;
1395 struct cvmx_lmcx_dimmx_params_s cn66xx;
1396 struct cvmx_lmcx_dimmx_params_s cn68xx;
1397 struct cvmx_lmcx_dimmx_params_s cn68xxp1;
1398 struct cvmx_lmcx_dimmx_params_s cn70xx;
1399 struct cvmx_lmcx_dimmx_params_s cn70xxp1;
1400 struct cvmx_lmcx_dimmx_params_s cn73xx;
1401 struct cvmx_lmcx_dimmx_params_s cn78xx;
1402 struct cvmx_lmcx_dimmx_params_s cn78xxp1;
1403 struct cvmx_lmcx_dimmx_params_s cnf71xx;
1404 struct cvmx_lmcx_dimmx_params_s cnf75xx;
1408 * cvmx_lmc#_dimm_ctl
1410 * Note that this CSR is only used when LMC()_CONTROL[RDIMM_ENA] = 1. During
1411 * a power-up/init sequence, this CSR controls LMC's write operations to the
1412 * control words in the JEDEC standard DDR3 SSTE32882 registering clock
1413 * driver or DDR4 Register DDR4RCD01 on an RDIMM.
1415 union cvmx_lmcx_dimm_ctl {
1417 struct cvmx_lmcx_dimm_ctl_s {
1418 uint64_t reserved_46_63:18;
1421 uint64_t dimm1_wmask:16;
1422 uint64_t dimm0_wmask:16;
1424 struct cvmx_lmcx_dimm_ctl_s cn61xx;
1425 struct cvmx_lmcx_dimm_ctl_s cn63xx;
1426 struct cvmx_lmcx_dimm_ctl_s cn63xxp1;
1427 struct cvmx_lmcx_dimm_ctl_s cn66xx;
1428 struct cvmx_lmcx_dimm_ctl_s cn68xx;
1429 struct cvmx_lmcx_dimm_ctl_s cn68xxp1;
1430 struct cvmx_lmcx_dimm_ctl_s cn70xx;
1431 struct cvmx_lmcx_dimm_ctl_s cn70xxp1;
1432 struct cvmx_lmcx_dimm_ctl_s cn73xx;
1433 struct cvmx_lmcx_dimm_ctl_s cn78xx;
1434 struct cvmx_lmcx_dimm_ctl_s cn78xxp1;
1435 struct cvmx_lmcx_dimm_ctl_s cnf71xx;
1436 struct cvmx_lmcx_dimm_ctl_s cnf75xx;
1442 * LMC_DLL_CTL = LMC DLL control and DCLK reset
1445 union cvmx_lmcx_dll_ctl {
1447 struct cvmx_lmcx_dll_ctl_s {
1448 uint64_t reserved_8_63:56;
1450 uint64_t dll90_byp:1;
1451 uint64_t dll90_ena:1;
1452 uint64_t dll90_vlu:5;
1454 struct cvmx_lmcx_dll_ctl_s cn52xx;
1455 struct cvmx_lmcx_dll_ctl_s cn52xxp1;
1456 struct cvmx_lmcx_dll_ctl_s cn56xx;
1457 struct cvmx_lmcx_dll_ctl_s cn56xxp1;
1461 * cvmx_lmc#_dll_ctl2
1463 * See LMC initialization sequence for the initialization sequence.
1466 union cvmx_lmcx_dll_ctl2 {
1468 struct cvmx_lmcx_dll_ctl2_s {
1469 uint64_t reserved_0_63:64;
1471 struct cvmx_lmcx_dll_ctl2_cn61xx {
1472 uint64_t reserved_16_63:48;
1474 uint64_t dll_bringup:1;
1476 uint64_t quad_dll_ena:1;
1478 uint64_t byp_setting:8;
1480 struct cvmx_lmcx_dll_ctl2_cn63xx {
1481 uint64_t reserved_15_63:49;
1482 uint64_t dll_bringup:1;
1484 uint64_t quad_dll_ena:1;
1486 uint64_t byp_setting:8;
1488 struct cvmx_lmcx_dll_ctl2_cn63xx cn63xxp1;
1489 struct cvmx_lmcx_dll_ctl2_cn63xx cn66xx;
1490 struct cvmx_lmcx_dll_ctl2_cn61xx cn68xx;
1491 struct cvmx_lmcx_dll_ctl2_cn61xx cn68xxp1;
1492 struct cvmx_lmcx_dll_ctl2_cn70xx {
1493 uint64_t reserved_17_63:47;
1495 uint64_t dll_bringup:1;
1497 uint64_t quad_dll_ena:1;
1499 uint64_t byp_setting:9;
1501 struct cvmx_lmcx_dll_ctl2_cn70xx cn70xxp1;
1502 struct cvmx_lmcx_dll_ctl2_cn70xx cn73xx;
1503 struct cvmx_lmcx_dll_ctl2_cn70xx cn78xx;
1504 struct cvmx_lmcx_dll_ctl2_cn70xx cn78xxp1;
1505 struct cvmx_lmcx_dll_ctl2_cn61xx cnf71xx;
1506 struct cvmx_lmcx_dll_ctl2_cn70xx cnf75xx;
1510 * cvmx_lmc#_dll_ctl3
1512 * LMC_DLL_CTL3 = LMC DLL control and DCLK reset
1515 union cvmx_lmcx_dll_ctl3 {
1517 struct cvmx_lmcx_dll_ctl3_s {
1518 uint64_t reserved_50_63:14;
1519 uint64_t wr_deskew_ena:1;
1520 uint64_t wr_deskew_ld:1;
1521 uint64_t bit_select:4;
1522 uint64_t reserved_0_43:44;
1524 struct cvmx_lmcx_dll_ctl3_cn61xx {
1525 uint64_t reserved_41_63:23;
1526 uint64_t dclk90_fwd:1;
1527 uint64_t ddr_90_dly_byp:1;
1528 uint64_t dclk90_recal_dis:1;
1529 uint64_t dclk90_byp_sel:1;
1530 uint64_t dclk90_byp_setting:8;
1531 uint64_t dll_fast:1;
1532 uint64_t dll90_setting:8;
1533 uint64_t fine_tune_mode:1;
1534 uint64_t dll_mode:1;
1535 uint64_t dll90_byte_sel:4;
1536 uint64_t offset_ena:1;
1537 uint64_t load_offset:1;
1538 uint64_t mode_sel:2;
1539 uint64_t byte_sel:4;
1542 struct cvmx_lmcx_dll_ctl3_cn63xx {
1543 uint64_t reserved_29_63:35;
1544 uint64_t dll_fast:1;
1545 uint64_t dll90_setting:8;
1546 uint64_t fine_tune_mode:1;
1547 uint64_t dll_mode:1;
1548 uint64_t dll90_byte_sel:4;
1549 uint64_t offset_ena:1;
1550 uint64_t load_offset:1;
1551 uint64_t mode_sel:2;
1552 uint64_t byte_sel:4;
1555 struct cvmx_lmcx_dll_ctl3_cn63xx cn63xxp1;
1556 struct cvmx_lmcx_dll_ctl3_cn63xx cn66xx;
1557 struct cvmx_lmcx_dll_ctl3_cn61xx cn68xx;
1558 struct cvmx_lmcx_dll_ctl3_cn61xx cn68xxp1;
1559 struct cvmx_lmcx_dll_ctl3_cn70xx {
1560 uint64_t reserved_44_63:20;
1561 uint64_t dclk90_fwd:1;
1562 uint64_t ddr_90_dly_byp:1;
1563 uint64_t dclk90_recal_dis:1;
1564 uint64_t dclk90_byp_sel:1;
1565 uint64_t dclk90_byp_setting:9;
1566 uint64_t dll_fast:1;
1567 uint64_t dll90_setting:9;
1568 uint64_t fine_tune_mode:1;
1569 uint64_t dll_mode:1;
1570 uint64_t dll90_byte_sel:4;
1571 uint64_t offset_ena:1;
1572 uint64_t load_offset:1;
1573 uint64_t mode_sel:2;
1574 uint64_t byte_sel:4;
1577 struct cvmx_lmcx_dll_ctl3_cn70xx cn70xxp1;
1578 struct cvmx_lmcx_dll_ctl3_cn73xx {
1579 uint64_t reserved_50_63:14;
1580 uint64_t wr_deskew_ena:1;
1581 uint64_t wr_deskew_ld:1;
1582 uint64_t bit_select:4;
1583 uint64_t dclk90_fwd:1;
1584 uint64_t ddr_90_dly_byp:1;
1585 uint64_t dclk90_recal_dis:1;
1586 uint64_t dclk90_byp_sel:1;
1587 uint64_t dclk90_byp_setting:9;
1588 uint64_t dll_fast:1;
1589 uint64_t dll90_setting:9;
1590 uint64_t fine_tune_mode:1;
1591 uint64_t dll_mode:1;
1592 uint64_t dll90_byte_sel:4;
1593 uint64_t offset_ena:1;
1594 uint64_t load_offset:1;
1595 uint64_t mode_sel:2;
1596 uint64_t byte_sel:4;
1599 struct cvmx_lmcx_dll_ctl3_cn73xx cn78xx;
1600 struct cvmx_lmcx_dll_ctl3_cn73xx cn78xxp1;
1601 struct cvmx_lmcx_dll_ctl3_cn61xx cnf71xx;
1602 struct cvmx_lmcx_dll_ctl3_cn73xx cnf75xx;
1606 * cvmx_lmc#_dual_memcfg
1608 * This register controls certain parameters of dual-memory configuration.
1610 * This register enables the design to have two separate memory
1611 * configurations, selected dynamically by the reference address. Note
1612 * however, that both configurations share LMC()_CONTROL[XOR_BANK],
1613 * LMC()_CONFIG [PBANK_LSB], LMC()_CONFIG[RANK_ENA], and all timing parameters.
1615 * In this description:
1616 * * config0 refers to the normal memory configuration that is defined by the
1617 * LMC()_CONFIG[ROW_LSB] parameter
1618 * * config1 refers to the dual (or second) memory configuration that is
1619 * defined by this register.
1621 union cvmx_lmcx_dual_memcfg {
1623 struct cvmx_lmcx_dual_memcfg_s {
1624 uint64_t reserved_20_63:44;
1627 uint64_t reserved_8_15:8;
1630 struct cvmx_lmcx_dual_memcfg_s cn50xx;
1631 struct cvmx_lmcx_dual_memcfg_s cn52xx;
1632 struct cvmx_lmcx_dual_memcfg_s cn52xxp1;
1633 struct cvmx_lmcx_dual_memcfg_s cn56xx;
1634 struct cvmx_lmcx_dual_memcfg_s cn56xxp1;
1635 struct cvmx_lmcx_dual_memcfg_s cn58xx;
1636 struct cvmx_lmcx_dual_memcfg_s cn58xxp1;
1637 struct cvmx_lmcx_dual_memcfg_cn61xx {
1638 uint64_t reserved_19_63:45;
1640 uint64_t reserved_8_15:8;
1643 struct cvmx_lmcx_dual_memcfg_cn61xx cn63xx;
1644 struct cvmx_lmcx_dual_memcfg_cn61xx cn63xxp1;
1645 struct cvmx_lmcx_dual_memcfg_cn61xx cn66xx;
1646 struct cvmx_lmcx_dual_memcfg_cn61xx cn68xx;
1647 struct cvmx_lmcx_dual_memcfg_cn61xx cn68xxp1;
1648 struct cvmx_lmcx_dual_memcfg_cn70xx {
1649 uint64_t reserved_19_63:45;
1651 uint64_t reserved_4_15:12;
1654 struct cvmx_lmcx_dual_memcfg_cn70xx cn70xxp1;
1655 struct cvmx_lmcx_dual_memcfg_cn70xx cn73xx;
1656 struct cvmx_lmcx_dual_memcfg_cn70xx cn78xx;
1657 struct cvmx_lmcx_dual_memcfg_cn70xx cn78xxp1;
1658 struct cvmx_lmcx_dual_memcfg_cn61xx cnf71xx;
1659 struct cvmx_lmcx_dual_memcfg_cn70xx cnf75xx;
1663 * cvmx_lmc#_ecc_parity_test
1665 * This register has bits to control the generation of ECC and command
1666 * address parity errors. ECC error is generated by enabling
1667 * [CA_PARITY_CORRUPT_ENA] and selecting any of the [ECC_CORRUPT_IDX]
1668 * index of the dataword from the cacheline to be corrupted.
1669 * User needs to select which bit of the 128-bit dataword to corrupt by
1670 * asserting any of the CHAR_MASK0 and CHAR_MASK2 bits. (CHAR_MASK0 and
1671 * CHAR_MASK2 corresponds to the lower and upper 64-bit signal that can
1672 * corrupt any individual bit of the data).
1674 * Command address parity error is generated by enabling
1675 * [CA_PARITY_CORRUPT_ENA] and selecting the DDR command that the parity
1676 * is to be corrupted with through [CA_PARITY_SEL].
1678 union cvmx_lmcx_ecc_parity_test {
1680 struct cvmx_lmcx_ecc_parity_test_s {
1681 uint64_t reserved_12_63:52;
1682 uint64_t ecc_corrupt_ena:1;
1683 uint64_t ecc_corrupt_idx:3;
1684 uint64_t reserved_6_7:2;
1685 uint64_t ca_parity_corrupt_ena:1;
1686 uint64_t ca_parity_sel:5;
1688 struct cvmx_lmcx_ecc_parity_test_s cn73xx;
1689 struct cvmx_lmcx_ecc_parity_test_s cn78xx;
1690 struct cvmx_lmcx_ecc_parity_test_s cn78xxp1;
1691 struct cvmx_lmcx_ecc_parity_test_s cnf75xx;
1695 * cvmx_lmc#_ecc_synd
1697 * LMC_ECC_SYND = MRD ECC Syndromes
1700 union cvmx_lmcx_ecc_synd {
1702 struct cvmx_lmcx_ecc_synd_s {
1703 uint64_t reserved_32_63:32;
1709 struct cvmx_lmcx_ecc_synd_s cn30xx;
1710 struct cvmx_lmcx_ecc_synd_s cn31xx;
1711 struct cvmx_lmcx_ecc_synd_s cn38xx;
1712 struct cvmx_lmcx_ecc_synd_s cn38xxp2;
1713 struct cvmx_lmcx_ecc_synd_s cn50xx;
1714 struct cvmx_lmcx_ecc_synd_s cn52xx;
1715 struct cvmx_lmcx_ecc_synd_s cn52xxp1;
1716 struct cvmx_lmcx_ecc_synd_s cn56xx;
1717 struct cvmx_lmcx_ecc_synd_s cn56xxp1;
1718 struct cvmx_lmcx_ecc_synd_s cn58xx;
1719 struct cvmx_lmcx_ecc_synd_s cn58xxp1;
1720 struct cvmx_lmcx_ecc_synd_s cn61xx;
1721 struct cvmx_lmcx_ecc_synd_s cn63xx;
1722 struct cvmx_lmcx_ecc_synd_s cn63xxp1;
1723 struct cvmx_lmcx_ecc_synd_s cn66xx;
1724 struct cvmx_lmcx_ecc_synd_s cn68xx;
1725 struct cvmx_lmcx_ecc_synd_s cn68xxp1;
1726 struct cvmx_lmcx_ecc_synd_s cn70xx;
1727 struct cvmx_lmcx_ecc_synd_s cn70xxp1;
1728 struct cvmx_lmcx_ecc_synd_s cn73xx;
1729 struct cvmx_lmcx_ecc_synd_s cn78xx;
1730 struct cvmx_lmcx_ecc_synd_s cn78xxp1;
1731 struct cvmx_lmcx_ecc_synd_s cnf71xx;
1732 struct cvmx_lmcx_ecc_synd_s cnf75xx;
1736 * cvmx_lmc#_ext_config
1738 * This register has additional configuration and control bits for the LMC.
1741 union cvmx_lmcx_ext_config {
1743 struct cvmx_lmcx_ext_config_s {
1744 uint64_t reserved_61_63:3;
1745 uint64_t bc4_dqs_ena:1;
1746 uint64_t ref_block:1;
1747 uint64_t mrs_side:1;
1748 uint64_t mrs_one_side:1;
1749 uint64_t mrs_bside_invert_disable:1;
1750 uint64_t dimm_sel_invert_off:1;
1751 uint64_t dimm_sel_force_invert:1;
1752 uint64_t coalesce_address_mode:1;
1753 uint64_t dimm1_cid:2;
1754 uint64_t dimm0_cid:2;
1755 uint64_t rcd_parity_check:1;
1756 uint64_t reserved_46_47:2;
1757 uint64_t error_alert_n_sample:1;
1758 uint64_t ea_int_polarity:1;
1759 uint64_t reserved_43_43:1;
1760 uint64_t par_addr_mask:3;
1761 uint64_t reserved_38_39:2;
1762 uint64_t mrs_cmd_override:1;
1763 uint64_t mrs_cmd_select:1;
1764 uint64_t reserved_33_35:3;
1765 uint64_t invert_data:1;
1766 uint64_t reserved_30_31:2;
1769 uint64_t reserved_27_27:1;
1770 uint64_t par_include_a17:1;
1771 uint64_t par_include_bg1:1;
1773 uint64_t reserved_21_23:3;
1774 uint64_t vrefint_seq_deskew:1;
1775 uint64_t read_ena_bprch:1;
1776 uint64_t read_ena_fprch:1;
1777 uint64_t slot_ctl_reset_force:1;
1778 uint64_t ref_int_lsbs:9;
1779 uint64_t drive_ena_bprch:1;
1780 uint64_t drive_ena_fprch:1;
1781 uint64_t dlcram_flip_synd:2;
1782 uint64_t dlcram_cor_dis:1;
1783 uint64_t dlc_nxm_rd:1;
1784 uint64_t l2c_nxm_rd:1;
1785 uint64_t l2c_nxm_wr:1;
1787 struct cvmx_lmcx_ext_config_cn70xx {
1788 uint64_t reserved_21_63:43;
1789 uint64_t vrefint_seq_deskew:1;
1790 uint64_t read_ena_bprch:1;
1791 uint64_t read_ena_fprch:1;
1792 uint64_t slot_ctl_reset_force:1;
1793 uint64_t ref_int_lsbs:9;
1794 uint64_t drive_ena_bprch:1;
1795 uint64_t drive_ena_fprch:1;
1796 uint64_t dlcram_flip_synd:2;
1797 uint64_t dlcram_cor_dis:1;
1798 uint64_t dlc_nxm_rd:1;
1799 uint64_t l2c_nxm_rd:1;
1800 uint64_t l2c_nxm_wr:1;
1802 struct cvmx_lmcx_ext_config_cn70xx cn70xxp1;
1803 struct cvmx_lmcx_ext_config_cn73xx {
1804 uint64_t reserved_60_63:4;
1805 uint64_t ref_block:1;
1806 uint64_t mrs_side:1;
1807 uint64_t mrs_one_side:1;
1808 uint64_t mrs_bside_invert_disable:1;
1809 uint64_t dimm_sel_invert_off:1;
1810 uint64_t dimm_sel_force_invert:1;
1811 uint64_t coalesce_address_mode:1;
1812 uint64_t dimm1_cid:2;
1813 uint64_t dimm0_cid:2;
1814 uint64_t rcd_parity_check:1;
1815 uint64_t reserved_46_47:2;
1816 uint64_t error_alert_n_sample:1;
1817 uint64_t ea_int_polarity:1;
1818 uint64_t reserved_43_43:1;
1819 uint64_t par_addr_mask:3;
1820 uint64_t reserved_38_39:2;
1821 uint64_t mrs_cmd_override:1;
1822 uint64_t mrs_cmd_select:1;
1823 uint64_t reserved_33_35:3;
1824 uint64_t invert_data:1;
1825 uint64_t reserved_30_31:2;
1828 uint64_t reserved_27_27:1;
1829 uint64_t par_include_a17:1;
1830 uint64_t par_include_bg1:1;
1832 uint64_t reserved_21_23:3;
1833 uint64_t vrefint_seq_deskew:1;
1834 uint64_t read_ena_bprch:1;
1835 uint64_t read_ena_fprch:1;
1836 uint64_t slot_ctl_reset_force:1;
1837 uint64_t ref_int_lsbs:9;
1838 uint64_t drive_ena_bprch:1;
1839 uint64_t drive_ena_fprch:1;
1840 uint64_t dlcram_flip_synd:2;
1841 uint64_t dlcram_cor_dis:1;
1842 uint64_t dlc_nxm_rd:1;
1843 uint64_t l2c_nxm_rd:1;
1844 uint64_t l2c_nxm_wr:1;
1846 struct cvmx_lmcx_ext_config_s cn78xx;
1847 struct cvmx_lmcx_ext_config_s cn78xxp1;
1848 struct cvmx_lmcx_ext_config_cn73xx cnf75xx;
1852 * cvmx_lmc#_ext_config2
1854 * This register has additional configuration and control bits for the LMC.
1857 union cvmx_lmcx_ext_config2 {
1859 struct cvmx_lmcx_ext_config2_s {
1860 uint64_t reserved_27_63:37;
1861 uint64_t sref_auto_idle_thres:5;
1862 uint64_t sref_auto_enable:1;
1863 uint64_t delay_unload_r3:1;
1864 uint64_t delay_unload_r2:1;
1865 uint64_t delay_unload_r1:1;
1866 uint64_t delay_unload_r0:1;
1867 uint64_t early_dqx2:1;
1868 uint64_t xor_bank_sel:4;
1869 uint64_t reserved_10_11:2;
1870 uint64_t row_col_switch:1;
1873 uint64_t macram_scrub_done:1;
1874 uint64_t macram_scrub:1;
1875 uint64_t macram_flip_synd:2;
1876 uint64_t macram_cor_dis:1;
1878 struct cvmx_lmcx_ext_config2_cn73xx {
1879 uint64_t reserved_10_63:54;
1880 uint64_t row_col_switch:1;
1883 uint64_t macram_scrub_done:1;
1884 uint64_t macram_scrub:1;
1885 uint64_t macram_flip_synd:2;
1886 uint64_t macram_cor_dis:1;
1888 struct cvmx_lmcx_ext_config2_s cn78xx;
1889 struct cvmx_lmcx_ext_config2_cnf75xx {
1890 uint64_t reserved_21_63:43;
1891 uint64_t delay_unload_r3:1;
1892 uint64_t delay_unload_r2:1;
1893 uint64_t delay_unload_r1:1;
1894 uint64_t delay_unload_r0:1;
1895 uint64_t early_dqx2:1;
1896 uint64_t xor_bank_sel:4;
1897 uint64_t reserved_10_11:2;
1898 uint64_t row_col_switch:1;
1901 uint64_t macram_scrub_done:1;
1902 uint64_t macram_scrub:1;
1903 uint64_t macram_flip_synd:2;
1904 uint64_t macram_cor_dis:1;
1911 * This register only captures the first transaction with ECC errors. A DED
1912 * error can over-write this register with its failing addresses if the
1913 * first error was a SEC. If you write LMC()_INT -> SEC_ERR/DED_ERR, it
1914 * clears the error bits and captures the next failing address. If FDIMM
1915 * is 1, that means the error is in the high DIMM. LMC()_FADR captures the
1916 * failing pre-scrambled address location (split into DIMM, bunk, bank, etc).
1917 * If scrambling is off, then LMC()_FADR will also capture the failing
1918 * physical location in the DRAM parts. LMC()_SCRAMBLED_FADR captures the
1919 * actual failing address location in the physical DRAM parts, i.e.,
1920 * If scrambling is on, LMC()_SCRAMBLED_FADR contains the failing physical
1921 * location in the DRAM parts (split into DIMM, bunk, bank, etc.)
1922 * If scrambling is off, the pre-scramble and post-scramble addresses are
1923 * the same; and so the contents of LMC()_SCRAMBLED_FADR match the contents
1926 union cvmx_lmcx_fadr {
1928 struct cvmx_lmcx_fadr_s {
1929 uint64_t reserved_43_63:21;
1931 uint64_t fill_order:2;
1932 uint64_t reserved_0_37:38;
1934 struct cvmx_lmcx_fadr_cn30xx {
1935 uint64_t reserved_32_63:32;
1942 struct cvmx_lmcx_fadr_cn30xx cn31xx;
1943 struct cvmx_lmcx_fadr_cn30xx cn38xx;
1944 struct cvmx_lmcx_fadr_cn30xx cn38xxp2;
1945 struct cvmx_lmcx_fadr_cn30xx cn50xx;
1946 struct cvmx_lmcx_fadr_cn30xx cn52xx;
1947 struct cvmx_lmcx_fadr_cn30xx cn52xxp1;
1948 struct cvmx_lmcx_fadr_cn30xx cn56xx;
1949 struct cvmx_lmcx_fadr_cn30xx cn56xxp1;
1950 struct cvmx_lmcx_fadr_cn30xx cn58xx;
1951 struct cvmx_lmcx_fadr_cn30xx cn58xxp1;
1952 struct cvmx_lmcx_fadr_cn61xx {
1953 uint64_t reserved_36_63:28;
1960 struct cvmx_lmcx_fadr_cn61xx cn63xx;
1961 struct cvmx_lmcx_fadr_cn61xx cn63xxp1;
1962 struct cvmx_lmcx_fadr_cn61xx cn66xx;
1963 struct cvmx_lmcx_fadr_cn61xx cn68xx;
1964 struct cvmx_lmcx_fadr_cn61xx cn68xxp1;
1965 struct cvmx_lmcx_fadr_cn70xx {
1966 uint64_t reserved_40_63:24;
1967 uint64_t fill_order:2;
1974 struct cvmx_lmcx_fadr_cn70xx cn70xxp1;
1975 struct cvmx_lmcx_fadr_cn73xx {
1976 uint64_t reserved_43_63:21;
1978 uint64_t fill_order:2;
1985 struct cvmx_lmcx_fadr_cn73xx cn78xx;
1986 struct cvmx_lmcx_fadr_cn73xx cn78xxp1;
1987 struct cvmx_lmcx_fadr_cn61xx cnf71xx;
1988 struct cvmx_lmcx_fadr_cn73xx cnf75xx;
1992 * cvmx_lmc#_general_purpose0
1994 union cvmx_lmcx_general_purpose0 {
1996 struct cvmx_lmcx_general_purpose0_s {
1999 struct cvmx_lmcx_general_purpose0_s cn73xx;
2000 struct cvmx_lmcx_general_purpose0_s cn78xx;
2001 struct cvmx_lmcx_general_purpose0_s cnf75xx;
2005 * cvmx_lmc#_general_purpose1
2007 union cvmx_lmcx_general_purpose1 {
2009 struct cvmx_lmcx_general_purpose1_s {
2012 struct cvmx_lmcx_general_purpose1_s cn73xx;
2013 struct cvmx_lmcx_general_purpose1_s cn78xx;
2014 struct cvmx_lmcx_general_purpose1_s cnf75xx;
2018 * cvmx_lmc#_general_purpose2
2020 union cvmx_lmcx_general_purpose2 {
2022 struct cvmx_lmcx_general_purpose2_s {
2023 uint64_t reserved_16_63:48;
2026 struct cvmx_lmcx_general_purpose2_s cn73xx;
2027 struct cvmx_lmcx_general_purpose2_s cn78xx;
2028 struct cvmx_lmcx_general_purpose2_s cnf75xx;
2034 * LMC_IFB_CNT = Performance Counters
2037 union cvmx_lmcx_ifb_cnt {
2039 struct cvmx_lmcx_ifb_cnt_s {
2042 struct cvmx_lmcx_ifb_cnt_s cn61xx;
2043 struct cvmx_lmcx_ifb_cnt_s cn63xx;
2044 struct cvmx_lmcx_ifb_cnt_s cn63xxp1;
2045 struct cvmx_lmcx_ifb_cnt_s cn66xx;
2046 struct cvmx_lmcx_ifb_cnt_s cn68xx;
2047 struct cvmx_lmcx_ifb_cnt_s cn68xxp1;
2048 struct cvmx_lmcx_ifb_cnt_s cn70xx;
2049 struct cvmx_lmcx_ifb_cnt_s cn70xxp1;
2050 struct cvmx_lmcx_ifb_cnt_s cn73xx;
2051 struct cvmx_lmcx_ifb_cnt_s cn78xx;
2052 struct cvmx_lmcx_ifb_cnt_s cn78xxp1;
2053 struct cvmx_lmcx_ifb_cnt_s cnf71xx;
2054 struct cvmx_lmcx_ifb_cnt_s cnf75xx;
2058 * cvmx_lmc#_ifb_cnt_hi
2060 * LMC_IFB_CNT_HI = Performance Counters
2063 union cvmx_lmcx_ifb_cnt_hi {
2065 struct cvmx_lmcx_ifb_cnt_hi_s {
2066 uint64_t reserved_32_63:32;
2067 uint64_t ifbcnt_hi:32;
2069 struct cvmx_lmcx_ifb_cnt_hi_s cn30xx;
2070 struct cvmx_lmcx_ifb_cnt_hi_s cn31xx;
2071 struct cvmx_lmcx_ifb_cnt_hi_s cn38xx;
2072 struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2;
2073 struct cvmx_lmcx_ifb_cnt_hi_s cn50xx;
2074 struct cvmx_lmcx_ifb_cnt_hi_s cn52xx;
2075 struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1;
2076 struct cvmx_lmcx_ifb_cnt_hi_s cn56xx;
2077 struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1;
2078 struct cvmx_lmcx_ifb_cnt_hi_s cn58xx;
2079 struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1;
2083 * cvmx_lmc#_ifb_cnt_lo
2085 * LMC_IFB_CNT_LO = Performance Counters
2088 union cvmx_lmcx_ifb_cnt_lo {
2090 struct cvmx_lmcx_ifb_cnt_lo_s {
2091 uint64_t reserved_32_63:32;
2092 uint64_t ifbcnt_lo:32;
2094 struct cvmx_lmcx_ifb_cnt_lo_s cn30xx;
2095 struct cvmx_lmcx_ifb_cnt_lo_s cn31xx;
2096 struct cvmx_lmcx_ifb_cnt_lo_s cn38xx;
2097 struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2;
2098 struct cvmx_lmcx_ifb_cnt_lo_s cn50xx;
2099 struct cvmx_lmcx_ifb_cnt_lo_s cn52xx;
2100 struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1;
2101 struct cvmx_lmcx_ifb_cnt_lo_s cn56xx;
2102 struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1;
2103 struct cvmx_lmcx_ifb_cnt_lo_s cn58xx;
2104 struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1;
2110 * This register contains the different interrupt-summary bits of the LMC.
2113 union cvmx_lmcx_int {
2115 struct cvmx_lmcx_int_s {
2116 uint64_t reserved_14_63:50;
2117 uint64_t macram_ded_err:1;
2118 uint64_t macram_sec_err:1;
2120 uint64_t dlcram_ded_err:1;
2121 uint64_t dlcram_sec_err:1;
2124 uint64_t nxm_wr_err:1;
2126 struct cvmx_lmcx_int_cn61xx {
2127 uint64_t reserved_9_63:55;
2130 uint64_t nxm_wr_err:1;
2132 struct cvmx_lmcx_int_cn61xx cn63xx;
2133 struct cvmx_lmcx_int_cn61xx cn63xxp1;
2134 struct cvmx_lmcx_int_cn61xx cn66xx;
2135 struct cvmx_lmcx_int_cn61xx cn68xx;
2136 struct cvmx_lmcx_int_cn61xx cn68xxp1;
2137 struct cvmx_lmcx_int_cn70xx {
2138 uint64_t reserved_12_63:52;
2140 uint64_t dlcram_ded_err:1;
2141 uint64_t dlcram_sec_err:1;
2144 uint64_t nxm_wr_err:1;
2146 struct cvmx_lmcx_int_cn70xx cn70xxp1;
2147 struct cvmx_lmcx_int_s cn73xx;
2148 struct cvmx_lmcx_int_s cn78xx;
2149 struct cvmx_lmcx_int_s cn78xxp1;
2150 struct cvmx_lmcx_int_cn61xx cnf71xx;
2151 struct cvmx_lmcx_int_s cnf75xx;
2157 * Unused CSR in O75.
2160 union cvmx_lmcx_int_en {
2162 struct cvmx_lmcx_int_en_s {
2163 uint64_t reserved_6_63:58;
2164 uint64_t ddr_error_alert_ena:1;
2165 uint64_t dlcram_ded_ena:1;
2166 uint64_t dlcram_sec_ena:1;
2167 uint64_t intr_ded_ena:1;
2168 uint64_t intr_sec_ena:1;
2169 uint64_t intr_nxm_wr_ena:1;
2171 struct cvmx_lmcx_int_en_cn61xx {
2172 uint64_t reserved_3_63:61;
2173 uint64_t intr_ded_ena:1;
2174 uint64_t intr_sec_ena:1;
2175 uint64_t intr_nxm_wr_ena:1;
2177 struct cvmx_lmcx_int_en_cn61xx cn63xx;
2178 struct cvmx_lmcx_int_en_cn61xx cn63xxp1;
2179 struct cvmx_lmcx_int_en_cn61xx cn66xx;
2180 struct cvmx_lmcx_int_en_cn61xx cn68xx;
2181 struct cvmx_lmcx_int_en_cn61xx cn68xxp1;
2182 struct cvmx_lmcx_int_en_s cn70xx;
2183 struct cvmx_lmcx_int_en_s cn70xxp1;
2184 struct cvmx_lmcx_int_en_s cn73xx;
2185 struct cvmx_lmcx_int_en_s cn78xx;
2186 struct cvmx_lmcx_int_en_s cn78xxp1;
2187 struct cvmx_lmcx_int_en_cn61xx cnf71xx;
2188 struct cvmx_lmcx_int_en_s cnf75xx;
2192 * cvmx_lmc#_lane#_crc_swiz
2194 * This register contains the CRC bit swizzle for even and odd ranks.
2197 union cvmx_lmcx_lanex_crc_swiz {
2199 struct cvmx_lmcx_lanex_crc_swiz_s {
2200 uint64_t reserved_56_63:8;
2201 uint64_t r1_swiz7:3;
2202 uint64_t r1_swiz6:3;
2203 uint64_t r1_swiz5:3;
2204 uint64_t r1_swiz4:3;
2205 uint64_t r1_swiz3:3;
2206 uint64_t r1_swiz2:3;
2207 uint64_t r1_swiz1:3;
2208 uint64_t r1_swiz0:3;
2209 uint64_t reserved_24_31:8;
2210 uint64_t r0_swiz7:3;
2211 uint64_t r0_swiz6:3;
2212 uint64_t r0_swiz5:3;
2213 uint64_t r0_swiz4:3;
2214 uint64_t r0_swiz3:3;
2215 uint64_t r0_swiz2:3;
2216 uint64_t r0_swiz1:3;
2217 uint64_t r0_swiz0:3;
2219 struct cvmx_lmcx_lanex_crc_swiz_s cn73xx;
2220 struct cvmx_lmcx_lanex_crc_swiz_s cn78xx;
2221 struct cvmx_lmcx_lanex_crc_swiz_s cn78xxp1;
2222 struct cvmx_lmcx_lanex_crc_swiz_s cnf75xx;
2226 * cvmx_lmc#_mem_cfg0
2228 * Specify the RSL base addresses for the block
2230 * LMC_MEM_CFG0 = LMC Memory Configuration Register0
2232 * This register controls certain parameters of Memory Configuration
2234 union cvmx_lmcx_mem_cfg0 {
2236 struct cvmx_lmcx_mem_cfg0_s {
2237 uint64_t reserved_32_63:32;
2240 uint64_t bunk_ena:1;
2243 uint64_t intr_ded_ena:1;
2244 uint64_t intr_sec_ena:1;
2247 uint64_t pbank_lsb:4;
2250 uint64_t init_start:1;
2252 struct cvmx_lmcx_mem_cfg0_s cn30xx;
2253 struct cvmx_lmcx_mem_cfg0_s cn31xx;
2254 struct cvmx_lmcx_mem_cfg0_s cn38xx;
2255 struct cvmx_lmcx_mem_cfg0_s cn38xxp2;
2256 struct cvmx_lmcx_mem_cfg0_s cn50xx;
2257 struct cvmx_lmcx_mem_cfg0_s cn52xx;
2258 struct cvmx_lmcx_mem_cfg0_s cn52xxp1;
2259 struct cvmx_lmcx_mem_cfg0_s cn56xx;
2260 struct cvmx_lmcx_mem_cfg0_s cn56xxp1;
2261 struct cvmx_lmcx_mem_cfg0_s cn58xx;
2262 struct cvmx_lmcx_mem_cfg0_s cn58xxp1;
2266 * cvmx_lmc#_mem_cfg1
2268 * LMC_MEM_CFG1 = LMC Memory Configuration Register1
2270 * This register controls the External Memory Configuration Timing Parameters.
2271 * Please refer to the appropriate DDR part spec from your memory vendor for
2272 * the various values in this CSR. The details of each of these timing
2273 * parameters can be found in the JEDEC spec or the vendor spec of the
2276 union cvmx_lmcx_mem_cfg1 {
2278 struct cvmx_lmcx_mem_cfg1_s {
2279 uint64_t reserved_32_63:32;
2280 uint64_t comp_bypass:1;
2290 struct cvmx_lmcx_mem_cfg1_s cn30xx;
2291 struct cvmx_lmcx_mem_cfg1_s cn31xx;
2292 struct cvmx_lmcx_mem_cfg1_cn38xx {
2293 uint64_t reserved_31_63:33;
2303 struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2;
2304 struct cvmx_lmcx_mem_cfg1_s cn50xx;
2305 struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx;
2306 struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1;
2307 struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx;
2308 struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1;
2309 struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx;
2310 struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1;
2314 * cvmx_lmc#_modereg_params0
2316 * These parameters are written into the DDR3/DDR4 MR0, MR1, MR2 and MR3
2320 union cvmx_lmcx_modereg_params0 {
2322 struct cvmx_lmcx_modereg_params0_s {
2323 uint64_t reserved_28_63:36;
2343 struct cvmx_lmcx_modereg_params0_cn61xx {
2344 uint64_t reserved_25_63:39;
2361 struct cvmx_lmcx_modereg_params0_cn61xx cn63xx;
2362 struct cvmx_lmcx_modereg_params0_cn61xx cn63xxp1;
2363 struct cvmx_lmcx_modereg_params0_cn61xx cn66xx;
2364 struct cvmx_lmcx_modereg_params0_cn61xx cn68xx;
2365 struct cvmx_lmcx_modereg_params0_cn61xx cn68xxp1;
2366 struct cvmx_lmcx_modereg_params0_cn61xx cn70xx;
2367 struct cvmx_lmcx_modereg_params0_cn61xx cn70xxp1;
2368 struct cvmx_lmcx_modereg_params0_s cn73xx;
2369 struct cvmx_lmcx_modereg_params0_s cn78xx;
2370 struct cvmx_lmcx_modereg_params0_s cn78xxp1;
2371 struct cvmx_lmcx_modereg_params0_cn61xx cnf71xx;
2372 struct cvmx_lmcx_modereg_params0_s cnf75xx;
2376 * cvmx_lmc#_modereg_params1
2378 * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
2381 union cvmx_lmcx_modereg_params1 {
2383 struct cvmx_lmcx_modereg_params1_s {
2384 uint64_t reserved_55_63:9;
2385 uint64_t rtt_wr_11_ext:1;
2386 uint64_t rtt_wr_10_ext:1;
2387 uint64_t rtt_wr_01_ext:1;
2388 uint64_t rtt_wr_00_ext:1;
2389 uint64_t db_output_impedance:3;
2390 uint64_t rtt_nom_11:3;
2392 uint64_t rtt_wr_11:2;
2396 uint64_t rtt_nom_10:3;
2398 uint64_t rtt_wr_10:2;
2402 uint64_t rtt_nom_01:3;
2404 uint64_t rtt_wr_01:2;
2408 uint64_t rtt_nom_00:3;
2410 uint64_t rtt_wr_00:2;
2415 struct cvmx_lmcx_modereg_params1_cn61xx {
2416 uint64_t reserved_48_63:16;
2417 uint64_t rtt_nom_11:3;
2419 uint64_t rtt_wr_11:2;
2423 uint64_t rtt_nom_10:3;
2425 uint64_t rtt_wr_10:2;
2429 uint64_t rtt_nom_01:3;
2431 uint64_t rtt_wr_01:2;
2435 uint64_t rtt_nom_00:3;
2437 uint64_t rtt_wr_00:2;
2442 struct cvmx_lmcx_modereg_params1_cn61xx cn63xx;
2443 struct cvmx_lmcx_modereg_params1_cn61xx cn63xxp1;
2444 struct cvmx_lmcx_modereg_params1_cn61xx cn66xx;
2445 struct cvmx_lmcx_modereg_params1_cn61xx cn68xx;
2446 struct cvmx_lmcx_modereg_params1_cn61xx cn68xxp1;
2447 struct cvmx_lmcx_modereg_params1_cn61xx cn70xx;
2448 struct cvmx_lmcx_modereg_params1_cn61xx cn70xxp1;
2449 struct cvmx_lmcx_modereg_params1_s cn73xx;
2450 struct cvmx_lmcx_modereg_params1_s cn78xx;
2451 struct cvmx_lmcx_modereg_params1_s cn78xxp1;
2452 struct cvmx_lmcx_modereg_params1_cn61xx cnf71xx;
2453 struct cvmx_lmcx_modereg_params1_s cnf75xx;
2457 * cvmx_lmc#_modereg_params2
2459 * These parameters are written into the DDR4 mode registers.
2462 union cvmx_lmcx_modereg_params2 {
2464 struct cvmx_lmcx_modereg_params2_s {
2465 uint64_t reserved_41_63:23;
2466 uint64_t vrefdq_train_en:1;
2467 uint64_t vref_range_11:1;
2468 uint64_t vref_value_11:6;
2469 uint64_t rtt_park_11:3;
2470 uint64_t vref_range_10:1;
2471 uint64_t vref_value_10:6;
2472 uint64_t rtt_park_10:3;
2473 uint64_t vref_range_01:1;
2474 uint64_t vref_value_01:6;
2475 uint64_t rtt_park_01:3;
2476 uint64_t vref_range_00:1;
2477 uint64_t vref_value_00:6;
2478 uint64_t rtt_park_00:3;
2480 struct cvmx_lmcx_modereg_params2_s cn70xx;
2481 struct cvmx_lmcx_modereg_params2_cn70xxp1 {
2482 uint64_t reserved_40_63:24;
2483 uint64_t vref_range_11:1;
2484 uint64_t vref_value_11:6;
2485 uint64_t rtt_park_11:3;
2486 uint64_t vref_range_10:1;
2487 uint64_t vref_value_10:6;
2488 uint64_t rtt_park_10:3;
2489 uint64_t vref_range_01:1;
2490 uint64_t vref_value_01:6;
2491 uint64_t rtt_park_01:3;
2492 uint64_t vref_range_00:1;
2493 uint64_t vref_value_00:6;
2494 uint64_t rtt_park_00:3;
2496 struct cvmx_lmcx_modereg_params2_s cn73xx;
2497 struct cvmx_lmcx_modereg_params2_s cn78xx;
2498 struct cvmx_lmcx_modereg_params2_s cn78xxp1;
2499 struct cvmx_lmcx_modereg_params2_s cnf75xx;
2503 * cvmx_lmc#_modereg_params3
2505 * These parameters are written into the DDR4 mode registers.
2508 union cvmx_lmcx_modereg_params3 {
2510 struct cvmx_lmcx_modereg_params3_s {
2511 uint64_t reserved_39_63:25;
2512 uint64_t xrank_add_tccd_l:3;
2513 uint64_t xrank_add_tccd_s:3;
2515 uint64_t wr_cmd_lat:2;
2517 uint64_t temp_sense:1;
2526 uint64_t ca_par_pers:1;
2528 uint64_t par_lat_mode:3;
2529 uint64_t wr_preamble:1;
2530 uint64_t rd_preamble:1;
2531 uint64_t sre_abort:1;
2533 uint64_t vref_mon:1;
2537 struct cvmx_lmcx_modereg_params3_cn70xx {
2538 uint64_t reserved_33_63:31;
2540 uint64_t wr_cmd_lat:2;
2542 uint64_t temp_sense:1;
2551 uint64_t ca_par_pers:1;
2553 uint64_t par_lat_mode:3;
2554 uint64_t wr_preamble:1;
2555 uint64_t rd_preamble:1;
2556 uint64_t sre_abort:1;
2558 uint64_t vref_mon:1;
2562 struct cvmx_lmcx_modereg_params3_cn70xx cn70xxp1;
2563 struct cvmx_lmcx_modereg_params3_s cn73xx;
2564 struct cvmx_lmcx_modereg_params3_s cn78xx;
2565 struct cvmx_lmcx_modereg_params3_s cn78xxp1;
2566 struct cvmx_lmcx_modereg_params3_s cnf75xx;
/**
 * cvmx_lmc#_mpr_data0
 *
 * This register provides bits <63:0> of MPR data register.
 *
 */
union cvmx_lmcx_mpr_data0 {
	uint64_t u64;
	struct cvmx_lmcx_mpr_data0_s {
		uint64_t mpr_data:64;
	} s;
	struct cvmx_lmcx_mpr_data0_s cn70xx;
	struct cvmx_lmcx_mpr_data0_s cn70xxp1;
	struct cvmx_lmcx_mpr_data0_s cn73xx;
	struct cvmx_lmcx_mpr_data0_s cn78xx;
	struct cvmx_lmcx_mpr_data0_s cn78xxp1;
	struct cvmx_lmcx_mpr_data0_s cnf75xx;
};
/**
 * cvmx_lmc#_mpr_data1
 *
 * This register provides bits <127:64> of MPR data register.
 *
 */
union cvmx_lmcx_mpr_data1 {
	uint64_t u64;
	struct cvmx_lmcx_mpr_data1_s {
		uint64_t mpr_data:64;
	} s;
	struct cvmx_lmcx_mpr_data1_s cn70xx;
	struct cvmx_lmcx_mpr_data1_s cn70xxp1;
	struct cvmx_lmcx_mpr_data1_s cn73xx;
	struct cvmx_lmcx_mpr_data1_s cn78xx;
	struct cvmx_lmcx_mpr_data1_s cn78xxp1;
	struct cvmx_lmcx_mpr_data1_s cnf75xx;
};
/**
 * cvmx_lmc#_mpr_data2
 *
 * This register provides bits <143:128> of MPR data register.
 *
 */
union cvmx_lmcx_mpr_data2 {
	uint64_t u64;
	struct cvmx_lmcx_mpr_data2_s {
		uint64_t reserved_16_63:48;
		uint64_t mpr_data:16;
	} s;
	struct cvmx_lmcx_mpr_data2_s cn70xx;
	struct cvmx_lmcx_mpr_data2_s cn70xxp1;
	struct cvmx_lmcx_mpr_data2_s cn73xx;
	struct cvmx_lmcx_mpr_data2_s cn78xx;
	struct cvmx_lmcx_mpr_data2_s cn78xxp1;
	struct cvmx_lmcx_mpr_data2_s cnf75xx;
};
/**
 * cvmx_lmc#_mr_mpr_ctl
 *
 * This register provides the control functions when programming the MPR
 * of the DDR4 DRAM parts.
 */
union cvmx_lmcx_mr_mpr_ctl {
	uint64_t u64;
	struct cvmx_lmcx_mr_mpr_ctl_s {
		uint64_t reserved_61_63:3;
		uint64_t mr_wr_secure_key_ena:1;
		uint64_t pba_func_space:3;
		uint64_t mr_wr_bg1:1;
		uint64_t mpr_sample_dq_enable:1;
		uint64_t pda_early_dqx:1;
		uint64_t mr_wr_pba_enable:1;
		uint64_t mr_wr_use_default_value:1;
		uint64_t mpr_whole_byte_enable:1;
		uint64_t mpr_byte_select:4;
		uint64_t mpr_bit_select:2;
		uint64_t mpr_wr:1;
		uint64_t mpr_loc:2;
		uint64_t mr_wr_pda_enable:1;
		uint64_t mr_wr_pda_mask:18;
		uint64_t mr_wr_rank:2;
		uint64_t mr_wr_sel:3;
		uint64_t mr_wr_addr:18;
	} s;
	struct cvmx_lmcx_mr_mpr_ctl_cn70xx {
		uint64_t reserved_52_63:12;
		uint64_t mpr_whole_byte_enable:1;
		uint64_t mpr_byte_select:4;
		uint64_t mpr_bit_select:2;
		uint64_t mpr_wr:1;
		uint64_t mpr_loc:2;
		uint64_t mr_wr_pda_enable:1;
		uint64_t mr_wr_pda_mask:18;
		uint64_t mr_wr_rank:2;
		uint64_t mr_wr_sel:3;
		uint64_t mr_wr_addr:18;
	} cn70xx;
	struct cvmx_lmcx_mr_mpr_ctl_cn70xx cn70xxp1;
	struct cvmx_lmcx_mr_mpr_ctl_s cn73xx;
	struct cvmx_lmcx_mr_mpr_ctl_s cn78xx;
	struct cvmx_lmcx_mr_mpr_ctl_s cn78xxp1;
	struct cvmx_lmcx_mr_mpr_ctl_s cnf75xx;
};
/**
 * cvmx_lmc#_ns_ctl
 *
 * This register contains control parameters for handling nonsecure accesses.
 *
 */
union cvmx_lmcx_ns_ctl {
	uint64_t u64;
	struct cvmx_lmcx_ns_ctl_s {
		uint64_t reserved_26_63:38;
		uint64_t ns_scramble_dis:1;
		uint64_t reserved_18_24:7;
		uint64_t adr_offset:18;
	} s;
	struct cvmx_lmcx_ns_ctl_s cn73xx;
	struct cvmx_lmcx_ns_ctl_s cn78xx;
	struct cvmx_lmcx_ns_ctl_s cnf75xx;
};
/**
 * cvmx_lmc#_nxm
 *
 * Following is the decoding for mem_msb/rank:
 * 0x0: mem_msb = mem_adr[25].
 * 0x1: mem_msb = mem_adr[26].
 * 0x2: mem_msb = mem_adr[27].
 * 0x3: mem_msb = mem_adr[28].
 * 0x4: mem_msb = mem_adr[29].
 * 0x5: mem_msb = mem_adr[30].
 * 0x6: mem_msb = mem_adr[31].
 * 0x7: mem_msb = mem_adr[32].
 * 0x8: mem_msb = mem_adr[33].
 * 0x9: mem_msb = mem_adr[34].
 * 0xA: mem_msb = mem_adr[35].
 * 0xB: mem_msb = mem_adr[36].
 * 0xC-0xF = Reserved.
 *
 * For example, for a DIMM made of Samsung's K4B1G0846C-ZCF7 1Gb
 * (16M * 8 bit * 8 bank) parts, the column address width = 10; so with
 * 10b of col, 3b of bus, 3b of bank, row_lsb = 16.
 * Therefore, row = mem_adr[29:16] and mem_msb = 4.
 *
 * Note also that addresses greater than the max defined space (pbank_msb)
 * are also treated as NXM accesses.
 */
union cvmx_lmcx_nxm {
	uint64_t u64;
	struct cvmx_lmcx_nxm_s {
		uint64_t reserved_40_63:24;
		uint64_t mem_msb_d3_r1:4;
		uint64_t mem_msb_d3_r0:4;
		uint64_t mem_msb_d2_r1:4;
		uint64_t mem_msb_d2_r0:4;
		uint64_t mem_msb_d1_r1:4;
		uint64_t mem_msb_d1_r0:4;
		uint64_t mem_msb_d0_r1:4;
		uint64_t mem_msb_d0_r0:4;
		uint64_t cs_mask:8;
	} s;
	struct cvmx_lmcx_nxm_cn52xx {
		uint64_t reserved_8_63:56;
		uint64_t cs_mask:8;
	} cn52xx;
	struct cvmx_lmcx_nxm_cn52xx cn56xx;
	struct cvmx_lmcx_nxm_cn52xx cn58xx;
	struct cvmx_lmcx_nxm_s cn61xx;
	struct cvmx_lmcx_nxm_s cn63xx;
	struct cvmx_lmcx_nxm_s cn63xxp1;
	struct cvmx_lmcx_nxm_s cn66xx;
	struct cvmx_lmcx_nxm_s cn68xx;
	struct cvmx_lmcx_nxm_s cn68xxp1;
	struct cvmx_lmcx_nxm_cn70xx {
		uint64_t reserved_24_63:40;
		uint64_t mem_msb_d1_r1:4;
		uint64_t mem_msb_d1_r0:4;
		uint64_t mem_msb_d0_r1:4;
		uint64_t mem_msb_d0_r0:4;
		uint64_t reserved_4_7:4;
		uint64_t cs_mask:4;
	} cn70xx;
	struct cvmx_lmcx_nxm_cn70xx cn70xxp1;
	struct cvmx_lmcx_nxm_cn70xx cn73xx;
	struct cvmx_lmcx_nxm_cn70xx cn78xx;
	struct cvmx_lmcx_nxm_cn70xx cn78xxp1;
	struct cvmx_lmcx_nxm_s cnf71xx;
	struct cvmx_lmcx_nxm_cn70xx cnf75xx;
};
/**
 * cvmx_lmc#_nxm_fadr
 *
 * This register captures only the first transaction with a NXM error while
 * an interrupt is pending, and only captures a subsequent event once the
 * interrupt is cleared by writing a one to LMC()_INT[NXM_ERR]. It captures
 * the actual L2C-LMC address provided to the LMC that caused the NXM error.
 * A read or write NXM error is captured only if enabled using the NXM
 * event enables.
 */
union cvmx_lmcx_nxm_fadr {
	uint64_t u64;
	struct cvmx_lmcx_nxm_fadr_s {
		uint64_t reserved_40_63:24;
		uint64_t nxm_faddr_ext:1;
		uint64_t nxm_src:1;
		uint64_t nxm_type:1;
		uint64_t nxm_faddr:37;
	} s;
	struct cvmx_lmcx_nxm_fadr_cn70xx {
		uint64_t reserved_39_63:25;
		uint64_t nxm_src:1;
		uint64_t nxm_type:1;
		uint64_t nxm_faddr:37;
	} cn70xx;
	struct cvmx_lmcx_nxm_fadr_cn70xx cn70xxp1;
	struct cvmx_lmcx_nxm_fadr_s cn73xx;
	struct cvmx_lmcx_nxm_fadr_s cn78xx;
	struct cvmx_lmcx_nxm_fadr_s cn78xxp1;
	struct cvmx_lmcx_nxm_fadr_s cnf75xx;
};
/**
 * cvmx_lmc#_ops_cnt
 *
 * LMC_OPS_CNT = Performance Counters
 *
 */
union cvmx_lmcx_ops_cnt {
	uint64_t u64;
	struct cvmx_lmcx_ops_cnt_s {
		uint64_t opscnt:64;
	} s;
	struct cvmx_lmcx_ops_cnt_s cn61xx;
	struct cvmx_lmcx_ops_cnt_s cn63xx;
	struct cvmx_lmcx_ops_cnt_s cn63xxp1;
	struct cvmx_lmcx_ops_cnt_s cn66xx;
	struct cvmx_lmcx_ops_cnt_s cn68xx;
	struct cvmx_lmcx_ops_cnt_s cn68xxp1;
	struct cvmx_lmcx_ops_cnt_s cn70xx;
	struct cvmx_lmcx_ops_cnt_s cn70xxp1;
	struct cvmx_lmcx_ops_cnt_s cn73xx;
	struct cvmx_lmcx_ops_cnt_s cn78xx;
	struct cvmx_lmcx_ops_cnt_s cn78xxp1;
	struct cvmx_lmcx_ops_cnt_s cnf71xx;
	struct cvmx_lmcx_ops_cnt_s cnf75xx;
};
/**
 * cvmx_lmc#_ops_cnt_hi
 *
 * LMC_OPS_CNT_HI = Performance Counters
 *
 */
union cvmx_lmcx_ops_cnt_hi {
	uint64_t u64;
	struct cvmx_lmcx_ops_cnt_hi_s {
		uint64_t reserved_32_63:32;
		uint64_t opscnt_hi:32;
	} s;
	struct cvmx_lmcx_ops_cnt_hi_s cn30xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn31xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn38xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2;
	struct cvmx_lmcx_ops_cnt_hi_s cn50xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn52xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1;
	struct cvmx_lmcx_ops_cnt_hi_s cn56xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1;
	struct cvmx_lmcx_ops_cnt_hi_s cn58xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1;
};
/**
 * cvmx_lmc#_ops_cnt_lo
 *
 * LMC_OPS_CNT_LO = Performance Counters
 *
 */
union cvmx_lmcx_ops_cnt_lo {
	uint64_t u64;
	struct cvmx_lmcx_ops_cnt_lo_s {
		uint64_t reserved_32_63:32;
		uint64_t opscnt_lo:32;
	} s;
	struct cvmx_lmcx_ops_cnt_lo_s cn30xx;
	struct cvmx_lmcx_ops_cnt_lo_s cn31xx;
	struct cvmx_lmcx_ops_cnt_lo_s cn38xx;
	struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2;
	struct cvmx_lmcx_ops_cnt_lo_s cn50xx;
	struct cvmx_lmcx_ops_cnt_lo_s cn52xx;
	struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1;
	struct cvmx_lmcx_ops_cnt_lo_s cn56xx;
	struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1;
	struct cvmx_lmcx_ops_cnt_lo_s cn58xx;
	struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1;
};
/**
 * cvmx_lmc#_phy_ctl
 *
 * LMC_PHY_CTL = LMC PHY Control
 *
 */
union cvmx_lmcx_phy_ctl {
	uint64_t u64;
	struct cvmx_lmcx_phy_ctl_s {
		uint64_t reserved_61_63:3;
		uint64_t dsk_dbg_load_dis:1;
		uint64_t dsk_dbg_overwrt_ena:1;
		uint64_t dsk_dbg_wr_mode:1;
		uint64_t data_rate_loopback:1;
		uint64_t dq_shallow_loopback:1;
		uint64_t dm_disable:1;
		uint64_t c1_sel:2;
		uint64_t c0_sel:2;
		uint64_t phy_reset:1;
		uint64_t dsk_dbg_rd_complete:1;
		uint64_t dsk_dbg_rd_data:10;
		uint64_t dsk_dbg_rd_start:1;
		uint64_t dsk_dbg_clk_scaler:2;
		uint64_t dsk_dbg_offset:2;
		uint64_t dsk_dbg_num_bits_sel:1;
		uint64_t dsk_dbg_byte_sel:4;
		uint64_t dsk_dbg_bit_sel:4;
		uint64_t dbi_mode_ena:1;
		uint64_t ddr_error_n_ena:1;
		uint64_t ref_pin_on:1;
		uint64_t dac_on:1;
		uint64_t int_pad_loopback_ena:1;
		uint64_t int_phy_loopback_ena:1;
		uint64_t phy_dsk_reset:1;
		uint64_t phy_dsk_byp:1;
		uint64_t phy_pwr_save_disable:1;
		uint64_t dac_disable:1;
		uint64_t rx_always_on:1;
		uint64_t lv_mode:1;
		uint64_t ck_tune1:1;
		uint64_t ck_dlyout1:4;
		uint64_t ck_tune0:1;
		uint64_t ck_dlyout0:4;
		uint64_t loopback:1;
		uint64_t loopback_pos:1;
		uint64_t ts_stagger:1;
	} s;
	struct cvmx_lmcx_phy_ctl_cn61xx {
		uint64_t reserved_15_63:49;
		uint64_t rx_always_on:1;
		uint64_t lv_mode:1;
		uint64_t ck_tune1:1;
		uint64_t ck_dlyout1:4;
		uint64_t ck_tune0:1;
		uint64_t ck_dlyout0:4;
		uint64_t loopback:1;
		uint64_t loopback_pos:1;
		uint64_t ts_stagger:1;
	} cn61xx;
	struct cvmx_lmcx_phy_ctl_cn61xx cn63xx;
	struct cvmx_lmcx_phy_ctl_cn63xxp1 {
		uint64_t reserved_14_63:50;
		uint64_t lv_mode:1;
		uint64_t ck_tune1:1;
		uint64_t ck_dlyout1:4;
		uint64_t ck_tune0:1;
		uint64_t ck_dlyout0:4;
		uint64_t loopback:1;
		uint64_t loopback_pos:1;
		uint64_t ts_stagger:1;
	} cn63xxp1;
	struct cvmx_lmcx_phy_ctl_cn61xx cn66xx;
	struct cvmx_lmcx_phy_ctl_cn61xx cn68xx;
	struct cvmx_lmcx_phy_ctl_cn61xx cn68xxp1;
	struct cvmx_lmcx_phy_ctl_cn70xx {
		uint64_t reserved_51_63:13;
		uint64_t phy_reset:1;
		uint64_t dsk_dbg_rd_complete:1;
		uint64_t dsk_dbg_rd_data:10;
		uint64_t dsk_dbg_rd_start:1;
		uint64_t dsk_dbg_clk_scaler:2;
		uint64_t dsk_dbg_offset:2;
		uint64_t dsk_dbg_num_bits_sel:1;
		uint64_t dsk_dbg_byte_sel:4;
		uint64_t dsk_dbg_bit_sel:4;
		uint64_t dbi_mode_ena:1;
		uint64_t ddr_error_n_ena:1;
		uint64_t ref_pin_on:1;
		uint64_t dac_on:1;
		uint64_t int_pad_loopback_ena:1;
		uint64_t int_phy_loopback_ena:1;
		uint64_t phy_dsk_reset:1;
		uint64_t phy_dsk_byp:1;
		uint64_t phy_pwr_save_disable:1;
		uint64_t dac_disable:1;
		uint64_t rx_always_on:1;
		uint64_t lv_mode:1;
		uint64_t ck_tune1:1;
		uint64_t ck_dlyout1:4;
		uint64_t ck_tune0:1;
		uint64_t ck_dlyout0:4;
		uint64_t loopback:1;
		uint64_t loopback_pos:1;
		uint64_t ts_stagger:1;
	} cn70xx;
	struct cvmx_lmcx_phy_ctl_cn70xx cn70xxp1;
	struct cvmx_lmcx_phy_ctl_cn73xx {
		uint64_t reserved_58_63:6;
		uint64_t data_rate_loopback:1;
		uint64_t dq_shallow_loopback:1;
		uint64_t dm_disable:1;
		uint64_t c1_sel:2;
		uint64_t c0_sel:2;
		uint64_t phy_reset:1;
		uint64_t dsk_dbg_rd_complete:1;
		uint64_t dsk_dbg_rd_data:10;
		uint64_t dsk_dbg_rd_start:1;
		uint64_t dsk_dbg_clk_scaler:2;
		uint64_t dsk_dbg_offset:2;
		uint64_t dsk_dbg_num_bits_sel:1;
		uint64_t dsk_dbg_byte_sel:4;
		uint64_t dsk_dbg_bit_sel:4;
		uint64_t dbi_mode_ena:1;
		uint64_t ddr_error_n_ena:1;
		uint64_t ref_pin_on:1;
		uint64_t dac_on:1;
		uint64_t int_pad_loopback_ena:1;
		uint64_t int_phy_loopback_ena:1;
		uint64_t phy_dsk_reset:1;
		uint64_t phy_dsk_byp:1;
		uint64_t phy_pwr_save_disable:1;
		uint64_t dac_disable:1;
		uint64_t rx_always_on:1;
		uint64_t lv_mode:1;
		uint64_t ck_tune1:1;
		uint64_t ck_dlyout1:4;
		uint64_t ck_tune0:1;
		uint64_t ck_dlyout0:4;
		uint64_t loopback:1;
		uint64_t loopback_pos:1;
		uint64_t ts_stagger:1;
	} cn73xx;
	struct cvmx_lmcx_phy_ctl_s cn78xx;
	struct cvmx_lmcx_phy_ctl_s cn78xxp1;
	struct cvmx_lmcx_phy_ctl_cn61xx cnf71xx;
	struct cvmx_lmcx_phy_ctl_s cnf75xx;
};
/**
 * cvmx_lmc#_phy_ctl2
 */
union cvmx_lmcx_phy_ctl2 {
	uint64_t u64;
	struct cvmx_lmcx_phy_ctl2_s {
		uint64_t reserved_27_63:37;
		uint64_t dqs8_dsk_adj:3;
		uint64_t dqs7_dsk_adj:3;
		uint64_t dqs6_dsk_adj:3;
		uint64_t dqs5_dsk_adj:3;
		uint64_t dqs4_dsk_adj:3;
		uint64_t dqs3_dsk_adj:3;
		uint64_t dqs2_dsk_adj:3;
		uint64_t dqs1_dsk_adj:3;
		uint64_t dqs0_dsk_adj:3;
	} s;
	struct cvmx_lmcx_phy_ctl2_s cn78xx;
	struct cvmx_lmcx_phy_ctl2_s cnf75xx;
};
/**
 * cvmx_lmc#_pll_bwctl
 *
 * LMC_PLL_BWCTL = DDR PLL Bandwidth Control Register
 *
 */
union cvmx_lmcx_pll_bwctl {
	uint64_t u64;
	struct cvmx_lmcx_pll_bwctl_s {
		uint64_t reserved_5_63:59;
		uint64_t bwupd:1;
		uint64_t bwctl:4;
	} s;
	struct cvmx_lmcx_pll_bwctl_s cn30xx;
	struct cvmx_lmcx_pll_bwctl_s cn31xx;
	struct cvmx_lmcx_pll_bwctl_s cn38xx;
	struct cvmx_lmcx_pll_bwctl_s cn38xxp2;
};
/**
 * cvmx_lmc#_pll_ctl
 *
 * LMC_PLL_CTL = LMC pll control
 *
 * This CSR is only relevant for LMC0. LMC1_PLL_CTL is not used.
 *
 * Exactly one of EN2, EN4, EN6, EN8, EN12, EN16 must be set.
 *
 * The resultant DDR_CK frequency is the DDR2_REF_CLK
 * frequency multiplied by:
 *
 * (CLKF + 1) / ((CLKR + 1) * EN(2,4,6,8,12,16))
 *
 * The PLL frequency, which is:
 *
 * (DDR2_REF_CLK freq) * ((CLKF + 1) / (CLKR + 1))
 *
 * must reside between 1.2 and 2.5 GHz. A faster PLL frequency is
 * desirable if there is a choice.
 */
union cvmx_lmcx_pll_ctl {
	uint64_t u64;
	struct cvmx_lmcx_pll_ctl_s {
		uint64_t reserved_30_63:34;
		uint64_t bypass:1;
		uint64_t fasten_n:1;
		uint64_t div_reset:1;
		uint64_t reset_n:1;
		uint64_t clkf:12;
		uint64_t clkr:6;
		uint64_t reserved_6_7:2;
		uint64_t en16:1;
		uint64_t en12:1;
		uint64_t en8:1;
		uint64_t en6:1;
		uint64_t en4:1;
		uint64_t en2:1;
	} s;
	struct cvmx_lmcx_pll_ctl_cn50xx {
		uint64_t reserved_29_63:35;
		uint64_t fasten_n:1;
		uint64_t div_reset:1;
		uint64_t reset_n:1;
		uint64_t clkf:12;
		uint64_t clkr:6;
		uint64_t reserved_6_7:2;
		uint64_t en16:1;
		uint64_t en12:1;
		uint64_t en8:1;
		uint64_t en6:1;
		uint64_t en4:1;
		uint64_t en2:1;
	} cn50xx;
	struct cvmx_lmcx_pll_ctl_s cn52xx;
	struct cvmx_lmcx_pll_ctl_s cn52xxp1;
	struct cvmx_lmcx_pll_ctl_cn50xx cn56xx;
	struct cvmx_lmcx_pll_ctl_cn56xxp1 {
		uint64_t reserved_28_63:36;
		uint64_t div_reset:1;
		uint64_t reset_n:1;
		uint64_t clkf:12;
		uint64_t clkr:6;
		uint64_t reserved_6_7:2;
		uint64_t en16:1;
		uint64_t en12:1;
		uint64_t en8:1;
		uint64_t en6:1;
		uint64_t en4:1;
		uint64_t en2:1;
	} cn56xxp1;
	struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx;
	struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1;
};
/**
 * cvmx_lmc#_pll_status
 *
 * LMC_PLL_STATUS = LMC pll status
 *
 */
union cvmx_lmcx_pll_status {
	uint64_t u64;
	struct cvmx_lmcx_pll_status_s {
		uint64_t reserved_32_63:32;
		uint64_t ddr__nctl:5;
		uint64_t ddr__pctl:5;
		uint64_t reserved_2_21:20;
		uint64_t rfslip:1;
		uint64_t fbslip:1;
	} s;
	struct cvmx_lmcx_pll_status_s cn50xx;
	struct cvmx_lmcx_pll_status_s cn52xx;
	struct cvmx_lmcx_pll_status_s cn52xxp1;
	struct cvmx_lmcx_pll_status_s cn56xx;
	struct cvmx_lmcx_pll_status_s cn56xxp1;
	struct cvmx_lmcx_pll_status_s cn58xx;
	struct cvmx_lmcx_pll_status_cn58xxp1 {
		uint64_t reserved_2_63:62;
		uint64_t rfslip:1;
		uint64_t fbslip:1;
	} cn58xxp1;
};
/**
 * cvmx_lmc#_ppr_ctl
 *
 * This register contains programmable timing and control parameters used
 * when running the post package repair sequence. The timing fields
 * PPR_CTL[TPGMPST], PPR_CTL[TPGM_EXIT] and PPR_CTL[TPGM] need to be set as
 * to satisfy the minimum values mentioned in the JEDEC DDR4 spec before
 * running the PPR sequence. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] to run
 * the PPR sequence.
 *
 * Running hard PPR may require LMC to issue security key as four consecutive
 * MR0 commands, each with a unique address field A[17:0]. Set the security
 * key in the general purpose CSRs as follows:
 *
 * _ Security key 0 = LMC()_GENERAL_PURPOSE0[DATA]<17:0>.
 * _ Security key 1 = LMC()_GENERAL_PURPOSE0[DATA]<35:18>.
 * _ Security key 2 = LMC()_GENERAL_PURPOSE1[DATA]<17:0>.
 * _ Security key 3 = LMC()_GENERAL_PURPOSE1[DATA]<35:18>.
 */
/*
 * NOTE(review): this union looks truncated in this copy of the header: the
 * leading "uint64_t u64;" member, the timing bit-fields referenced by the
 * comment above (TPGMPST and TPGM, between SKIP_ISSUE_SECURITY and below
 * TPGM_EXIT), and the closing braces of the s/cn73xx structs and of the
 * union itself are missing. The exact field widths cannot be inferred from
 * what is visible here — restore the full definition from the canonical
 * cvmx-lmcx-defs.h / the CN7xxx HRM rather than guessing.
 */
union cvmx_lmcx_ppr_ctl {
	struct cvmx_lmcx_ppr_ctl_s {
		uint64_t reserved_27_63:37;
		uint64_t lrank_sel:3;
		uint64_t skip_issue_security:1;
		/* NOTE(review): TPGMPST/TPGM fields missing here — see above. */
		uint64_t tpgm_exit:5;
	struct cvmx_lmcx_ppr_ctl_cn73xx {
		uint64_t reserved_24_63:40;
		uint64_t skip_issue_security:1;
		/* NOTE(review): TPGMPST/TPGM fields missing here — see above. */
		uint64_t tpgm_exit:5;
	struct cvmx_lmcx_ppr_ctl_s cn78xx;
	struct cvmx_lmcx_ppr_ctl_cn73xx cnf75xx;
/**
 * cvmx_lmc#_read_level_ctl
 *
 * The HW writes and reads the cache block selected by ROW, COL, BNK and
 * the rank as part of a read-leveling sequence for a rank.
 * A cache block write is 16 72-bit words. PATTERN selects the write value.
 * For the first 8 words, the write value is the bit PATTERN<i> duplicated
 * into a 72-bit vector. The write value of the last 8 words is the inverse
 * of the write value of the first 8 words. See LMC*_READ_LEVEL_RANK*.
 */
union cvmx_lmcx_read_level_ctl {
	uint64_t u64;
	struct cvmx_lmcx_read_level_ctl_s {
		uint64_t reserved_44_63:20;
		uint64_t rankmask:4;
		uint64_t pattern:8;
		uint64_t row:16;
		uint64_t col:12;
		uint64_t reserved_3_3:1;
		uint64_t bnk:3;
	} s;
	struct cvmx_lmcx_read_level_ctl_s cn52xx;
	struct cvmx_lmcx_read_level_ctl_s cn52xxp1;
	struct cvmx_lmcx_read_level_ctl_s cn56xx;
	struct cvmx_lmcx_read_level_ctl_s cn56xxp1;
};
/**
 * cvmx_lmc#_read_level_dbg
 *
 * A given read of LMC*_READ_LEVEL_DBG returns the read-leveling pass/fail
 * results for all possible delay settings (i.e. the BITMASK) for only one
 * byte in the last rank that the HW read-leveled.
 * LMC*_READ_LEVEL_DBG[BYTE] selects the particular byte.
 * To get these pass/fail results for another different rank, you must run
 * the hardware read-leveling again. For example, it is possible to get the
 * BITMASK results for every byte of every rank if you run read-leveling
 * separately for each rank, probing LMC*_READ_LEVEL_DBG between each
 * read-leveling.
 */
union cvmx_lmcx_read_level_dbg {
	uint64_t u64;
	struct cvmx_lmcx_read_level_dbg_s {
		uint64_t reserved_32_63:32;
		uint64_t bitmask:16;
		uint64_t reserved_4_15:12;
		uint64_t byte:4;
	} s;
	struct cvmx_lmcx_read_level_dbg_s cn52xx;
	struct cvmx_lmcx_read_level_dbg_s cn52xxp1;
	struct cvmx_lmcx_read_level_dbg_s cn56xx;
	struct cvmx_lmcx_read_level_dbg_s cn56xxp1;
};
/**
 * cvmx_lmc#_read_level_rank#
 *
 * This is four CSRs per LMC, one per each rank.
 * Each CSR is written by HW during a read-leveling sequence for the rank.
 * (HW sets STATUS==3 after HW read-leveling completes for the rank.)
 * Each CSR may also be written by SW, but not while a read-leveling sequence
 * is in progress. (HW sets STATUS==1 after a CSR write.)
 * Deskew setting is measured in units of 1/4 DCLK, so the above BYTE*
 * values can range over 4 DCLKs.
 * SW initiates a HW read-leveling sequence by programming
 * LMC*_READ_LEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
 * See LMC*_READ_LEVEL_CTL.
 */
union cvmx_lmcx_read_level_rankx {
	uint64_t u64;
	struct cvmx_lmcx_read_level_rankx_s {
		uint64_t reserved_38_63:26;
		uint64_t status:2;
		uint64_t byte8:4;
		uint64_t byte7:4;
		uint64_t byte6:4;
		uint64_t byte5:4;
		uint64_t byte4:4;
		uint64_t byte3:4;
		uint64_t byte2:4;
		uint64_t byte1:4;
		uint64_t byte0:4;
	} s;
	struct cvmx_lmcx_read_level_rankx_s cn52xx;
	struct cvmx_lmcx_read_level_rankx_s cn52xxp1;
	struct cvmx_lmcx_read_level_rankx_s cn56xx;
	struct cvmx_lmcx_read_level_rankx_s cn56xxp1;
};
/**
 * cvmx_lmc#_ref_status
 *
 * This register contains the status of the refresh pending counter.
 *
 */
union cvmx_lmcx_ref_status {
	uint64_t u64;
	struct cvmx_lmcx_ref_status_s {
		uint64_t reserved_4_63:60;
		uint64_t ref_pend_max_clr:1;
		uint64_t ref_count:3;
	} s;
	struct cvmx_lmcx_ref_status_s cn73xx;
	struct cvmx_lmcx_ref_status_s cn78xx;
	struct cvmx_lmcx_ref_status_s cnf75xx;
};
/**
 * cvmx_lmc#_reset_ctl
 *
 * Specify the RSL base addresses for the block.
 *
 */
union cvmx_lmcx_reset_ctl {
	uint64_t u64;
	struct cvmx_lmcx_reset_ctl_s {
		uint64_t reserved_4_63:60;
		uint64_t ddr3psv:1;
		uint64_t ddr3psoft:1;
		uint64_t ddr3pwarm:1;
		uint64_t ddr3rst:1;
	} s;
	struct cvmx_lmcx_reset_ctl_s cn61xx;
	struct cvmx_lmcx_reset_ctl_s cn63xx;
	struct cvmx_lmcx_reset_ctl_s cn63xxp1;
	struct cvmx_lmcx_reset_ctl_s cn66xx;
	struct cvmx_lmcx_reset_ctl_s cn68xx;
	struct cvmx_lmcx_reset_ctl_s cn68xxp1;
	struct cvmx_lmcx_reset_ctl_s cn70xx;
	struct cvmx_lmcx_reset_ctl_s cn70xxp1;
	struct cvmx_lmcx_reset_ctl_s cn73xx;
	struct cvmx_lmcx_reset_ctl_s cn78xx;
	struct cvmx_lmcx_reset_ctl_s cn78xxp1;
	struct cvmx_lmcx_reset_ctl_s cnf71xx;
	struct cvmx_lmcx_reset_ctl_s cnf75xx;
};
/**
 * cvmx_lmc#_retry_config
 *
 * This register configures automatic retry operation.
 *
 */
union cvmx_lmcx_retry_config {
	uint64_t u64;
	struct cvmx_lmcx_retry_config_s {
		uint64_t reserved_56_63:8;
		uint64_t max_errors:24;
		uint64_t reserved_13_31:19;
		uint64_t error_continue:1;
		uint64_t reserved_9_11:3;
		uint64_t auto_error_continue:1;
		uint64_t reserved_5_7:3;
		uint64_t pulse_count_auto_clr:1;
		uint64_t reserved_1_3:3;
		uint64_t retry_enable:1;
	} s;
	struct cvmx_lmcx_retry_config_s cn73xx;
	struct cvmx_lmcx_retry_config_s cn78xx;
	struct cvmx_lmcx_retry_config_s cnf75xx;
};
/**
 * cvmx_lmc#_retry_status
 *
 * This register provides status on automatic retry operation.
 *
 */
union cvmx_lmcx_retry_status {
	uint64_t u64;
	struct cvmx_lmcx_retry_status_s {
		uint64_t clear_error_count:1;
		uint64_t clear_error_pulse_count:1;
		uint64_t reserved_57_61:5;
		uint64_t error_pulse_count_valid:1;
		uint64_t error_pulse_count_sat:1;
		uint64_t reserved_52_54:3;
		uint64_t error_pulse_count:4;
		uint64_t reserved_45_47:3;
		uint64_t error_sequence:5;
		uint64_t reserved_33_39:7;
		uint64_t error_type:1;
		uint64_t reserved_24_31:8;
		uint64_t error_count:24;
	} s;
	struct cvmx_lmcx_retry_status_s cn73xx;
	struct cvmx_lmcx_retry_status_s cn78xx;
	struct cvmx_lmcx_retry_status_s cnf75xx;
};
/**
 * cvmx_lmc#_rlevel_ctl
 */
union cvmx_lmcx_rlevel_ctl {
	uint64_t u64;
	struct cvmx_lmcx_rlevel_ctl_s {
		uint64_t reserved_33_63:31;
		uint64_t tccd_sel:1;
		uint64_t pattern:8;
		uint64_t reserved_22_23:2;
		uint64_t delay_unload_3:1;
		uint64_t delay_unload_2:1;
		uint64_t delay_unload_1:1;
		uint64_t delay_unload_0:1;
		uint64_t bitmask:8;
		uint64_t or_dis:1;
		uint64_t offset_en:1;
		uint64_t offset:4;
		uint64_t byte:4;
	} s;
	struct cvmx_lmcx_rlevel_ctl_cn61xx {
		uint64_t reserved_22_63:42;
		uint64_t delay_unload_3:1;
		uint64_t delay_unload_2:1;
		uint64_t delay_unload_1:1;
		uint64_t delay_unload_0:1;
		uint64_t bitmask:8;
		uint64_t or_dis:1;
		uint64_t offset_en:1;
		uint64_t offset:4;
		uint64_t byte:4;
	} cn61xx;
	struct cvmx_lmcx_rlevel_ctl_cn61xx cn63xx;
	struct cvmx_lmcx_rlevel_ctl_cn63xxp1 {
		uint64_t reserved_9_63:55;
		uint64_t offset_en:1;
		uint64_t offset:4;
		uint64_t byte:4;
	} cn63xxp1;
	struct cvmx_lmcx_rlevel_ctl_cn61xx cn66xx;
	struct cvmx_lmcx_rlevel_ctl_cn61xx cn68xx;
	struct cvmx_lmcx_rlevel_ctl_cn61xx cn68xxp1;
	struct cvmx_lmcx_rlevel_ctl_cn70xx {
		uint64_t reserved_32_63:32;
		uint64_t pattern:8;
		uint64_t reserved_22_23:2;
		uint64_t delay_unload_3:1;
		uint64_t delay_unload_2:1;
		uint64_t delay_unload_1:1;
		uint64_t delay_unload_0:1;
		uint64_t bitmask:8;
		uint64_t or_dis:1;
		uint64_t offset_en:1;
		uint64_t offset:4;
		uint64_t byte:4;
	} cn70xx;
	struct cvmx_lmcx_rlevel_ctl_cn70xx cn70xxp1;
	struct cvmx_lmcx_rlevel_ctl_cn70xx cn73xx;
	struct cvmx_lmcx_rlevel_ctl_s cn78xx;
	struct cvmx_lmcx_rlevel_ctl_s cn78xxp1;
	struct cvmx_lmcx_rlevel_ctl_cn61xx cnf71xx;
	struct cvmx_lmcx_rlevel_ctl_s cnf75xx;
};
/**
 * cvmx_lmc#_rlevel_dbg
 *
 * A given read of LMC()_RLEVEL_DBG returns the read leveling pass/fail
 * results for all possible delay settings (i.e. the BITMASK) for only
 * one byte in the last rank that the hardware ran read leveling on.
 * LMC()_RLEVEL_CTL[BYTE] selects the particular byte. To get these
 * pass/fail results for a different rank, you must run the hardware
 * read leveling again. For example, it is possible to get the [BITMASK]
 * results for every byte of every rank if you run read leveling separately
 * for each rank, probing LMC()_RLEVEL_DBG between each read- leveling.
 */
union cvmx_lmcx_rlevel_dbg {
	uint64_t u64;
	struct cvmx_lmcx_rlevel_dbg_s {
		uint64_t bitmask:64;
	} s;
	struct cvmx_lmcx_rlevel_dbg_s cn61xx;
	struct cvmx_lmcx_rlevel_dbg_s cn63xx;
	struct cvmx_lmcx_rlevel_dbg_s cn63xxp1;
	struct cvmx_lmcx_rlevel_dbg_s cn66xx;
	struct cvmx_lmcx_rlevel_dbg_s cn68xx;
	struct cvmx_lmcx_rlevel_dbg_s cn68xxp1;
	struct cvmx_lmcx_rlevel_dbg_s cn70xx;
	struct cvmx_lmcx_rlevel_dbg_s cn70xxp1;
	struct cvmx_lmcx_rlevel_dbg_s cn73xx;
	struct cvmx_lmcx_rlevel_dbg_s cn78xx;
	struct cvmx_lmcx_rlevel_dbg_s cn78xxp1;
	struct cvmx_lmcx_rlevel_dbg_s cnf71xx;
	struct cvmx_lmcx_rlevel_dbg_s cnf75xx;
};
/**
 * cvmx_lmc#_rlevel_rank#
 *
 * Four of these CSRs exist per LMC, one for each rank. Read level setting
 * is measured in units of 1/4 CK, so the BYTEn values can range over 16 CK
 * cycles. Each CSR is written by hardware during a read leveling sequence
 * for the rank. (Hardware sets [STATUS] to 3 after hardware read leveling
 * completes for the rank.)
 *
 * If hardware is unable to find a match per LMC()_RLEVEL_CTL[OFFSET_EN] and
 * LMC()_RLEVEL_CTL[OFFSET], then hardware sets
 * LMC()_RLEVEL_RANK()[BYTEn<5:0>] to 0x0.
 *
 * Each CSR may also be written by software, but not while a read leveling
 * sequence is in progress. (Hardware sets [STATUS] to 1 after a CSR write.)
 * Software initiates a hardware read leveling sequence by programming
 * LMC()_RLEVEL_CTL and writing [INIT_START] = 1 with [SEQ_SEL]=1.
 * See LMC()_RLEVEL_CTL.
 *
 * LMC()_RLEVEL_RANKi values for ranks i without attached DRAM should be set
 * such that they do not increase the range of possible BYTE values for any
 * byte lane. The easiest way to do this is to set LMC()_RLEVEL_RANKi =
 * LMC()_RLEVEL_RANKj, where j is some rank with attached DRAM whose
 * LMC()_RLEVEL_RANKj is already fully initialized.
 */
union cvmx_lmcx_rlevel_rankx {
	uint64_t u64;
	struct cvmx_lmcx_rlevel_rankx_s {
		uint64_t reserved_56_63:8;
		uint64_t status:2;
		uint64_t byte8:6;
		uint64_t byte7:6;
		uint64_t byte6:6;
		uint64_t byte5:6;
		uint64_t byte4:6;
		uint64_t byte3:6;
		uint64_t byte2:6;
		uint64_t byte1:6;
		uint64_t byte0:6;
	} s;
	struct cvmx_lmcx_rlevel_rankx_s cn61xx;
	struct cvmx_lmcx_rlevel_rankx_s cn63xx;
	struct cvmx_lmcx_rlevel_rankx_s cn63xxp1;
	struct cvmx_lmcx_rlevel_rankx_s cn66xx;
	struct cvmx_lmcx_rlevel_rankx_s cn68xx;
	struct cvmx_lmcx_rlevel_rankx_s cn68xxp1;
	struct cvmx_lmcx_rlevel_rankx_s cn70xx;
	struct cvmx_lmcx_rlevel_rankx_s cn70xxp1;
	struct cvmx_lmcx_rlevel_rankx_s cn73xx;
	struct cvmx_lmcx_rlevel_rankx_s cn78xx;
	struct cvmx_lmcx_rlevel_rankx_s cn78xxp1;
	struct cvmx_lmcx_rlevel_rankx_s cnf71xx;
	struct cvmx_lmcx_rlevel_rankx_s cnf75xx;
};
/**
 * cvmx_lmc#_rodt_comp_ctl
 *
 * LMC_RODT_COMP_CTL = LMC Compensation control
 *
 */
union cvmx_lmcx_rodt_comp_ctl {
	uint64_t u64;
	struct cvmx_lmcx_rodt_comp_ctl_s {
		uint64_t reserved_17_63:47;
		uint64_t enable:1;
		uint64_t reserved_12_15:4;
		uint64_t nctl:4;
		uint64_t reserved_5_7:3;
		uint64_t pctl:5;
	} s;
	struct cvmx_lmcx_rodt_comp_ctl_s cn50xx;
	struct cvmx_lmcx_rodt_comp_ctl_s cn52xx;
	struct cvmx_lmcx_rodt_comp_ctl_s cn52xxp1;
	struct cvmx_lmcx_rodt_comp_ctl_s cn56xx;
	struct cvmx_lmcx_rodt_comp_ctl_s cn56xxp1;
	struct cvmx_lmcx_rodt_comp_ctl_s cn58xx;
	struct cvmx_lmcx_rodt_comp_ctl_s cn58xxp1;
};
/**
 * cvmx_lmc#_rodt_ctl
 *
 * LMC_RODT_CTL = Obsolete LMC Read OnDieTermination control
 * See the description in LMC_WODT_CTL1. On Reads, Octeon only supports
 * turning on ODT's in the lower 2 DIMM's with the masks as below.
 *
 * When a given RANK in position N is selected, the RODT _HI and _LO masks
 * for that position are used.
 * Mask[3:0] is used for RODT control of the RANKs in positions 3, 2, 1,
 * and 0, respectively.
 * In 64b mode, DIMMs are assumed to be ordered in the following order:
 * position 3: [unused , DIMM1_RANK1_LO]
 * position 2: [unused , DIMM1_RANK0_LO]
 * position 1: [unused , DIMM0_RANK1_LO]
 * position 0: [unused , DIMM0_RANK0_LO]
 * In 128b mode, DIMMs are assumed to be ordered in the following order:
 * position 3: [DIMM3_RANK1_HI, DIMM1_RANK1_LO]
 * position 2: [DIMM3_RANK0_HI, DIMM1_RANK0_LO]
 * position 1: [DIMM2_RANK1_HI, DIMM0_RANK1_LO]
 * position 0: [DIMM2_RANK0_HI, DIMM0_RANK0_LO]
 */
union cvmx_lmcx_rodt_ctl {
	uint64_t u64;
	struct cvmx_lmcx_rodt_ctl_s {
		uint64_t reserved_32_63:32;
		uint64_t rodt_hi3:4;
		uint64_t rodt_hi2:4;
		uint64_t rodt_hi1:4;
		uint64_t rodt_hi0:4;
		uint64_t rodt_lo3:4;
		uint64_t rodt_lo2:4;
		uint64_t rodt_lo1:4;
		uint64_t rodt_lo0:4;
	} s;
	struct cvmx_lmcx_rodt_ctl_s cn30xx;
	struct cvmx_lmcx_rodt_ctl_s cn31xx;
	struct cvmx_lmcx_rodt_ctl_s cn38xx;
	struct cvmx_lmcx_rodt_ctl_s cn38xxp2;
	struct cvmx_lmcx_rodt_ctl_s cn50xx;
	struct cvmx_lmcx_rodt_ctl_s cn52xx;
	struct cvmx_lmcx_rodt_ctl_s cn52xxp1;
	struct cvmx_lmcx_rodt_ctl_s cn56xx;
	struct cvmx_lmcx_rodt_ctl_s cn56xxp1;
	struct cvmx_lmcx_rodt_ctl_s cn58xx;
	struct cvmx_lmcx_rodt_ctl_s cn58xxp1;
};
/**
 * cvmx_lmc#_rodt_mask
 *
 * System designers may desire to terminate DQ/DQS lines for higher frequency
 * DDR operations, especially on a multirank system. DDR3 DQ/DQS I/Os have
 * built-in termination resistors that can be turned on or off by the
 * controller, after meeting TAOND and TAOF timing requirements.
 *
 * Each rank has its own ODT pin that fans out to all the memory parts in
 * that DIMM. System designers may prefer different combinations of ODT ONs
 * for read operations into different ranks. CNXXXX supports full
 * programmability by way of the mask register below. Each rank position has
 * its own 4-bit programmable field. When the controller does a read to that
 * rank, it sets the 4 ODT pins to the MASK pins below. For example, when
 * doing a read from Rank0, a system designer may desire to terminate the
 * lines with the resistor on DIMM0/Rank1. The mask [RODT_D0_R0] would then
 * be 0x2.
 *
 * CNXXXX drives the appropriate mask values on the ODT pins by default.
 * If this feature is not required, write 0x0 in this register. Note that,
 * as per the JEDEC DDR3 specifications, the ODT pin for the rank that is
 * being read should always be 0x0. When a given RANK is selected, the RODT
 * mask for that rank is used. The resulting RODT mask is driven to the
 * DIMMs in the following manner:
 */
union cvmx_lmcx_rodt_mask {
	uint64_t u64;
	struct cvmx_lmcx_rodt_mask_s {
		uint64_t rodt_d3_r1:8;
		uint64_t rodt_d3_r0:8;
		uint64_t rodt_d2_r1:8;
		uint64_t rodt_d2_r0:8;
		uint64_t rodt_d1_r1:8;
		uint64_t rodt_d1_r0:8;
		uint64_t rodt_d0_r1:8;
		uint64_t rodt_d0_r0:8;
	} s;
	struct cvmx_lmcx_rodt_mask_s cn61xx;
	struct cvmx_lmcx_rodt_mask_s cn63xx;
	struct cvmx_lmcx_rodt_mask_s cn63xxp1;
	struct cvmx_lmcx_rodt_mask_s cn66xx;
	struct cvmx_lmcx_rodt_mask_s cn68xx;
	struct cvmx_lmcx_rodt_mask_s cn68xxp1;
	struct cvmx_lmcx_rodt_mask_cn70xx {
		uint64_t reserved_28_63:36;
		uint64_t rodt_d1_r1:4;
		uint64_t reserved_20_23:4;
		uint64_t rodt_d1_r0:4;
		uint64_t reserved_12_15:4;
		uint64_t rodt_d0_r1:4;
		uint64_t reserved_4_7:4;
		uint64_t rodt_d0_r0:4;
	} cn70xx;
	struct cvmx_lmcx_rodt_mask_cn70xx cn70xxp1;
	struct cvmx_lmcx_rodt_mask_cn70xx cn73xx;
	struct cvmx_lmcx_rodt_mask_cn70xx cn78xx;
	struct cvmx_lmcx_rodt_mask_cn70xx cn78xxp1;
	struct cvmx_lmcx_rodt_mask_s cnf71xx;
	struct cvmx_lmcx_rodt_mask_cn70xx cnf75xx;
};
/**
 * cvmx_lmc#_scramble_cfg0
 *
 * LMC_SCRAMBLE_CFG0 = LMC Scramble Config0
 *
 */
union cvmx_lmcx_scramble_cfg0 {
	uint64_t u64;
	struct cvmx_lmcx_scramble_cfg0_s {
		uint64_t key:64;
	} s;
	struct cvmx_lmcx_scramble_cfg0_s cn61xx;
	struct cvmx_lmcx_scramble_cfg0_s cn66xx;
	struct cvmx_lmcx_scramble_cfg0_s cn70xx;
	struct cvmx_lmcx_scramble_cfg0_s cn70xxp1;
	struct cvmx_lmcx_scramble_cfg0_s cn73xx;
	struct cvmx_lmcx_scramble_cfg0_s cn78xx;
	struct cvmx_lmcx_scramble_cfg0_s cn78xxp1;
	struct cvmx_lmcx_scramble_cfg0_s cnf71xx;
	struct cvmx_lmcx_scramble_cfg0_s cnf75xx;
};
/**
 * cvmx_lmc#_scramble_cfg1
 *
 * These registers set the aliasing that uses the lowest, legal chip select(s).
 *
 */
union cvmx_lmcx_scramble_cfg1 {
	uint64_t u64;
	struct cvmx_lmcx_scramble_cfg1_s {
		uint64_t key:64;
	} s;
	struct cvmx_lmcx_scramble_cfg1_s cn61xx;
	struct cvmx_lmcx_scramble_cfg1_s cn66xx;
	struct cvmx_lmcx_scramble_cfg1_s cn70xx;
	struct cvmx_lmcx_scramble_cfg1_s cn70xxp1;
	struct cvmx_lmcx_scramble_cfg1_s cn73xx;
	struct cvmx_lmcx_scramble_cfg1_s cn78xx;
	struct cvmx_lmcx_scramble_cfg1_s cn78xxp1;
	struct cvmx_lmcx_scramble_cfg1_s cnf71xx;
	struct cvmx_lmcx_scramble_cfg1_s cnf75xx;
};
/**
 * cvmx_lmc#_scramble_cfg2
 */
union cvmx_lmcx_scramble_cfg2 {
	uint64_t u64;
	struct cvmx_lmcx_scramble_cfg2_s {
		uint64_t key:64;
	} s;
	struct cvmx_lmcx_scramble_cfg2_s cn73xx;
	struct cvmx_lmcx_scramble_cfg2_s cn78xx;
	struct cvmx_lmcx_scramble_cfg2_s cnf75xx;
};
/**
 * cvmx_lmc#_scrambled_fadr
 *
 * LMC()_FADR captures the failing pre-scrambled address location (split into
 * DIMM, bunk, bank, etc). If scrambling is off, LMC()_FADR also captures the
 * failing physical location in the DRAM parts. LMC()_SCRAMBLED_FADR captures
 * the actual failing address location in the physical DRAM parts, i.e.:
 *
 * * If scrambling is on, LMC()_SCRAMBLED_FADR contains the failing physical
 * location in the DRAM parts (split into DIMM, bunk, bank, etc).
 *
 * * If scrambling is off, the pre-scramble and post-scramble addresses are
 * the same, and so the contents of LMC()_SCRAMBLED_FADR match the contents
 * of LMC()_FADR.
 *
 * This register only captures the first transaction with ECC errors. A DED
 * error can over-write this register with its failing addresses if the first
 * error was a SEC. If you write LMC()_CONFIG -> SEC_ERR/DED_ERR, it clears
 * the error bits and captures the next failing address. If [FDIMM] is 1,
 * that means the error is in the higher DIMM.
 */
union cvmx_lmcx_scrambled_fadr {
	uint64_t u64;
	struct cvmx_lmcx_scrambled_fadr_s {
		uint64_t reserved_43_63:21;
		uint64_t fcid:3;
		uint64_t fill_order:2;
		uint64_t reserved_14_37:24;
		uint64_t fcol:14;
	} s;
	struct cvmx_lmcx_scrambled_fadr_cn61xx {
		uint64_t reserved_36_63:28;
		uint64_t fdimm:2;
		uint64_t fbunk:1;
		uint64_t fbank:3;
		uint64_t frow:16;
		uint64_t fcol:14;
	} cn61xx;
	struct cvmx_lmcx_scrambled_fadr_cn61xx cn66xx;
	struct cvmx_lmcx_scrambled_fadr_cn70xx {
		uint64_t reserved_40_63:24;
		uint64_t fill_order:2;
		uint64_t fdimm:1;
		uint64_t fbunk:1;
		uint64_t fbank:4;
		uint64_t frow:18;
		uint64_t fcol:14;
	} cn70xx;
	struct cvmx_lmcx_scrambled_fadr_cn70xx cn70xxp1;
	struct cvmx_lmcx_scrambled_fadr_cn73xx {
		uint64_t reserved_43_63:21;
		uint64_t fcid:3;
		uint64_t fill_order:2;
		uint64_t fdimm:1;
		uint64_t fbunk:1;
		uint64_t fbank:4;
		uint64_t frow:18;
		uint64_t fcol:14;
	} cn73xx;
	struct cvmx_lmcx_scrambled_fadr_cn73xx cn78xx;
	struct cvmx_lmcx_scrambled_fadr_cn73xx cn78xxp1;
	struct cvmx_lmcx_scrambled_fadr_cn61xx cnf71xx;
	struct cvmx_lmcx_scrambled_fadr_cn73xx cnf75xx;
};
/*
 * cvmx_lmc#_seq_ctl: initiates LMC control sequences; visible fields are
 * [SEQ_COMPLETE] (done flag) and [INIT_START] (kick off a sequence).
 * NOTE(review): dropped lines here (the field between bits 1 and 5 —
 * presumably SEQ_SEL; verify — plus u64 member and closing braces).
 */
3813 * This register is used to initiate the various control sequences in the LMC.
3816 union cvmx_lmcx_seq_ctl {
3818 struct cvmx_lmcx_seq_ctl_s {
3819 uint64_t reserved_6_63:58;
3820 uint64_t seq_complete:1;
3822 uint64_t init_start:1;
3824 struct cvmx_lmcx_seq_ctl_s cn70xx;
3825 struct cvmx_lmcx_seq_ctl_s cn70xxp1;
3826 struct cvmx_lmcx_seq_ctl_s cn73xx;
3827 struct cvmx_lmcx_seq_ctl_s cn78xx;
3828 struct cvmx_lmcx_seq_ctl_s cn78xxp1;
3829 struct cvmx_lmcx_seq_ctl_s cnf75xx;
/*
 * cvmx_lmc#_slot_ctl0: same-rank CAS-to-CAS spacing minimums (r2r/r2w/
 * w2r/w2w, plus *_l same-bank-group variants on newer parts). Hardware
 * updates these to calculated minimums unless software wrote them first.
 * Field meaning depends on LMC()_CONFIG[DDR2T] (+4 vs +3 CK cycles).
 * Layouts: _s (50-bit, cn73xx+), cn61xx (24-bit legacy), cn70xx (48-bit).
 * NOTE(review): comment delimiters and the "u64"/"} s;"/"};" lines were
 * dropped from this chunk — restore from the full header before building.
 */
3833 * cvmx_lmc#_slot_ctl0
3835 * This register is an assortment of control fields needed by the memory
3836 * controller. If software has not previously written to this register
3837 * (since the last DRESET), hardware updates the fields in this register to
3838 * the minimum allowed value when any of LMC()_RLEVEL_RANK(),
3839 * LMC()_WLEVEL_RANK(), LMC()_CONTROL, and LMC()_MODEREG_PARAMS0 registers
3840 * change. Ideally, only read this register after LMC has been initialized and
3841 * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
3843 * The interpretation of the fields in this register depends on
3844 * LMC(0)_CONFIG[DDR2T]:
3846 * * If LMC()_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
3847 * between when the DRAM part registers CAS commands of the first and
3848 * second types from different cache blocks.
3850 * If LMC()_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
3851 * between when the DRAM part registers CAS commands of the first and second
3852 * types from different cache blocks.
3853 * FieldValue = 0 is always illegal in this case.
3854 * The hardware-calculated minimums for these fields are shown in
3855 * LMC(0)_SLOT_CTL0 Hardware-Calculated Minimums.
3857 union cvmx_lmcx_slot_ctl0 {
3859 struct cvmx_lmcx_slot_ctl0_s {
3860 uint64_t reserved_50_63:14;
3861 uint64_t w2r_l_init_ext:1;
3862 uint64_t w2r_init_ext:1;
3863 uint64_t w2w_l_init:6;
3864 uint64_t w2r_l_init:6;
3865 uint64_t r2w_l_init:6;
3866 uint64_t r2r_l_init:6;
3867 uint64_t w2w_init:6;
3868 uint64_t w2r_init:6;
3869 uint64_t r2w_init:6;
3870 uint64_t r2r_init:6;
3872 struct cvmx_lmcx_slot_ctl0_cn61xx {
3873 uint64_t reserved_24_63:40;
3874 uint64_t w2w_init:6;
3875 uint64_t w2r_init:6;
3876 uint64_t r2w_init:6;
3877 uint64_t r2r_init:6;
3879 struct cvmx_lmcx_slot_ctl0_cn61xx cn63xx;
3880 struct cvmx_lmcx_slot_ctl0_cn61xx cn63xxp1;
3881 struct cvmx_lmcx_slot_ctl0_cn61xx cn66xx;
3882 struct cvmx_lmcx_slot_ctl0_cn61xx cn68xx;
3883 struct cvmx_lmcx_slot_ctl0_cn61xx cn68xxp1;
3884 struct cvmx_lmcx_slot_ctl0_cn70xx {
3885 uint64_t reserved_48_63:16;
3886 uint64_t w2w_l_init:6;
3887 uint64_t w2r_l_init:6;
3888 uint64_t r2w_l_init:6;
3889 uint64_t r2r_l_init:6;
3890 uint64_t w2w_init:6;
3891 uint64_t w2r_init:6;
3892 uint64_t r2w_init:6;
3893 uint64_t r2r_init:6;
3895 struct cvmx_lmcx_slot_ctl0_cn70xx cn70xxp1;
3896 struct cvmx_lmcx_slot_ctl0_s cn73xx;
3897 struct cvmx_lmcx_slot_ctl0_s cn78xx;
3898 struct cvmx_lmcx_slot_ctl0_s cn78xxp1;
3899 struct cvmx_lmcx_slot_ctl0_cn61xx cnf71xx;
3900 struct cvmx_lmcx_slot_ctl0_s cnf75xx;
/*
 * cvmx_lmc#_slot_ctl1: cross-rank (xrank) CAS-to-CAS spacing minimums;
 * single 24-bit "_s" layout shared by every model. Interpretation depends
 * on LMC()_CONFIG[DDR2T], as for SLOT_CTL0.
 * NOTE(review): dropped lines here (comment delimiters, u64 member and
 * closing braces) — restore from the full header before building.
 */
3904 * cvmx_lmc#_slot_ctl1
3906 * This register is an assortment of control fields needed by the memory
3907 * controller. If software has not previously written to this register
3908 * (since the last DRESET), hardware updates the fields in this register to
3909 * the minimum allowed value when any of LMC()_RLEVEL_RANK(),
3910 * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
3911 * Ideally, only read this register after LMC has been initialized and
3912 * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
3914 * The interpretation of the fields in this CSR depends on
3915 * LMC(0)_CONFIG[DDR2T]:
3917 * * If LMC()_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
3918 * between when the DRAM part registers CAS commands of the first and
3919 * second types from different cache blocks.
3921 * * If LMC()_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
3922 * between when the DRAM part registers CAS commands of the first and
3923 * second types from different cache blocks.
3924 * FieldValue = 0 is always illegal in this case.
3926 * The hardware-calculated minimums for these fields are shown in
3927 * LMC(0)_SLOT_CTL1 Hardware-Calculated Minimums.
3929 union cvmx_lmcx_slot_ctl1 {
3931 struct cvmx_lmcx_slot_ctl1_s {
3932 uint64_t reserved_24_63:40;
3933 uint64_t w2w_xrank_init:6;
3934 uint64_t w2r_xrank_init:6;
3935 uint64_t r2w_xrank_init:6;
3936 uint64_t r2r_xrank_init:6;
3938 struct cvmx_lmcx_slot_ctl1_s cn61xx;
3939 struct cvmx_lmcx_slot_ctl1_s cn63xx;
3940 struct cvmx_lmcx_slot_ctl1_s cn63xxp1;
3941 struct cvmx_lmcx_slot_ctl1_s cn66xx;
3942 struct cvmx_lmcx_slot_ctl1_s cn68xx;
3943 struct cvmx_lmcx_slot_ctl1_s cn68xxp1;
3944 struct cvmx_lmcx_slot_ctl1_s cn70xx;
3945 struct cvmx_lmcx_slot_ctl1_s cn70xxp1;
3946 struct cvmx_lmcx_slot_ctl1_s cn73xx;
3947 struct cvmx_lmcx_slot_ctl1_s cn78xx;
3948 struct cvmx_lmcx_slot_ctl1_s cn78xxp1;
3949 struct cvmx_lmcx_slot_ctl1_s cnf71xx;
3950 struct cvmx_lmcx_slot_ctl1_s cnf75xx;
/*
 * cvmx_lmc#_slot_ctl2: cross-DIMM (xdimm) CAS-to-CAS spacing minimums;
 * single 24-bit "_s" layout shared by every model. Interpretation depends
 * on LMC()_CONFIG[DDR2T], as for SLOT_CTL0/1.
 * NOTE(review): dropped lines here (comment delimiters, u64 member and
 * closing braces) — restore from the full header before building.
 */
3954 * cvmx_lmc#_slot_ctl2
3956 * This register is an assortment of control fields needed by the memory
3957 * controller. If software has not previously written to this register
3958 * (since the last DRESET), hardware updates the fields in this register
3959 * to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
3960 * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
3961 * Ideally, only read this register after LMC has been initialized and
3962 * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
3964 * The interpretation of the fields in this CSR depends on LMC(0)_CONFIG[DDR2T]:
3966 * * If LMC()_CONFIG[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles
3967 * between when the DRAM part registers CAS commands of the first and
3968 * second types from different cache blocks.
3970 * * If LMC()_CONFIG[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles
3971 * between when the DRAM part registers CAS commands of the first and second
3972 * types from different cache blocks.
3973 * FieldValue = 0 is always illegal in this case.
3975 * The hardware-calculated minimums for these fields are shown in LMC Registers.
3977 union cvmx_lmcx_slot_ctl2 {
3979 struct cvmx_lmcx_slot_ctl2_s {
3980 uint64_t reserved_24_63:40;
3981 uint64_t w2w_xdimm_init:6;
3982 uint64_t w2r_xdimm_init:6;
3983 uint64_t r2w_xdimm_init:6;
3984 uint64_t r2r_xdimm_init:6;
3986 struct cvmx_lmcx_slot_ctl2_s cn61xx;
3987 struct cvmx_lmcx_slot_ctl2_s cn63xx;
3988 struct cvmx_lmcx_slot_ctl2_s cn63xxp1;
3989 struct cvmx_lmcx_slot_ctl2_s cn66xx;
3990 struct cvmx_lmcx_slot_ctl2_s cn68xx;
3991 struct cvmx_lmcx_slot_ctl2_s cn68xxp1;
3992 struct cvmx_lmcx_slot_ctl2_s cn70xx;
3993 struct cvmx_lmcx_slot_ctl2_s cn70xxp1;
3994 struct cvmx_lmcx_slot_ctl2_s cn73xx;
3995 struct cvmx_lmcx_slot_ctl2_s cn78xx;
3996 struct cvmx_lmcx_slot_ctl2_s cn78xxp1;
3997 struct cvmx_lmcx_slot_ctl2_s cnf71xx;
3998 struct cvmx_lmcx_slot_ctl2_s cnf75xx;
/*
 * cvmx_lmc#_slot_ctl3: cross-rank spacing minimums with same-bank-group
 * (_l) variants and extension bits; present only on cn73xx/cn78xx/cnf75xx.
 * Interpretation depends on LMC()_CONFIG[DDR2T], as for SLOT_CTL0-2.
 * NOTE(review): dropped lines here (comment delimiters, u64 member and
 * closing braces) — restore from the full header before building.
 */
4002 * cvmx_lmc#_slot_ctl3
4004 * This register is an assortment of control fields needed by the memory
4005 * controller. If software has not previously written to this register
4006 * (since the last DRESET), hardware updates the fields in this register
4007 * to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
4008 * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
4009 * Ideally, only read this register after LMC has been initialized and
4010 * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
4012 * The interpretation of the fields in this CSR depends on LMC(0)_CONFIG[DDR2T]:
4014 * * If LMC()_CONFIG[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles
4015 * between when the DRAM part registers CAS commands of the first and
4016 * second types from different cache blocks.
4018 * * If LMC()_CONFIG[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles
4019 * between when the DRAM part registers CAS commands of the first and second
4020 * types from different cache blocks.
4021 * FieldValue = 0 is always illegal in this case.
4023 * The hardware-calculated minimums for these fields are shown in LMC Registers.
4025 union cvmx_lmcx_slot_ctl3 {
4027 struct cvmx_lmcx_slot_ctl3_s {
4028 uint64_t reserved_50_63:14;
4029 uint64_t w2r_l_xrank_init_ext:1;
4030 uint64_t w2r_xrank_init_ext:1;
4031 uint64_t w2w_l_xrank_init:6;
4032 uint64_t w2r_l_xrank_init:6;
4033 uint64_t r2w_l_xrank_init:6;
4034 uint64_t r2r_l_xrank_init:6;
4035 uint64_t w2w_xrank_init:6;
4036 uint64_t w2r_xrank_init:6;
4037 uint64_t r2w_xrank_init:6;
4038 uint64_t r2r_xrank_init:6;
4040 struct cvmx_lmcx_slot_ctl3_s cn73xx;
4041 struct cvmx_lmcx_slot_ctl3_s cn78xx;
4042 struct cvmx_lmcx_slot_ctl3_s cnf75xx;
/*
 * cvmx_lmc#_timing_params0: first bank of DRAM timing parameters, with
 * per-model layouts (cn61xx legacy, cn63xxp1, cn70xx, cn73xx+).
 * NOTE(review): most of the timing bit-fields between the visible
 * reserved spans were dropped from this chunk, along with comment
 * delimiters, u64 members and closing braces — restore from the full
 * header before building.
 */
4046 * cvmx_lmc#_timing_params0
4048 union cvmx_lmcx_timing_params0 {
4050 struct cvmx_lmcx_timing_params0_s {
4051 uint64_t reserved_54_63:10;
4053 uint64_t reserved_26_47:22;
4055 uint64_t reserved_8_21:14;
4058 struct cvmx_lmcx_timing_params0_cn61xx {
4059 uint64_t reserved_47_63:17;
4070 uint64_t reserved_0_9:10;
4072 struct cvmx_lmcx_timing_params0_cn61xx cn63xx;
4073 struct cvmx_lmcx_timing_params0_cn63xxp1 {
4074 uint64_t reserved_46_63:18;
4086 struct cvmx_lmcx_timing_params0_cn61xx cn66xx;
4087 struct cvmx_lmcx_timing_params0_cn61xx cn68xx;
4088 struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1;
4089 struct cvmx_lmcx_timing_params0_cn70xx {
4090 uint64_t reserved_48_63:16;
4100 uint64_t reserved_0_7:8;
4102 struct cvmx_lmcx_timing_params0_cn70xx cn70xxp1;
4103 struct cvmx_lmcx_timing_params0_cn73xx {
4104 uint64_t reserved_54_63:10;
4115 uint64_t reserved_0_7:8;
4117 struct cvmx_lmcx_timing_params0_cn73xx cn78xx;
4118 struct cvmx_lmcx_timing_params0_cn73xx cn78xxp1;
4119 struct cvmx_lmcx_timing_params0_cn61xx cnf71xx;
4120 struct cvmx_lmcx_timing_params0_cn73xx cnf75xx;
/*
 * cvmx_lmc#_timing_params1: second bank of DRAM timing parameters
 * (visible fields include tRCD extension, tPDM full-cycle enable,
 * tRFC_DLR, tRAS extension and tWLDQSEN) with per-model layouts.
 * NOTE(review): many bit-field lines were dropped from this chunk,
 * along with comment delimiters, u64 members and closing braces —
 * restore from the full header before building.
 */
4124 * cvmx_lmc#_timing_params1
4126 union cvmx_lmcx_timing_params1 {
4128 struct cvmx_lmcx_timing_params1_s {
4129 uint64_t reserved_59_63:5;
4131 uint64_t trcd_ext:1;
4132 uint64_t tpdm_full_cycle_ena:1;
4133 uint64_t trfc_dlr:7;
4134 uint64_t reserved_4_48:45;
4137 struct cvmx_lmcx_timing_params1_cn61xx {
4138 uint64_t reserved_47_63:17;
4139 uint64_t tras_ext:1;
4142 uint64_t twldqsen:4;
4152 struct cvmx_lmcx_timing_params1_cn61xx cn63xx;
4153 struct cvmx_lmcx_timing_params1_cn63xxp1 {
4154 uint64_t reserved_46_63:18;
4157 uint64_t twldqsen:4;
4167 struct cvmx_lmcx_timing_params1_cn61xx cn66xx;
4168 struct cvmx_lmcx_timing_params1_cn61xx cn68xx;
4169 struct cvmx_lmcx_timing_params1_cn61xx cn68xxp1;
4170 struct cvmx_lmcx_timing_params1_cn70xx {
4171 uint64_t reserved_49_63:15;
4174 uint64_t twldqsen:4;
4184 struct cvmx_lmcx_timing_params1_cn70xx cn70xxp1;
4185 struct cvmx_lmcx_timing_params1_cn73xx {
4186 uint64_t reserved_59_63:5;
4188 uint64_t trcd_ext:1;
4189 uint64_t tpdm_full_cycle_ena:1;
4190 uint64_t trfc_dlr:7;
4193 uint64_t twldqsen:4;
4203 struct cvmx_lmcx_timing_params1_cn73xx cn78xx;
4204 struct cvmx_lmcx_timing_params1_cn73xx cn78xxp1;
4205 struct cvmx_lmcx_timing_params1_cn61xx cnf71xx;
4206 struct cvmx_lmcx_timing_params1_cn73xx cnf75xx;
/*
 * cvmx_lmc#_timing_params2: DDR4-specific timing parameters (visible
 * fields: tRRD_L extension, T_RW_OP_MAX). cn70xx uses a narrower layout
 * without [TRRD_L_EXT]; cn73xx+ use the "_s" layout.
 * NOTE(review): dropped lines here (remaining bit-fields, u64 member,
 * closing braces) — restore from the full header before building.
 */
4210 * cvmx_lmc#_timing_params2
4212 * This register sets timing parameters for DDR4.
4215 union cvmx_lmcx_timing_params2 {
4217 struct cvmx_lmcx_timing_params2_s {
4218 uint64_t reserved_16_63:48;
4219 uint64_t trrd_l_ext:1;
4221 uint64_t t_rw_op_max:4;
4225 struct cvmx_lmcx_timing_params2_cn70xx {
4226 uint64_t reserved_15_63:49;
4228 uint64_t t_rw_op_max:4;
4232 struct cvmx_lmcx_timing_params2_cn70xx cn70xxp1;
4233 struct cvmx_lmcx_timing_params2_s cn73xx;
4234 struct cvmx_lmcx_timing_params2_s cn78xx;
4235 struct cvmx_lmcx_timing_params2_s cn78xxp1;
4236 struct cvmx_lmcx_timing_params2_s cnf75xx;
/*
 * cvmx_lmc#_tro_ctl (LMC Temperature Ring Osc Control): bring-up is
 * "write TRESET to 0, then initialize RCLK_CNT" per the original text.
 * NOTE(review): visible bit-fields sum to 63 of 64 bits (31 + 32), so at
 * least one field line (presumably TRESET at bit 0 — verify) was dropped
 * from this chunk, along with the u64 member and closing braces.
 */
4242 * LMC_TRO_CTL = LMC Temperature Ring Osc Control
4243 * This register is an assortment of various control fields needed to
4244 * control the temperature ring oscillator
4247 * To bring up the temperature ring oscillator, write TRESET to 0, and
4248 * follow by initializing RCLK_CNT to desired value
4250 union cvmx_lmcx_tro_ctl {
4252 struct cvmx_lmcx_tro_ctl_s {
4253 uint64_t reserved_33_63:31;
4254 uint64_t rclk_cnt:32;
4257 struct cvmx_lmcx_tro_ctl_s cn61xx;
4258 struct cvmx_lmcx_tro_ctl_s cn63xx;
4259 struct cvmx_lmcx_tro_ctl_s cn63xxp1;
4260 struct cvmx_lmcx_tro_ctl_s cn66xx;
4261 struct cvmx_lmcx_tro_ctl_s cn68xx;
4262 struct cvmx_lmcx_tro_ctl_s cn68xxp1;
4263 struct cvmx_lmcx_tro_ctl_s cnf71xx;
/*
 * cvmx_lmc#_tro_stat (LMC Temperature Ring Osc Status): [RING_CNT]
 * occupies the low 32 bits; single "_s" layout for all listed models.
 * NOTE(review): dropped lines here (comment delimiters, u64 member,
 * closing braces) — restore from the full header before building.
 */
4267 * cvmx_lmc#_tro_stat
4269 * LMC_TRO_STAT = LMC Temperature Ring Osc Status
4270 * This register is an assortment of various control fields needed to
4271 * control the temperature ring oscillator
4273 union cvmx_lmcx_tro_stat {
4275 struct cvmx_lmcx_tro_stat_s {
4276 uint64_t reserved_32_63:32;
4277 uint64_t ring_cnt:32;
4279 struct cvmx_lmcx_tro_stat_s cn61xx;
4280 struct cvmx_lmcx_tro_stat_s cn63xx;
4281 struct cvmx_lmcx_tro_stat_s cn63xxp1;
4282 struct cvmx_lmcx_tro_stat_s cn66xx;
4283 struct cvmx_lmcx_tro_stat_s cn68xx;
4284 struct cvmx_lmcx_tro_stat_s cn68xxp1;
4285 struct cvmx_lmcx_tro_stat_s cnf71xx;
/*
 * cvmx_lmc#_wlevel_ctl: write-leveling control; [LANEMASK] selects byte
 * lanes. cn63xxp1 uses a narrower (10-bit) layout; everything else uses
 * the 22-bit "_s" layout.
 * NOTE(review): the fields between [LANEMASK] and bit 21 were dropped
 * from this chunk, along with comment delimiters, u64 members and
 * closing braces — restore from the full header before building.
 */
4289 * cvmx_lmc#_wlevel_ctl
4291 union cvmx_lmcx_wlevel_ctl {
4293 struct cvmx_lmcx_wlevel_ctl_s {
4294 uint64_t reserved_22_63:42;
4299 uint64_t lanemask:9;
4301 struct cvmx_lmcx_wlevel_ctl_s cn61xx;
4302 struct cvmx_lmcx_wlevel_ctl_s cn63xx;
4303 struct cvmx_lmcx_wlevel_ctl_cn63xxp1 {
4304 uint64_t reserved_10_63:54;
4306 uint64_t lanemask:9;
4308 struct cvmx_lmcx_wlevel_ctl_s cn66xx;
4309 struct cvmx_lmcx_wlevel_ctl_s cn68xx;
4310 struct cvmx_lmcx_wlevel_ctl_s cn68xxp1;
4311 struct cvmx_lmcx_wlevel_ctl_s cn70xx;
4312 struct cvmx_lmcx_wlevel_ctl_s cn70xxp1;
4313 struct cvmx_lmcx_wlevel_ctl_s cn73xx;
4314 struct cvmx_lmcx_wlevel_ctl_s cn78xx;
4315 struct cvmx_lmcx_wlevel_ctl_s cn78xxp1;
4316 struct cvmx_lmcx_wlevel_ctl_s cnf71xx;
4317 struct cvmx_lmcx_wlevel_ctl_s cnf75xx;
/*
 * cvmx_lmc#_wlevel_dbg: write-leveling debug window; a write selects a
 * byte via [BYTE] and a read returns the pass/fail [BITMASK] for the
 * last rank that hardware write-leveled. Single "_s" layout, 12 bits.
 * NOTE(review): the [BYTE]/[BITMASK] field lines themselves were dropped
 * from this chunk (only reserved_12_63 remains), along with comment
 * delimiters, the u64 member and closing braces — restore from the full
 * header before building.
 */
4321 * cvmx_lmc#_wlevel_dbg
4323 * A given write of LMC()_WLEVEL_DBG returns the write leveling pass/fail
4324 * results for all possible delay settings (i.e. the BITMASK) for only one
4325 * byte in the last rank that the hardware write leveled.
4326 * LMC()_WLEVEL_DBG[BYTE] selects the particular byte. To get these
4327 * pass/fail results for a different rank, you must run the hardware write
4328 * leveling again. For example, it is possible to get the [BITMASK] results
4329 * for every byte of every rank if you run write leveling separately for
4330 * each rank, probing LMC()_WLEVEL_DBG between each write-leveling.
4332 union cvmx_lmcx_wlevel_dbg {
4334 struct cvmx_lmcx_wlevel_dbg_s {
4335 uint64_t reserved_12_63:52;
4339 struct cvmx_lmcx_wlevel_dbg_s cn61xx;
4340 struct cvmx_lmcx_wlevel_dbg_s cn63xx;
4341 struct cvmx_lmcx_wlevel_dbg_s cn63xxp1;
4342 struct cvmx_lmcx_wlevel_dbg_s cn66xx;
4343 struct cvmx_lmcx_wlevel_dbg_s cn68xx;
4344 struct cvmx_lmcx_wlevel_dbg_s cn68xxp1;
4345 struct cvmx_lmcx_wlevel_dbg_s cn70xx;
4346 struct cvmx_lmcx_wlevel_dbg_s cn70xxp1;
4347 struct cvmx_lmcx_wlevel_dbg_s cn73xx;
4348 struct cvmx_lmcx_wlevel_dbg_s cn78xx;
4349 struct cvmx_lmcx_wlevel_dbg_s cn78xxp1;
4350 struct cvmx_lmcx_wlevel_dbg_s cnf71xx;
4351 struct cvmx_lmcx_wlevel_dbg_s cnf75xx;
/*
 * cvmx_lmc#_wlevel_rank#: per-rank write-leveling delay results (units
 * of 1/8 CK); four per LMC. Hardware fills BYTEn<2:0> during the
 * write-leveling sequence; software may write when no sequence is active.
 * Single 47-bit "_s" layout for all models.
 * NOTE(review): the [STATUS]/[BYTEn] field lines were dropped from this
 * chunk (only reserved_47_63 remains), along with comment delimiters,
 * the u64 member and closing braces — restore from the full header.
 */
4355 * cvmx_lmc#_wlevel_rank#
4357 * Four of these CSRs exist per LMC, one for each rank. Write level setting
4358 * is measured in units of 1/8 CK, so the below BYTEn values can range over
4359 * 4 CK cycles. Assuming LMC()_WLEVEL_CTL[SSET]=0, the BYTEn<2:0> values are
4360 * not used during write leveling, and they are overwritten by the hardware
4361 * as part of the write leveling sequence. (Hardware sets [STATUS] to 3 after
4362 * hardware write leveling completes for the rank). Software needs to set
4365 * Each CSR may also be written by software, but not while a write leveling
4366 * sequence is in progress. (Hardware sets [STATUS] to 1 after a CSR write.)
4367 * Software initiates a hardware write-leveling sequence by programming
4368 * LMC()_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQ_SEL=6 in
4371 * LMC will then step through and accumulate write leveling results for 8
4372 * unique delay settings (twice), starting at a delay of LMC()_WLEVEL_RANK()
4373 * [BYTEn<4:3>]* 8 CK increasing by 1/8 CK each setting. Hardware will then
4374 * set LMC()_WLEVEL_RANK()[BYTEn<2:0>] to indicate the first write leveling
4375 * result of 1 that followed a result of 0 during the sequence by searching
4376 * for a '1100' pattern in the generated bitmask, except that LMC will always
4377 * write LMC()_WLEVEL_RANK()[BYTEn<0>]=0. If hardware is unable to find a match
4378 * for a '1100' pattern, then hardware sets LMC()_WLEVEL_RANK() [BYTEn<2:0>]
4379 * to 0x4. See LMC()_WLEVEL_CTL.
4381 * LMC()_WLEVEL_RANKi values for ranks i without attached DRAM should be set
4382 * such that they do not increase the range of possible BYTE values for any
4383 * byte lane. The easiest way to do this is to set LMC()_WLEVEL_RANKi =
4384 * LMC()_WLEVEL_RANKj, where j is some rank with attached DRAM whose
4385 * LMC()_WLEVEL_RANKj is already fully initialized.
4387 union cvmx_lmcx_wlevel_rankx {
4389 struct cvmx_lmcx_wlevel_rankx_s {
4390 uint64_t reserved_47_63:17;
4402 struct cvmx_lmcx_wlevel_rankx_s cn61xx;
4403 struct cvmx_lmcx_wlevel_rankx_s cn63xx;
4404 struct cvmx_lmcx_wlevel_rankx_s cn63xxp1;
4405 struct cvmx_lmcx_wlevel_rankx_s cn66xx;
4406 struct cvmx_lmcx_wlevel_rankx_s cn68xx;
4407 struct cvmx_lmcx_wlevel_rankx_s cn68xxp1;
4408 struct cvmx_lmcx_wlevel_rankx_s cn70xx;
4409 struct cvmx_lmcx_wlevel_rankx_s cn70xxp1;
4410 struct cvmx_lmcx_wlevel_rankx_s cn73xx;
4411 struct cvmx_lmcx_wlevel_rankx_s cn78xx;
4412 struct cvmx_lmcx_wlevel_rankx_s cn78xxp1;
4413 struct cvmx_lmcx_wlevel_rankx_s cnf71xx;
4414 struct cvmx_lmcx_wlevel_rankx_s cnf75xx;
/*
 * cvmx_lmc#_wodt_ctl0: lower half of the legacy (DDR2-era) write ODT
 * mask; pairs with LMC_WODT_CTL1. Two layouts: per-DIMM/rank 8-bit
 * masks (cn30xx family) and hi/lo 4-bit masks (cn38xx family). The "_s"
 * view is fully reserved here.
 * NOTE(review): dropped lines here (comment delimiters, u64 member,
 * closing braces) — restore from the full header before building.
 */
4418 * cvmx_lmc#_wodt_ctl0
4420 * LMC_WODT_CTL0 = LMC Write OnDieTermination control
4421 * See the description in LMC_WODT_CTL1.
4424 * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write
4425 * ODT mask. See LMC_WODT_CTL1.
4428 union cvmx_lmcx_wodt_ctl0 {
4430 struct cvmx_lmcx_wodt_ctl0_s {
4431 uint64_t reserved_0_63:64;
4433 struct cvmx_lmcx_wodt_ctl0_cn30xx {
4434 uint64_t reserved_32_63:32;
4435 uint64_t wodt_d1_r1:8;
4436 uint64_t wodt_d1_r0:8;
4437 uint64_t wodt_d0_r1:8;
4438 uint64_t wodt_d0_r0:8;
4440 struct cvmx_lmcx_wodt_ctl0_cn30xx cn31xx;
4441 struct cvmx_lmcx_wodt_ctl0_cn38xx {
4442 uint64_t reserved_32_63:32;
4443 uint64_t wodt_hi3:4;
4444 uint64_t wodt_hi2:4;
4445 uint64_t wodt_hi1:4;
4446 uint64_t wodt_hi0:4;
4447 uint64_t wodt_lo3:4;
4448 uint64_t wodt_lo2:4;
4449 uint64_t wodt_lo1:4;
4450 uint64_t wodt_lo0:4;
4452 struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2;
4453 struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx;
4454 struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx;
4455 struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1;
4456 struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xx;
4457 struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1;
4458 struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx;
4459 struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1;
/*
 * cvmx_lmc#_wodt_ctl1: upper half of the legacy write ODT mask
 * (DIMM2/DIMM3 rank masks); together with WODT_CTL0 it selects which ODT
 * pins assert on a write to a given rank. Single 32-bit "_s" layout.
 * NOTE(review): dropped lines here (comment delimiters, u64 member,
 * closing braces) — restore from the full header before building.
 */
4463 * cvmx_lmc#_wodt_ctl1
4465 * LMC_WODT_CTL1 = LMC Write OnDieTermination control
4466 * System designers may desire to terminate DQ/DQS/DM lines for higher
4467 * frequency DDR operations (667MHz and faster), especially on a multi-rank
4468 * system. DDR2 DQ/DM/DQS I/O's have built in Termination resistor that can
4469 * be turned on or off by the controller, after meeting tAOND and tAOF
4470 * timing requirements. Each Rank has its own ODT pin that fans out to all
4471 * the memory parts in that DIMM. System designers may prefer different
4472 * combinations of ODT ON's for read and write into different ranks. Octeon
4473 * supports full programmability by way of the mask register below.
4474 * Each Rank position has its own 8-bit programmable field.
4475 * When the controller does a write to that rank, it sets the 8 ODT pins
4476 * to the MASK pins below. For eg., When doing a write into Rank0, a system
4477 * designer may desire to terminate the lines with the resistor on
4478 * Dimm0/Rank1. The mask WODT_D0_R0 would then be [00000010]. If ODT feature
4479 * is not desired, the DDR parts can be programmed to not look at these pins by
4480 * writing 0 in QS_DIC. Octeon drives the appropriate mask values on the ODT
4482 * If this feature is not required, write 0 in this register.
4485 * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write
4486 * ODT mask. When a given RANK is selected, the WODT mask for that RANK
4487 * is used. The resulting WODT mask is driven to the DIMMs in the following
4489 * BUNK_ENA=1 BUNK_ENA=0
4490 * Mask[7] -> DIMM3, RANK1 DIMM3
4491 * Mask[6] -> DIMM3, RANK0
4492 * Mask[5] -> DIMM2, RANK1 DIMM2
4493 * Mask[4] -> DIMM2, RANK0
4494 * Mask[3] -> DIMM1, RANK1 DIMM1
4495 * Mask[2] -> DIMM1, RANK0
4496 * Mask[1] -> DIMM0, RANK1 DIMM0
4497 * Mask[0] -> DIMM0, RANK0
4499 union cvmx_lmcx_wodt_ctl1 {
4501 struct cvmx_lmcx_wodt_ctl1_s {
4502 uint64_t reserved_32_63:32;
4503 uint64_t wodt_d3_r1:8;
4504 uint64_t wodt_d3_r0:8;
4505 uint64_t wodt_d2_r1:8;
4506 uint64_t wodt_d2_r0:8;
4508 struct cvmx_lmcx_wodt_ctl1_s cn30xx;
4509 struct cvmx_lmcx_wodt_ctl1_s cn31xx;
4510 struct cvmx_lmcx_wodt_ctl1_s cn52xx;
4511 struct cvmx_lmcx_wodt_ctl1_s cn52xxp1;
4512 struct cvmx_lmcx_wodt_ctl1_s cn56xx;
4513 struct cvmx_lmcx_wodt_ctl1_s cn56xxp1;
4517 * cvmx_lmc#_wodt_mask
4519 * System designers may desire to terminate DQ/DQS lines for higher-frequency
4520 * DDR operations, especially on a multirank system. DDR3 DQ/DQS I/Os have
4521 * built-in termination resistors that can be turned on or off by the
4522 * controller, after meeting TAOND and TAOF timing requirements. Each rank
4523 * has its own ODT pin that fans out to all of the memory parts in that DIMM.
4524 * System designers may prefer different combinations of ODT ONs for write
4525 * operations into different ranks. CNXXXX supports full programmability by
4526 * way of the mask register below. Each rank position has its own 8-bit
4527 * programmable field. When the controller does a write to that rank,
4528 * it sets the four ODT pins to the mask pins below. For example, when
4529 * doing a write into Rank0, a system designer may desire to terminate the
4530 * lines with the resistor on DIMM0/Rank1. The mask [WODT_D0_R0] would then
4533 * CNXXXX drives the appropriate mask values on the ODT pins by default.
4534 * If this feature is not required, write 0x0 in this register. When a
4535 * given RANK is selected, the WODT mask for that RANK is used. The
4536 * resulting WODT mask is driven to the DIMMs in the following manner:
4538 union cvmx_lmcx_wodt_mask {
4540 struct cvmx_lmcx_wodt_mask_s {
4541 uint64_t wodt_d3_r1:8;
4542 uint64_t wodt_d3_r0:8;
4543 uint64_t wodt_d2_r1:8;
4544 uint64_t wodt_d2_r0:8;
4545 uint64_t wodt_d1_r1:8;
4546 uint64_t wodt_d1_r0:8;
4547 uint64_t wodt_d0_r1:8;
4548 uint64_t wodt_d0_r0:8;
4550 struct cvmx_lmcx_wodt_mask_s cn61xx;
4551 struct cvmx_lmcx_wodt_mask_s cn63xx;
4552 struct cvmx_lmcx_wodt_mask_s cn63xxp1;
4553 struct cvmx_lmcx_wodt_mask_s cn66xx;
4554 struct cvmx_lmcx_wodt_mask_s cn68xx;
4555 struct cvmx_lmcx_wodt_mask_s cn68xxp1;
4556 struct cvmx_lmcx_wodt_mask_cn70xx {
4557 uint64_t reserved_28_63:36;
4558 uint64_t wodt_d1_r1:4;
4559 uint64_t reserved_20_23:4;
4560 uint64_t wodt_d1_r0:4;
4561 uint64_t reserved_12_15:4;
4562 uint64_t wodt_d0_r1:4;
4563 uint64_t reserved_4_7:4;
4564 uint64_t wodt_d0_r0:4;
4566 struct cvmx_lmcx_wodt_mask_cn70xx cn70xxp1;
4567 struct cvmx_lmcx_wodt_mask_cn70xx cn73xx;
4568 struct cvmx_lmcx_wodt_mask_cn70xx cn78xx;
4569 struct cvmx_lmcx_wodt_mask_cn70xx cn78xxp1;
4570 struct cvmx_lmcx_wodt_mask_s cnf71xx;
4571 struct cvmx_lmcx_wodt_mask_cn70xx cnf75xx;