2 * Cache-handling routines for MIPS CPUs
4 * Copyright (c) 2003 Wolfgang Denk <wd@denx.de>
6 * SPDX-License-Identifier: GPL-2.0+
9 #include <asm-offsets.h>
12 #include <asm/regdef.h>
13 #include <asm/mipsregs.h>
14 #include <asm/addrspace.h>
15 #include <asm/cacheops.h>
18 #ifndef CONFIG_SYS_MIPS_CACHE_MODE
19 #define CONFIG_SYS_MIPS_CACHE_MODE CONF_CM_CACHABLE_NONCOHERENT
22 #define INDEX_BASE CKSEG0
/*
 * f_fill64: store \val into 16 consecutive LONGSIZE-wide slots starting at
 * (\offset)(\dst) — 64 bytes when LONGSIZE is 4, 128 bytes when it is 8.
 * Used later in this file to clear low memory so that cache fills read
 * data with good parity.
 * NOTE(review): the closing .endm is not visible in this chunk — confirm
 * against the full file.
 */
24 .macro f_fill64 dst, offset, val
25 LONG_S \val, (\offset + 0 * LONGSIZE)(\dst)
26 LONG_S \val, (\offset + 1 * LONGSIZE)(\dst)
27 LONG_S \val, (\offset + 2 * LONGSIZE)(\dst)
28 LONG_S \val, (\offset + 3 * LONGSIZE)(\dst)
29 LONG_S \val, (\offset + 4 * LONGSIZE)(\dst)
30 LONG_S \val, (\offset + 5 * LONGSIZE)(\dst)
31 LONG_S \val, (\offset + 6 * LONGSIZE)(\dst)
32 LONG_S \val, (\offset + 7 * LONGSIZE)(\dst)
34 LONG_S \val, (\offset + 8 * LONGSIZE)(\dst)
35 LONG_S \val, (\offset + 9 * LONGSIZE)(\dst)
36 LONG_S \val, (\offset + 10 * LONGSIZE)(\dst)
37 LONG_S \val, (\offset + 11 * LONGSIZE)(\dst)
38 LONG_S \val, (\offset + 12 * LONGSIZE)(\dst)
39 LONG_S \val, (\offset + 13 * LONGSIZE)(\dst)
40 LONG_S \val, (\offset + 14 * LONGSIZE)(\dst)
41 LONG_S \val, (\offset + 15 * LONGSIZE)(\dst)
/*
 * cache_loop: apply cache operation \op to every \line_sz-sized line in the
 * region starting at \curr, advancing \curr one line per iteration up to
 * \end.
 * NOTE(review): the loop-back branch (presumably bne \curr, \end, 10b plus
 * its delay slot) and the closing .endm are not visible in this chunk —
 * confirm against the full file.
 */
45 .macro cache_loop curr, end, line_sz, op
46 10: cache \op, 0(\curr)
47 PTR_ADDU \curr, \curr, \line_sz
/*
 * l1_info: probe an L1 cache's geometry from the cop0 Config1 register.
 * \off selects which field group to decode (MIPS_CONF1_IA_SHF for the
 * I-cache, MIPS_CONF1_DA_SHF for the D-cache — the D-cache field offsets
 * are used as the baseline in the shift arithmetic below).
 * Outputs: \sz = total cache size in bytes, \line_sz = line size in bytes.
 * Clobbers $1 (at) as scratch.
 * NOTE(review): several interior lines are not visible in this chunk
 * (e.g. the constant loaded into \sz before the first sllv, any
 * associativity adjustment, the final set-count shift and the closing
 * .endm) — confirm against the full file.
 */
51 .macro l1_info sz, line_sz, off
55 mfc0 $1, CP0_CONFIG, 1
57 /* detect line size */
58 srl \line_sz, $1, \off + MIPS_CONF1_DL_SHF - MIPS_CONF1_DA_SHF
59 andi \line_sz, \line_sz, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHF)
/* line size is a power of two derived from the DL field; the base value
 * shifted here lives in \sz, presumably loaded on an elided line. */
63 sllv \line_sz, \sz, \line_sz
65 /* detect associativity */
66 srl \sz, $1, \off + MIPS_CONF1_DA_SHF - MIPS_CONF1_DA_SHF
67 andi \sz, \sz, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHF)
/* size so far = ways * line size */
71 mul \sz, \sz, \line_sz
73 /* detect log32(sets) */
74 srl $1, $1, \off + MIPS_CONF1_DS_SHF - MIPS_CONF1_DA_SHF
75 andi $1, $1, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHF)
79 /* sz <<= log32(sets) */
89 * mips_cache_reset - low level initialisation of the primary caches
91 * This routine initialises the primary caches to ensure that they have good
92 * parity. It must be called by the ROM before any cached locations are used
93 * to prevent the possibility of data with bad parity being written to memory.
95 * To initialise the instruction cache it is essential that a source of data
96 * with good parity is available. This routine will initialise an area of
97 * memory starting at location zero to be used as a source of parity.
99 * Note that this function does not follow the standard calling convention &
100 * may clobber typically callee-saved registers.
/* s7 holds a non-zero value iff the L2 was successfully bypassed below */
112 #define R_L2_BYPASSED s7
/*
 * mips_cache_reset: probe the cache geometry and initialise the L1
 * (and, when configured, L2) caches so every line has good parity.
 * Register roles (aliases presumably #defined on lines elided from this
 * chunk): R_IC_SIZE/R_IC_LINE = I-cache total/line size,
 * R_DC_SIZE/R_DC_LINE = D-cache total/line size,
 * R_L2_SIZE/R_L2_LINE = L2 total/line size, R_L2_L2C = Config5.L2C flag.
 * Clobbers typically callee-saved registers (see the header comment above).
 * NOTE(review): this chunk is missing many lines — labels such as
 * l2_probe_done / l2_probe_cop0 / l1_init / l2_unbypass / return, branch
 * delay slots, and several #endif lines. Confirm all control flow against
 * the full file.
 */
114 LEAF(mips_cache_reset)
117 #ifdef CONFIG_MIPS_L2_CACHE
119 * For there to be an L2 present, Config2 must be present. If it isn't
120 * then we proceed knowing there's no L2 cache.
124 move R_L2_BYPASSED, zero
/* Config1.M (bit 31) set means Config2 exists; bgez == bit 31 clear */
126 mfc0 t0, CP0_CONFIG, 1
127 bgez t0, l2_probe_done
130 * From MIPSr6 onwards the L2 cache configuration might not be reported
131 * by Config2. The Config5.L2C bit indicates whether this is the case,
132 * and if it is then we need knowledge of where else to look. For cores
133 * from Imagination Technologies this is a CM GCR.
135 # if __mips_isa_rev >= 6
136 /* Check that Config5 exists */
/* Walk the Config2..Config4 presence (M) bits; any clear -> no Config5 */
137 mfc0 t0, CP0_CONFIG, 2
138 bgez t0, l2_probe_cop0
139 mfc0 t0, CP0_CONFIG, 3
140 bgez t0, l2_probe_cop0
141 mfc0 t0, CP0_CONFIG, 4
142 bgez t0, l2_probe_cop0
144 /* Check Config5.L2C is set */
145 mfc0 t0, CP0_CONFIG, 5
146 and R_L2_L2C, t0, MIPS_CONF5_L2C
147 beqz R_L2_L2C, l2_probe_cop0
149 /* Config5.L2C is set */
150 # ifdef CONFIG_MIPS_CM
151 /* The CM will provide L2 configuration */
152 PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
153 lw t1, GCR_L2_CONFIG(t0)
154 bgez t1, l2_probe_done
/* NOTE(review): the next line is the continuation of an ext instruction
 * extracting the line-size field; its opening line is elided here. */
157 GCR_L2_CONFIG_LINESZ_SHIFT, GCR_L2_CONFIG_LINESZ_BITS
158 beqz R_L2_LINE, l2_probe_done
160 sllv R_L2_LINE, t2, R_L2_LINE
/* L2 size = line size * associativity * sets */
162 ext t2, t1, GCR_L2_CONFIG_ASSOC_SHIFT, GCR_L2_CONFIG_ASSOC_BITS
164 mul R_L2_SIZE, R_L2_LINE, t2
166 ext t2, t1, GCR_L2_CONFIG_SETSZ_SHIFT, GCR_L2_CONFIG_SETSZ_BITS
167 sllv R_L2_SIZE, R_L2_SIZE, t2
169 mul R_L2_SIZE, R_L2_SIZE, t2
171 /* Bypass the L2 cache so that we can init the L1s early */
172 or t1, t1, GCR_L2_CONFIG_BYPASS
173 sw t1, GCR_L2_CONFIG(t0)
177 /* Zero the L2 tag registers */
178 sw zero, GCR_L2_TAG_ADDR(t0)
179 sw zero, GCR_L2_TAG_ADDR_UPPER(t0)
180 sw zero, GCR_L2_TAG_STATE(t0)
181 sw zero, GCR_L2_TAG_STATE_UPPER(t0)
182 sw zero, GCR_L2_DATA(t0)
183 sw zero, GCR_L2_DATA_UPPER(t0)
186 /* We don't know how to retrieve L2 configuration on this system */
192 * For pre-r6 systems, or r6 systems with Config5.L2C==0, probe the L2
193 * cache configuration from the cop0 Config2 register.
196 mfc0 t0, CP0_CONFIG, 2
/* Decode Config2.SL (L2 line size); zero means no L2 present */
198 srl R_L2_LINE, t0, MIPS_CONF2_SL_SHF
199 andi R_L2_LINE, R_L2_LINE, MIPS_CONF2_SL >> MIPS_CONF2_SL_SHF
200 beqz R_L2_LINE, l2_probe_done
202 sllv R_L2_LINE, t1, R_L2_LINE
/* L2 size = line size * (SA ways) * (SS sets per way) */
204 srl t1, t0, MIPS_CONF2_SA_SHF
205 andi t1, t1, MIPS_CONF2_SA >> MIPS_CONF2_SA_SHF
207 mul R_L2_SIZE, R_L2_LINE, t1
209 srl t1, t0, MIPS_CONF2_SS_SHF
210 andi t1, t1, MIPS_CONF2_SS >> MIPS_CONF2_SS_SHF
211 sllv R_L2_SIZE, R_L2_SIZE, t1
213 mul R_L2_SIZE, R_L2_SIZE, t1
215 /* Attempt to bypass the L2 so that we can init the L1s early */
216 or t0, t0, MIPS_CONF2_L2B
217 mtc0 t0, CP0_CONFIG, 2
/* Read back L2B: R_L2_BYPASSED is non-zero only if the bypass stuck */
219 mfc0 t0, CP0_CONFIG, 2
220 and R_L2_BYPASSED, t0, MIPS_CONF2_L2B
222 /* Zero the L2 tag registers */
223 mtc0 zero, CP0_TAGLO, 4
/* Determine the I-cache geometry: fixed by config, or probed from Config1 */
228 #ifndef CONFIG_SYS_CACHE_SIZE_AUTO
229 li R_IC_SIZE, CONFIG_SYS_ICACHE_SIZE
230 li R_IC_LINE, CONFIG_SYS_ICACHE_LINE_SIZE
232 l1_info R_IC_SIZE, R_IC_LINE, MIPS_CONF1_IA_SHF
/* Determine the D-cache geometry the same way */
235 #ifndef CONFIG_SYS_CACHE_SIZE_AUTO
236 li R_DC_SIZE, CONFIG_SYS_DCACHE_SIZE
237 li R_DC_LINE, CONFIG_SYS_DCACHE_LINE_SIZE
239 l1_info R_DC_SIZE, R_DC_LINE, MIPS_CONF1_DA_SHF
242 #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
244 /* Determine the largest L1 cache size */
245 #ifndef CONFIG_SYS_CACHE_SIZE_AUTO
246 #if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
247 li v0, CONFIG_SYS_ICACHE_SIZE
249 li v0, CONFIG_SYS_DCACHE_SIZE
/* v0 = max(R_IC_SIZE, R_DC_SIZE) via sltu/movn */
253 sltu t1, R_IC_SIZE, R_DC_SIZE
254 movn v0, R_DC_SIZE, t1
257 * Now clear that much memory starting from zero.
/* NOTE(review): the loop head (setting a0/a1 and looping in 64-byte
 * steps) is elided from this chunk */
262 f_fill64 a0, -64, zero
265 #endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */
267 #ifdef CONFIG_MIPS_L2_CACHE
269 * If the L2 is bypassed, init the L1 first so that we can execute the
270 * rest of the cache initialisation using the L1 instruction cache.
272 bnez R_L2_BYPASSED, l1_init
/* Initialise the L2: store zeroed tags to every line (loop-back branch
 * for label 1: is elided from this chunk) */
275 PTR_LI t0, INDEX_BASE
276 PTR_ADDU t1, t0, R_L2_SIZE
277 1: cache INDEX_STORE_TAG_SD, 0(t0)
278 PTR_ADDU t0, t0, R_L2_LINE
282 * If the L2 was bypassed then we already initialised the L1s before
283 * the L2, so we are now done.
285 bnez R_L2_BYPASSED, l2_unbypass
289 * The TagLo registers used depend upon the CPU implementation, but the
290 * architecture requires that it is safe for software to write to both
291 * TagLo selects 0 & 2 covering supported cases.
295 mtc0 zero, CP0_TAGLO, 2
298 * The caches are probably in an indeterminate state, so we force good
299 * parity into them by doing an invalidate for each line. If
300 * CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD is set then we'll proceed to
301 * perform a load/fill & a further invalidate for each line, assuming
302 * that the bottom of RAM (having just been cleared) will generate good
303 * parity for the cache.
307 * Initialize the I-cache first,
310 PTR_LI t0, INDEX_BASE
311 PTR_ADDU t1, t0, R_IC_SIZE
312 /* clear tag to invalidate */
313 cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
314 #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
315 /* fill once, so data field parity is correct */
316 PTR_LI t0, INDEX_BASE
317 cache_loop t0, t1, R_IC_LINE, FILL
318 /* invalidate again - prudent but not strictly necessary */
319 PTR_LI t0, INDEX_BASE
320 cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
323 /* Enable use of the I-cache by setting Config.K0 */
326 li t1, CONFIG_SYS_MIPS_CACHE_MODE
327 #if __mips_isa_rev >= 2
/* Clear the Config.K0 CCA field by setting then toggling all CMASK bits;
 * the instruction that ORs in the new mode is elided from this chunk */
330 ori t0, t0, CONF_CM_CMASK
331 xori t0, t0, CONF_CM_CMASK
337 * then initialize D-cache.
/* Skip D-cache init entirely when its probed size is zero */
339 1: blez R_DC_SIZE, 3f
340 PTR_LI t0, INDEX_BASE
341 PTR_ADDU t1, t0, R_DC_SIZE
343 cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
344 #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
345 /* load from each line (in cached space) */
346 PTR_LI t0, INDEX_BASE
347 2: LONG_L zero, 0(t0)
348 PTR_ADDU t0, R_DC_LINE
/* clear tag to invalidate once more after the fill loads */
351 PTR_LI t0, INDEX_BASE
352 cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
356 #ifdef CONFIG_MIPS_L2_CACHE
357 /* If the L2 isn't bypassed then we're done */
358 beqz R_L2_BYPASSED, return
360 /* The L2 is bypassed - go initialise it */
/* l2_unbypass: re-enable the L2 by clearing the bypass bit we set above */
364 # if __mips_isa_rev >= 6
367 li t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
368 lw t1, GCR_L2_CONFIG(t0)
369 xor t1, t1, GCR_L2_CONFIG_BYPASS
370 sw t1, GCR_L2_CONFIG(t0)
375 1: mfc0 t0, CP0_CONFIG, 2
376 xor t0, t0, MIPS_CONF2_L2B
377 mtc0 t0, CP0_CONFIG, 2
381 # ifdef CONFIG_MIPS_CM
382 /* Config3 must exist for a CM to be present */
383 mfc0 t0, CP0_CONFIG, 1
385 mfc0 t0, CP0_CONFIG, 2
388 /* Check Config3.CMGCR to determine CM presence */
389 mfc0 t0, CP0_CONFIG, 3
390 and t0, t0, MIPS_CONF3_CMGCR
393 /* Change Config.K0 to a coherent CCA */
395 li t1, CONF_CM_CACHABLE_COW
396 #if __mips_isa_rev >= 2
/* Same set-then-toggle trick to clear the CCA field before inserting t1 */
399 ori t0, t0, CONF_CM_CMASK
400 xori t0, t0, CONF_CM_CMASK
406 * Join the coherent domain such that the caches of this core are kept
407 * coherent with those of other cores.
409 PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
/* Pick the coherence-enable value; which li takes effect depends on
 * conditional lines elided from this chunk */
412 li t3, GCR_Cx_COHERENCE_EN
414 li t3, GCR_Cx_COHERENCE_DOM_EN
415 1: sw t3, GCR_Cx_COHERENCE(t0)
423 END(mips_cache_reset)
/*
 * dcache_status fragment: masks the Config.K0 CCA field and compares it
 * against CONF_CM_UNCACHED to report whether the cache is enabled.
 * NOTE(review): the LEAF()/mfc0/comparison/return lines of this function
 * are not visible in this chunk — confirm against the full file.
 */
426 * dcache_status - get cache status
428 * RETURNS: 0 - cache disabled; 1 - cache enabled
433 li t1, CONF_CM_UNCACHED
434 andi t0, t0, CONF_CM_CMASK
/*
 * dcache_disable fragment: ORs CONF_CM_UNCACHED into the (presumably
 * Config.K0) value held in t0 to select the uncached CCA.
 * NOTE(review): the LEAF()/mfc0/mtc0/return lines are not visible in this
 * chunk — confirm against the full file.
 */
442 * dcache_disable - disable cache
451 ori t0, t0, CONF_CM_UNCACHED
/*
 * dcache_enable fragment: clears the Config.K0 CCA field with the
 * set-then-toggle CMASK idiom, then ORs in the configured cache mode.
 * NOTE(review): the LEAF()/mfc0/mtc0/return lines are not visible in this
 * chunk — confirm against the full file.
 */
457 * dcache_enable - enable cache
464 ori t0, CONF_CM_CMASK
465 xori t0, CONF_CM_CMASK
466 ori t0, CONFIG_SYS_MIPS_CACHE_MODE