1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * linux/arch/arm/mm/cache-v7m.S
5 * Based on linux/arch/arm/mm/cache-v7.S
7 * Copyright (C) 2001 Deep Blue Solutions Ltd.
8 * Copyright (C) 2005 ARM Ltd.
10 * This is the "shell" of the ARMv7M processor support.
12 #include <linux/linkage.h>
13 #include <linux/init.h>
14 #include <asm/assembler.h>
15 #include <asm/errno.h>
16 #include <asm/unwind.h>
19 #include "proc-macros.S"
23 /* Generic V7M read/write macros for memory mapped cache operations */
@ v7m_cache_read: form the address of the memory-mapped SCB cache
@ register at BASEADDR_V7M_SCB + \reg in \rt via a movw/movt pair.
@ NOTE(review): the load from [\rt] and the closing .endm are not
@ visible in this view -- confirm against the full file.
24 .macro v7m_cache_read, rt, reg
25 movw \rt, #:lower16:BASEADDR_V7M_SCB + \reg
26 movt \rt, #:upper16:BASEADDR_V7M_SCB + \reg
@ v7m_cacheop: perform cache maintenance operation \op through its
@ memory-mapped SCB register; \tmp receives the register address
@ (BASEADDR_V7M_SCB + \op) and \rt carries the operation data.
@ \c is an optional condition suffix applied to the address-forming
@ instructions (defaults to "al" = always).
@ NOTE(review): the conditional store and .endm are not visible here.
30 .macro v7m_cacheop, rt, tmp, op, c = al
31 movw\c \tmp, #:lower16:BASEADDR_V7M_SCB + \op
32 movt\c \tmp, #:upper16:BASEADDR_V7M_SCB + \op
@ read_ccsidr: read the Cache Size ID Register (CCSIDR) into \rt.
37 .macro read_ccsidr, rt
38 v7m_cache_read \rt, V7M_SCB_CCSIDR
@ Read of the Cache Level ID Register (CLIDR) -- presumably the body
@ of a read_clidr macro whose .macro line is not visible in this view.
42 v7m_cache_read \rt, V7M_SCB_CLIDR
@ write_csselr: select which cache level/type subsequent CCSIDR reads
@ describe, by writing \rt to the Cache Size Selection Register.
45 .macro write_csselr, rt, tmp
46 v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
50 * dcisw: Invalidate data cache by set/way
@ \rt = encoded set/way value; \tmp clobbered with the DCISW address.
@ NOTE(review): the enclosing .macro dcisw line is not visible here.
53 v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
57 * dccisw: Clean and invalidate data cache by set/way
@ \rt = encoded set/way value; \tmp clobbered with the DCCISW address.
59 .macro dccisw, rt, tmp
60 v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
64 * dccimvac: Clean and invalidate data cache line by MVA to PoC.
@ Generate one variant per ARM condition code (dccimvac, dccimvaceq,
@ dccimvacne, ...) so callers can predicate the operation.
66 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
67 .macro dccimvac\c, rt, tmp
68 v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
@ NOTE(review): matching .endm/.endr are not visible in this view.
73 * dcimvac: Invalidate data cache line by MVA to PoC
@ Generate one variant per ARM condition code (dcimvac, dcimvaceq,
@ dcimvacne, ...) so callers can predicate the operation.
75 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
76 .macro dcimvac\c, rt, tmp
77 v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
@ NOTE(review): matching .endm/.endr are not visible in this view.
82 * dccmvau: Clean data cache line by MVA to PoU
@ \rt = virtual address within the line; \tmp clobbered with address.
84 .macro dccmvau, rt, tmp
85 v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
89 * dccmvac: Clean data cache line by MVA to PoC
@ \rt = virtual address within the line; \tmp clobbered with address.
91 .macro dccmvac, rt, tmp
92 v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
96 * icimvau: Invalidate instruction caches by MVA to PoU
@ \rt = virtual address within the line; \tmp clobbered with address.
98 .macro icimvau, rt, tmp
99 v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
103 * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
104 * rt data ignored by ICIALLU(IS), so can be used for the address
@ \rt doubles as both address scratch and (ignored) data register.
106 .macro invalidate_icache, rt
107 v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
112 * Invalidate the BTB, inner shareable if SMP.
113 * rt data ignored by BPIALL, so it can be used for the address
@ \rt doubles as both address scratch and (ignored) data register.
115 .macro invalidate_bp, rt
116 v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
120 ENTRY(v7m_invalidate_l1)
@ Invalidate the entire L1 data cache by walking every set/way
@ combination derived from the CCSIDR cache geometry.
@ NOTE(review): several lines are not visible in this view (the CCSIDR
@ read into r0, the mask setup in r1, the set/way invalidate op, the
@ loop branches and the return), so register roles below rely on the
@ surviving inline comments -- confirm against the full file.
127 and r2, r1, r0, lsr #13
131 and r3, r1, r0, lsr #3 @ NumWays - 1
132 add r2, r2, #1 @ NumSets
135 add r0, r0, #4 @ SetShift
137 clz r1, r3 @ WayShift
138 add r4, r3, #1 @ NumWays
@ Outer loop (1:): one iteration per set; inner loop (2:) walks ways.
139 1: sub r2, r2, #1 @ NumSets--
140 mov r3, r4 @ Temp = NumWays
141 2: subs r3, r3, #1 @ Temp--
144 orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
152 ENDPROC(v7m_invalidate_l1)
155 * v7m_flush_icache_all()
157 * Flush the whole I-cache.
@ NOTE(review): the body (presumably invalidate_icache plus the
@ return) is not visible in this view.
162 ENTRY(v7m_flush_icache_all)
165 ENDPROC(v7m_flush_icache_all)
168 * v7m_flush_dcache_all()
170 * Flush the whole D-cache.
172 * Corrupted registers: r0-r7, r9-r11
@ Walks the cache hierarchy described by CLIDR from level 0 up to the
@ Level of Coherency, cleaning+invalidating each data/unified level
@ by set/way via DCCISW.
@ NOTE(review): several lines are not visible in this view (the CLIDR
@ read into r0, loop labels/branches, the #endif lines closing the
@ CONFIG_PREEMPTION blocks, and the final dsb/isb/return).
174 ENTRY(v7m_flush_dcache_all)
175 dmb @ ensure ordering with previous memory accesses
177 mov r3, r0, lsr #23 @ move LoC into position
178 ands r3, r3, #7 << 1 @ extract LoC*2 from clidr
179 beq finished @ if loc is 0, then no need to clean
181 mov r10, #0 @ start clean at cache level 0
@ Per-level loop: decode the cache type field for the current level.
183 add r2, r10, r10, lsr #1 @ work out 3x current cache level
184 mov r1, r0, lsr r2 @ extract cache type bits from clidr
185 and r1, r1, #7 @ mask of the bits for current cache only
186 cmp r1, #2 @ see what cache we have at this level
187 blt skip @ skip if no cache, or just i-cache
188 #ifdef CONFIG_PREEMPTION
189 save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
191 write_csselr r10, r1 @ set current cache level
192 isb @ isb to sych the new cssr&csidr
193 read_ccsidr r1 @ read the new csidr
194 #ifdef CONFIG_PREEMPTION
195 restore_irqs_notrace r9
@ Decode CCSIDR: line length, number of ways and number of sets.
197 and r2, r1, #7 @ extract the length of the cache lines
198 add r2, r2, #4 @ add 4 (line length offset)
200 ands r4, r4, r1, lsr #3 @ find maximum number on the way size
201 clz r5, r4 @ find bit position of way size increment
203 ands r7, r7, r1, lsr #13 @ extract max number of the index size
205 mov r9, r7 @ create working copy of max index
@ Set/way loop: compose the DCCISW operand and issue the operation.
208 orr r11, r10, r6 @ factor way and cache number into r11
210 orr r11, r11, r6 @ factor index number into r11
211 dccisw r11, r6 @ clean/invalidate by set/way
212 subs r9, r9, #1 @ decrement the index
214 subs r4, r4, #1 @ decrement the way
217 add r10, r10, #2 @ increment cache number
221 mov r10, #0 @ switch back to cache level 0
222 write_csselr r10, r3 @ select current cache level in cssr
226 ENDPROC(v7m_flush_dcache_all)
229 * v7m_flush_kern_cache_all()
231 * Flush the entire cache system.
232 * The data cache flush is now achieved using atomic clean / invalidates
233 * working outwards from L1 cache. This is done using Set/Way based cache
234 * maintenance instructions.
235 * The instruction cache can still be invalidated back to the point of
236 * unification in a single instruction.
239 ENTRY(v7m_flush_kern_cache_all)
@ Save the callee-saved registers v7m_flush_dcache_all corrupts.
240 stmfd sp!, {r4-r7, r9-r11, lr}
241 bl v7m_flush_dcache_all
@ NOTE(review): the icache invalidation and the return are not visible
@ in this view.
243 ldmfd sp!, {r4-r7, r9-r11, lr}
245 ENDPROC(v7m_flush_kern_cache_all)
248 * v7m_flush_user_cache_all()
250 * Flush all TLB entries in a particular address space
252 * - mm - mm_struct describing address space
@ NOTE(review): the description above says "TLB entries" but these are
@ cache functions -- wording appears inherited from cache-v7.S; the
@ bodies of both entry points are not visible in this view.
254 ENTRY(v7m_flush_user_cache_all)
258 * v7m_flush_cache_range(start, end, flags)
260 * Flush a range of TLB entries in the specified address space.
262 * - start - start address (may not be aligned)
263 * - end - end address (exclusive, may not be aligned)
264 * - flags - vm_area_struct flags describing address space
266 * It is assumed that:
267 * - we have a VIPT cache.
269 ENTRY(v7m_flush_user_cache_range)
271 ENDPROC(v7m_flush_user_cache_all)
272 ENDPROC(v7m_flush_user_cache_range)
275 * v7m_coherent_kern_range(start,end)
277 * Ensure that the I and D caches are coherent within specified
278 * region. This is typically used when code has been written to
279 * a memory region, and will be executed.
281 * - start - virtual start address of region
282 * - end - virtual end address of region
284 * It is assumed that:
285 * - the Icache does not read data from the write buffer
@ NOTE(review): no instructions are visible between this entry point
@ and v7m_coherent_user_range below; it presumably falls through --
@ confirm against the full file.
287 ENTRY(v7m_coherent_kern_range)
291 * v7m_coherent_user_range(start,end)
293 * Ensure that the I and D caches are coherent within specified
294 * region. This is typically used when code has been written to
295 * a memory region, and will be executed.
297 * - start - virtual start address of region
298 * - end - virtual end address of region
300 * It is assumed that:
301 * - the Icache does not read data from the write buffer
303 ENTRY(v7m_coherent_user_range)
@ D-side pass: clean each line to the PoU so the I-side can observe
@ the newly written code.
305 dcache_line_size r2, r3
@ NOTE(review): the alignment setup, the open-coded dccmvau loop, the
@ icimvau loop and the BTB invalidate/return are not visible here.
310 * We use open coded version of dccmvau otherwise USER() would
311 * point at movw instruction.
@ I-side pass: invalidate the instruction cache over the same range.
318 icache_line_size r2, r3
331 ENDPROC(v7m_coherent_kern_range)
332 ENDPROC(v7m_coherent_user_range)
335 * v7m_flush_kern_dcache_area(void *addr, size_t size)
337 * Ensure that the data held in the page kaddr is written back
338 * to the page in question.
340 * - addr - kernel address
341 * - size - region size
343 ENTRY(v7m_flush_kern_dcache_area)
344 dcache_line_size r2, r3
@ NOTE(review): the end-address computation, line alignment of r0,
@ the loop branches and the dsb/return are not visible in this view.
349 dccimvac r0, r3 @ clean & invalidate D line / unified line
355 ENDPROC(v7m_flush_kern_dcache_area)
358 * v7m_dma_inv_range(start,end)
360 * Invalidate the data cache within the specified region; we will
361 * be performing a DMA operation in this region and we want to
362 * purge old data in the cache.
364 * - start - virtual start address of region
365 * - end - virtual end address of region
@ NOTE(review): the ENTRY line and most of the body (edge-line
@ clean+invalidate, the dcimvac loop, dsb and return) are not visible
@ in this view.
368 dcache_line_size r2, r3
@ dccimvac (used for a partial line at a boundary) clobbers r3 because
@ v7m's macro uses it for the SCB register address; rebuild the mask.
374 subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac
386 ENDPROC(v7m_dma_inv_range)
389 * v7m_dma_clean_range(start,end)
390 * - start - virtual start address of region
391 * - end - virtual end address of region
@ Clean (write back) each D-cache line in the range to the PoC ahead
@ of a device read.
@ NOTE(review): the ENTRY line, address alignment, loop branches and
@ the dsb/return are not visible in this view.
394 dcache_line_size r2, r3
398 dccmvac r0, r3 @ clean D / U line
404 ENDPROC(v7m_dma_clean_range)
407 * v7m_dma_flush_range(start,end)
408 * - start - virtual start address of region
409 * - end - virtual end address of region
411 ENTRY(v7m_dma_flush_range)
412 dcache_line_size r2, r3
@ Clean and invalidate each line in the range to the PoC.
@ NOTE(review): the address alignment, loop branches and dsb/return
@ are not visible in this view.
416 dccimvac r0, r3 @ clean & invalidate D / U line
422 ENDPROC(v7m_dma_flush_range)
425 * dma_map_area(start, size, dir)
426 * - start - kernel virtual start address
427 * - size - size of region
428 * - dir - DMA direction
430 ENTRY(v7m_dma_map_area)
@ NOTE(review): the instruction computing the end address from
@ start + size is not visible in this view.
432 teq r2, #DMA_FROM_DEVICE
@ FROM_DEVICE: invalidate so stale lines are not read after the DMA;
@ any other direction: clean so the device sees the CPU's data.
433 beq v7m_dma_inv_range
434 b v7m_dma_clean_range
435 ENDPROC(v7m_dma_map_area)
438 * dma_unmap_area(start, size, dir)
439 * - start - kernel virtual start address
440 * - size - size of region
441 * - dir - DMA direction
443 ENTRY(v7m_dma_unmap_area)
@ NOTE(review): the end-address computation and the TO_DEVICE return
@ path are not visible in this view.
445 teq r2, #DMA_TO_DEVICE
@ Any direction other than TO_DEVICE: invalidate so the CPU re-reads
@ what the device wrote.
446 bne v7m_dma_inv_range
448 ENDPROC(v7m_dma_unmap_area)
@ The LoUIS (Level of Unification Inner Shareable) flush is aliased to
@ the full kernel cache flush for v7M.
450 .globl v7m_flush_kern_cache_louis
451 .equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
455 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
456 define_cache_functions v7m