/*
 * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. All rights reserved.
 * Minkyu Kang <mk7.kang@samsung.com>
 *
 * based on arch/arm/cpu/armv7/s5pc2xx/cache.S
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <asm/arch/cpu.h>

/* Exported entry points for the cache maintenance helpers below. */
.global invalidate_dcache
.global l2_cache_enable
.global l2_cache_disable
/*
 * invalidate_dcache()
 *
 * Invalidate the whole D-cache, level by level, using set/way
 * operations (DCISW).  Walks CLIDR from level 0 up to LoC, and for
 * each data/unified cache level iterates every way and set.
 *
 * Corrupted registers: r0-r5, r7, r9-r11
 * (all working registers are saved/restored on the stack)
 */
invalidate_dcache:
	stmfd	r13!, {r0 - r5, r7, r9 - r12, r14}

	mrc	p15, 1, r0, c0, c0, 1	@ read clidr
	ands	r3, r0, #0x7000000	@ extract loc from clidr
	mov	r3, r3, lsr #23		@ left align loc bit field
	beq	finished_inval		@ if loc is 0, then no need to
					@ invalidate
	mov	r10, #0			@ start clean at cache level 0
inval_loop1:
	add	r2, r10, r10, lsr #1	@ work out 3x current cache
					@ level
	mov	r1, r0, lsr r2		@ extract cache type bits from
					@ clidr
	and	r1, r1, #7		@ mask of the bits for current
					@ cache only
	cmp	r1, #2			@ see what cache we have at
					@ this level
	blt	skip_inval		@ skip if no cache, or just
					@ i-cache
	mcr	p15, 2, r10, c0, c0, 0	@ select current cache level
					@ in cssr
	mov	r2, #0			@ operand for mcr SBZ
	mcr	p15, 0, r2, c7, c5, 4	@ flush prefetch buffer to
					@ sync the new cssr&csidr,
					@ with armv7 this is 'isb',
					@ but we compile with armv5
	mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
	and	r2, r1, #7		@ extract the length of the
					@ cache lines
	add	r2, r2, #4		@ add 4 (line length offset)
	ldr	r4, =0x3ff		@ mask for csidr ways field
	ands	r4, r4, r1, lsr #3	@ find maximum number on the
					@ way size
	clz	r5, r4			@ find bit position of way
					@ size increment
	ldr	r7, =0x7fff		@ mask for csidr sets field
	ands	r7, r7, r1, lsr #13	@ extract max number of the
					@ index size
inval_loop2:
	mov	r9, r4			@ create working copy of max
					@ way size
inval_loop3:
	orr	r11, r10, r9, lsl r5	@ factor way and cache number
					@ into r11
	orr	r11, r11, r7, lsl r2	@ factor index number into r11
	mcr	p15, 0, r11, c7, c6, 2	@ invalidate by set/way
	subs	r9, r9, #1		@ decrement the way
	bge	inval_loop3
	subs	r7, r7, #1		@ decrement the index
	bge	inval_loop2
skip_inval:
	add	r10, r10, #2		@ increment cache number
	cmp	r3, r10			@ more levels below LoC?
	bgt	inval_loop1
finished_inval:
	mov	r10, #0			@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0	@ select current cache level
					@ in cssr
	mcr	p15, 0, r10, c7, c5, 4	@ flush prefetch buffer,
					@ with armv7 this is 'isb',
					@ but we compile with armv5

	ldmfd	r13!, {r0 - r5, r7, r9 - r12, pc}
/*
 * l2_cache_enable()
 *
 * Enable the L2 cache by setting the L2EN bit (bit 1) in the CP15
 * Auxiliary Control Register.
 * NOTE(review): the read-modify-write bit was lost in this copy;
 * reconstructed as ACTLR.L2EN — confirm against the SoC TRM.
 */
l2_cache_enable:
	push	{r0, r1, r2, lr}
	mrc	p15, 0, r0, c1, c0, 1	@ read ACTLR
	orr	r0, r0, #(1 << 1)	@ set L2EN bit
	mcr	p15, 0, r0, c1, c0, 1	@ write back ACTLR
	pop	{r0, r1, r2, pc}
/*
 * l2_cache_disable()
 *
 * Disable the L2 cache by clearing the L2EN bit (bit 1) in the CP15
 * Auxiliary Control Register.
 * NOTE(review): the read-modify-write bit was lost in this copy;
 * reconstructed as ACTLR.L2EN — confirm against the SoC TRM.
 */
l2_cache_disable:
	push	{r0, r1, r2, lr}
	mrc	p15, 0, r0, c1, c0, 1	@ read ACTLR
	bic	r0, r0, #(1 << 1)	@ clear L2EN bit
	mcr	p15, 0, r0, c1, c0, 1	@ write back ACTLR
	pop	{r0, r1, r2, pc}
/* Exported entry points for the generic ARMv7 flush routines below. */
.global v7_flush_dcache_all
.global v7_flush_cache_all
/*
 * v7_flush_dcache_all()
 *
 * Flush (clean & invalidate) the whole D-cache using set/way
 * operations (DCCISW), walking CLIDR from level 0 up to LoC.
 * Takes no arguments; returns via lr.
 *
 * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
 */
v7_flush_dcache_all:
	mrc	p15, 1, r0, c0, c0, 1	@ read clidr
	ands	r3, r0, #0x7000000	@ extract loc from clidr
	mov	r3, r3, lsr #23		@ left align loc bit field
	beq	finished		@ if loc is 0, then no need to clean
	mov	r10, #0			@ start clean at cache level 0
flush_loop1:
	add	r2, r10, r10, lsr #1	@ work out 3x current cache level
	mov	r1, r0, lsr r2		@ extract cache type bits from clidr
	and	r1, r1, #7		@ mask of the bits for current cache only
	cmp	r1, #2			@ see what cache we have at this level
	blt	skip			@ skip if no cache, or just i-cache
	mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
	mcr	p15, 0, r10, c7, c5, 4	@ flush prefetch buffer,
					@ with armv7 this is 'isb',
					@ but we compile with armv5
	mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
	and	r2, r1, #7		@ extract the length of the cache lines
	add	r2, r2, #4		@ add 4 (line length offset)
	ldr	r4, =0x3ff		@ mask for csidr ways field
	ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
	clz	r5, r4			@ find bit position of way size increment
	ldr	r7, =0x7fff		@ mask for csidr sets field
	ands	r7, r7, r1, lsr #13	@ extract max number of the index size
flush_loop2:
	mov	r9, r4			@ create working copy of max way size
flush_loop3:
	orr	r11, r10, r9, lsl r5	@ factor way and cache number into r11
	orr	r11, r11, r7, lsl r2	@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
	subs	r9, r9, #1		@ decrement the way
	bge	flush_loop3
	subs	r7, r7, #1		@ decrement the index
	bge	flush_loop2
skip:
	add	r10, r10, #2		@ increment cache number
	cmp	r3, r10			@ more levels below LoC?
	bgt	flush_loop1
finished:
	mov	r10, #0			@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
	mcr	p15, 0, r10, c7, c5, 4	@ flush prefetch buffer,
					@ with armv7 this is 'isb',
					@ but we compile with armv5
	mov	pc, lr
/*
 * v7_flush_cache_all()
 *
 * Flush the entire cache system.
 *  The data cache flush is achieved using atomic clean / invalidates
 *  working outwards from L1 cache. This is done using Set/Way based
 *  cache maintenance instructions.
 *  The instruction cache can still be invalidated back to the point of
 *  unification in a single instruction (ICIALLU).
 */
v7_flush_cache_all:
	stmfd	sp!, {r0-r7, r9-r11, lr}
	bl	v7_flush_dcache_all	@ clean+invalidate all D-cache levels
	mov	r0, #0			@ ICIALLU operand SBZ
	mcr	p15, 0, r0, c7, c5, 0	@ I+BTB cache invalidate
	ldmfd	sp!, {r0-r7, r9-r11, lr}
	mov	pc, lr