/*
 * Copyright (C) 2016 MediaTek Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/linkage.h>

	.text
.equ C1_IBIT,   0x00001000
.equ C1_CBIT,   0x00000004
.equ PSR_F_BIT, 0x00000040
.equ PSR_I_BIT, 0x00000080

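/*
 * Bit masks used below: C1_IBIT is SCTLR.I (bit 12, I-cache enable) and
 * C1_CBIT is SCTLR.C (bit 2, D-cache enable); PSR_I_BIT/PSR_F_BIT are the
 * CPSR IRQ/FIQ mask bits. The helpers that follow read-modify-write SCTLR
 * to turn the caches on or off.
 */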
ENTRY(__enable_icache)
	MRC p15, 0, r0, c1, c0, 0
	ORR r0, r0, #C1_IBIT
	MCR p15, 0, r0, c1, c0, 0
	BX lr
ENDPROC(__enable_icache)
ENTRY(__disable_icache)
	MRC p15, 0, r0, c1, c0, 0
	BIC r0, r0, #C1_IBIT
	MCR p15, 0, r0, c1, c0, 0
	BX lr
ENDPROC(__disable_icache)
ENTRY(__enable_dcache)
	MRC p15, 0, r0, c1, c0, 0
	ORR r0, r0, #C1_CBIT
	dsb
	MCR p15, 0, r0, c1, c0, 0
	dsb
	isb
	BX lr
ENDPROC(__enable_dcache)
ENTRY(__disable_dcache)
	MRC p15, 0, r0, c1, c0, 0
	BIC r0, r0, #C1_CBIT
	dsb
	MCR p15, 0, r0, c1, c0, 0
	dsb
	isb
/*
 * Erratum 794322: an instruction fetch can be allocated into the L2 cache
 * after the cache is disabled.
 * This erratum can be avoided by inserting both of the following after the
 * SCTLR.C bit is cleared to 0, and before the caches are cleaned or
 * invalidated:
 * 1) A TLBIMVA operation to any address.
 * 2) A DSB instruction.
 */
	MCR p15, 0, r0, c8, c7, 1
	dsb
	isb
	BX lr
ENDPROC(__disable_dcache)
ENTRY(__enable_cache)
	MRC p15, 0, r0, c1, c0, 0
	ORR r0, r0, #C1_IBIT
	ORR r0, r0, #C1_CBIT
	MCR p15, 0, r0, c1, c0, 0
	BX lr
ENDPROC(__enable_cache)
ENTRY(__disable_cache)
	MRC p15, 0, r0, c1, c0, 0
	BIC r0, r0, #C1_IBIT
	BIC r0, r0, #C1_CBIT
	MCR p15, 0, r0, c1, c0, 0
/*
 * Erratum 794322: an instruction fetch can be allocated into the L2 cache
 * after the cache is disabled.
 * This erratum can be avoided by inserting both of the following after the
 * SCTLR.C bit is cleared to 0, and before the caches are cleaned or
 * invalidated:
 * 1) A TLBIMVA operation to any address.
 * 2) A DSB instruction.
 */
	MCR p15, 0, r0, c8, c7, 1
	dsb
	BX lr
ENDPROC(__disable_cache)


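/*
 * __inner_flush_dcache_all: clean and invalidate the data/unified caches by
 * set/way for every level up to the Level of Coherency, using essentially the
 * same CLIDR/CCSIDR set/way walk as the kernel's v7_flush_dcache_all.
 */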
ENTRY(__inner_flush_dcache_all)
	push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r14}
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq all_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 0
all_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt all_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
all_loop2:
	mov r9, r4 @ create working copy of max way size
all_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
#ifdef CONFIG_L1C_OPT
@ replace DCCISW with DCISW+DCCSW
	cmp r10, #2
	mrsne r1, cpsr @ disable IRQ and save flags to make clean and invalidate atomic
	orrne r8, r1, #PSR_I_BIT | PSR_F_BIT
	msrne cpsr_c, r8
	mcrne p15, 0, r11, c7, c10, 2 @ clean by set/way
	mcrne p15, 0, r11, c7, c6, 2 @ invalidate by set/way
	msrne cpsr_c, r1
	mcreq p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
#else
	mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
#endif
	subs r9, r9, #1 @ decrement the way
	bge all_loop3
	subs r7, r7, #1 @ decrement the index
	bge all_loop2
all_skip:
	add r10, r10, #2 @ increment cache number
	cmp r3, r10
	bgt all_loop1
all_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r14}
	bx lr
ENDPROC(__inner_flush_dcache_all)

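/*
 * __inner_flush_dcache_L1: same set/way walk as above, but the level
 * increment at the end of the loop is commented out, so only the first
 * data/unified cache level (L1) is cleaned and invalidated.
 */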
ENTRY(__inner_flush_dcache_L1)
	push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r14}
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq L1_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 1
L1_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt L1_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
L1_loop2:
	mov r9, r4 @ create working copy of max way size
L1_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
#ifdef CONFIG_L1C_OPT
@ replace DCCISW with DCISW+DCCSW
	mrs r1, cpsr @ disable IRQ and save flags to make clean and invalidate atomic
	orr r8, r1, #PSR_I_BIT | PSR_F_BIT
	msr cpsr_c, r8
	mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
	mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
	msr cpsr_c, r1
#else
	mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
#endif
	subs r9, r9, #1 @ decrement the way
	bge L1_loop3
	subs r7, r7, #1 @ decrement the index
	bge L1_loop2
L1_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt L1_loop1
L1_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r14}
	bx lr
ENDPROC(__inner_flush_dcache_L1)

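/*
 * __inner_flush_dcache_L2: clean and invalidate by set/way starting at
 * CSSELR value 2 (the second data/unified cache level); with the level
 * increment commented out it stops there as well.
 */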
ENTRY(__inner_flush_dcache_L2)
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	@push {r4,r5,r7,r9,r10,r11}
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq L2_finished @ if loc is 0, then no need to clean
	mov r10, #2 @ start clean at cache level 2
L2_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt L2_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
L2_loop2:
	mov r9, r4 @ create working copy of max way size
L2_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
	subs r9, r9, #1 @ decrement the way
	bge L2_loop3
	subs r7, r7, #1 @ decrement the index
	bge L2_loop2
L2_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt L2_loop1
L2_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
	@pop {r4,r5,r7,r9,r10,r11}
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__inner_flush_dcache_L2)

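/*
 * __inner_clean_dcache_all: clean (write back without invalidating) every
 * data/unified cache level up to the Level of Coherency by set/way.
 */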
ENTRY(__inner_clean_dcache_all)
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	@push {r4,r5,r7,r9,r10,r11}
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq all_cl_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 0
all_cl_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt all_cl_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
all_cl_loop2:
	mov r9, r4 @ create working copy of max way size
all_cl_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c10, 2 @ clean by set/way

	subs r9, r9, #1 @ decrement the way
	bge all_cl_loop3
	subs r7, r7, #1 @ decrement the index
	bge all_cl_loop2
all_cl_skip:
	add r10, r10, #2 @ increment cache number
	cmp r3, r10
	bgt all_cl_loop1
all_cl_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
	@pop {r4,r5,r7,r9,r10,r11}
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__inner_clean_dcache_all)

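/*
 * __inner_clean_dcache_L1: clean only the first data/unified cache level by
 * set/way; the loop over higher levels is commented out.
 */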
ENTRY(__inner_clean_dcache_L1)
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	@push {r4,r5,r7,r9,r10,r11}
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq L1_cl_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 1
L1_cl_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt L1_cl_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
L1_cl_loop2:
	mov r9, r4 @ create working copy of max way size
L1_cl_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c10, 2 @ clean by set/way

	subs r9, r9, #1 @ decrement the way
	bge L1_cl_loop3
	subs r7, r7, #1 @ decrement the index
	bge L1_cl_loop2
L1_cl_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt L1_cl_loop1
L1_cl_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
	@pop {r4,r5,r7,r9,r10,r11}
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__inner_clean_dcache_L1)

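/*
 * __inner_clean_dcache_L2: clean only the L2 (CSSELR value 2) data/unified
 * cache by set/way. The "#if 0" blocks below are disabled debug code that
 * cleaned and invalidated two stack lines by MVA.
 */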
ENTRY(__inner_clean_dcache_L2)
#if 0
	mov r0, sp
	mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
	dsb
	sub r0, r0, #64
	mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
	dsb
#endif
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	@push {r4,r5,r7,r9,r10,r11}
#if 0
	mov r0, sp
	mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
	dsb
	sub r0, r0, #64
	mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
	dsb
#endif
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq L2_cl_finished @ if loc is 0, then no need to clean
	mov r10, #2 @ start clean at cache level 2
L2_cl_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt L2_cl_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
L2_cl_loop2:
	mov r9, r4 @ create working copy of max way size
L2_cl_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
	subs r9, r9, #1 @ decrement the way
	bge L2_cl_loop3
	subs r7, r7, #1 @ decrement the index
	bge L2_cl_loop2
L2_cl_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt L2_cl_loop1
L2_cl_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
	@pop {r4,r5,r7,r9,r10,r11}
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__inner_clean_dcache_L2)
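/*
 * __inner_inv_dcache_all: invalidate (discard without writing back) every
 * data/unified cache level up to the Level of Coherency by set/way.
 */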
ENTRY(__inner_inv_dcache_all)
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	@push {r4,r5,r7,r9,r10,r11}
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq all_inv_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 0
all_inv_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt all_inv_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
all_inv_loop2:
	mov r9, r4 @ create working copy of max way size
all_inv_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way

	subs r9, r9, #1 @ decrement the way
	bge all_inv_loop3
	subs r7, r7, #1 @ decrement the index
	bge all_inv_loop2
all_inv_skip:
	add r10, r10, #2 @ increment cache number
	cmp r3, r10
	bgt all_inv_loop1
all_inv_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
	@pop {r4,r5,r7,r9,r10,r11}
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__inner_inv_dcache_all)

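/*
 * __inner_inv_dcache_L1: invalidate only the first data/unified cache level
 * by set/way.
 */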
ENTRY(__inner_inv_dcache_L1)
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	@push {r4,r5,r7,r9,r10,r11}
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq L1_inv_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 1
L1_inv_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt L1_inv_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
L1_inv_loop2:
	mov r9, r4 @ create working copy of max way size
L1_inv_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
	subs r9, r9, #1 @ decrement the way
	bge L1_inv_loop3
	subs r7, r7, #1 @ decrement the index
	bge L1_inv_loop2
L1_inv_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt L1_inv_loop1
L1_inv_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
	@pop {r4,r5,r7,r9,r10,r11}
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__inner_inv_dcache_L1)

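/*
 * __inner_inv_dcache_L2: invalidate only the L2 (CSSELR value 2)
 * data/unified cache by set/way.
 */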
ENTRY(__inner_inv_dcache_L2)
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	@push {r4,r5,r7,r9,r10,r11}
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq L2_inv_finished @ if loc is 0, then no need to clean
	mov r10, #2 @ start clean at cache level 2
L2_inv_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt L2_inv_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
L2_inv_loop2:
	mov r9, r4 @ create working copy of max way size
L2_inv_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
	subs r9, r9, #1 @ decrement the way
	bge L2_inv_loop3
	subs r7, r7, #1 @ decrement the index
	bge L2_inv_loop2
L2_inv_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt L2_inv_loop1
L2_inv_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
	@pop {r4,r5,r7,r9,r10,r11}
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__inner_inv_dcache_L2)

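/*
 * __disable_dcache__inner_flush_dcache_L1: clear SCTLR.C and then flush the
 * L1 data cache by set/way in a single call. Keeping both steps in one leaf
 * routine presumably avoids any cacheable data access (such as returning
 * through a wrapper) between disabling the cache and flushing it.
 */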
ENTRY(__disable_dcache__inner_flush_dcache_L1)
/*******************************************************************************
 * push stack
 ******************************************************************************/
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
/*******************************************************************************
 * __disable_dcache
 ******************************************************************************/
	MRC p15, 0, r0, c1, c0, 0
	BIC r0, r0, #C1_CBIT
	dsb
	MCR p15, 0, r0, c1, c0, 0
	dsb
	isb
/*
 * Erratum 794322: an instruction fetch can be allocated into the L2 cache
 * after the cache is disabled.
 * This erratum can be avoided by inserting both of the following after the
 * SCTLR.C bit is cleared to 0, and before the caches are cleaned or
 * invalidated:
 * 1) A TLBIMVA operation to any address.
 * 2) A DSB instruction.
 */
	MCR p15, 0, r0, c8, c7, 1
	dsb
	isb
/*******************************************************************************
 * __inner_flush_dcache_L1
 ******************************************************************************/
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq DF1_L1_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 1
DF1_L1_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt DF1_L1_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
DF1_L1_loop2:
	mov r9, r4 @ create working copy of max way size
DF1_L1_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
#if 1
	mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
	mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
#endif

#if 0
	mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
#endif
	subs r9, r9, #1 @ decrement the way
	bge DF1_L1_loop3
	subs r7, r7, #1 @ decrement the index
	bge DF1_L1_loop2
DF1_L1_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt DF1_L1_loop1
DF1_L1_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
/*******************************************************************************
 * pop stack
 ******************************************************************************/
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__disable_dcache__inner_flush_dcache_L1)

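/*
 * __disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2: clear
 * SCTLR.C, flush L1 by set/way, execute clrex to clear the local exclusive
 * monitor, then flush L2 by set/way.
 */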
ENTRY(__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2)
/*******************************************************************************
 * push stack
 ******************************************************************************/
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
/*******************************************************************************
 * __disable_dcache
 ******************************************************************************/
	MRC p15, 0, r0, c1, c0, 0
	BIC r0, r0, #C1_CBIT
	dsb
	MCR p15, 0, r0, c1, c0, 0
	dsb
	isb
/*
 * Erratum 794322: an instruction fetch can be allocated into the L2 cache
 * after the cache is disabled.
 * This erratum can be avoided by inserting both of the following after the
 * SCTLR.C bit is cleared to 0, and before the caches are cleaned or
 * invalidated:
 * 1) A TLBIMVA operation to any address.
 * 2) A DSB instruction.
 */
	MCR p15, 0, r0, c8, c7, 1
	dsb
	isb
/*******************************************************************************
 * __inner_flush_dcache_L1
 ******************************************************************************/
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq DF1F2_L1_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 1
DF1F2_L1_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt DF1F2_L1_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
DF1F2_L1_loop2:
	mov r9, r4 @ create working copy of max way size
DF1F2_L1_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
#if 1
	mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
	mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
#endif

#if 0
	mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
#endif
	subs r9, r9, #1 @ decrement the way
	bge DF1F2_L1_loop3
	subs r7, r7, #1 @ decrement the index
	bge DF1F2_L1_loop2
DF1F2_L1_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt DF1F2_L1_loop1
DF1F2_L1_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
/*******************************************************************************
 * clrex
 ******************************************************************************/
	clrex
/*******************************************************************************
 * __inner_flush_dcache_L2
 ******************************************************************************/
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq DF1F2_L2_finished @ if loc is 0, then no need to clean
	mov r10, #2 @ start clean at cache level 2
DF1F2_L2_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt DF1F2_L2_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
DF1F2_L2_loop2:
	mov r9, r4 @ create working copy of max way size
DF1F2_L2_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
	subs r9, r9, #1 @ decrement the way
	bge DF1F2_L2_loop3
	subs r7, r7, #1 @ decrement the index
	bge DF1F2_L2_loop2
DF1F2_L2_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt DF1F2_L2_loop1
DF1F2_L2_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
/*******************************************************************************
 * pop stack
 ******************************************************************************/
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2)

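/*
 * dis_D_inner_fL1L2 (alias of the long name below): clear SCTLR.C, flush L1
 * by set/way, clrex, then only clean (not invalidate) L2 by set/way.
 */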
ENTRY(dis_D_inner_fL1L2)
ENTRY(__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2)
/*******************************************************************************
 * push stack
 ******************************************************************************/
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
/*******************************************************************************
 * __disable_dcache
 ******************************************************************************/
	MRC p15, 0, r0, c1, c0, 0
	BIC r0, r0, #C1_CBIT
	dsb
	MCR p15, 0, r0, c1, c0, 0
	dsb
	isb
/*
 * Erratum 794322: an instruction fetch can be allocated into the L2 cache
 * after the cache is disabled.
 * This erratum can be avoided by inserting both of the following after the
 * SCTLR.C bit is cleared to 0, and before the caches are cleaned or
 * invalidated:
 * 1) A TLBIMVA operation to any address.
 * 2) A DSB instruction.
 */
	MCR p15, 0, r0, c8, c7, 1
	dsb
	isb
/*******************************************************************************
 * __inner_flush_dcache_L1
 ******************************************************************************/
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq DF1C2_L1_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 1
DF1C2_L1_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt DF1C2_L1_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
DF1C2_L1_loop2:
	mov r9, r4 @ create working copy of max way size
DF1C2_L1_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
#if 1
	mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
	mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
#endif

#if 0
	mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
#endif
	subs r9, r9, #1 @ decrement the way
	bge DF1C2_L1_loop3
	subs r7, r7, #1 @ decrement the index
	bge DF1C2_L1_loop2
DF1C2_L1_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt DF1C2_L1_loop1
DF1C2_L1_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
/*******************************************************************************
 * clrex
 ******************************************************************************/
	clrex
/*******************************************************************************
 * __inner_clean_dcache_L2
 ******************************************************************************/
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq DF1C2_L2_cl_finished @ if loc is 0, then no need to clean
	mov r10, #2 @ start clean at cache level 2
DF1C2_L2_cl_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt DF1C2_L2_cl_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
DF1C2_L2_cl_loop2:
	mov r9, r4 @ create working copy of max way size
DF1C2_L2_cl_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
	subs r9, r9, #1 @ decrement the way
	bge DF1C2_L2_cl_loop3
	subs r7, r7, #1 @ decrement the index
	bge DF1C2_L2_cl_loop2
DF1C2_L2_cl_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt DF1C2_L2_cl_loop1
DF1C2_L2_cl_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
/*******************************************************************************
 * pop stack
 ******************************************************************************/
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2)
ENDPROC(dis_D_inner_fL1L2)


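/*
 * d_i_dis_flush_all: clear both SCTLR.C and SCTLR.I, then clean and
 * invalidate L1 and L2 by set/way, with a clrex between the two levels.
 */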
ENTRY(d_i_dis_flush_all)
/*******************************************************************************
 * push stack
 ******************************************************************************/
	push {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
/*******************************************************************************
 * __disable_dcache
 ******************************************************************************/
	MRC p15, 0, r0, c1, c0, 0
	BIC r0, r0, #C1_CBIT
	BIC r0, r0, #C1_IBIT
	dsb
	MCR p15, 0, r0, c1, c0, 0
	dsb
	isb
/*
 * Erratum 794322: an instruction fetch can be allocated into the L2 cache
 * after the cache is disabled.
 * This erratum can be avoided by inserting both of the following after the
 * SCTLR.C bit is cleared to 0, and before the caches are cleaned or
 * invalidated:
 * 1) A TLBIMVA operation to any address.
 * 2) A DSB instruction.
 */
	MCR p15, 0, r0, c8, c7, 1
	dsb
	isb
/*******************************************************************************
 * __inner_flush_dcache_L1
 ******************************************************************************/
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq DIF1F2_L1_finished @ if loc is 0, then no need to clean
	mov r10, #0 @ start clean at cache level 1
DIF1F2_L1_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt DIF1F2_L1_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
DIF1F2_L1_loop2:
	mov r9, r4 @ create working copy of max way size
DIF1F2_L1_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
#if 1
	mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
	mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
#endif

#if 0
	mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
#endif
	subs r9, r9, #1 @ decrement the way
	bge DIF1F2_L1_loop3
	subs r7, r7, #1 @ decrement the index
	bge DIF1F2_L1_loop2
DIF1F2_L1_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt DIF1F2_L1_loop1
DIF1F2_L1_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
/*******************************************************************************
 * clrex
 ******************************************************************************/
	clrex
/*******************************************************************************
 * __inner_flush_dcache_L2
 ******************************************************************************/
	dmb @ ensure ordering with previous memory accesses
	mrc p15, 1, r0, c0, c0, 1 @ read clidr
	ands r3, r0, #0x7000000 @ extract loc from clidr
	mov r3, r3, lsr #23 @ left align loc bit field
	beq DIF1F2_L2_finished @ if loc is 0, then no need to clean
	mov r10, #2 @ start clean at cache level 2
DIF1F2_L2_loop1:
	add r2, r10, r10, lsr #1 @ work out 3x current cache level
	mov r1, r0, lsr r2 @ extract cache type bits from clidr
	and r1, r1, #7 @ mask off the bits for current cache only
	cmp r1, #2 @ see what cache we have at this level
	blt DIF1F2_L2_skip @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb @ isb to sync the new cssr & csidr
	mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
	and r2, r1, #7 @ extract the length of the cache lines
	add r2, r2, #4 @ add 4 (line length offset)
	ldr r4, =0x3ff
	ands r4, r4, r1, lsr #3 @ find maximum number of the way size
	clz r5, r4 @ find bit position of way size increment
	ldr r7, =0x7fff
	ands r7, r7, r1, lsr #13 @ extract max number of the index size
DIF1F2_L2_loop2:
	mov r9, r4 @ create working copy of max way size
DIF1F2_L2_loop3:
	ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
	THUMB( lsl r6, r9, r5 )
	THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
	ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
	THUMB( lsl r6, r7, r2 )
	THUMB( orr r11, r11, r6 ) @ factor index number into r11
	mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
	subs r9, r9, #1 @ decrement the way
	bge DIF1F2_L2_loop3
	subs r7, r7, #1 @ decrement the index
	bge DIF1F2_L2_loop2
DIF1F2_L2_skip:
	@add r10, r10, #2 @ increment cache number
	@cmp r3, r10
	@bgt DIF1F2_L2_loop1
DIF1F2_L2_finished:
	mov r10, #0 @ switch back to cache level 0
	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	dsb
	isb
/*******************************************************************************
 * pop stack
 ******************************************************************************/
	pop {r0,r1,r2,r3,r4,r5,r6,r7,r9,r10,r11,r14}
	bx lr
ENDPROC(d_i_dis_flush_all)

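/*
 * dis_D_inner_flush_all: clear SCTLR.C and tail-call v7_flush_dcache_all,
 * which performs the full set/way clean and invalidate and returns to the
 * original caller via lr.
 */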
ENTRY(dis_D_inner_flush_all)
	/* disable data cache */

	MRC p15, 0, r0, c1, c0, 0
	BIC r0, r0, #C1_CBIT
	dsb
	MCR p15, 0, r0, c1, c0, 0
	dsb
	isb

	b v7_flush_dcache_all
ENDPROC(dis_D_inner_flush_all)

	.end