/*******************************************************************************
* Copyright (C) 2016, ZTE Corporation. All rights reserved.
*
* File name:     head.S
* File ID:       head.S
* Summary:       kernel image decompression and boot code
*
* Date            Version     Tag             Author          Description
* ------------------------------------------------------------------------------
* 2016/09/12      V1.0        Create          Deng Ning       Created
*
*******************************************************************************/

/*******************************************************************************
*                                 Header files                                 *
*******************************************************************************/

/*******************************************************************************
*                               Macro definitions                              *
*******************************************************************************/
#ifdef __thumb2__
#define ARM(x...)
#define THUMB(x...)	x
#define W(instr)	instr.w
#else
#define ARM(x...)	x
#define THUMB(x...)
#define W(instr)	instr
#endif
#define ARM_BE8(x...)

#define END(name) \
	.size name, .-name

#define ENDPROC(name) \
	.type name, %function; \
	END(name)

#define CYGOPT_HAL_ARM_MMU
#define CONFIG_CPU_CP15

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif
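/*
 * CB_BITS supplies the C and B bits (bits 3:2) of a short-descriptor section
 * entry: 0x08 = C=1,B=0 (write-through), 0x0c = C=1,B=1 (write-back).  It is
 * ORed into the RAM section settings by __armv7_mmu_cache_on below.
 */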

/*******************************************************************************
*                         External function declarations                       *
*******************************************************************************/
.extern decompress_kernel

/*******************************************************************************
*                         External variable declarations                       *
*******************************************************************************/
.extern image_start

/*******************************************************************************
*                        Global function implementations                       *
*******************************************************************************/

	.section ".start", #alloc, #execinstr
	.align
	.arm				@ Always enter in ARM state
	.global _start
	.type	_start, function

_start:
	.rept	8
	mov	r0, r0
	.endr

	.text
	/* move to SVC mode */
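	/*
	 * 0xd3 = SVC mode (0x13) with the I and F bits set, i.e. IRQ and FIQ
	 * masked; clearing bit 8 (the A bit) then unmasks asynchronous aborts.
	 */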
	mrs	r0, cpsr
	bic	r0, #0x1f
	orr	r0, r0, #0xd3
	bic	r0, #(1<<8)		/* unmask asynchronous aborts */
	msr	cpsr_cxsf, r0

	/* Control Register Setup */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1<<0)		/* MMU disabled */
	orr	r0, r0, #(1<<1)		/* Alignment fault checking enabled */
	bic	r0, r0, #(1<<2)		/* Data Cache disabled */
	orr	r0, r0, #(1<<11)	/* Branch prediction enabled */
	bic	r0, r0, #(1<<12)	/* Instruction Cache disabled */
	bic	r0, r0, #(1<<13)	/* use VBAR to set the vector base address */
	dsb				/* ensure all previous loads/stores have completed */
	mcr	p15, 0, r0, c1, c0, 0
	isb

	adr	r0, LC0
	ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
	ldr	sp, [r0, #28]

	/*
	 * We might be running at a different address.  We need
	 * to fix up various pointers.
	 */
	sub	r0,  r0,  r1	@ calculate the delta offset
	add	r2,  r2,  r0	@ __bss_start
	add	r3,  r3,  r0	@ __bss_end
	add	r6,  r6,  r0	@ _edata
	add	r10, r10, r0	@ inflated kernel size location
	add	r11, r11, r0	@ got_start
	add	r12, r12, r0	@ got_end
	add	sp,  sp,  r0	@ sp

	/* entry address of the decompressed kernel */
	ldr	r4, =image_start
	add	r4,  r4,  r0
	ldr	r4, [r4]

	/*
	 * The kernel build system appends the size of the
	 * decompressed kernel at the end of the compressed data
	 * in little-endian form.
	 */
	ldrb	r9, [r10, #0]
	ldrb	lr, [r10, #1]
	orr	r9, r9, lr, lsl #8
	ldrb	lr, [r10, #2]
	ldrb	r10, [r10, #3]
	orr	r9, r9, lr, lsl #16
	orr	r9, r9, r10, lsl #24

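	/*
	 * r10 = end address of the decompressed image, r9 = our own code.
	 * If the inflated kernel would overwrite the running decompressor,
	 * hang here (the flags are never updated, so the bgt spins forever)
	 * rather than corrupt ourselves.
	 */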
	add	r10, r4, r9
	adr	r9, _clear_bss
	cmp	r10, r9
dead_loop:
	bgt	dead_loop

	/*
	 * Relocate all entries in the GOT table.
	 * Bump bss entries to _edata + dtb size.
	 */
	mov	r5, #0
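	/* no DTB is appended in this boot path, so r5 (the dtb size) stays
	 * zero and the bss-entry bump below is effectively a no-op */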
1:
	ldr	r1, [r11, #0]	@ relocate entries in the GOT
	add	r1, r1, r0	@ This fixes up C references
	cmp	r1, r2		@ if entry >= bss_start &&
	cmphs	r3, r1		@    bss_end > entry
	addhi	r1, r1, r5	@ entry += dtb size
	str	r1, [r11], #4	@ next entry
	cmp	r11, r12
	blo	1b

	/* bump our bss pointers too */
	add	r2, r2, r5
	add	r3, r3, r5

	/*
	 * Zero the BSS section.
	 */
	mov	r0, #0
_clear_bss:
	str	r0, [r2], #4
	cmp	r3, r2
	bhi	_clear_bss

	bl	cache_on
	/*
	 * decompress the kernel
	 */
	mov	r0, r4			@ decompressed kernel output address
	mov	r1, sp			@ malloc space above stack
	add	r2, sp, #0x10000	@ 64k max
	bl	decompress_kernel
	bl	cache_clean_flush
	bl	cache_off

	/*
	 * enter the kernel
	 */
	mov	r0, #0
	bx	r4

	.align	2
	.type	LC0, #object

LC0:
	.word	LC0			@ r1
	.word	__bss_start		@ r2
	.word	__bss_end		@ r3
	.word	_edata			@ r6
	.word	input_data_end - 4	@ r10 (inflated size location)
	.word	_got_start		@ r11
	.word	_got_end		@ ip
	.word	.L_user_stack_end	@ sp
	.size	LC0, . - LC0

__setup_mmu:
	lsr	r3, r4, #0x14		@ round the load address in r4
	lsl	r3, r3, #0x14		@ down to a 1 MB boundary
	sub	r3, r3, #16384		@ Page directory size
	bic	r3, r3, #0xff		@ Align the pointer
	bic	r3, r3, #0x3f00
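	/*
	 * r3 now addresses a 16 KB-aligned first-level translation table
	 * occupying the 16 KB immediately below the 1 MB boundary at or
	 * below the load address.
	 */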
	/*
	 * Initialise the page tables, turning on the cacheable and bufferable
	 * bits for the RAM area only.
	 */
	mov	r0, r3
	mov	r9, r0, lsr #18
	mov	r9, r9, lsl #18		@ start of RAM
	add	r10, r9, #0x10000000	@ a reasonable RAM size
	mov	r1, #0x12		@ XN|U + section mapping
	orr	r1, r1, #3 << 10	@ AP=11
	add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
	/*
	 * If ever we are running from Flash, then we surely want the cache
	 * to be enabled also for our execution instance...  We map 2MB of it
	 * so there is no map overlap problem for up to 1 MB compressed kernel.
	 * If the execution is in RAM then we would only be duplicating the above.
	 */
	orr	r1, r6, #0x04		@ ensure B is set for this
	orr	r1, r1, #3 << 10
	mov	r2, pc
	mov	r2, r2, lsr #20
	orr	r1, r1, r2, lsl #20
	add	r0, r3, r2, lsl #2
	str	r1, [r0], #4
	add	r1, r1, #1048576
	str	r1, [r0]
	mov	pc, lr
ENDPROC(__setup_mmu)

__armv7_mmu_cache_on:
	mov	r12, lr
#ifdef CYGOPT_HAL_ARM_MMU
	mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
	tst	r11, #0xf		@ VMSA
	movne	r6, #CB_BITS | 0x02	@ !XN
	blne	__setup_mmu
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
	tst	r11, #0xf		@ VMSA
	mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
	mrc	p15, 0, r0, c1, c0, 0	@ read control reg
	bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
	orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
	orr	r0, r0, #0x003c		@ write buffer
	bic	r0, r0, #2		@ A (no unaligned access fault)
	orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
					@ (needed for ARM1176)
#ifdef CYGOPT_HAL_ARM_MMU
ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
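	/*
	 * The NE conditions below still test the VMSA check (tst r11, #0xf)
	 * above: none of the intervening mrc/mcr/bic/orr instructions update
	 * the condition flags.
	 */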
	mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
	orrne	r0, r0, #1		@ MMU enabled
	movne	r1, #0xfffffffd		@ domain 0 = client
	bic	r6, r6, #1 << 31	@ 32-bit translation system
	bic	r6, r6, #3 << 0		@ use only ttbr0
	mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
	mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
	mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
	mcr	p15, 0, r0, c7, c5, 4	@ ISB
	mcr	p15, 0, r0, c1, c0, 0	@ load control register
	mrc	p15, 0, r0, c1, c0, 0	@ and read it back
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 4	@ ISB
	mov	pc, r12

#define PROC_ENTRY_SIZE (4*5)

/* cache on */
	.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#elif defined(CONFIG_CPU_V7M)
		/*
		 * On v7-M the processor id is located in the V7M_SCB_CPUID
		 * register, but as cache handling is IMPLEMENTATION DEFINED on
		 * v7-M (if existent at all) we just return early here.
		 * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
		 * __armv7_mmu_cache_{on,off,flush}) would be selected which
		 * use cp15 registers that are not implemented on v7-M.
		 */
		bx	lr
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b
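/*
 * r3 holds the byte offset of the requested method slot within a proc_types
 * entry: 8 = 'cache on', 12 = 'cache off', 16 = 'cache flush' (see the table
 * layout below), so addeq pc, r12, r3 jumps straight to the matching method.
 */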

/*
 * Table for cache operations.  This is basically:
 *   - CPU ID match
 *   - CPU ID mask
 *   - 'cache on' method instruction
 *   - 'cache off' method instruction
 *   - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
	.align	2
	.type	proc_types, #object
proc_types:
	.word	0x000f0000		@ new CPU Id
	.word	0x000f0000
	W(b)	__armv7_mmu_cache_on
	W(b)	__armv7_mmu_cache_off
	W(b)	__armv7_mmu_cache_flush

	.word	0			@ unrecognised type
	.word	0
	mov	pc, lr
 THUMB(		nop			)
	mov	pc, lr
 THUMB(		nop			)
	mov	pc, lr
 THUMB(		nop			)

	.size	proc_types, . - proc_types

/*
 * If you get a "non-constant expression in ".if" statement"
 * error from the assembler on this line, check that you have
 * not accidentally written a "b" instruction where you should
 * have written W(b).
 */
	.if (. - proc_types) % PROC_ENTRY_SIZE != 0
	.error "The size of one or more proc_types entries is wrong."
	.endif

	.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv7_mmu_cache_off:
	mrc	p15, 0, r0, c1, c0
#ifdef CYGOPT_HAL_ARM_MMU
	bic	r0, r0, #0x000d
#else
	bic	r0, r0, #0x000c
#endif
	mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
	mov	r12, lr
	bl	__armv7_mmu_cache_flush
	mov	r0, #0
#ifdef CYGOPT_HAL_ARM_MMU
	mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
	mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
	mcr	p15, 0, r0, c7, c10, 4	@ DSB
	mcr	p15, 0, r0, c7, c5, 4	@ ISB
	mov	pc, r12

	.align	5
cache_clean_flush:
	mov	r3, #16
	b	call_cache_fn

__armv7_mmu_cache_flush:
	tst	r4, #1
	bne	_iflush
	mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
	tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
	mov	r10, #0
	beq	hierarchical
	mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
	b	_iflush
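/*
 * Set/way clean+invalidate for an ARMv7 hierarchical cache: read CLIDR to
 * find the Level of Coherency, then for each data/unified level select it
 * in CSSELR, read CCSIDR for the set and way counts, and issue DCCISW
 * (clean and invalidate by set/way) for every set/way combination.
 */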
hierarchical:
	mcr	p15, 0, r10, c7, c10, 5	@ DMB
	stmfd	sp!, {r0-r7, r9-r11}
	mrc	p15, 1, r0, c0, c0, 1	@ read CLIDR
	ands	r3, r0, #0x7000000	@ extract LoC from CLIDR
	mov	r3, r3, lsr #23		@ left align LoC bit field
	beq	_finished		@ if LoC is 0, no need to clean
	mov	r10, #0			@ start at cache level 0 (r10 = level << 1)
_loop1:
	add	r2, r10, r10, lsr #1	@ r2 = 3 * cache level
	mov	r1, r0, lsr r2		@ bottom 3 bits are the Ctype for this level
	and	r1, r1, #7		@ isolate those bits
	cmp	r1, #2			@ no cache or I-cache only at this level?
	blt	_skip
	mcr	p15, 2, r10, c0, c0, 0	@ select this cache level in CSSELR
	mcr	p15, 0, r10, c7, c5, 4	@ ISB to sync the CSSELR change
	mrc	p15, 1, r1, c0, c0, 0	@ read CCSIDR
	and	r2, r1, #7		@ extract the line length field
	add	r2, r2, #4		@ add 4 (line length offset)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3	@ r4 = maximum way number (right-aligned)
	clz	r5, r4			@ r5 = bit position of the way in DCCISW
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13	@ r7 = maximum set number (right-aligned)
_loop2:
	mov	r9, r4			@ working copy of the maximum way number
_loop3:
 ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
 ARM(	orr	r11, r11, r7, lsl r2	)	@ factor set number into r11
 THUMB(	lsl	r6, r9, r5		)
 THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
 THUMB(	lsl	r6, r7, r2		)
 THUMB(	orr	r11, r11, r6		)	@ factor set number into r11
	mcr	p15, 0, r11, c7, c14, 2	@ DCCISW: clean and invalidate by set/way
	subs	r9, r9, #1		@ decrement the way
	bge	_loop3
	subs	r7, r7, #1		@ decrement the set
	bge	_loop2
_skip:
	add	r10, r10, #2		@ increment the cache number
	cmp	r3, r10
	bgt	_loop1
_finished:
	ldmfd	sp!, {r0-r7, r9-r11}
	mov	r10, #0			@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in CSSELR
_iflush:
	mcr	p15, 0, r10, c7, c10, 4	@ DSB
	mcr	p15, 0, r10, c7, c5, 0	@ invalidate I-cache and BTB
	mcr	p15, 0, r10, c7, c10, 4	@ DSB
	mcr	p15, 0, r10, c7, c5, 4	@ ISB
	mov	pc, lr

	.align
	.section ".stack", "aw", %nobits

.L_user_stack:	.space	4096
.L_user_stack_end: