b.liu | e958203 | 2025-04-17 19:18:16 +0800 | [diff] [blame^] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | /* |
| 3 | * linux/arch/arm/boot/compressed/head.S |
| 4 | * |
| 5 | * Copyright (C) 1996-2002 Russell King |
| 6 | * Copyright (C) 2004 Hyok S. Choi (MPU support) |
| 7 | */ |
| 8 | #include <linux/linkage.h> |
| 9 | #include <asm/assembler.h> |
| 10 | #include <asm/v7m.h> |
| 11 | |
| 12 | #include "efi-header.S" |
| 13 | |
| 14 | AR_CLASS( .arch armv7-a ) |
| 15 | M_CLASS( .arch armv7-m ) |
| 16 | |
| 17 | /* |
| 18 | * Debugging stuff |
| 19 | * |
| 20 | * Note that these macros must not contain any code which is not |
| 21 | * 100% relocatable. Any attempt to do so will result in a crash. |
| 22 | * Please select one of the following when turning on debugging. |
| 23 | */ |
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

/*
 * Debug output through the DCC (debug comms channel); the CP14
 * register used differs per CPU family.  loadsp is a no-op here
 * since no UART base address is needed.
 */
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp1, tmp2
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp,	rb, tmp1, tmp2
		addruart \rb, \tmp1, \tmp2
		.endm
#endif
#endif
#endif
| 72 | |
| 73 | .macro kputc,val |
| 74 | mov r0, \val |
| 75 | bl putc |
| 76 | .endm |
| 77 | |
| 78 | .macro kphex,val,len |
| 79 | mov r0, \val |
| 80 | mov r1, #\len |
| 81 | bl phex |
| 82 | .endm |
| 83 | |
| 84 | .macro debug_reloc_start |
| 85 | #ifdef DEBUG |
| 86 | kputc #'\n' |
| 87 | kphex r6, 8 /* processor id */ |
| 88 | kputc #':' |
| 89 | kphex r7, 8 /* architecture id */ |
| 90 | #ifdef CONFIG_CPU_CP15 |
| 91 | kputc #':' |
| 92 | mrc p15, 0, r0, c1, c0 |
| 93 | kphex r0, 8 /* control reg */ |
| 94 | #endif |
| 95 | kputc #'\n' |
| 96 | kphex r5, 8 /* decompressed kernel start */ |
| 97 | kputc #'-' |
| 98 | kphex r9, 8 /* decompressed kernel end */ |
| 99 | kputc #'>' |
| 100 | kphex r4, 8 /* kernel execution address */ |
| 101 | kputc #'\n' |
| 102 | #endif |
| 103 | .endm |
| 104 | |
| 105 | .macro debug_reloc_end |
| 106 | #ifdef DEBUG |
| 107 | kphex r5, 8 /* end of kernel */ |
| 108 | kputc #'\n' |
| 109 | mov r0, r4 |
| 110 | bl memdump /* dump 256 bytes at start of kernel */ |
| 111 | #endif |
| 112 | .endm |
| 113 | |
| 114 | /* |
| 115 | * Debug kernel copy by printing the memory addresses involved |
| 116 | */ |
| 117 | .macro dbgkc, begin, end, cbegin, cend |
| 118 | #ifdef DEBUG |
| 119 | kputc #'\n' |
| 120 | kputc #'C' |
| 121 | kputc #':' |
| 122 | kputc #'0' |
| 123 | kputc #'x' |
| 124 | kphex \begin, 8 /* Start of compressed kernel */ |
| 125 | kputc #'-' |
| 126 | kputc #'0' |
| 127 | kputc #'x' |
| 128 | kphex \end, 8 /* End of compressed kernel */ |
| 129 | kputc #'-' |
| 130 | kputc #'>' |
| 131 | kputc #'0' |
| 132 | kputc #'x' |
| 133 | kphex \cbegin, 8 /* Start of kernel copy */ |
| 134 | kputc #'-' |
| 135 | kputc #'0' |
| 136 | kputc #'x' |
| 137 | kphex \cend, 8 /* End of kernel copy */ |
| 138 | kputc #'\n' |
| 139 | kputc #'\r' |
| 140 | #endif |
| 141 | .endm |
| 142 | |
| 143 | .macro be32tocpu, val, tmp |
| 144 | #ifndef __ARMEB__ |
| 145 | /* convert to little endian */ |
| 146 | rev_l \val, \tmp |
| 147 | #endif |
| 148 | .endm |
| 149 | |
| 150 | .section ".start", "ax" |
| 151 | /* |
| 152 | * sort out different calling conventions |
| 153 | */ |
| 154 | .align |
| 155 | /* |
| 156 | * Always enter in ARM state for CPUs that support the ARM ISA. |
| 157 | * As of today (2014) that's exactly the members of the A and R |
| 158 | * classes. |
| 159 | */ |
| 160 | AR_CLASS( .arm ) |
| 161 | start: |
| 162 | .type start,#function |
| 163 | /* |
| 164 | * These 7 nops along with the 1 nop immediately below for |
| 165 | * !THUMB2 form 8 nops that make the compressed kernel bootable |
| 166 | * on legacy ARM systems that were assuming the kernel in a.out |
| 167 | * binary format. The boot loaders on these systems would |
| 168 | * jump 32 bytes into the image to skip the a.out header. |
| 169 | * with these 8 nops filling exactly 32 bytes, things still |
| 170 | * work as expected on these legacy systems. Thumb2 mode keeps |
| 171 | * 7 of the nops as it turns out that some boot loaders |
| 172 | * were patching the initial instructions of the kernel, i.e |
| 173 | * had started to exploit this "patch area". |
| 174 | */ |
| 175 | __initial_nops |
| 176 | .rept 5 |
| 177 | __nop |
| 178 | .endr |
| 179 | #ifndef CONFIG_THUMB2_KERNEL |
| 180 | __nop |
| 181 | #else |
| 182 | AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode |
| 183 | M_CLASS( nop.w ) @ M: already in Thumb2 mode |
| 184 | .thumb |
| 185 | #endif |
| 186 | W(b) 1f |
| 187 | |
| 188 | .word _magic_sig @ Magic numbers to help the loader |
| 189 | .word _magic_start @ absolute load/run zImage address |
| 190 | .word _magic_end @ zImage end address |
| 191 | .word 0x04030201 @ endianness flag |
| 192 | .word 0x45454545 @ another magic number to indicate |
| 193 | .word _magic_table @ additional data table |
| 194 | |
| 195 | __EFI_HEADER |
| 196 | 1: |
| 197 | ARM_BE8( setend be ) @ go BE8 if compiled for BE8 |
| 198 | AR_CLASS( mrs r9, cpsr ) |
| 199 | #ifdef CONFIG_ARM_VIRT_EXT |
| 200 | bl __hyp_stub_install @ get into SVC mode, reversibly |
| 201 | #endif |
| 202 | mov r7, r1 @ save architecture ID |
| 203 | mov r8, r2 @ save atags pointer |
| 204 | |
| 205 | #ifndef CONFIG_CPU_V7M |
| 206 | /* |
| 207 | * Booting from Angel - need to enter SVC mode and disable |
| 208 | * FIQs/IRQs (numeric definitions from angel arm.h source). |
| 209 | * We only do this if we were in user mode on entry. |
| 210 | */ |
| 211 | mrs r2, cpsr @ get current mode |
| 212 | tst r2, #3 @ not user? |
| 213 | bne not_angel |
| 214 | mov r0, #0x17 @ angel_SWIreason_EnterSVC |
| 215 | ARM( swi 0x123456 ) @ angel_SWI_ARM |
| 216 | THUMB( svc 0xab ) @ angel_SWI_THUMB |
| 217 | not_angel: |
| 218 | safe_svcmode_maskall r0 |
| 219 | msr spsr_cxsf, r9 @ Save the CPU boot mode in |
| 220 | @ SPSR |
| 221 | #endif |
| 222 | /* |
| 223 | * Note that some cache flushing and other stuff may |
| 224 | * be needed here - is there an Angel SWI call for this? |
| 225 | */ |
| 226 | |
| 227 | /* |
| 228 | * some architecture specific code can be inserted |
| 229 | * by the linker here, but it should preserve r7, r8, and r9. |
| 230 | */ |
| 231 | |
| 232 | .text |
| 233 | |
#ifdef CONFIG_AUTO_ZRELADDR
		/*
		 * Find the start of physical memory.  As we are executing
		 * without the MMU on, we are in the physical address space.
		 * We just need to get rid of any offset by aligning the
		 * address.
		 *
		 * This alignment is a balance between the requirements of
		 * different platforms - we have chosen 128MB to allow
		 * platforms which align the start of their physical memory
		 * to 128MB to use this feature, while allowing the zImage
		 * to be placed within the first 128MB of memory on other
		 * platforms.  Increasing the alignment means we place
		 * stricter alignment requirements on the start of physical
		 * memory, but relaxing it means that we break people who
		 * are already placing their zImage in (eg) the top 64MB
		 * of this range.
		 */
		mov	r4, pc
		and	r4, r4, #0xf8000000
		/* Determine final kernel image address. */
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite ourself.
		 * That means r4 < pc || r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most unfrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, LC0+32
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on
restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = final kernel address (possibly with LSB set)
 *   r5  = appended dtb size (still unknown)
 *   r6  = _edata
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including  bss/stack/malloc space if non XIP
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the later to be translated
		 * and folded into the former here. No GOT fixup has occurred
		 * yet, but none of the code we're about to call uses any
		 * global variable.
		*/

		/* Get the initial DTB size */
		ldr	r5, [r6, #4]
		be32tocpu r5, r1
		/* 50% DTB growth should be good enough */
		add	r5, r5, r5, lsr #1
		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7
		/* clamp to 32KB min and 1MB max */
		cmp	r5, #(1 << 15)
		movlo	r5, #(1 << 15)
		cmp	r5, #(1 << 20)
		movhi	r5, #(1 << 20)
		/* temporarily relocate the stack past the DTB work space */
		add	sp, sp, r5

		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		mov	r2, r5
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		mov	r2, r5
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, r5
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area. To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the current DTB size */
		ldr	r5, [r6, #4]
		be32tocpu r5, r1

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif
| 419 | |
/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including  bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added. This avoids overwriting
		 * ourself when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		/*
		 * Compute the address of the hyp vectors after relocation.
		 * This requires some arithmetic since we cannot directly
		 * reference __hyp_stub_vectors in a PC-relative way.
		 * Call __hyp_set_vectors with the new address so that we
		 * can HVC again after the copy.
		 */
0:		adr	r0, 0b
		movw	r1, #:lower16:__hyp_stub_vectors - 0b
		movt	r1, #:upper16:__hyp_stub_vectors - 0b
		add	r0, r0, r1
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

#ifdef DEBUG
		sub	r10, r6, r5
		sub	r10, r9, r10
		/*
		 * We are about to copy the kernel to a new memory area.
		 * The boundaries of the new memory area can be found in
		 * r10 and r9, whilst r5 and r6 contain the boundaries
		 * of the memory we are going to copy.
		 * Calling dbgkc will help with the printing of this
		 * information.
		 */
		dbgkc	r5, r6, r10, r9
#endif

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		badr	r0, restart
		add	r0, r0, r6
		mov	pc, r0
| 519 | |
wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@   bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on

/*
 * The C runtime environment should now be setup sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long __hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif
| 633 | |
| 634 | .align 2 |
| 635 | .type LC0, #object |
| 636 | LC0: .word LC0 @ r1 |
| 637 | .word __bss_start @ r2 |
| 638 | .word _end @ r3 |
| 639 | .word _edata @ r6 |
| 640 | .word input_data_end - 4 @ r10 (inflated size location) |
| 641 | .word _got_start @ r11 |
| 642 | .word _got_end @ ip |
| 643 | .word .L_user_stack_end @ sp |
| 644 | .word _end - restart + 16384 + 1024*1024 |
| 645 | .size LC0, . - LC0 |
| 646 | |
#ifdef CONFIG_ARCH_RPC
		/* RiscPC: return the physical params address in r0. */
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif
| 654 | |
/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn
| 675 | |
/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr
| 709 | |
__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
| 737 | |
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)
| 784 | |
@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on
| 793 | |
__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif
		@ falls through to the generic v4 MMU cache enable below

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12
| 817 | |
__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #(7 << 0) | (1 << 4)	@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12
| 854 | |
__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12
| 869 | |
__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif
| 884 | |
#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#elif defined(CONFIG_CPU_V7M)
		/*
		 * On v7-M the processor id is located in the V7M_SCB_CPUID
		 * register, but as cache handling is IMPLEMENTATION DEFINED on
		 * v7-M (if existant at all) we just return early here.
		 * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
		 * __armv7_mmu_cache_{on,off,flush}) would be selected which
		 * use cp15 registers that are not implemented on v7-M.
		 */
		bx	lr
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b
| 926 | |
| 927 | /* |
| 928 | * Table for cache operations. This is basically: |
| 929 | * - CPU ID match |
| 930 | * - CPU ID mask |
| 931 | * - 'cache on' method instruction |
| 932 | * - 'cache off' method instruction |
| 933 | * - 'cache flush' method instruction |
| 934 | * |
| 935 | * We match an entry using: ((real_id ^ match) & mask) == 0 |
| 936 | * |
| 937 | * Writethrough caches generally only need 'on' and 'off' |
| 938 | * methods. Writeback caches _must_ have the flush method |
| 939 | * defined. |
| 940 | */ |
| 941 | .align 2 |
| 942 | .type proc_types,#object |
| 943 | proc_types: |
| 944 | .word 0x41000000 @ old ARM ID |
| 945 | .word 0xff00f000 |
| 946 | mov pc, lr |
| 947 | THUMB( nop ) |
| 948 | mov pc, lr |
| 949 | THUMB( nop ) |
| 950 | mov pc, lr |
| 951 | THUMB( nop ) |
| 952 | |
| 953 | .word 0x41007000 @ ARM7/710 |
| 954 | .word 0xfff8fe00 |
| 955 | mov pc, lr |
| 956 | THUMB( nop ) |
| 957 | mov pc, lr |
| 958 | THUMB( nop ) |
| 959 | mov pc, lr |
| 960 | THUMB( nop ) |
| 961 | |
| 962 | .word 0x41807200 @ ARM720T (writethrough) |
| 963 | .word 0xffffff00 |
| 964 | W(b) __armv4_mmu_cache_on |
| 965 | W(b) __armv4_mmu_cache_off |
| 966 | mov pc, lr |
| 967 | THUMB( nop ) |
| 968 | |
| 969 | .word 0x41007400 @ ARM74x |
| 970 | .word 0xff00ff00 |
| 971 | W(b) __armv3_mpu_cache_on |
| 972 | W(b) __armv3_mpu_cache_off |
| 973 | W(b) __armv3_mpu_cache_flush |
| 974 | |
| 975 | .word 0x41009400 @ ARM94x |
| 976 | .word 0xff00ff00 |
| 977 | W(b) __armv4_mpu_cache_on |
| 978 | W(b) __armv4_mpu_cache_off |
| 979 | W(b) __armv4_mpu_cache_flush |
| 980 | |
| 981 | .word 0x41069260 @ ARM926EJ-S (v5TEJ) |
| 982 | .word 0xff0ffff0 |
| 983 | W(b) __arm926ejs_mmu_cache_on |
| 984 | W(b) __armv4_mmu_cache_off |
| 985 | W(b) __armv5tej_mmu_cache_flush |
| 986 | |
| 987 | .word 0x00007000 @ ARM7 IDs |
| 988 | .word 0x0000f000 |
| 989 | mov pc, lr |
| 990 | THUMB( nop ) |
| 991 | mov pc, lr |
| 992 | THUMB( nop ) |
| 993 | mov pc, lr |
| 994 | THUMB( nop ) |
| 995 | |
| 996 | @ Everything from here on will be the new ID system. |
| 997 | |
| 998 | .word 0x4401a100 @ sa110 / sa1100 |
| 999 | .word 0xffffffe0 |
| 1000 | W(b) __armv4_mmu_cache_on |
| 1001 | W(b) __armv4_mmu_cache_off |
| 1002 | W(b) __armv4_mmu_cache_flush |
| 1003 | |
| 1004 | .word 0x6901b110 @ sa1110 |
| 1005 | .word 0xfffffff0 |
| 1006 | W(b) __armv4_mmu_cache_on |
| 1007 | W(b) __armv4_mmu_cache_off |
| 1008 | W(b) __armv4_mmu_cache_flush |
| 1009 | |
| 1010 | .word 0x56056900 |
| 1011 | .word 0xffffff00 @ PXA9xx |
| 1012 | W(b) __armv4_mmu_cache_on |
| 1013 | W(b) __armv4_mmu_cache_off |
| 1014 | W(b) __armv4_mmu_cache_flush |
| 1015 | |
| 1016 | .word 0x56158000 @ PXA168 |
| 1017 | .word 0xfffff000 |
| 1018 | W(b) __armv4_mmu_cache_on |
| 1019 | W(b) __armv4_mmu_cache_off |
| 1020 | W(b) __armv5tej_mmu_cache_flush |
| 1021 | |
| 1022 | .word 0x56050000 @ Feroceon |
| 1023 | .word 0xff0f0000 |
| 1024 | W(b) __armv4_mmu_cache_on |
| 1025 | W(b) __armv4_mmu_cache_off |
| 1026 | W(b) __armv5tej_mmu_cache_flush |
| 1027 | |
| 1028 | #ifdef CONFIG_CPU_FEROCEON_OLD_ID |
| 1029 | /* this conflicts with the standard ARMv5TE entry */ |
| 1030 | .long 0x41009260 @ Old Feroceon |
| 1031 | .long 0xff00fff0 |
| 1032 | b __armv4_mmu_cache_on |
| 1033 | b __armv4_mmu_cache_off |
| 1034 | b __armv5tej_mmu_cache_flush |
| 1035 | #endif |
| 1036 | |
| 1037 | .word 0x66015261 @ FA526 |
| 1038 | .word 0xff01fff1 |
| 1039 | W(b) __fa526_cache_on |
| 1040 | W(b) __armv4_mmu_cache_off |
| 1041 | W(b) __fa526_cache_flush |
| 1042 | |
| 1043 | @ These match on the architecture ID |
| 1044 | |
| 1045 | .word 0x00020000 @ ARMv4T |
| 1046 | .word 0x000f0000 |
| 1047 | W(b) __armv4_mmu_cache_on |
| 1048 | W(b) __armv4_mmu_cache_off |
| 1049 | W(b) __armv4_mmu_cache_flush |
| 1050 | |
| 1051 | .word 0x00050000 @ ARMv5TE |
| 1052 | .word 0x000f0000 |
| 1053 | W(b) __armv4_mmu_cache_on |
| 1054 | W(b) __armv4_mmu_cache_off |
| 1055 | W(b) __armv4_mmu_cache_flush |
| 1056 | |
| 1057 | .word 0x00060000 @ ARMv5TEJ |
| 1058 | .word 0x000f0000 |
| 1059 | W(b) __armv4_mmu_cache_on |
| 1060 | W(b) __armv4_mmu_cache_off |
| 1061 | W(b) __armv5tej_mmu_cache_flush |
| 1062 | |
| 1063 | .word 0x0007b000 @ ARMv6 |
| 1064 | .word 0x000ff000 |
| 1065 | W(b) __armv6_mmu_cache_on |
| 1066 | W(b) __armv4_mmu_cache_off |
| 1067 | W(b) __armv6_mmu_cache_flush |
| 1068 | |
| 1069 | .word 0x000f0000 @ new CPU Id |
| 1070 | .word 0x000f0000 |
| 1071 | W(b) __armv7_mmu_cache_on |
| 1072 | W(b) __armv7_mmu_cache_off |
| 1073 | W(b) __armv7_mmu_cache_flush |
| 1074 | |
| 1075 | .word 0 @ unrecognised type |
| 1076 | .word 0 |
| 1077 | mov pc, lr |
| 1078 | THUMB( nop ) |
| 1079 | mov pc, lr |
| 1080 | THUMB( nop ) |
| 1081 | mov pc, lr |
| 1082 | THUMB( nop ) |
| 1083 | |
| 1084 | .size proc_types, . - proc_types |
| 1085 | |
| 1086 | /* |
| 1087 | * If you get a "non-constant expression in ".if" statement" |
| 1088 | * error from the assembler on this line, check that you have |
| 1089 | * not accidentally written a "b" instruction where you should |
| 1090 | * have written W(b). |
| 1091 | */ |
| 1092 | .if (. - proc_types) % PROC_ENTRY_SIZE != 0 |
| 1093 | .error "The size of one or more proc_types entries is wrong." |
| 1094 | .endif |
| 1095 | |
| 1096 | /* |
| 1097 | * Turn off the Cache and MMU. ARMv3 does not support |
| 1098 | * reading the control register, but ARMv4 does. |
| 1099 | * |
| 1100 | * On exit, |
| 1101 | * r0, r1, r2, r3, r9, r12 corrupted |
| 1102 | * This routine must preserve: |
| 1103 | * r4, r7, r8 |
| 1104 | */ |
| 1105 | .align 5 |
| 1106 | cache_off: mov r3, #12 @ cache_off function |
| 1107 | b call_cache_fn |
| 1108 | |
@ ARMv4 MPU parts: disable the MPU and caches, then drain the write
@ buffer and invalidate both caches.  Clobbers r0.
__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d		@ clear M, C and W control bits
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr
| 1118 | |
@ ARMv3 MPU parts: disable the MPU and caches and invalidate the
@ (unified) cache with the single v3 invalidate op.  Clobbers r0.
__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d		@ clear M, C and W control bits
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
| 1126 | |
@ ARMv4 MMU parts: disable the MMU and caches, invalidate caches and
@ TLB.  A no-op (plain return) when CONFIG_MMU is not set.  Clobbers r0.
__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d		@ clear M, C and W control bits
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr
| 1137 | |
@ ARMv7: disable MMU (if configured) and D-cache, then clean+invalidate
@ via __armv7_mmu_cache_flush and invalidate TLB/branch predictor.
@ The SCTLR write comes first so the flush runs with caching disabled.
@ Clobbers r0 and r12 (r12 holds the return address across the bl).
__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x0005		@ clear M (MMU) and C (D-cache) bits
#else
		bic	r0, r0, #0x0004		@ no MMU config: clear C bit only
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12
| 1156 | |
| 1157 | /* |
| 1158 | * Clean and flush the cache to maintain consistency. |
| 1159 | * |
| 1160 | * On exit, |
| 1161 | * r1, r2, r3, r9, r10, r11, r12 corrupted |
| 1162 | * This routine must preserve: |
| 1163 | * r4, r6, r7, r8 |
| 1164 | */ |
| 1165 | .align 5 |
| 1166 | cache_clean_flush: |
| 1167 | mov r3, #16 |
| 1168 | b call_cache_fn |
| 1169 | |
@ ARMv4 MPU parts: clean+invalidate the D-cache by set/way (8 segments
@ x 64 entries), then invalidate the I-cache and drain the write buffer.
@ Returns immediately if bit 0 of r4 is set (flag set elsewhere in this
@ file; presumably "caches not enabled" -- confirm against full file).
__armv4_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr			@ flag set: nothing to clean
		mov	r2, #1			@ r2 != 0: do I-cache invalidate below
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs 	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr
| 1188 | |
@ FA526: single-op clean+invalidate D-cache, invalidate I-cache, drain
@ write buffer.  Returns immediately if bit 0 of r4 is set (see note
@ at __armv4_mpu_cache_flush).  Clobbers r1.
__fa526_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
| 1197 | |
@ ARMv6: clean+invalidate D and unified caches (skipped, via mcreq,
@ when bit 0 of r4 is set), always invalidate I-cache/BTB and drain
@ the write buffer.  Clobbers r1.
__armv6_mmu_cache_flush:
		mov	r1, #0
		tst	r4, #1
		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
| 1206 | |
@ ARMv7: clean+invalidate the data caches, then invalidate I-cache/BTB
@ with barriers.  If bit 0 of r4 is set (see note at
@ __armv4_mpu_cache_flush) the D-side maintenance is skipped entirely.
@ Non-hierarchical (pre-v7 style, per ID_MMFR1) caches use the single
@ c7,c14,0 op; hierarchical caches are walked level by level using the
@ architectural CLIDR/CCSIDR set/way clean+invalidate sequence.
@ Clobbers r10 (other registers used by the walk are saved/restored).
__armv7_mmu_cache_flush:
		tst	r4, #1
		bne	iflush			@ skip D-cache maintenance
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}	@ the walk clobbers heavily: save
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
					@ r10 = current level << 1 (cssr format)
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sych the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr
| 1268 | |
@ ARMv5TEJ: loop on the test/clean/invalidate D-cache op until the
@ whole cache is clean (the op sets the flags read back via APSR_nzcv),
@ then invalidate the I-cache and drain the write buffer.  Returns
@ immediately if bit 0 of r4 is set (see __armv4_mpu_cache_flush).
__armv5tej_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
1:		mrc	p15, 0, APSR_nzcv, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr
| 1277 | |
@ ARMv4: software-flush the D-cache by reading a cache-sized window of
@ code space (displacing any dirty lines), then invalidate I and D
@ caches and drain the write buffer.  D-cache size and line size are
@ taken from the cache type register when one is present (its value
@ differs from the main ID in r9); otherwise defaults of 32K/32-byte
@ lines are assumed.  Returns immediately if bit 0 of r4 is set (see
@ __armv4_mpu_cache_flush).  Clobbers r1, r2, r3, r11.
__armv4_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7		@ D-cache size field
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3		@ D-cache line length field
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2		@ r2 = end of flush window
1:
 ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
 THUMB(		ldr     r3, [r1]		) @ s/w flush D cache
 THUMB(		add     r1, r1, r11		)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
| 1311 | |
@ ARMv3 (MMU and MPU variants share one implementation): invalidate the
@ whole unified cache with the single v3 op.  Returns immediately if
@ bit 0 of r4 is set (see __armv4_mpu_cache_flush).  Clobbers r1.
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
| 1319 | |
| 1320 | /* |
| 1321 | * Various debugging routines for printing hex characters and |
| 1322 | * memory, which again must be relocatable. |
| 1323 | */ |
| 1324 | #ifdef DEBUG |
| 1325 | .align 2 |
| 1326 | .type phexbuf,#object |
| 1327 | phexbuf: .space 12 |
| 1328 | .size phexbuf, . - phexbuf |
| 1329 | |
| 1330 | @ phex corrupts {r0, r1, r2, r3} |
| 1331 | phex: adr r3, phexbuf |
| 1332 | mov r2, #0 |
| 1333 | strb r2, [r3, r1] |
| 1334 | 1: subs r1, r1, #1 |
| 1335 | movmi r0, r3 |
| 1336 | bmi puts |
| 1337 | and r2, r0, #15 |
| 1338 | mov r0, r0, lsr #4 |
| 1339 | cmp r2, #10 |
| 1340 | addge r2, r2, #7 |
| 1341 | add r2, r2, #'0' |
| 1342 | strb r2, [r3, r1] |
| 1343 | b 1b |
| 1344 | |
| 1345 | @ puts corrupts {r0, r1, r2, r3} |
| 1346 | puts: loadsp r3, r2, r1 |
| 1347 | 1: ldrb r2, [r0], #1 |
| 1348 | teq r2, #0 |
| 1349 | moveq pc, lr |
| 1350 | 2: writeb r2, r3 |
| 1351 | mov r1, #0x00020000 |
| 1352 | 3: subs r1, r1, #1 |
| 1353 | bne 3b |
| 1354 | teq r2, #'\n' |
| 1355 | moveq r2, #'\r' |
| 1356 | beq 2b |
| 1357 | teq r0, #0 |
| 1358 | bne 1b |
| 1359 | mov pc, lr |
| 1360 | @ putc corrupts {r0, r1, r2, r3} |
| 1361 | putc: |
| 1362 | mov r2, r0 |
| 1363 | loadsp r3, r1, r0 |
| 1364 | mov r0, #0 |
| 1365 | b 2b |
| 1366 | |
| 1367 | @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} |
| 1368 | memdump: mov r12, r0 |
| 1369 | mov r10, lr |
| 1370 | mov r11, #0 |
| 1371 | 2: mov r0, r11, lsl #2 |
| 1372 | add r0, r0, r12 |
| 1373 | mov r1, #8 |
| 1374 | bl phex |
| 1375 | mov r0, #':' |
| 1376 | bl putc |
| 1377 | 1: mov r0, #' ' |
| 1378 | bl putc |
| 1379 | ldr r0, [r12, r11, lsl #2] |
| 1380 | mov r1, #8 |
| 1381 | bl phex |
| 1382 | and r0, r11, #7 |
| 1383 | teq r0, #3 |
| 1384 | moveq r0, #' ' |
| 1385 | bleq putc |
| 1386 | and r0, r11, #7 |
| 1387 | add r11, r11, #1 |
| 1388 | teq r0, #7 |
| 1389 | bne 1b |
| 1390 | mov r0, #'\n' |
| 1391 | bl putc |
| 1392 | cmp r11, #64 |
| 1393 | blt 2b |
| 1394 | mov pc, r10 |
| 1395 | #endif |
| 1396 | |
| 1397 | .ltorg |
| 1398 | |
#ifdef CONFIG_ARM_VIRT_EXT
@ Exception vector table used when re-entering HYP mode: only the hyp
@ entry proceeds (into __enter_kernel); every other vector parks the
@ CPU in a branch-to-self.
		.align	5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */
| 1411 | |
@ Jump to the decompressed kernel with the boot-protocol register
@ contract: r0 = 0, r1 = architecture number (from r7), r2 = atags/DT
@ pointer (from r8), r4 = kernel entry point.  On M-class, bit 0 of
@ the target is set so the bx enters in Thumb state; A/R-class entry
@ is always ARM state.
__enter_kernel:
		mov	r0, #0			@ must be 0
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
 ARM(		mov	pc, r4		)	@ call kernel
 M_CLASS(	add	r4, r4, #1	)	@ enter in Thumb mode for M class
 THUMB(		bx	r4		)	@ entry point is always ARM for A/R classes

@ End of the code that must remain intact across self-relocation.
reloc_code_end:
| 1421 | |
#ifdef CONFIG_EFI_STUB
		.align	2
@ Self-relative offset to the 'start' label, so the stub can locate
@ the zImage base position-independently.
_start:		.long	start - .

ENTRY(efi_stub_entry)
		@ allocate space on stack for passing current zImage address
		@ and for the EFI stub to return of new entry point of
		@ zImage, as EFI stub may copy the kernel. Pointer address
		@ is passed in r2. r0 and r1 are passed through from the
		@ EFI firmware to efi_entry
		adr	ip, _start
		ldr	r3, [ip]
		add	r3, r3, ip		@ r3 = absolute address of 'start'
		stmfd	sp!, {r3, lr}
		mov	r2, sp			@ pass zImage address in r2
		bl	efi_entry

		@ Check for error return from EFI stub. r0 has FDT address
		@ or error code ((unsigned)-1 signals failure).
		cmn	r0, #1
		beq	efi_load_fail

		@ Preserve return value of efi_entry() in r4
		mov	r4, r0

		@ our cache maintenance code relies on CP15 barrier instructions
		@ but since we arrived here with the MMU and caches configured
		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
		@ the enable path will be executed on v7+ only.
		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
		tst	r1, #(1 << 5)		@ CP15BEN bit set?
		bne	0f
		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
 ARM(		.inst	0xf57ff06f		@ v7+ isb	)
 THUMB(		isb						)

0:		bl	cache_clean_flush
		bl	cache_off

		@ Set parameters for booting zImage according to boot protocol
		@ put FDT address in r2, it was returned by efi_entry()
		@ r1 is the machine type, and r0 needs to be 0
		mov	r0, #0
		mov	r1, #0xFFFFFFFF		@ machine type: use DT, not a board number
		mov	r2, r4

		@ Branch to (possibly) relocated zImage that is in [sp]
		ldr	lr, [sp]
		ldr	ip, =start_offset
		add	lr, lr, ip
		mov	pc, lr			@ no mode switch

efi_load_fail:
		@ Return EFI_LOAD_ERROR to EFI firmware on error.
		ldr	r0, =0x80000001		@ EFI_LOAD_ERROR status code
		ldmfd	sp!, {ip, pc}
ENDPROC(efi_stub_entry)
#endif
| 1482 | |
@ 4 KiB zero-initialized (%nobits) stack for the decompressor.
		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: