// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;
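/*
 * Illustrative note (not from the original source): each exception mode
 * gets only three 32-bit words here because the vector stubs merely stash
 * a few registers (r0, lr, spsr) on these stacks before switching to SVC
 * mode, so no deeper per-mode stack is required.
 */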

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
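/*
 * Illustrative: on a little-endian kernel the least significant byte of
 * endian_test.l overlays c[0] ('l'), so ENDIANNESS evaluates to 'l'; on a
 * big-endian kernel it overlays c[3] and evaluates to 'b'.
 */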

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

#ifdef CONFIG_CPU_ASR1901
static u32 asr1901_memsize;
#endif

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
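/*
 * Illustrative example of the ARMv7 check above: a CCSIDR LineSize field of
 * 1 gives 4 << (1 + 2) = 32-byte lines, and a NumSets field of 255 gives
 * 256 sets, so one cache way spans 32 * 256 = 8 KiB. With 4 KiB pages that
 * exceeds PAGE_SIZE, so the I-cache is treated as aliasing.
 */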

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}
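/*
 * Illustrative note: after the patching above, __aeabi_uidiv and
 * __aeabi_idiv each start with a two-instruction sequence,
 * "udiv/sdiv r0, r0, r1" followed by "bx lr", so AEABI division helpers
 * return immediately instead of falling through to the software routine.
 */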

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC_l "l"
#define PLC_r "r"
#else
#define PLC_l "I"
#define PLC_r "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;
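	/*
	 * Illustrative: if the booting core reports Aff0 == 2 on a four-CPU
	 * system, the map becomes { 2, 1, 0, 3 }, so the boot core is always
	 * logical CPU 0.
	 */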

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
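/*
 * Illustrative example of the hash above: with MPIDRs 0x000, 0x001, 0x100
 * and 0x101 the toggling-bit mask is 0x101, so fs[] = {0, 0, 0},
 * bits[] = {1, 1, 0}, shift_aff[] = {0, 7, 14} and mpidr_hash.bits = 2.
 * Shifting and ORing the masked affinity fields then maps the four MPIDRs
 * to the indices 0, 1, 3 and 2+1 = 3... more precisely to 0, 1, 2 and 3,
 * with no collisions and a hash table of four entries.
 */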
#endif

/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
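	 * For example (illustrative numbers): start = 0x1234 with
	 * size = 0x3000 becomes aligned_start = 0x2000, and after the
	 * trailing size round-down below the region actually added is
	 * [0x2000, 0x4000).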
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
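 * (e.g. "mem=64M@0x80000000" adds 64 MiB starting at 0x80000000; without
 * an "@start" part the region begins at PHYS_OFFSET).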
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

#ifdef CONFIG_CPU_ASR1901
	asr1901_memsize = size;
#endif
	return 0;
}
early_param("mem", early_mem);

#ifdef CONFIG_CPU_ASR1901
u32 get_early_memsize(void)
{
	return asr1901_memsize;
}
#endif

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end   = virt_to_phys(__init_begin - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end   = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel crashes.
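 *
 * For example, "crashkernel=64M" picks a suitable base below lowmem
 * automatically, while "crashkernel=64M@0x60000000" (illustrative values)
 * requests the reservation at a fixed physical address.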
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		/* below 16KB */
		if (crash_base < 0x4000)
			start = memblock_find_in_range(crash_base,
						       crash_base + crash_size,
						       crash_size, PAGE_SIZE);
		else
			start = memblock_find_in_range(crash_base,
						       crash_base + crash_size,
						       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
EXPORT_SYMBOL_GPL(__arm_pm_restart);

static int arm_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	__arm_pm_restart(action, data);
	return NOTIFY_DONE;
}

static struct notifier_block arm_restart_nb = {
	.notifier_call = arm_restart,
	.priority = 128,
};

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart) {
		__arm_pm_restart = mdesc->restart;
		register_restart_handler(&arm_restart_nb);
	}

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
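		/*
		 * Illustrative: with HZ == 100 and loops_per_jiffy == 4980736
		 * the line printed above reads "BogoMIPS	: 996.14".
		 */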
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};