// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/irq.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/spectre.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>

#ifdef CONFIG_PXA_RAMDUMP
#include <linux/ramdump.h>
#endif

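/* Exception names, indexed by the reason code passed to bad_mode(). */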
static const char *handler[] = {
	"prefetch abort",
	"data abort",
	"address exception",
	"interrupt",
	"undefined instruction",
};

void *vectors_page;

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
	get_option(&str, &user_debug);
	return 1;
}
__setup("user_debug=", user_debug_setup);
#endif

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

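/*
 * Print one backtrace entry: 'where' is the traced PC, 'from' the caller's
 * PC and 'frame' the frame pointer.  If 'from' lies in the exception entry
 * code, the saved pt_regs follow the frame, so dump those as well.
 */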
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
	unsigned long end = frame + 4 + sizeof(struct pt_regs);

#ifdef CONFIG_KALLSYMS
	printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

	if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
		dump_mem("", "Exception stack", frame + 4, end);
}

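/*
 * Print the registers saved by an STM instruction: walk the register list
 * in bits 0-10 of 'instruction', reading values downwards from 'stack'
 * and printing up to six registers per line.
 */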
void dump_backtrace_stm(u32 *stack, u32 instruction)
{
	char str[80], *p;
	unsigned int x;
	int reg;

	for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
		if (instruction & BIT(reg)) {
			p += sprintf(p, " r%d:%08x", reg, *stack--);
			if (++x == 6) {
				x = 0;
				p = str;
				printk("%s\n", str);
			}
		}
	}
	if (p != str)
		printk("%s\n", str);
}

#ifndef CONFIG_ARM_UNWIND
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
	if (sp < PAGE_OFFSET ||
	    (sp > (unsigned long)high_memory && high_memory != NULL))
		return -EFAULT;

	return 0;
}
#endif

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			if (p >= bottom && p < top) {
				unsigned long val;
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 9, " %08lx", val);
				else
					sprintf(str + i * 9, " ????????");
			}
		}
		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	set_fs(fs);
}

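/*
 * Dump the instruction stream around the faulting PC: the four
 * instructions before it and the one at the PC itself (one extra
 * halfword in Thumb mode), marking the faulting one with parentheses.
 */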
static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int thumb = thumb_mode(regs);
	const int width = thumb ? 4 : 8;
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/*
	 * Note that we now dump the code first, just in case the backtrace
	 * kills us.
	 */

	for (i = -4; i < 1 + !!thumb; i++) {
		unsigned int val, bad;

		if (thumb)
			bad = get_user(val, &((u16 *)addr)[i]);
		else
			bad = get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
				     width, val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk("%sCode: %s\n", lvl, str);
}

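/*
 * get_user() works on user addresses only, so when dumping kernel-mode
 * code temporarily widen the address limit with set_fs(KERNEL_DS).
 */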
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	mm_segment_t fs;

	if (!user_mode(regs)) {
		fs = get_fs();
		set_fs(KERNEL_DS);
		__dump_instr(lvl, regs);
		set_fs(fs);
	} else {
		__dump_instr(lvl, regs);
	}
}

#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unwind_backtrace(regs, tsk);
}
#else
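/*
 * Frame-pointer based fallback: take fp from the exception registers if
 * we have them, from the saved thread state if 'tsk' is not the current
 * task, or from the live fp register otherwise, then walk it with
 * c_backtrace() after sanity-checking it against the stack bounds.
 */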
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unsigned int fp, mode;
	int ok = 1;

	printk("Backtrace: ");

	if (!tsk)
		tsk = current;

	if (regs) {
		fp = frame_pointer(regs);
		mode = processor_mode(regs);
	} else if (tsk != current) {
		fp = thread_saved_fp(tsk);
		mode = 0x10;
	} else {
		asm("mov %0, fp" : "=r" (fp) : : "cc");
		mode = 0x10;
	}

	if (!fp) {
		pr_cont("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		pr_cont("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		pr_cont("frame pointer underflow");
	pr_cont("\n");

	if (ok)
		c_backtrace(fp, mode);
}
#endif

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif

#if defined(CONFIG_CPU_ASR18XX) || defined(CONFIG_CPU_ASR1901)
extern void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
static void machine_force_restart(void)
{
	if (__arm_pm_restart) {
		__arm_pm_restart(0, NULL);
	}

	pr_emerg("waiting for sys restart...\n");
	while (1)
		cpu_relax();
}
#endif

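/*
 * Print everything useful about a dying thread: the oops banner, the
 * module list, the registers and, for kernel-mode faults, the stack,
 * backtrace and code around the PC.  Returns 1 if a die notifier
 * handled the oops.
 */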
static int __die(const char *str, int err, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
		 str, err, ++die_counter);

#if defined(CONFIG_CPU_ASR18XX) || defined(CONFIG_CPU_ASR1901)
	if (die_counter > 1) {
		pr_emerg("force machine restart\n");
		machine_force_restart();
	}
#endif

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return 1;

	print_modules();
	__show_regs(regs);
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

#ifdef CONFIG_PXA_RAMDUMP
	ramdump_save_dynamic_context(str, err, current_thread_info(), regs);
#endif
	return 0;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

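/*
 * Serialise oops output across CPUs.  The lock is taken with a trylock
 * first so that a CPU which oopses while already holding die_lock (a
 * nested oops) keeps going instead of deadlocking against itself;
 * die_nest_count tracks the recursion depth.
 */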
static unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

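/*
 * Undo oops_begin(), hand over to kexec or panic where configured, and
 * finally kill the faulting task if a signal number was supplied.
 */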
static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (signr)
		make_task_dead(signr);
}

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (!user_mode(regs))
		bug_type = report_bug(regs->ARM_pc, regs);
	if (bug_type != BUG_TRAP_TYPE_NONE)
		str = "Oops - BUG";

	if (__die(str, err, regs))
		sig = 0;

	oops_end(flags, regs, sig);
}

void arm_notify_die(const char *str, struct pt_regs *regs,
		    int signo, int si_code, void __user *addr,
		    unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_fault(signo, si_code, addr);
	} else {
		die(str, regs, err);
	}
}

#ifdef CONFIG_GENERIC_BUG

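/*
 * Check whether the instruction at 'pc' is the BUG breakpoint encoding
 * for the kernel's instruction set (ARM or Thumb-2).
 */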
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
	u16 bkpt;
	u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
	u32 bkpt;
	u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif

	if (probe_kernel_address((unsigned *)pc, bkpt))
		return 0;

	return bkpt == insn;
}

#endif

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

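/*
 * Find a registered hook whose instruction and CPSR masks match, then
 * call it outside the lock.  Returns 0 if a hook handled the
 * instruction, nonzero otherwise.
 */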
static nokprobe_inline
int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
	struct undef_hook *hook;
	unsigned long flags;
	int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
			fn = hook->fn;
	raw_spin_unlock_irqrestore(&undef_lock, flags);

	return fn ? fn(regs, instr) : 1;
}

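/*
 * Undefined instruction entry point.  Fetch the faulting instruction
 * (directly for SVC mode, via get_user() for user mode, composing the
 * two halfwords of a wide Thumb instruction) and give the undef hooks a
 * chance to emulate it before raising SIGILL.
 */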
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
	unsigned int instr;
	void __user *pc;

	pc = (void __user *)instruction_pointer(regs);

	if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
		if (thumb_mode(regs)) {
			instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
			if (is_wide_instruction(instr)) {
				u16 inst2;
				inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
				instr = __opcode_thumb32_compose(instr, inst2);
			}
		} else
#endif
			instr = __mem_to_opcode_arm(*(u32 *) pc);
	} else if (thumb_mode(regs)) {
		if (get_user(instr, (u16 __user *)pc))
			goto die_sig;
		instr = __mem_to_opcode_thumb16(instr);
		if (is_wide_instruction(instr)) {
			unsigned int instr2;
			if (get_user(instr2, (u16 __user *)pc+1))
				goto die_sig;
			instr2 = __mem_to_opcode_thumb16(instr2);
			instr = __opcode_thumb32_compose(instr, instr2);
		}
	} else {
		if (get_user(instr, (u32 __user *)pc))
			goto die_sig;
		instr = __mem_to_opcode_arm(instr);
	}

	if (call_undef_hook(regs, instr) == 0)
		return;

die_sig:
#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		pr_info("%s (%d): undefined instruction: pc=%p\n",
			current->comm, task_pid_nr(current), pc);
		__show_regs(regs);
		dump_instr(KERN_INFO, regs);
	}
#endif
	arm_notify_die("Oops - undefined instruction", regs,
		       SIGILL, ILL_ILLOPC, pc, 0, 6);
}
NOKPROBE_SYMBOL(do_undefinstr)

/*
 * Handle FIQ similarly to NMI on x86 systems.
 *
 * The runtime environment for NMIs is extremely restrictive
 * (NMIs can pre-empt critical sections meaning almost all locking is
 * forbidden) meaning this default FIQ handling must only be used in
 * circumstances where non-maskability improves robustness, such as
 * watchdog or debug logic.
 *
 * This handler is not appropriate for general purpose use in drivers
 * or platform code and can be overridden using set_fiq_handler.
 */
asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	nmi_enter();

	/* nop. FIQ handlers for special arch/arm features can be added here. */

	nmi_exit();

	set_irq_regs(old_regs);
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected\n", handler[reason]);

	die("Oops - bad mode", regs, 0);
	local_irq_disable();
	panic("bad mode");
}

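/*
 * An obsolete or unrecognised SWI was issued.  Non-Linux personalities
 * simply get SIGSEGV; for native tasks, log the call (if enabled) and
 * raise SIGILL via arm_notify_die().
 */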
static int bad_syscall(int n, struct pt_regs *regs)
{
	if ((current->personality & PER_MASK) != PER_LINUX) {
		send_sig(SIGSEGV, current, 1);
		return regs->ARM_r0;
	}

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SYSCALL) {
		pr_err("[%d] %s: obsolete system call %08x.\n",
		       task_pid_nr(current), current->comm, n);
		dump_instr(KERN_ERR, regs);
	}
#endif

	arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
		       (void __user *)instruction_pointer(regs) -
		       (thumb_mode(regs) ? 2 : 4),
		       n, 0);

	return regs->ARM_r0;
}

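/*
 * Flush the user range one page at a time so that a fatal signal or a
 * pending reschedule is noticed between chunks rather than only after
 * the whole (possibly huge) range.
 */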
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
	int ret;

	do {
		unsigned long chunk = min(PAGE_SIZE, end - start);

		if (fatal_signal_pending(current))
			return 0;

		ret = flush_cache_user_range(start, start + chunk);
		if (ret)
			return ret;

		cond_resched();
		start += chunk;
	} while (start < end);

	return 0;
}

static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
	if (end < start || flags)
		return -EINVAL;

	if (!access_ok(start, end - start))
		return -EFAULT;

	return __do_cache_op(start, end);
}

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
	if ((no >> 16) != (__ARM_NR_BASE >> 16))
		return bad_syscall(no, regs);

	switch (no & 0xffff) {
	case 0: /* branch through 0 */
		arm_notify_die("branch through zero", regs,
			       SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
		return 0;

	case NR(breakpoint): /* SWI BREAK_POINT */
		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
		ptrace_break(regs);
		return regs->ARM_r0;

	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_.  There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags.  It should ALWAYS be passed as ZERO until it
	 * is defined to be something else.  For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache.  Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case NR(cacheflush):
		return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

	case NR(usr26):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr &= ~MODE32_BIT;
		return regs->ARM_r0;

	case NR(usr32):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr |= MODE32_BIT;
		return regs->ARM_r0;

	case NR(set_tls):
		set_tls(regs->ARM_r0);
		return 0;

	case NR(get_tls):
		return current_thread_info()->tp_value[0];

	default:
		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
		   if not implemented, rather than raising SIGILL.  This
		   way the calling program can gracefully determine whether
		   a feature is supported.  */
		if ((no & 0xffff) <= 0x7ff)
			return -ENOSYS;
		break;
	}
#ifdef CONFIG_DEBUG_USER
	/*
	 * experience shows that these seem to indicate that
	 * something catastrophic has happened
	 */
	if (user_debug & UDBG_SYSCALL) {
		pr_err("[%d] %s: arm syscall %d\n",
		       task_pid_nr(current), current->comm, no);
		dump_instr("", regs);
		if (user_mode(regs)) {
			__show_regs(regs);
			c_backtrace(frame_pointer(regs), processor_mode(regs));
		}
	}
#endif
	arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
		       (void __user *)instruction_pointer(regs) -
		       (thumb_mode(regs) ? 2 : 4),
		       no, 0);
	return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

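/*
 * Emulate "mrc p15, 0, <Rd>, c13, c0, 3" (read the user TLS value),
 * which is what arm_mrc_hook below matches: write the saved TLS value
 * into Rd and step the PC past the trapped instruction.  Rd == r15 is
 * left to the default SIGILL handling.
 */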
static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
	int reg = (instr >> 12) & 15;
	if (reg == 15)
		return 1;
	regs->uregs[reg] = current_thread_info()->tp_value[0];
	regs->ARM_pc += 4;
	return 0;
}

static struct undef_hook arm_mrc_hook = {
	.instr_mask	= 0x0fff0fff,
	.instr_val	= 0x0e1d0f70,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
	register_undef_hook(&arm_mrc_hook);
	return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_BADABORT) {
		pr_err("8<--- cut here ---\n");
		pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
		       task_pid_nr(current), current->comm, code, instr);
		dump_instr(KERN_ERR, regs);
		show_pte(KERN_ERR, current->mm, addr);
	}
#endif

	arm_notify_die("unknown data abort code", regs,
		       SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
}

void __readwrite_bug(const char *fn)
{
	pr_err("%s called, but not implemented\n", fn);
	BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, pte_t pte)
{
	pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}

void __pmd_error(const char *file, int line, pmd_t pmd)
{
	pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}

void __pgd_error(const char *file, int line, pgd_t pgd)
{
	pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}

asmlinkage void __div0(void)
{
	pr_err("Division by zero in kernel.\n");
	dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}

void __init trap_init(void)
{
	return;
}

#ifdef CONFIG_KUSER_HELPERS
static void __init kuser_init(void *vectors)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;

	memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

	/*
	 * vectors + 0xfe0 = __kuser_get_tls
	 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
	 */
	if (tls_emu || has_tls_reg)
		memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif

#ifndef CONFIG_CPU_V7M
static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
{
	memcpy(vma, lma_start, lma_end - lma_start);
}

static void flush_vectors(void *vma, size_t offset, size_t size)
{
	unsigned long start = (unsigned long)vma + offset;
	unsigned long end = start + size;

	flush_icache_range(start, end);
}

#ifdef CONFIG_HARDEN_BRANCH_HISTORY
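/*
 * Switch the exception vector stubs to the Spectre-BHB mitigated copy
 * selected by 'method'.  This must happen during early boot; once the
 * system is past SYSTEM_SCHEDULING the live vectors are not rewritten
 * and the CPU is reported as vulnerable.
 */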
int spectre_bhb_update_vectors(unsigned int method)
{
	extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
	extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
	void *vec_start, *vec_end;

	if (system_state > SYSTEM_SCHEDULING) {
		pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
		       smp_processor_id());
		return SPECTRE_VULNERABLE;
	}

	switch (method) {
	case SPECTRE_V2_METHOD_LOOP8:
		vec_start = __vectors_bhb_loop8_start;
		vec_end = __vectors_bhb_loop8_end;
		break;

	case SPECTRE_V2_METHOD_BPIALL:
		vec_start = __vectors_bhb_bpiall_start;
		vec_end = __vectors_bhb_bpiall_end;
		break;

	default:
		pr_err("CPU%u: unknown Spectre BHB state %d\n",
		       smp_processor_id(), method);
		return SPECTRE_VULNERABLE;
	}

	copy_from_lma(vectors_page, vec_start, vec_end);
	flush_vectors(vectors_page, 0, vec_end - vec_start);

	return SPECTRE_MITIGATED;
}
#endif

void __init early_trap_init(void *vectors_base)
{
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];
	unsigned i;

	vectors_page = vectors_base;

	/*
	 * Poison the vectors page with an undefined instruction.  This
	 * instruction is chosen to be undefined for both ARM and Thumb
	 * ISAs.  The Thumb version is an undefined instruction with a
	 * branch back to the undefined instruction.
	 */
	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		((u32 *)vectors_base)[i] = 0xe7fddef1;

	/*
	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
	copy_from_lma(vectors_base, __vectors_start, __vectors_end);
	copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);

	kuser_init(vectors_base);

	flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
}
#else /* ifndef CONFIG_CPU_V7M */
void __init early_trap_init(void *vectors_base)
{
	/*
	 * On V7-M there is no need to copy the vector table to a dedicated
	 * memory area.  The address is configurable and so a table in the
	 * kernel image can be used.
	 */
}
#endif