/*
 * linux/arch/arm/kernel/process.c
 *
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Original Copyright (C) 1995 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <stdarg.h>

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
#include <linux/console.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/mach/time.h>

/* Added for HUB: CVE-2014-9870 (TLS register handling; see copy_thread()) */
#include <asm/tls.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

s64 pm_enter_time = 0;
static const char *processor_modes[] = {
        "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
        "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
        "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
        "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

static const char *isa_modes[] = {
        "ARM" , "Thumb" , "Jazelle", "ThumbEE"
};

extern void setup_mm_for_reboot(void);

static volatile int hlt_counter;

#ifdef CONFIG_SMP
void arch_trigger_all_cpu_backtrace(void)
{
        smp_send_all_cpu_backtrace();
}
#endif

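/*
 * hlt_counter gates low-power idle: while it is non-zero, cpu_idle()
 * spins with cpu_relax() instead of entering pm_idle. The counter can
 * also be preset at boot via the "hlt"/"nohlt" parameters below.
 */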
void disable_hlt(void)
{
        hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

static int __init nohlt_setup(char *__unused)
{
        hlt_counter = 1;
        return 1;
}

static int __init hlt_setup(char *__unused)
{
        hlt_counter = 0;
        return 1;
}

__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);

extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
typedef void (*phys_reset_t)(unsigned long);

#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
void arm_machine_flush_console(void)
{
        printk("\n");
        pr_emerg("Restarting %s\n", linux_banner);
        if (console_trylock()) {
                console_unlock();
                return;
        }

        mdelay(50);

        local_irq_disable();
        if (!console_trylock())
                pr_emerg("arm_restart: Console was locked! Busting\n");
        else
                pr_emerg("arm_restart: Console was locked!\n");
        console_unlock();
}
#else
void arm_machine_flush_console(void)
{
}
#endif

/*
 * A temporary stack to use for CPU reset. This is static so that we
 * don't clobber it with the identity mapping. When running with this
 * stack, any references to the current task *will not work* so you
 * should really do as little as possible before jumping to your reset
 * code.
 */
static u64 soft_restart_stack[16];

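/*
 * Runs on soft_restart_stack (see above) via call_with_stack(): it builds
 * a flat 1:1 mapping, cleans and disables the caches, then calls cpu_reset
 * through its physical address so the final jump to 'addr' happens from
 * the identity mapping. It must never return.
 */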
static void __soft_restart(void *addr)
{
        phys_reset_t phys_reset;

        /* Take out a flat memory mapping. */
        setup_mm_for_reboot();

        /* Clean and invalidate caches */
        flush_cache_all();

        /* Turn off caching */
        cpu_proc_fin();

        /* Push out any further dirty data, and ensure cache is empty */
        flush_cache_all();

        /* Switch to the identity mapping. */
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset((unsigned long)addr);

        /* Should never get here. */
        BUG();
}

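/*
 * Public entry point for a soft reset: masks IRQs/FIQs, disables the
 * outer (L2) cache when this is the last online CPU, then switches to
 * the private stack and calls __soft_restart() with the jump address.
 */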
void soft_restart(unsigned long addr)
{
        u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

        /* Disable interrupts first */
        local_irq_disable();
        local_fiq_disable();

        /* Disable the L2 if we're the last man standing. */
        if (num_online_cpus() == 1)
                outer_disable();

        /* Change to the new stack and continue with the reset. */
        call_with_stack(__soft_restart, (void *)addr, (void *)stack);

        /* Should never get here. */
        BUG();
}

static void null_restart(char mode, const char *cmd)
{
}

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

extern void pm_idle_sram_print(void);

void (*arm_pm_idle)(void);

/*
 * This is our default idle handler.
 */
static void default_idle(void)
{
        if (arm_pm_idle)
                arm_pm_idle();
        else
                cpu_do_idle();
        local_irq_enable();
#ifdef CONFIG_PM
        pm_idle_sram_print();
#endif
}

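/*
 * pm_idle may be overridden by platform code; callers changing it on an
 * SMP system must follow up with cpu_idle_wait() (above) so that no CPU
 * keeps using the old handler.
 */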
void (*pm_idle)(void) = default_idle;
EXPORT_SYMBOL(pm_idle);

/*
 * The idle thread has rather strange semantics for calling pm_idle,
 * but this is what x86 does and we need to do the same, so that
 * things like cpuidle get called in the same way. The only difference
 * is that we always respect 'hlt_counter' to prevent low power idle.
 */
void cpu_idle(void)
{
        local_fiq_enable();

        /* endless idle loop with no priority at all */
        while (1) {
                idle_notifier_call_chain(IDLE_START);
                tick_nohz_idle_enter();
                rcu_idle_enter();
                while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
                        if (cpu_is_offline(smp_processor_id()))
                                cpu_die();
#endif

                        /*
                         * We need to disable interrupts here
                         * to ensure we don't miss a wakeup call.
                         */
                        local_irq_disable();
#ifdef CONFIG_PL310_ERRATA_769419
                        wmb();
#endif
                        if (hlt_counter) {
                                local_irq_enable();
                                cpu_relax();
                        } else if (!need_resched()) {
                                stop_critical_timings();
                                pm_enter_time = ktime_to_us(ktime_get());
                                if (cpuidle_idle_call())
                                        pm_idle();
                                start_critical_timings();
                                /*
                                 * pm_idle functions must always
                                 * return with IRQs enabled.
                                 */
                                WARN_ON(irqs_disabled());
                        } else
                                local_irq_enable();
                }
                rcu_idle_exit();
                tick_nohz_idle_exit();
                idle_notifier_call_chain(IDLE_END);
                schedule_preempt_disabled();
        }
}

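/*
 * Default reboot mode is 'h' (conventionally a hard reset); the first
 * character of the "reboot=" boot argument overrides this and is passed
 * straight through to arm_pm_restart().
 */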
static char reboot_mode = 'h';

int __init reboot_setup(char *str)
{
        reboot_mode = str[0];
        return 1;
}

__setup("reboot=", reboot_setup);

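/*
 * machine_shutdown() quiesces the other CPUs (on SMP) so that the
 * halt/power-off/restart paths below run with a single CPU active.
 */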
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
        preempt_disable();
        smp_send_stop();
#endif
}

void machine_halt(void)
{
        machine_shutdown();
        local_irq_disable();
        while (1);
}

void machine_power_off(void)
{
        machine_shutdown();
        if (pm_power_off)
                pm_power_off();
}

void machine_restart(char *cmd)
{
        machine_shutdown();
        arm_machine_flush_console();

        arm_pm_restart(reboot_mode, cmd);

        /* Give a grace period for failure to restart of 1s */
        mdelay(1000);

        /* Whoops - the platform was unable to reboot. Tell the user! */
        printk("Reboot failed -- System halted\n");
        local_irq_disable();
        while (1);
}

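/*
 * Dump the saved register state: PC/LR symbols, the general purpose
 * registers, decoded CPSR flags (upper-case letter = bit set), and, when
 * CP15 is present, the control, translation table base and domain access
 * registers.
 */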
void __show_regs(struct pt_regs *regs)
{
        unsigned long flags;
        char buf[64];

        printk("CPU: %d    %s  (%s %.*s)\n",
                raw_smp_processor_id(), print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        print_symbol("PC is at %s\n", instruction_pointer(regs));
        print_symbol("LR is at %s\n", regs->ARM_lr);
        printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
               "sp : %08lx  ip : %08lx  fp : %08lx\n",
                regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
                regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
        printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
                regs->ARM_r10, regs->ARM_r9,
                regs->ARM_r8);
        printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
                regs->ARM_r7, regs->ARM_r6,
                regs->ARM_r5, regs->ARM_r4);
        printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
                regs->ARM_r3, regs->ARM_r2,
                regs->ARM_r1, regs->ARM_r0);

        flags = regs->ARM_cpsr;
        buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
        buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
        buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
        buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
        buf[4] = '\0';

        printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
                buf, interrupts_enabled(regs) ? "n" : "ff",
                fast_interrupts_enabled(regs) ? "n" : "ff",
                processor_modes[processor_mode(regs)],
                isa_modes[isa_mode(regs)],
                get_fs() == get_ds() ? "kernel" : "user");
#ifdef CONFIG_CPU_CP15
        {
                unsigned int ctrl;

                buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
                {
                        unsigned int transbase, dac;
                        asm("mrc p15, 0, %0, c2, c0\n\t"
                            "mrc p15, 0, %1, c3, c0\n"
                            : "=r" (transbase), "=r" (dac));
                        snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
                                transbase, dac);
                }
#endif
                asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

                printk("Control: %08x%s\n", ctrl, buf);
        }
#endif

        /* show_extra_register_data(regs, 128); */
}

void show_regs(struct pt_regs * regs)
{
        printk("\n");
        printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
        __show_regs(regs);
        dump_stack();
}


ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
}

void flush_thread(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);

        memset(thread->used_cp, 0, sizeof(thread->used_cp));
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
        memset(&thread->fpstate, 0, sizeof(union fp_state));

        thread_notify(THREAD_NOTIFY_FLUSH, thread);
}

void release_thread(struct task_struct *dead_task)
{
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

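/*
 * Set up the child's kernel stack frame and saved CPU context so that it
 * resumes in ret_from_fork. Per the HUB marker below, the TLS handling
 * mirrors the upstream fix associated with CVE-2014-9870: the TLS value
 * is taken from the child's r3 only when CLONE_SETTLS was requested, and
 * the user read/write TLS register is preserved via get_tpuser().
 */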
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
            unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *thread = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);

        *childregs = *regs;
        childregs->ARM_r0 = 0;
        childregs->ARM_sp = stack_start;

        memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
        thread->cpu_context.sp = (unsigned long)childregs;
        thread->cpu_context.pc = (unsigned long)ret_from_fork;

        clear_ptrace_hw_breakpoint(p);

        /* Fix for HUB: CVE-2014-9870 */
        if (clone_flags & CLONE_SETTLS)
                thread->tp_value[0] = childregs->ARM_r3;
        thread->tp_value[1] = get_tpuser();

        thread_notify(THREAD_NOTIFY_COPY, thread);

        return 0;
}

/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
        elf_core_copy_regs(elfregs, task_pt_regs(t));
        return 1;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
        struct thread_info *thread = current_thread_info();
        int used_math = thread->used_cp[1] | thread->used_cp[2];

        if (used_math)
                memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

        return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);

/*
 * Shuffle the argument into the correct register before calling the
 * thread function.  r4 is the thread argument, r5 is the pointer to
 * the thread function, and r6 points to the exit function.
 */
extern void kernel_thread_helper(void);
asm(    ".pushsection .text\n"
"       .align\n"
"       .type   kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
#ifdef CONFIG_TRACE_IRQFLAGS
"       bl      trace_hardirqs_on\n"
#endif
"       msr     cpsr_c, r7\n"
"       mov     r0, r4\n"
"       mov     lr, r6\n"
"       mov     pc, r5\n"
"       .size   kernel_thread_helper, . - kernel_thread_helper\n"
"       .popsection");

#ifdef CONFIG_ARM_UNWIND
extern void kernel_thread_exit(long code);
asm(    ".pushsection .text\n"
"       .align\n"
"       .type   kernel_thread_exit, #function\n"
"kernel_thread_exit:\n"
"       .fnstart\n"
"       .cantunwind\n"
"       bl      do_exit\n"
"       nop\n"
"       .fnend\n"
"       .size   kernel_thread_exit, . - kernel_thread_exit\n"
"       .popsection");
#else
#define kernel_thread_exit      do_exit
#endif
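/*
 * kernel_thread_exit is marked .cantunwind, which appears to exist so the
 * ARM unwinder stops there rather than walking past the end of a kernel
 * thread's stack when backtracing through do_exit.
 */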

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.ARM_r4 = (unsigned long)arg;
        regs.ARM_r5 = (unsigned long)fn;
        regs.ARM_r6 = (unsigned long)kernel_thread_exit;
        regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
        regs.ARM_pc = (unsigned long)kernel_thread_helper;
        regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;

        return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
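/*
 * Hypothetical usage sketch (worker_fn, worker_data and do_work are
 * illustrative names, not from this file):
 *
 *	static int worker_fn(void *data)
 *	{
 *		do_work(data);
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(worker_fn, &worker_data, CLONE_FS | CLONE_FILES);
 *	if (pid < 0)
 *		pr_err("failed to start worker: %d\n", (int)pid);
 *
 * The new thread starts in kernel_thread_helper with IRQs initially masked
 * (ARM_cpsr includes PSR_I_BIT); r7 holds the CPSR value it switches to
 * before branching to fn, and kernel_thread_exit runs when fn returns.
 */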

unsigned long get_wchan(struct task_struct *p)
{
        struct stackframe frame;
        unsigned long stack_page;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        frame.fp = thread_saved_fp(p);
        frame.sp = thread_saved_sp(p);
        frame.lr = 0;                   /* recovered from the stack */
        frame.pc = thread_saved_pc(p);
        stack_page = (unsigned long)task_stack_page(p);
        do {
                if (frame.sp < stack_page ||
                    frame.sp >= stack_page + THREAD_SIZE ||
                    unwind_frame(&frame) < 0)
                        return 0;
                if (!in_sched_functions(frame.pc))
                        return frame.pc;
        } while (count++ < 16);
        return 0;
}

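/*
 * Randomize the heap start within a 32MiB (0x02000000) window above the
 * current brk; randomize_range() returns 0 on failure, in which case we
 * fall back to the unrandomized mm->brk.
 */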
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

#ifdef CONFIG_MMU

/*
 * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
 * initialized by pgtable_page_ctor() then a coredump of the vector page will
 * fail.
 */
static int __init vectors_user_mapping_init_page(void)
{
        struct page *page;
        unsigned long addr = 0xffff0000;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        page = pmd_page(*(pmd));

        pgtable_page_ctor(page);

        return 0;
}
late_initcall(vectors_user_mapping_init_page);

/*
 * The vectors page is always readable from user space for the
 * atomic helpers and the signal restart code. Insert it into the
 * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
 */
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        gate_vma.vm_start = 0xffff0000;
        gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
        gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
        gate_vma.vm_flags = VM_READ | VM_EXEC |
                            VM_MAYREAD | VM_MAYEXEC;
        return 0;
}
arch_initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
}

int in_gate_area_no_mm(unsigned long addr)
{
        return in_gate_area(NULL, addr);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        return (vma == &gate_vma) ? "[vectors]" : NULL;
}
#endif