// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/fault.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>

extern void die_if_kernel(char *, struct pt_regs *, long);

int send_fault_sig(struct pt_regs *regs)
{
	int signo, si_code;
	void __user *addr;

	signo = current->thread.signo;
	si_code = current->thread.code;
	addr = (void __user *)current->thread.faddr;
	pr_debug("send_fault_sig: %p,%d,%d\n", addr, signo, si_code);

	if (user_mode(regs)) {
		force_sig_fault(signo, si_code, addr, current);
	} else {
		if (fixup_exception(regs))
			return -1;

		//if (signo == SIGBUS)
		//	force_sig_fault(signo, si_code, addr, current);

		/*
		 * Oops. The kernel tried to access some bad page. We'll have to
		 * terminate things with extreme prejudice.
		 */
		if ((unsigned long)addr < PAGE_SIZE)
			pr_alert("Unable to handle kernel NULL pointer dereference");
		else
			pr_alert("Unable to handle kernel access");
		pr_cont(" at virtual address %p\n", addr);
		die_if_kernel("Oops", regs, 0 /*error_code*/);
		do_exit(SIGKILL);
	}

	return 1;
}
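
/*
 * Illustrative sketch, not part of the original file: send_fault_sig()
 * expects the caller to have filled in current->thread.{signo,code,faddr}
 * first, as the bus-error handlers in arch/m68k/kernel/traps.c and the
 * map_err/acc_err paths below do. A hypothetical caller would look like:
 *
 *	current->thread.signo = SIGSEGV;
 *	current->thread.code  = SEGV_MAPERR;
 *	current->thread.faddr = fault_address;
 *	return send_fault_sig(regs);
 */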
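/*
 * Illustrative sketch, not part of the original file: the error_code
 * bit layout documented on do_page_fault() below could be decoded with
 * helpers along these lines (the names are hypothetical); the switch
 * on (error_code & 3) in do_page_fault() open-codes the same tests.
 */
static inline bool fault_was_write(unsigned long error_code)
{
	return error_code & 2;	/* bit 1: 0 = read, 1 = write */
}

static inline bool fault_was_protection(unsigned long error_code)
{
	return error_code & 1;	/* bit 0: 0 = no page found, 1 = protection */
}
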
/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %lu, %p\n",
		 regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto map_err;
	/* I/O mappings are never demand-paged, so a fault on one is an error */
	if (vma->vm_flags & VM_IO)
		goto acc_err;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto map_err;
	if (user_mode(regs)) {
		/* Accessing the stack below usp is always a bug.  The
		   "+ 256" is there due to some instructions doing
		   pre-decrement on the stack and that doesn't show up
		   until later.  */
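		/*
		 * Illustrative note, not in the original: an instruction
		 * like "movem.l d2-d7/a2-a6,-(sp)" decrements the stack
		 * pointer before it stores, so the faulting address may
		 * lie slightly below the USP value read back here.
		 */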
		if (address + 256 < rdusp())
			goto map_err;
	}
	if (expand_stack(vma, address))
		goto map_err;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	pr_debug("do_page_fault: good_area\n");
	switch (error_code & 3) {
	default:	/* 3: write, present */
		/* fall through */
	case 2:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto acc_err;
		flags |= FAULT_FLAG_WRITE;
		break;
	case 1:		/* read, present */
		goto acc_err;
	case 0:		/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto acc_err;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(vma, address, flags);
	pr_debug("handle_mm_fault returns %x\n", fault);

	/*
	 * On VM_FAULT_RETRY the mmap_sem has already been released by
	 * handle_mm_fault(), so we can return without dropping it when
	 * a fatal signal is pending.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto map_err;
		else if (fault & VM_FAULT_SIGBUS)
			goto bus_err;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return 0;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return 0;

no_context:
	current->thread.signo = SIGBUS;
	current->thread.faddr = address;
	return send_fault_sig(regs);

bus_err:
	current->thread.signo = SIGBUS;
	current->thread.code = BUS_ADRERR;
	current->thread.faddr = address;
	goto send_sig;

map_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_MAPERR;
	current->thread.faddr = address;
	goto send_sig;

acc_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_ACCERR;
	current->thread.faddr = address;

send_sig:
	up_read(&mm->mmap_sem);
	return send_fault_sig(regs);
}
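
/*
 * Illustrative note, not in the original: on m68k this handler is
 * reached from the bus-error exception path in
 * arch/m68k/kernel/traps.c, which decodes the fault address and the
 * access type from the CPU's exception stack frame before calling
 * do_page_fault().
 */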