/*
 * linux/arch/arm/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>
#include <linux/slab.h>

#include "fault.h"

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, fsr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
			addr, (long long)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		printk(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while (0);

	printk("\n");
}
#else	/* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif	/* CONFIG_MMU */

/*
 * Oops. The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	printk(KERN_ALERT
		"Unable to handle kernel %s at virtual address %08lx\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, unsigned int sig, int code,
		struct pt_regs *regs)
{
	struct siginfo si;

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if (fsr & FSR_WRITE)
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}

static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

check_stack:
	/* Don't allow expansion below FIRST_USER_ADDRESS */
	if (vma->vm_flags & VM_GROWSDOWN &&
	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	int write = fsr & FSR_WRITE;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				(write ? FAULT_FLAG_WRITE : 0);

	if (notify_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (!mm || pagefault_disabled())
		goto no_context;

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * down_read()
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, flags, tsk);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else	/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif	/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task. If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (interrupts_enabled(regs))
		local_irq_enable();

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	/*
	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
	 */
	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to call the
	 * pmd_none() check for the entry that really corresponds to the
	 * address, not for the first entry of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else	/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif	/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	if (interrupts_enabled(regs))
		local_irq_enable();

	do_bad_area(addr, fsr, regs);
	return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}

#ifdef CONFIG_MODEM_CODE_IS_MAPPING
/* #define __codetext __attribute__((__section__(".modem.text"))) */
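
/*
 * Demand paging of modem code (summary of the code below): modem text is
 * not mapped up front. A data or prefetch abort on an address inside the
 * modem text window (cpps_global_var.cpko_text_start .. modem_text_end)
 * is routed to read_code_mapping(), which reads the missing page from the
 * modem code file, maps it into the kernel page tables and records it on
 * a per access-technology list. shrink_modem_mem() later unmaps and frees
 * the pages belonging to the access technologies that are not in use.
 */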

static DECLARE_RWSEM(shrinker_rwsem);
atomic_t _code_page_count = ATOMIC_INIT(0);

struct addr_info {
	struct list_head node;
	unsigned long vaddr;
	unsigned long kaddr;
	unsigned long page_index;
};

enum modem_access_technology {
	GSM = 0,
	UTRAN = 1,
	LTE = 2,
	COM = 3,
	NR_MODEM_ACCESS = 4
};

struct list_head modem_page_list[NR_MODEM_ACCESS] = {
	LIST_HEAD_INIT(modem_page_list[0]),
	LIST_HEAD_INIT(modem_page_list[1]),
	LIST_HEAD_INIT(modem_page_list[2]),
	LIST_HEAD_INIT(modem_page_list[3]),
};

/*
 * One bit and one completion per modem text page: page_used marks pages
 * that are (being) read in, page_completion lets concurrent faulting
 * threads wait for the first reader to finish mapping the page.
 */
unsigned int page_used[40];
struct completion page_completion[40*32];

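/*
 * Helpers to tear down the kernel mapping of a previously faulted-in
 * modem text page: walk init_mm's page tables for [addr, end) and clear
 * the PTEs one level at a time (similar in structure to the kernel's
 * vmalloc unmap path).
 */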
static void unmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void unmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		unmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void unmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		unmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void unmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		unmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

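/*
 * Release the modem code pages of every access technology except
 * 'access_type': unhook the per-technology list under shrinker_rwsem,
 * then for each page clear its "used" bit and completion, unmap it from
 * the kernel page tables, flush caches/TLB and free the backing page.
 */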
void shrink_modem_mem(unsigned int access_type)
{
	int i = 0;
	unsigned long vaddr;
	struct addr_info *addr, *tmp_addr;
	struct list_head tmp_page_list;
#ifdef CONFIG_DEBUG_RODATA
	unsigned long flags;
#endif

	for (i = 0; i < NR_MODEM_ACCESS; i++) {
		if (i == access_type)
			continue;

		down_write(&shrinker_rwsem);
		list_replace_init(&modem_page_list[i], &tmp_page_list);
		up_write(&shrinker_rwsem);
		list_for_each_entry_safe(addr, tmp_addr, &tmp_page_list, node) {
			list_del_init(&addr->node);
			page_completion[addr->page_index].done = 0;
			page_used[addr->page_index/BITS_PER_LONG] &= ~(1 << (addr->page_index % BITS_PER_LONG));
			vaddr = addr->vaddr & PAGE_MASK;
			if (vaddr < cpps_global_var.cpko_text_start || vaddr > cpps_global_var.modem_text_end)
				panic("addr_info: %p is corrupted", addr);
			flush_cache_vunmap(vaddr, vaddr + PAGE_SIZE);
			unmap_page_range(vaddr, vaddr + PAGE_SIZE);
			flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
#ifdef CONFIG_DEBUG_RODATA
			/* the page was made read-only when it was filled */
			local_irq_save(flags);
			set_memory_rw(addr->kaddr, 1);
			local_irq_restore(flags);
#endif
			free_page(addr->kaddr);
			kfree(addr);

			atomic_dec(&_code_page_count); /* after reclaim, this needs updating */
		}
	}
}
EXPORT_SYMBOL(shrink_modem_mem);
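
/*
 * virt_is_mapping() - walk the kernel page tables for 'addr' and return
 * the physical address it maps to, or 0 if no present mapping exists.
 */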
phys_addr_t virt_is_mapping(unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;
	unsigned long pfn;

	/* check whether we found an entry */
	pgd = pgd_offset_k(addr);

	if (!pgd_none(*pgd)) {
		/* get the page middle directory */
		pmd = pmd_offset(pgd, addr);
		/* check for a valid entry */
		if (!pmd_none(*pmd)) {
			/* get a pointer to the page table entry */
			ptep = pte_offset_map(pmd, addr);
			/* get the page table entry itself */
			pte = *ptep;
			/* check for a valid page */
			if (pte_present(pte)) {
				/* ptr_page = pte_page(pte); */
				pfn = pte_pfn(pte);
				/* pte_unmap(ptep); */
				return __pfn_to_phys(pfn);
			}
		}
	}
	return 0;
}

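/*
 * sync_pgd() - copy the init_mm (master) page table entries covering
 * 'addr' into the current hardware page directory, in the same way
 * do_translation_fault() does for vmalloc-space faults. Called once a
 * modem code page has been mapped into init_mm so the faulting context
 * can see it.
 */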
static int sync_pgd(unsigned long addr, unsigned int fsr,
		    struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	index = pgd_index(addr);

	/*
	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
	 */
	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to call the
	 * pmd_none() check for the entry that really corresponds to the
	 * address, not for the first entry of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}

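/*
 * read_code_file() - allocate a zeroed page and fill it with one page of
 * modem text, read from cpps_global_var.fp_code at the offset that
 * corresponds to 'page_index'. With CONFIG_DEBUG_RODATA the page is made
 * read-only afterwards, since it holds code.
 */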
unsigned long *read_code_file(unsigned long page_index)
{
	unsigned long *code_buf;
	ssize_t result;
	mm_segment_t old_fs;
	loff_t pos;
#ifdef CONFIG_DEBUG_RODATA
	unsigned long flags;
#endif

	code_buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
	if (!code_buf)
		panic("not enough memory!!");
	atomic_inc(&_code_page_count); /* after reclaim, this needs updating */

	if (IS_ERR(cpps_global_var.fp_code) || cpps_global_var.fp_code == NULL)
		panic("open file error\n");

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pos = page_index * PAGE_SIZE + cpps_global_var.modem_offset;
	result = vfs_read(cpps_global_var.fp_code, (char *)code_buf, PAGE_SIZE, &pos);
	if (result < 0)
		panic("read code file error\n");
#ifdef CONFIG_DEBUG_RODATA
	local_irq_save(flags);
	set_memory_ro((unsigned long)code_buf, 1);
	local_irq_restore(flags);
#endif
	set_fs(old_fs);
	return code_buf;
}

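/*
 * read_code_mapping() - fault handler for the modem text window. If the
 * page is already mapped in init_mm we only need to sync the page tables;
 * if another thread is currently reading it in we wait on its completion;
 * otherwise we claim the page in the page_used bitmap, read it from the
 * code file, map it at the faulting virtual address and wake any waiters.
 */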
void read_code_mapping(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	unsigned long offset;
	unsigned long vaddr;
	const struct mem_type *mtype;
	unsigned long *vir_codebuf;
	unsigned long page_index;
	unsigned long page_shift;
	struct addr_info *addr_info;

	if (virt_is_mapping(addr & PAGE_MASK) != 0) {
		sync_pgd(addr & PAGE_MASK, fsr, regs);
		return;
	}

	vaddr = addr & PAGE_MASK;
	offset = vaddr & (~cpps_global_var.cpko_text_start);
	page_index = offset >> PAGE_SHIFT;
	page_shift = page_index % BITS_PER_LONG;

	if ((page_used[page_index/BITS_PER_LONG] >> page_shift) & 0x1) {
		/* someone else is already reading this page in; wait for it */
		wait_for_completion(&page_completion[page_index]);
		sync_pgd(vaddr, fsr, regs);
		return;
	} else {
		page_used[page_index/BITS_PER_LONG] |= (1 << page_shift);
	}

	local_irq_enable();
	vir_codebuf = read_code_file(page_index);

	/* add vir_codebuf to the per access-technology list for this address */
	addr_info = kzalloc(sizeof(struct addr_info), GFP_KERNEL);
	if (!addr_info)
		panic("not enough memory!!");
	addr_info->kaddr = (unsigned long)vir_codebuf;
	addr_info->vaddr = addr;
	addr_info->page_index = page_index;
	down_write(&shrinker_rwsem);
	if (vaddr < cpps_global_var.__utran_modem_text_start)
		list_add(&addr_info->node, &modem_page_list[GSM]);
	else if (vaddr < cpps_global_var.__lte_modem_text_start)
		list_add(&addr_info->node, &modem_page_list[UTRAN]);
	else if (vaddr < cpps_global_var.__comm_modem_text_start)
		list_add(&addr_info->node, &modem_page_list[LTE]);
	else
		list_add(&addr_info->node, &modem_page_list[COM]);
	up_write(&shrinker_rwsem);

	local_irq_disable();
	mtype = get_mem_type(MT_MEMORY);
	ioremap_page(vaddr, __pa(vir_codebuf), mtype);
	sync_pgd(vaddr, fsr, regs);
	flush_icache_range(vaddr, vaddr + PAGE_SIZE);

	if (waitqueue_active(&page_completion[page_index].wait))
		complete_all(&page_completion[page_index]); /* after reclaim, done needs clearing */
	return;
}
#endif

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
	struct siginfo info;

#ifdef CONFIG_MODEM_CODE_IS_MAPPING
	if (addr != 0 && addr >= cpps_global_var.cpko_text_start && addr <= cpps_global_var.modem_text_end) {
		read_code_mapping(addr, fsr & ~FSR_LNX_PF, regs);
		return;
	}
#endif

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, fsr, 0);
}

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
	struct siginfo info;

#ifdef CONFIG_MODEM_CODE_IS_MAPPING
	if (addr != 0 && addr >= cpps_global_var.cpko_text_start && addr <= cpps_global_var.modem_text_end) {
		read_code_mapping(addr, ifsr | FSR_LNX_PF, regs);
		return;
	}
#endif

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, ifsr, 0);
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
#ifdef CONFIG_MODEM_CODE_IS_MAPPING
	int index;
#endif

	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

#ifdef CONFIG_MODEM_CODE_IS_MAPPING
	/* one completion per modem text page, matching page_completion[] */
	for (index = 0; index < 40*32; index++)
		init_completion(&page_completion[index]);
#endif
	return 0;
}

arch_initcall(exceptions_init);
#endif