[Feature][task-view-998] Merge P56U10 version, ZXW code
Only Configure: No
Affected branch: master
Affected module: unknown
Does this affect both ZXIC and MTK: ZXIC only
Self-test: No
Doc Update: No
Change-Id: I466f2ab935c5ede0be1803c75518b2cc4f939c15
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/mm/fault.c b/ap/os/linux/linux-3.4.x/arch/arm/mm/fault.c
index 9cef922..cbd0917 100755
--- a/ap/os/linux/linux-3.4.x/arch/arm/mm/fault.c
+++ b/ap/os/linux/linux-3.4.x/arch/arm/mm/fault.c
@@ -1,4 +1,13 @@
-
+/*
+ * linux/arch/arm/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Modifications for ARM processor (c) 1995-2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
@@ -10,295 +19,928 @@
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
+
#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>
+
#include <asm/mach/map.h>
#include <linux/slab.h>
+
#include "fault.h"
+
#ifdef CONFIG_MMU
+
#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs*regs,unsigned int fsr){int
-ret=(0xe17+2085-0x163c);if(!user_mode(regs)){preempt_disable();if(kprobe_running
-()&&kprobe_fault_handler(regs,fsr))ret=(0x8c+9569-0x25ec);preempt_enable();}
-return ret;}
-#else
-static inline int notify_page_fault(struct pt_regs*regs,unsigned int fsr){return
-(0x1483+202-0x154d);}
-#endif
-void show_pte(struct mm_struct*mm,unsigned long addr){pgd_t*pgd;if(!mm)mm=&
-init_mm;printk(KERN_ALERT"\x70\x67\x64\x20\x3d\x20\x25\x70" "\n",mm->pgd);pgd=
-pgd_offset(mm,addr);printk(KERN_ALERT
-"\x5b\x25\x30\x38\x6c\x78\x5d\x20\x2a\x70\x67\x64\x3d\x25\x30\x38\x6c\x6c\x78",
-addr,(long long)pgd_val(*pgd));do{pud_t*pud;pmd_t*pmd;pte_t*pte;if(pgd_none(*pgd
-))break;if(pgd_bad(*pgd)){printk("\x28\x62\x61\x64\x29");break;}pud=pud_offset(
-pgd,addr);if(PTRS_PER_PUD!=(0x1643+673-0x18e3))printk(
-"\x2c\x20\x2a\x70\x75\x64\x3d\x25\x30\x38\x6c\x6c\x78",(long long)pud_val(*pud))
-;if(pud_none(*pud))break;if(pud_bad(*pud)){printk("\x28\x62\x61\x64\x29");break;
-}pmd=pmd_offset(pud,addr);if(PTRS_PER_PMD!=(0x2f2+3649-0x1132))printk(
-"\x2c\x20\x2a\x70\x6d\x64\x3d\x25\x30\x38\x6c\x6c\x78",(long long)pmd_val(*pmd))
-;if(pmd_none(*pmd))break;if(pmd_bad(*pmd)){printk("\x28\x62\x61\x64\x29");break;
-}if(PageHighMem(pfn_to_page(pmd_val(*pmd)>>PAGE_SHIFT)))break;pte=pte_offset_map
-(pmd,addr);printk("\x2c\x20\x2a\x70\x74\x65\x3d\x25\x30\x38\x6c\x6c\x78",(long
-long)pte_val(*pte));
-#ifndef CONFIG_ARM_LPAE
-printk("\x2c\x20\x2a\x70\x70\x74\x65\x3d\x25\x30\x38\x6c\x6c\x78",(long long)
-pte_val(pte[PTE_HWTABLE_PTRS]));
-#endif
-pte_unmap(pte);}while((0xd42+2541-0x172f));printk("\n");}
-#else
-void show_pte(struct mm_struct*mm,unsigned long addr){}
-#endif
-static void __do_kernel_fault(struct mm_struct*mm,unsigned long addr,unsigned
-int fsr,struct pt_regs*regs){if(fixup_exception(regs))return;bust_spinlocks(
-(0xc15+4433-0x1d65));printk(KERN_ALERT
-"\x55\x6e\x61\x62\x6c\x65\x20\x74\x6f\x20\x68\x61\x6e\x64\x6c\x65\x20\x6b\x65\x72\x6e\x65\x6c\x20\x25\x73\x20\x61\x74\x20\x76\x69\x72\x74\x75\x61\x6c\x20\x61\x64\x64\x72\x65\x73\x73\x20\x25\x30\x38\x6c\x78" "\n"
-,(addr<PAGE_SIZE)?
-"\x4e\x55\x4c\x4c\x20\x70\x6f\x69\x6e\x74\x65\x72\x20\x64\x65\x72\x65\x66\x65\x72\x65\x6e\x63\x65"
-:"\x70\x61\x67\x69\x6e\x67\x20\x72\x65\x71\x75\x65\x73\x74",addr);show_pte(mm,
-addr);die("\x4f\x6f\x70\x73",regs,fsr);bust_spinlocks((0xd40+5536-0x22e0));
-do_exit(SIGKILL);}static void __do_user_fault(struct task_struct*tsk,unsigned
-long addr,unsigned int fsr,unsigned int sig,int code,struct pt_regs*regs){struct
- siginfo si;
-#ifdef CONFIG_DEBUG_USER
-if(((user_debug&UDBG_SEGV)&&(sig==SIGSEGV))||((user_debug&UDBG_BUS)&&(sig==
-SIGBUS))){printk(KERN_DEBUG
-"\x25\x73\x3a\x20\x75\x6e\x68\x61\x6e\x64\x6c\x65\x64\x20\x70\x61\x67\x65\x20\x66\x61\x75\x6c\x74\x20\x28\x25\x64\x29\x20\x61\x74\x20\x30\x78\x25\x30\x38\x6c\x78\x2c\x20\x63\x6f\x64\x65\x20\x30\x78\x25\x30\x33\x78" "\n"
-,tsk->comm,sig,addr,fsr);show_pte(tsk->mm,addr);show_regs(regs);}
-#endif
-tsk->thread.address=addr;tsk->thread.error_code=fsr;tsk->thread.trap_no=
-(0xbf5+5705-0x2230);si.si_signo=sig;si.si_errno=(0x1d10+1487-0x22df);si.si_code=
-code;si.si_addr=(void __user*)addr;force_sig_info(sig,&si,tsk);}void do_bad_area
-(unsigned long addr,unsigned int fsr,struct pt_regs*regs){struct task_struct*tsk
-=current;struct mm_struct*mm=tsk->active_mm;if(user_mode(regs))__do_user_fault(
-tsk,addr,fsr,SIGSEGV,SEGV_MAPERR,regs);else __do_kernel_fault(mm,addr,fsr,regs);
+static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
+{
+ int ret = 0;
+
+ if (!user_mode(regs)) {
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, fsr))
+ ret = 1;
+ preempt_enable();
+ }
+
+ return ret;
}
-#ifdef CONFIG_MMU
-#define VM_FAULT_BADMAP 65536
-#define VM_FAULT_BADACCESS 131072
-static inline bool access_error(unsigned int fsr,struct vm_area_struct*vma){
-unsigned int mask=VM_READ|VM_WRITE|VM_EXEC;if(fsr&FSR_WRITE)mask=VM_WRITE;if(fsr
-&FSR_LNX_PF)mask=VM_EXEC;return vma->vm_flags&mask?false:true;}static int
-__kprobes __do_page_fault(struct mm_struct*mm,unsigned long addr,unsigned int
-fsr,unsigned int flags,struct task_struct*tsk){struct vm_area_struct*vma;int
-fault;vma=find_vma(mm,addr);fault=VM_FAULT_BADMAP;if(unlikely(!vma))goto out;if(
-unlikely(vma->vm_start>addr))goto check_stack;good_area:if(access_error(fsr,vma)
-){fault=VM_FAULT_BADACCESS;goto out;}return handle_mm_fault(mm,vma,addr&
-PAGE_MASK,flags);check_stack:if(vma->vm_flags&VM_GROWSDOWN&&addr>=
-FIRST_USER_ADDRESS&&!expand_stack(vma,addr))goto good_area;out:return fault;}
-static int __kprobes do_page_fault(unsigned long addr,unsigned int fsr,struct
-pt_regs*regs){struct task_struct*tsk;struct mm_struct*mm;int fault,sig,code;int
-write=fsr&FSR_WRITE;unsigned int flags=FAULT_FLAG_ALLOW_RETRY|
-FAULT_FLAG_KILLABLE|(write?FAULT_FLAG_WRITE:(0x83+6084-0x1847));if(
-notify_page_fault(regs,fsr))return(0x13b6+3139-0x1ff9);tsk=current;mm=tsk->mm;if
-(interrupts_enabled(regs))local_irq_enable();if(!mm||pagefault_disabled())goto
-no_context;if(!down_read_trylock(&mm->mmap_sem)){if(!user_mode(regs)&&!
-search_exception_tables(regs->ARM_pc))goto no_context;retry:down_read(&mm->
-mmap_sem);}else{might_sleep();
-#ifdef CONFIG_DEBUG_VM
-if(!user_mode(regs)&&!search_exception_tables(regs->ARM_pc))goto no_context;
-#endif
-}fault=__do_page_fault(mm,addr,fsr,flags,tsk);if((fault&VM_FAULT_RETRY)&&
-fatal_signal_pending(current))return(0xea5+2777-0x197e);perf_sw_event(
-PERF_COUNT_SW_PAGE_FAULTS,(0x11ef+2350-0x1b1c),regs,addr);if(!(fault&
-VM_FAULT_ERROR)&&flags&FAULT_FLAG_ALLOW_RETRY){if(fault&VM_FAULT_MAJOR){tsk->
-maj_flt++;perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,(0xa32+629-0xca6),regs,
-addr);}else{tsk->min_flt++;perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-(0x11a1+4424-0x22e8),regs,addr);}if(fault&VM_FAULT_RETRY){flags&=~
-FAULT_FLAG_ALLOW_RETRY;goto retry;}}up_read(&mm->mmap_sem);if(likely(!(fault&(
-VM_FAULT_ERROR|VM_FAULT_BADMAP|VM_FAULT_BADACCESS))))return(0x143c+1796-0x1b40);
-if(fault&VM_FAULT_OOM){pagefault_out_of_memory();return(0x600+3648-0x1440);}if(!
-user_mode(regs))goto no_context;if(fault&VM_FAULT_SIGBUS){sig=SIGBUS;code=
-BUS_ADRERR;}else{sig=SIGSEGV;code=fault==VM_FAULT_BADACCESS?SEGV_ACCERR:
-SEGV_MAPERR;}__do_user_fault(tsk,addr,fsr,sig,code,regs);return
-(0xac0+3527-0x1887);no_context:__do_kernel_fault(mm,addr,fsr,regs);return
-(0x191a+3158-0x2570);}
-#else
-static int do_page_fault(unsigned long addr,unsigned int fsr,struct pt_regs*regs
-){return(0x255+6734-0x1ca3);}
-#endif
-#ifdef CONFIG_MMU
-static int __kprobes do_translation_fault(unsigned long addr,unsigned int fsr,
-struct pt_regs*regs){unsigned int index;pgd_t*pgd,*pgd_k;pud_t*pud,*pud_k;pmd_t*
-pmd,*pmd_k;if(addr<TASK_SIZE)return do_page_fault(addr,fsr,regs);if(
-interrupts_enabled(regs))local_irq_enable();if(user_mode(regs))goto bad_area;
-index=pgd_index(addr);pgd=cpu_get_pgd()+index;pgd_k=init_mm.pgd+index;if(
-pgd_none(*pgd_k))goto bad_area;if(!pgd_present(*pgd))set_pgd(pgd,*pgd_k);pud=
-pud_offset(pgd,addr);pud_k=pud_offset(pgd_k,addr);if(pud_none(*pud_k))goto
-bad_area;if(!pud_present(*pud))set_pud(pud,*pud_k);pmd=pmd_offset(pud,addr);
-pmd_k=pmd_offset(pud_k,addr);
-#ifdef CONFIG_ARM_LPAE
-index=(0xeef+6127-0x26de);
#else
-index=(addr>>SECTION_SHIFT)&(0x6e5+1582-0xd12);
+static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
+{
+ return 0;
+}
#endif
-if(pmd_none(pmd_k[index]))goto bad_area;copy_pmd(pmd,pmd_k);return
-(0x1722+1498-0x1cfc);bad_area:do_bad_area(addr,fsr,regs);return
-(0x1cba+276-0x1dce);}
-#else
-static int do_translation_fault(unsigned long addr,unsigned int fsr,struct
-pt_regs*regs){return(0x163f+815-0x196e);}
-#endif
-static int do_sect_fault(unsigned long addr,unsigned int fsr,struct pt_regs*regs
-){if(interrupts_enabled(regs))local_irq_enable();do_bad_area(addr,fsr,regs);
-return(0x11f9+1016-0x15f1);}static int do_bad(unsigned long addr,unsigned int
-fsr,struct pt_regs*regs){return(0x461+8053-0x23d5);}struct fsr_info{int(*fn)(
-unsigned long addr,unsigned int fsr,struct pt_regs*regs);int sig;int code;const
-char*name;};
+
+/*
+ * This is useful to dump out the page tables associated with
+ * 'addr' in mm 'mm'.
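+ *
+ * Typical output for an unmapped address (illustrative values):
+ *   pgd = c0004000
+ *   [00001000] *pgd=00000000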
+ */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+
+ if (!mm)
+ mm = &init_mm;
+
+ printk(KERN_ALERT "pgd = %p\n", mm->pgd);
+ pgd = pgd_offset(mm, addr);
+ printk(KERN_ALERT "[%08lx] *pgd=%08llx",
+ addr, (long long)pgd_val(*pgd));
+
+ do {
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (pgd_none(*pgd))
+ break;
+
+ if (pgd_bad(*pgd)) {
+ printk("(bad)");
+ break;
+ }
+
+ pud = pud_offset(pgd, addr);
+ if (PTRS_PER_PUD != 1)
+ printk(", *pud=%08llx", (long long)pud_val(*pud));
+
+ if (pud_none(*pud))
+ break;
+
+ if (pud_bad(*pud)) {
+ printk("(bad)");
+ break;
+ }
+
+ pmd = pmd_offset(pud, addr);
+ if (PTRS_PER_PMD != 1)
+ printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
+
+ if (pmd_none(*pmd))
+ break;
+
+ if (pmd_bad(*pmd)) {
+ printk("(bad)");
+ break;
+ }
+
+ /* We must not map this if we have highmem enabled */
+ if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
+ break;
+
+ pte = pte_offset_map(pmd, addr);
+ printk(", *pte=%08llx", (long long)pte_val(*pte));
+#ifndef CONFIG_ARM_LPAE
+ printk(", *ppte=%08llx",
+ (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
+#endif
+ pte_unmap(pte);
+ } while(0);
+
+ printk("\n");
+}
+#else /* CONFIG_MMU */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{ }
+#endif /* CONFIG_MMU */
+
+/*
+ * Oops. The kernel tried to access some page that wasn't present.
+ */
+static void
+__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ /*
+ * Are we prepared to handle this kernel fault?
+ */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * No handler, we'll have to terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+ printk(KERN_ALERT
+ "Unable to handle kernel %s at virtual address %08lx\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
+
+ show_pte(mm, addr);
+ die("Oops", regs, fsr);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+}
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * User mode accesses just cause a SIGSEGV
+ */
+static void
+__do_user_fault(struct task_struct *tsk, unsigned long addr,
+ unsigned int fsr, unsigned int sig, int code,
+ struct pt_regs *regs)
+{
+ struct siginfo si;
+
+#ifdef CONFIG_DEBUG_USER
+ if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
+ ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
+ printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
+ tsk->comm, sig, addr, fsr);
+ show_pte(tsk->mm, addr);
+ show_regs(regs);
+ }
+#endif
+
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+ si.si_signo = sig;
+ si.si_errno = 0;
+ si.si_code = code;
+ si.si_addr = (void __user *)addr;
+ force_sig_info(sig, &si, tsk);
+}
+
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->active_mm;
+
+ /*
+ * If we are in kernel mode at this point, we
+ * have no context to handle this fault with.
+ */
+ if (user_mode(regs))
+ __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
+ else
+ __do_kernel_fault(mm, addr, fsr, regs);
+}
+
+#ifdef CONFIG_MMU
+#define VM_FAULT_BADMAP 0x010000
+#define VM_FAULT_BADACCESS 0x020000
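+
+/*
+ * Note: these sit above the generic VM_FAULT_* bits, so that
+ * __do_page_fault() can report either kind of failure through the
+ * same return value without ambiguity.
+ */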
+
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
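+ *
+ * For example, a prefetch abort arrives with FSR_LNX_PF set, so the
+ * mask collapses to VM_EXEC and a fault in a non-executable mapping
+ * is reported as an access error.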
+ */
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+{
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+ if (fsr & FSR_WRITE)
+ mask = VM_WRITE;
+ if (fsr & FSR_LNX_PF)
+ mask = VM_EXEC;
+
+ return vma->vm_flags & mask ? false : true;
+}
+
+static int __kprobes
+__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+ unsigned int flags, struct task_struct *tsk)
+{
+ struct vm_area_struct *vma;
+ int fault;
+
+ vma = find_vma(mm, addr);
+ fault = VM_FAULT_BADMAP;
+ if (unlikely(!vma))
+ goto out;
+ if (unlikely(vma->vm_start > addr))
+ goto check_stack;
+
+ /*
+ * Ok, we have a good vm_area for this
+ * memory access, so we can handle it.
+ */
+good_area:
+ if (access_error(fsr, vma)) {
+ fault = VM_FAULT_BADACCESS;
+ goto out;
+ }
+
+ return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+
+check_stack:
+ /* Don't allow expansion below FIRST_USER_ADDRESS */
+ if (vma->vm_flags & VM_GROWSDOWN &&
+ addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
+ goto good_area;
+out:
+ return fault;
+}
+
+static int __kprobes
+do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ int fault, sig, code;
+ int write = fsr & FSR_WRITE;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0);
+
+ if (notify_page_fault(regs, fsr))
+ return 0;
+
+ tsk = current;
+ mm = tsk->mm;
+
+ /* Enable interrupts if they were enabled in the parent context. */
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ /*
+ * As per x86, we may deadlock here. However, since the kernel only
+ * validly references user space from well defined areas of the code,
+ * we can bug out early if this is from code which shouldn't.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
+ goto no_context;
+retry:
+ down_read(&mm->mmap_sem);
+ } else {
+ /*
+ * The above down_read_trylock() might have succeeded in
+ * which case, we'll have missed the might_sleep() from
+ * down_read()
+ */
+ might_sleep();
+#ifdef CONFIG_DEBUG_VM
+ if (!user_mode(regs) &&
+ !search_exception_tables(regs->ARM_pc))
+ goto no_context;
+#endif
+ }
+
+ fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+
+ /* If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_sem because
+ * it would already be released in __lock_page_or_retry in
+ * mm/filemap.c. */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return 0;
+
+ /*
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
+ */
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+ regs, addr);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+ regs, addr);
+ }
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
+ */
+ if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
+ return 0;
+
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return to
+ * userspace (which will retry the fault, or kill us if we
+ * got oom-killed)
+ */
+ pagefault_out_of_memory();
+ return 0;
+ }
+
+ /*
+ * If we are in kernel mode at this point, we
+ * have no context to handle this fault with.
+ */
+ if (!user_mode(regs))
+ goto no_context;
+
+ if (fault & VM_FAULT_SIGBUS) {
+ /*
+ * We had some memory, but were unable to
+ * successfully fix up this page fault.
+ */
+ sig = SIGBUS;
+ code = BUS_ADRERR;
+ } else {
+ /*
+ * Something tried to access memory that
+ * isn't in our memory map..
+ */
+ sig = SIGSEGV;
+ code = fault == VM_FAULT_BADACCESS ?
+ SEGV_ACCERR : SEGV_MAPERR;
+ }
+
+ __do_user_fault(tsk, addr, fsr, sig, code, regs);
+ return 0;
+
+no_context:
+ __do_kernel_fault(mm, addr, fsr, regs);
+ return 0;
+}
+#else /* CONFIG_MMU */
+static int
+do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ return 0;
+}
+#endif /* CONFIG_MMU */
+
+/*
+ * First Level Translation Fault Handler
+ *
+ * We enter here because the first level page table doesn't contain
+ * a valid entry for the address.
+ *
+ * If the address is in kernel space (>= TASK_SIZE), then we are
+ * probably faulting in the vmalloc() area.
+ *
+ * If the init_task's first level page tables contain the relevant
+ * entry, we copy it to this task. If not, we send the process
+ * a signal, fixup the exception, or oops the kernel.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may be in an
+ * interrupt or a critical region, and should only copy the information
+ * from the master page table, nothing more.
+ */
+#ifdef CONFIG_MMU
+static int __kprobes
+do_translation_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ unsigned int index;
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
+ if (user_mode(regs))
+ goto bad_area;
+
+ index = pgd_index(addr);
+
+ /*
+ * FIXME: CP15 C1 is write only on ARMv3 architectures.
+ */
+ pgd = cpu_get_pgd() + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (pgd_none(*pgd_k))
+ goto bad_area;
+ if (!pgd_present(*pgd))
+ set_pgd(pgd, *pgd_k);
+
+ pud = pud_offset(pgd, addr);
+ pud_k = pud_offset(pgd_k, addr);
+
+ if (pud_none(*pud_k))
+ goto bad_area;
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
+
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
+
+#ifdef CONFIG_ARM_LPAE
+ /*
+ * Only one hardware entry per PMD with LPAE.
+ */
+ index = 0;
+#else
+	/*
+	 * On ARM one Linux PGD entry contains two hardware entries (see page
+	 * tables layout in pgtable.h). We normally guarantee that we always
+	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
+	 * It can create individual L1 entries, so here we have to do the
+	 * pmd_none() check on the entry that really corresponds to the
+	 * address, not on the first of the pair.
+	 */
+ index = (addr >> SECTION_SHIFT) & 1;
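+	/*
+	 * For example, with 1MiB sections (SECTION_SHIFT == 20) a fault
+	 * at 0xc0100000 gives (0xc0100000 >> 20) & 1 == 1, i.e. the
+	 * second hardware entry of the pair.
+	 */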
+#endif
+ if (pmd_none(pmd_k[index]))
+ goto bad_area;
+
+ copy_pmd(pmd, pmd_k);
+ return 0;
+
+bad_area:
+ do_bad_area(addr, fsr, regs);
+ return 0;
+}
+#else /* CONFIG_MMU */
+static int
+do_translation_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+#endif /* CONFIG_MMU */
+
+/*
+ * Some section permission faults need to be handled gracefully.
+ * They can happen due to a __{get,put}_user during an oops.
+ */
+static int
+do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
+ do_bad_area(addr, fsr, regs);
+ return 0;
+}
+
+/*
+ * This abort handler always returns "fault".
+ */
+static int
+do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ return 1;
+}
+
+struct fsr_info {
+ int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+ int sig;
+ int code;
+ const char *name;
+};
+
+/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif
-void __init hook_fault_code(int nr,int(*fn)(unsigned long,unsigned int,struct
-pt_regs*),int sig,int code,const char*name){if(nr<(0xce5+4250-0x1d7f)||nr>=
-ARRAY_SIZE(fsr_info))BUG();fsr_info[nr].fn=fn;fsr_info[nr].sig=sig;fsr_info[nr].
-code=code;fsr_info[nr].name=name;}
+
+void __init
+hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+ int sig, int code, const char *name)
+{
+ if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
+ BUG();
+
+ fsr_info[nr].fn = fn;
+ fsr_info[nr].sig = sig;
+ fsr_info[nr].code = code;
+ fsr_info[nr].name = name;
+}
+
#ifdef CONFIG_MODEM_CODE_IS_MAPPING
-static DECLARE_RWSEM(shrinker_rwsem);atomic_t _code_page_count=ATOMIC_INIT(
-(0x64c+3281-0x131d));struct addr_info{struct list_head node;unsigned long vaddr;
-unsigned long kaddr;unsigned long page_index;};enum modem_access_technology{GSM=
-(0x42f+2882-0xf71),UTRAN=(0x1869+204-0x1934),LTE=(0x1735+1609-0x1d7c),COM=
-(0x788+3773-0x1642),NR_MODEM_ACCESS=(0x4b8+5810-0x1b66)};struct list_head
-modem_page_list[NR_MODEM_ACCESS]={LIST_HEAD_INIT(modem_page_list[
-(0x5e7+4772-0x188b)]),LIST_HEAD_INIT(modem_page_list[(0xa89+6078-0x2246)]),
-LIST_HEAD_INIT(modem_page_list[(0x3ad+8939-0x2696)]),LIST_HEAD_INIT(
-modem_page_list[(0x19c3+1240-0x1e98)]),};unsigned int page_used[
-(0x195d+2789-0x241a)];struct completion page_completion[(0x51f+748-0x7e3)*
-(0x42f+1562-0xa29)];static void unmap_pte_range(pmd_t*pmd,unsigned long addr,
-unsigned long end){pte_t*pte;pte=pte_offset_kernel(pmd,addr);do{pte_t ptent=
-ptep_get_and_clear(&init_mm,addr,pte);WARN_ON(!pte_none(ptent)&&!pte_present(
-ptent));}while(pte++,addr+=PAGE_SIZE,addr!=end);}static void unmap_pmd_range(
-pud_t*pud,unsigned long addr,unsigned long end){pmd_t*pmd;unsigned long next;pmd
-=pmd_offset(pud,addr);do{next=pmd_addr_end(addr,end);if(pmd_none_or_clear_bad(
-pmd))continue;unmap_pte_range(pmd,addr,next);}while(pmd++,addr=next,addr!=end);}
-static void unmap_pud_range(pgd_t*pgd,unsigned long addr,unsigned long end){
-pud_t*pud;unsigned long next;pud=pud_offset(pgd,addr);do{next=pud_addr_end(addr,
-end);if(pud_none_or_clear_bad(pud))continue;unmap_pmd_range(pud,addr,next);}
-while(pud++,addr=next,addr!=end);}static void unmap_page_range(unsigned long
-addr,unsigned long end){pgd_t*pgd;unsigned long next;BUG_ON(addr>=end);pgd=
-pgd_offset_k(addr);do{next=pgd_addr_end(addr,end);if(pgd_none_or_clear_bad(pgd))
-continue;unmap_pud_range(pgd,addr,next);}while(pgd++,addr=next,addr!=end);}void
-shrink_modem_mem(unsigned int access_type){int i=(0xfc2+581-0x1207);unsigned
-long vaddr;struct addr_info*addr,*tmp_addr;struct list_head tmp_page_list;for(i=
-(0x5eb+4111-0x15fa);i<NR_MODEM_ACCESS;i++){if(i==access_type)continue;down_write
-(&shrinker_rwsem);list_replace_init(&modem_page_list[i],&tmp_page_list);up_write
-(&shrinker_rwsem);list_for_each_entry_safe(addr,tmp_addr,&tmp_page_list,node){
-list_del_init(&addr->node);page_completion[addr->page_index].done=
-(0x187+7014-0x1ced);page_used[addr->page_index/BITS_PER_LONG]&=~(
-(0x1b0a+2503-0x24d0)<<(addr->page_index%BITS_PER_LONG));vaddr=addr->vaddr&
-PAGE_MASK;if(vaddr<cpps_global_var.cpko_text_start||vaddr>cpps_global_var.
-modem_text_end){panic(
-"\x61\x64\x64\x72\x5f\x69\x6e\x66\x6f\x3a\x20\x25\x30\x38\x78\x20\x69\x73\x20\x20\x64\x65\x73\x74\x72\x6f\x79"
-,addr);}flush_cache_vunmap(vaddr,vaddr+PAGE_SIZE);unmap_page_range(vaddr,vaddr+
-PAGE_SIZE);flush_tlb_kernel_range(vaddr,vaddr+PAGE_SIZE);
+//#define __codetext __attribute__((__section__(".modem.text")))
+
+static DECLARE_RWSEM(shrinker_rwsem);
+atomic_t _code_page_count = ATOMIC_INIT(0);
+
+struct addr_info {
+ struct list_head node;
+ unsigned long vaddr;
+ unsigned long kaddr;
+ unsigned long page_index;
+};
+
+enum modem_access_technology {
+ GSM = 0,
+ UTRAN = 1,
+ LTE = 2,
+ COM = 3,
+	NR_MODEM_ACCESS = 4
+};
+
+struct list_head modem_page_list[NR_MODEM_ACCESS] = {
+ LIST_HEAD_INIT(modem_page_list[0]),
+ LIST_HEAD_INIT(modem_page_list[1]),
+ LIST_HEAD_INIT(modem_page_list[2]),
+ LIST_HEAD_INIT(modem_page_list[3]),
+};
+
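+/*
+ * page_used is a bitmap of 40 * 32 == 1280 bits, one per modem code
+ * page; page_completion provides one completion per page so that
+ * concurrent faults on the same page can wait for the first mapper.
+ */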
+unsigned int page_used[40];
+struct completion page_completion[40*32];
+
+static void unmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+{
+ pte_t *pte;
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void unmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ unmap_pte_range(pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
+}
+
+static void unmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ unmap_pmd_range(pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
+
+static void unmap_page_range(unsigned long addr, unsigned long end)
+{
+ pgd_t *pgd;
+ unsigned long next;
+
+ BUG_ON(addr >= end);
+ pgd = pgd_offset_k(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ unmap_pud_range(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+}
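+/*
+ * The three helpers above walk pgd -> pud -> pmd -> pte over a kernel
+ * range and clear the leaf entries, much like the generic vunmap
+ * teardown; callers flush caches and the TLB around the call.
+ */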
+
+void shrink_modem_mem(unsigned int access_type)
+{
+ int i = 0;
+ unsigned long vaddr;
+ struct addr_info *addr, *tmp_addr;
+ struct list_head tmp_page_list;
+
+	for (i = 0; i < NR_MODEM_ACCESS; i++) {
+ if (i == access_type)
+ continue;
+
+ down_write(&shrinker_rwsem);
+		list_replace_init(&modem_page_list[i], &tmp_page_list);
+ up_write(&shrinker_rwsem);
+ list_for_each_entry_safe(addr, tmp_addr, &tmp_page_list, node) {
+ list_del_init(&addr->node);
+ page_completion[addr->page_index].done = 0;
+			page_used[addr->page_index / BITS_PER_LONG] &=
+				~(1 << (addr->page_index % BITS_PER_LONG));
+			vaddr = addr->vaddr & PAGE_MASK;
+			if (vaddr < cpps_global_var.cpko_text_start ||
+			    vaddr > cpps_global_var.modem_text_end) {
+				panic("addr_info: %p is destroyed", addr);
+			}
+ flush_cache_vunmap(vaddr, vaddr + PAGE_SIZE);
+ unmap_page_range(vaddr, vaddr + PAGE_SIZE);
+ flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
#ifdef CONFIG_DEBUG_RODATA
-unsigned int flags;local_irq_save(flags);set_memory_rw(addr->kaddr,
-(0x4a0+48-0x4cf));local_irq_restore(flags);
+			unsigned long flags;
+			local_irq_save(flags);
+			set_memory_rw(addr->kaddr, 1);
+ local_irq_restore(flags);
#endif
-free_page(addr->kaddr);kfree(addr);atomic_dec(&_code_page_count);};}}
-EXPORT_SYMBOL(shrink_modem_mem);phys_addr_t virt_is_mapping(unsigned long addr){
-pgd_t*pgd;pmd_t*pmd;pte_t*ptep,pte;unsigned long pfn;pgd=pgd_offset_k(addr);if(!
-pgd_none(*pgd)){pmd=pmd_offset(pgd,addr);if(!pmd_none(*pmd)){ptep=pte_offset_map
-(pmd,addr);pte=*ptep;if(pte_present(pte)){pfn=pte_pfn(pte);return __pfn_to_phys(
-pfn);}}}return(0x9d4+475-0xbaf);}static int sync_pgd(unsigned long addr,unsigned
- int fsr,struct pt_regs*regs){unsigned int index;pgd_t*pgd,*pgd_k;pud_t*pud,*
-pud_k;pmd_t*pmd,*pmd_k;index=pgd_index(addr);pgd=cpu_get_pgd()+index;pgd_k=
-init_mm.pgd+index;if(pgd_none(*pgd_k))goto bad_area;if(!pgd_present(*pgd))
-set_pgd(pgd,*pgd_k);pud=pud_offset(pgd,addr);pud_k=pud_offset(pgd_k,addr);if(
-pud_none(*pud_k))goto bad_area;if(!pud_present(*pud))set_pud(pud,*pud_k);pmd=
-pmd_offset(pud,addr);pmd_k=pmd_offset(pud_k,addr);
-#ifdef CONFIG_ARM_LPAE
-index=(0x9fd+4051-0x19d0);
+ free_page(addr->kaddr);
+ kfree(addr);
+
+			atomic_dec(&_code_page_count); /* after reclaim, this count needs rework */
+		}
+
+ }
+}
+EXPORT_SYMBOL(shrink_modem_mem);
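+
+/*
+ * Return the physical address backing an already-mapped kernel
+ * virtual address, or 0 when any level of the walk is empty; 0
+ * doubles as the "not mapped" answer in read_code_mapping().
+ */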
+phys_addr_t virt_is_mapping(unsigned long addr)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+ unsigned long pfn;
+
+ /* check whether we found an entry */
+ pgd = pgd_offset_k(addr);
+
+	if (!pgd_none(*pgd)) {
+		/* get the page middle directory */
+		pmd = pmd_offset(pgd, addr);
+		/* check for a valid entry */
+		if (!pmd_none(*pmd)) {
+			/* get a pointer to the page table entry */
+			ptep = pte_offset_map(pmd, addr);
+			/* take a copy of the entry, then drop the mapping */
+			pte = *ptep;
+			pte_unmap(ptep);
+			/* check for a valid page */
+			if (pte_present(pte)) {
+				pfn = pte_pfn(pte);
+				return __pfn_to_phys(pfn);
+			}
+		}
+	}
+ return 0;
+}
+
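+/*
+ * sync_pgd() mirrors the kernel-space half of do_translation_fault():
+ * once a modem code page has been installed in init_mm, it copies the
+ * matching first-level entries into the active page tables.
+ */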
+static int sync_pgd(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ unsigned int index;
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ index = pgd_index(addr);
+
+ /*
+ * FIXME: CP15 C1 is write only on ARMv3 architectures.
+ */
+ pgd = cpu_get_pgd() + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (pgd_none(*pgd_k))
+ goto bad_area;
+ if (!pgd_present(*pgd))
+ set_pgd(pgd, *pgd_k);
+
+ pud = pud_offset(pgd, addr);
+ pud_k = pud_offset(pgd_k, addr);
+
+ if (pud_none(*pud_k))
+ goto bad_area;
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
+
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
+
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Only one hardware entry per PMD with LPAE.
+	 */
+	index = 0;
#else
-index=(addr>>SECTION_SHIFT)&(0x1525+1755-0x1bff);
+	/*
+	 * On ARM one Linux PGD entry contains two hardware entries (see page
+	 * tables layout in pgtable.h). We normally guarantee that we always
+	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
+	 * It can create individual L1 entries, so here we have to do the
+	 * pmd_none() check on the entry that really corresponds to the
+	 * address, not on the first of the pair.
+	 */
+	index = (addr >> SECTION_SHIFT) & 1;
#endif
-if(pmd_none(pmd_k[index]))goto bad_area;copy_pmd(pmd,pmd_k);return
-(0x222+5515-0x17ad);bad_area:do_bad_area(addr,fsr,regs);return(0x5a9+1791-0xca8)
-;}unsigned long*read_code_file(unsigned long page_index){unsigned long*code_buf;
-ssize_t result;code_buf=get_zeroed_page(GFP_ATOMIC);if(!code_buf)panic(
-"\x6d\x65\x6d\x65\x6f\x72\x79\x20\x6e\x6f\x74\x20\x65\x6e\x6f\x75\x67\x68\x21\x21"
-);atomic_inc(&_code_page_count);if(IS_ERR(cpps_global_var.fp_code)||
-cpps_global_var.fp_code==NULL){panic(
-"\x6f\x70\x65\x6e\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");}
-mm_segment_t old_fs;old_fs=get_fs();set_fs(KERNEL_DS);loff_t pos;pos=page_index*
-PAGE_SIZE+cpps_global_var.modem_offset;result=vfs_read(cpps_global_var.fp_code,(
-char*)code_buf,PAGE_SIZE,&pos);if(result<(0x261d+202-0x26e7)){panic(
-"\x72\x65\x61\x64\x20\x63\x6f\x64\x65\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n"
-);}
+ if (pmd_none(pmd_k[index]))
+ goto bad_area;
+ copy_pmd(pmd, pmd_k);
+ return 0;
+bad_area:
+ do_bad_area(addr, fsr, regs);
+ return 0;
+}
+
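+/*
+ * Read one page of modem code from the backing file into a freshly
+ * allocated kernel page (made read-only under CONFIG_DEBUG_RODATA);
+ * read_code_mapping() then maps it at the faulting virtual address.
+ */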
+unsigned long *read_code_file(unsigned long page_index)
+{
+	unsigned long *code_buf;
+	ssize_t result;
+	mm_segment_t old_fs;
+	loff_t pos;
+
+	code_buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
+	if (!code_buf)
+		panic("not enough memory!");
+	atomic_inc(&_code_page_count); /* after reclaim, this count needs rework */
+
+	if (IS_ERR(cpps_global_var.fp_code) || cpps_global_var.fp_code == NULL)
+		panic("open file error\n");
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	pos = page_index * PAGE_SIZE + cpps_global_var.modem_offset;
+	result = vfs_read(cpps_global_var.fp_code, (char *)code_buf, PAGE_SIZE, &pos);
+	if (result < 0)
+		panic("read code file error\n");
#ifdef CONFIG_DEBUG_RODATA
-unsigned int flags;local_irq_save(flags);set_memory_ro((unsigned long)code_buf,
-(0x14db+4276-0x258e));local_irq_restore(flags);
+	unsigned long flags;
+	local_irq_save(flags);
+	set_memory_ro((unsigned long)code_buf, 1);
+ local_irq_restore(flags);
#endif
-set_fs(old_fs);return code_buf;}void read_code_mapping(unsigned long addr,
-unsigned int fsr,struct pt_regs*regs){unsigned long offset;unsigned long vaddr;
-const struct mem_type*mtype;unsigned long*vir_codebuf;unsigned long page_index;
-unsigned long page_shift;if(virt_is_mapping(addr&PAGE_MASK)!=(0x28c+598-0x4e2)){
-sync_pgd(addr&PAGE_MASK,fsr,regs);return;}vaddr=addr&PAGE_MASK;offset=vaddr&(~
-cpps_global_var.cpko_text_start);page_index=offset>>PAGE_SHIFT;page_shift=
-page_index%BITS_PER_LONG;if((page_used[page_index/BITS_PER_LONG]>>page_shift)&
-(0x4ef+7834-0x2388)){wait_for_completion(&page_completion[page_index]);sync_pgd(
-vaddr,fsr,regs);return;}else page_used[page_index/BITS_PER_LONG]|=(
-(0xf5f+4169-0x1fa7)<<page_shift);local_irq_enable();vir_codebuf=read_code_file(
-page_index);struct addr_info*addr_info;addr_info=kzalloc(sizeof(struct addr_info
-),GFP_KERNEL);addr_info->kaddr=vir_codebuf;addr_info->vaddr=addr;addr_info->
-page_index=page_index;down_write(&shrinker_rwsem);if(vaddr<cpps_global_var.
-__utran_modem_text_start)list_add(&addr_info->node,&modem_page_list[GSM]);else
-if(vaddr<cpps_global_var.__lte_modem_text_start)list_add(&addr_info->node,&
-modem_page_list[UTRAN]);else if(vaddr<cpps_global_var.__comm_modem_text_start)
-list_add(&addr_info->node,&modem_page_list[LTE]);else list_add(&addr_info->node,
-&modem_page_list[COM]);up_write(&shrinker_rwsem);local_irq_disable();mtype=
-get_mem_type(MT_MEMORY);ioremap_page(vaddr,__pa(vir_codebuf),mtype);sync_pgd(
-vaddr,fsr,regs);flush_icache_range(vaddr,vaddr+PAGE_SIZE);if(waitqueue_active(&
-page_completion[page_index].wait))complete_all(&page_completion[page_index]);
-return;}
+ set_fs(old_fs);
+ return code_buf;
+}
+
+void read_code_mapping(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+	unsigned long offset;
+	unsigned long vaddr;
+	const struct mem_type *mtype;
+	unsigned long *vir_codebuf;
+	unsigned long page_index;
+	unsigned long page_shift;
+	struct addr_info *addr_info;
+
+	if (virt_is_mapping(addr & PAGE_MASK) != 0) {
+		sync_pgd(addr & PAGE_MASK, fsr, regs);
+		return;
+	}
+
+ vaddr = addr & PAGE_MASK;
+ offset = vaddr & (~cpps_global_var.cpko_text_start);
+ page_index = offset >> PAGE_SHIFT;
+ page_shift = page_index % BITS_PER_LONG;
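+	/*
+	 * page_index counts pages from cpko_text_start; e.g. the fourth
+	 * code page has page_index == 3, tracked as bit 3 of page_used[0].
+	 */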
+
+	if ((page_used[page_index / BITS_PER_LONG] >> page_shift) & 0x1) {
+		wait_for_completion(&page_completion[page_index]);
+		sync_pgd(vaddr, fsr, regs);
+		return;
+	} else {
+		page_used[page_index / BITS_PER_LONG] |= (1 << page_shift);
+	}
+
+ local_irq_enable();
+ vir_codebuf = read_code_file(page_index);
+
+	/* add vir_codebuf to the per-technology list chosen by address */
+	addr_info = kzalloc(sizeof(struct addr_info), GFP_KERNEL);
+	if (!addr_info)
+		panic("not enough memory!");
+	addr_info->kaddr = (unsigned long)vir_codebuf;
+	addr_info->vaddr = addr;
+	addr_info->page_index = page_index;
+ down_write(&shrinker_rwsem);
+	if (vaddr < cpps_global_var.__utran_modem_text_start)
+		list_add(&addr_info->node, &modem_page_list[GSM]);
+	else if (vaddr < cpps_global_var.__lte_modem_text_start)
+		list_add(&addr_info->node, &modem_page_list[UTRAN]);
+	else if (vaddr < cpps_global_var.__comm_modem_text_start)
+ list_add(&addr_info->node, &modem_page_list[LTE]);
+ else
+ list_add(&addr_info->node, &modem_page_list[COM]);
+
+ up_write(&shrinker_rwsem);
+
+ local_irq_disable();
+ mtype = get_mem_type(MT_MEMORY);
+ ioremap_page(vaddr, __pa(vir_codebuf), mtype);
+ sync_pgd(vaddr, fsr, regs);
+ flush_icache_range(vaddr, vaddr + PAGE_SIZE);
+
+ if (waitqueue_active(&page_completion[page_index].wait))
+		complete_all(&page_completion[page_index]); /* after reclaim, 'done' needs to be reset */
+ return;
+}
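+
+/*
+ * Together with do_DataAbort()/do_PrefetchAbort() below, this
+ * implements demand paging for modem code: the first fault on a page
+ * reads and maps it, while later faults either resync the page tables
+ * or wait on the per-page completion.
+ */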
#endif
-asmlinkage void __exception do_DataAbort(unsigned long addr,unsigned int fsr,
-struct pt_regs*regs){const struct fsr_info*inf=fsr_info+fsr_fs(fsr);struct
-siginfo info;
+/*
+ * Dispatch a data abort to the relevant handler.
+ */
+asmlinkage void __exception
+do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+ struct siginfo info;
+
#ifdef CONFIG_MODEM_CODE_IS_MAPPING
-if(addr!=(0x2a4+3839-0x11a3)&&addr>=cpps_global_var.cpko_text_start&&addr<=
-cpps_global_var.modem_text_end){read_code_mapping(addr,fsr&~FSR_LNX_PF,regs);
-return;}
-#endif
-if(!inf->fn(addr,fsr&~FSR_LNX_PF,regs))return;printk(KERN_ALERT
-"\x55\x6e\x68\x61\x6e\x64\x6c\x65\x64\x20\x66\x61\x75\x6c\x74\x3a\x20\x25\x73\x20\x28\x30\x78\x25\x30\x33\x78\x29\x20\x61\x74\x20\x30\x78\x25\x30\x38\x6c\x78" "\n"
-,inf->name,fsr,addr);info.si_signo=inf->sig;info.si_errno=(0x1b19+1711-0x21c8);
-info.si_code=inf->code;info.si_addr=(void __user*)addr;arm_notify_die("",regs,&
-info,fsr,(0x1741+937-0x1aea));}void __init hook_ifault_code(int nr,int(*fn)(
-unsigned long,unsigned int,struct pt_regs*),int sig,int code,const char*name){if
-(nr<(0x129+679-0x3d0)||nr>=ARRAY_SIZE(ifsr_info))BUG();ifsr_info[nr].fn=fn;
-ifsr_info[nr].sig=sig;ifsr_info[nr].code=code;ifsr_info[nr].name=name;}
-asmlinkage void __exception do_PrefetchAbort(unsigned long addr,unsigned int
-ifsr,struct pt_regs*regs){const struct fsr_info*inf=ifsr_info+fsr_fs(ifsr);
-struct siginfo info;
-#ifdef CONFIG_MODEM_CODE_IS_MAPPING
-if(addr!=(0x4f8+4535-0x16af)&&addr>=cpps_global_var.cpko_text_start&&addr<=
-cpps_global_var.modem_text_end){read_code_mapping(addr,ifsr|FSR_LNX_PF,regs);
-return;}
-#endif
-if(!inf->fn(addr,ifsr|FSR_LNX_PF,regs))return;printk(KERN_ALERT
-"\x55\x6e\x68\x61\x6e\x64\x6c\x65\x64\x20\x70\x72\x65\x66\x65\x74\x63\x68\x20\x61\x62\x6f\x72\x74\x3a\x20\x25\x73\x20\x28\x30\x78\x25\x30\x33\x78\x29\x20\x61\x74\x20\x30\x78\x25\x30\x38\x6c\x78" "\n"
-,inf->name,ifsr,addr);info.si_signo=inf->sig;info.si_errno=(0x462+8659-0x2635);
-info.si_code=inf->code;info.si_addr=(void __user*)addr;arm_notify_die("",regs,&
-info,ifsr,(0x6fd+655-0x98c));}
-#ifndef CONFIG_ARM_LPAE
-static int __init exceptions_init(void){if(cpu_architecture()>=CPU_ARCH_ARMv6){
-hook_fault_code((0x1609+3919-0x2554),do_translation_fault,SIGSEGV,SEGV_MAPERR,
-"\x49\x2d\x63\x61\x63\x68\x65\x20\x6d\x61\x69\x6e\x74\x65\x6e\x61\x6e\x63\x65\x20\x66\x61\x75\x6c\x74"
-);}if(cpu_architecture()>=CPU_ARCH_ARMv7){hook_fault_code((0xc06+3912-0x1b4b),
-do_bad,SIGSEGV,SEGV_MAPERR,
-"\x73\x65\x63\x74\x69\x6f\x6e\x20\x61\x63\x63\x65\x73\x73\x20\x66\x6c\x61\x67\x20\x66\x61\x75\x6c\x74"
-);hook_fault_code((0xd6b+1748-0x1439),do_bad,SIGSEGV,SEGV_MAPERR,
-"\x73\x65\x63\x74\x69\x6f\x6e\x20\x61\x63\x63\x65\x73\x73\x20\x66\x6c\x61\x67\x20\x66\x61\x75\x6c\x74"
-);}
-#ifdef CONFIG_MODEM_CODE_IS_MAPPING
-int index=(0xaf+2735-0xb5e);for(index=(0xf17+785-0x1228);index<
-(0x3e9+2169-0xc3a)*(0x300+3991-0x1277);index++)init_completion(&page_completion[
-index]);
-#endif
-return(0x715+4575-0x18f4);}arch_initcall(exceptions_init);
+	if (addr != 0 && addr >= cpps_global_var.cpko_text_start &&
+	    addr <= cpps_global_var.modem_text_end) {
+ read_code_mapping(addr, fsr & ~FSR_LNX_PF, regs);
+ return;
+ }
#endif
+ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
+ return;
+
+ printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+ inf->name, fsr, addr);
+
+ info.si_signo = inf->sig;
+ info.si_errno = 0;
+ info.si_code = inf->code;
+ info.si_addr = (void __user *)addr;
+ arm_notify_die("", regs, &info, fsr, 0);
+}
+
+void __init
+hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+ int sig, int code, const char *name)
+{
+ if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
+ BUG();
+
+ ifsr_info[nr].fn = fn;
+ ifsr_info[nr].sig = sig;
+ ifsr_info[nr].code = code;
+ ifsr_info[nr].name = name;
+}
+
+asmlinkage void __exception
+do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
+{
+ const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+ struct siginfo info;
+
+#ifdef CONFIG_MODEM_CODE_IS_MAPPING
+	if (addr != 0 && addr >= cpps_global_var.cpko_text_start &&
+	    addr <= cpps_global_var.modem_text_end) {
+		read_code_mapping(addr, ifsr | FSR_LNX_PF, regs);
+		return;
+	}
+#endif
+
+ if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+ return;
+
+ printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ inf->name, ifsr, addr);
+
+ info.si_signo = inf->sig;
+ info.si_errno = 0;
+ info.si_code = inf->code;
+ info.si_addr = (void __user *)addr;
+ arm_notify_die("", regs, &info, ifsr, 0);
+}
+
+#ifndef CONFIG_ARM_LPAE
+static int __init exceptions_init(void)
+{
+ if (cpu_architecture() >= CPU_ARCH_ARMv6) {
+ hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
+ "I-cache maintenance fault");
+ }
+
+ if (cpu_architecture() >= CPU_ARCH_ARMv7) {
+ /*
+ * TODO: Access flag faults introduced in ARMv6K.
+ * Runtime check for 'K' extension is needed
+ */
+ hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
+ "section access flag fault");
+ hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
+ "section access flag fault");
+ }
+#ifdef CONFIG_MODEM_CODE_IS_MAPPING
+	int index;
+
+	for (index = 0; index < 40 * 32; index++)
+		init_completion(&page_completion[index]);
+#endif
+ return 0;
+}
+
+arch_initcall(exceptions_init);
+#endif