[Feature][ZXW-65]merged P49 base code
Change-Id: I3e09c0c3d47483bc645f02310380ecb7fc6f4041
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/Makefile.boot b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/Makefile.boot
index 9a8f31c..cd3f9eb 100644
--- a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/Makefile.boot
+++ b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/Makefile.boot
@@ -14,9 +14,15 @@
params_phys-$(CONFIG_ARCH_ZX297520V3_MIFI) := 0x20440100
initrd_phys-$(CONFIG_ARCH_ZX297520V3_MIFI) := 0x20840000
+ifeq ($(MODEM_TYPE),lte_only)
+ zreladdr-$(CONFIG_ARCH_ZX297520V3_UFI) := 0x20348000
+params_phys-$(CONFIG_ARCH_ZX297520V3_UFI) := 0x20340100
+initrd_phys-$(CONFIG_ARCH_ZX297520V3_UFI) := 0x20740000
+else
zreladdr-$(CONFIG_ARCH_ZX297520V3_UFI) := 0x20448000
params_phys-$(CONFIG_ARCH_ZX297520V3_UFI) := 0x20440100
initrd_phys-$(CONFIG_ARCH_ZX297520V3_UFI) := 0x20840000
+endif
zreladdr-$(CONFIG_ARCH_ZX297520V3_PHONE) := 0x20448000
params_phys-$(CONFIG_ARCH_ZX297520V3_PHONE) := 0x20440100
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/gpio.c b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/gpio.c
index 146a1e6..fadaa4a 100644
--- a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/gpio.c
+++ b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/gpio.c
@@ -43,6 +43,23 @@
reg_spin_unlock(); \
} while (0)
+static gpio_func_id g_gpiofunc_id[] ={
+ GPIO0_GPIO0, GPIO1_GPIO1, GPIO2_GPIO2, GPIO3_GPIO3, GPIO4_GPIO4, GPIO5_GPIO5, GPIO6_GPIO6, GPIO7_GPIO7, GPIO8_GPIO8, GPIO9_GPIO9,
+ GPIO10_GPIO10, GPIO11_GPIO11,GPIO12_GPIO12, GPIO13_GPIO13, GPIO14_GPIO14, GPIO15_GPIO15, GPIO16_GPIO16,GPIO17_GPIO17, GPIO18_GPIO18, GPIO19_GPIO19,
+ GPIO20_GPIO20, GPIO21_GPIO21,GPIO22_GPIO22, GPIO23_GPIO23, GPIO24_GPIO24, GPIO25_GPIO25, GPIO26_GPIO26,GPIO27_GPIO27, GPIO28_GPIO28, GPIO29_GPIO29,
+ GPIO30_GPIO30, GPIO31_GPIO31,GPIO32_GPIO32, GPIO33_GPIO33, GPIO34_GPIO34, GPIO35_GPIO35, GPIO36_GPIO36,GPIO37_GPIO37, GPIO38_GPIO38, GPIO39_GPIO39,
+ GPIO40_GPIO40, GPIO41_GPIO41,GPIO42_GPIO42, GPIO43_GPIO43, GPIO44_GPIO44, GPIO45_GPIO45, GPIO46_GPIO46,GPIO47_GPIO47, GPIO48_GPIO48, GPIO49_GPIO49,
+ GPIO50_GPIO50, GPIO51_GPIO51,GPIO52_GPIO52, GPIO53_GPIO53, GPIO54_GPIO54, GPIO55_GPIO55, GPIO56_GPIO56,GPIO57_GPIO57, GPIO58_GPIO58, GPIO59_GPIO59,
+ GPIO60_GPIO60, GPIO61_GPIO61,GPIO62_GPIO62, GPIO63_GPIO63, GPIO64_GPIO64, GPIO65_GPIO65, GPIO66_GPIO66,GPIO67_GPIO67, GPIO68_GPIO68, GPIO69_GPIO69,
+ GPIO70_GPIO70, GPIO71_GPIO71,GPIO72_GPIO72, GPIO73_GPIO73, GPIO74_GPIO74, GPIO75_GPIO75, GPIO76_GPIO76,GPIO77_GPIO77, GPIO78_GPIO78, GPIO79_GPIO79,
+ GPIO80_GPIO80, GPIO81_GPIO81,GPIO82_GPIO82, GPIO83_GPIO83, GPIO84_GPIO84, GPIO85_GPIO85, GPIO86_GPIO86,GPIO87_GPIO87, GPIO88_GPIO88, GPIO89_GPIO89,
+ GPIO90_GPIO90, GPIO91_GPIO91,GPIO92_GPIO92, GPIO93_GPIO93, GPIO94_GPIO94, GPIO95_GPIO95, GPIO96_GPIO96,GPIO97_GPIO97, GPIO98_GPIO98, GPIO99_GPIO99,
+ GPIO100_GPIO100, GPIO101_GPIO101,GPIO102_GPIO102, GPIO103_GPIO103, GPIO104_GPIO104, GPIO105_GPIO105, GPIO106_GPIO106,GPIO107_GPIO107, GPIO108_GPIO108, GPIO109_GPIO109,
+ GPIO110_GPIO110, GPIO111_GPIO111,GPIO112_GPIO112, GPIO113_GPIO113, GPIO114_GPIO114, GPIO115_GPIO115, GPIO116_GPIO116,GPIO117_GPIO117, GPIO118_GPIO118, GPIO119_GPIO119,
+ GPIO120_GPIO120, GPIO121_GPIO121,GPIO122_GPIO122, GPIO123_GPIO123, GPIO124_GPIO124, GPIO125_GPIO125, GPIO126_GPIO126,GPIO127_GPIO127, GPIO128_GPIO128, GPIO129_GPIO129,
+ GPIO130_GPIO130, GPIO131_GPIO131,GPIO132_GPIO132, GPIO133_GPIO133, GPIO134_GPIO134, GPIO135_GPIO135
+};
+
/*
* select gpio multiplex function
* gpio: gpio number
@@ -365,6 +382,16 @@
/*
* gpio input
*
+ * gpio_func_id
+ */
+gpio_func_id zx29_gpio_get_gpiofunc_id(unsigned int gpio)
+{
+ return g_gpiofunc_id[gpio];
+}
+EXPORT_SYMBOL(zx29_gpio_get_gpiofunc_id);
+/*
+ * gpio input
+ *
* 0:low 1:high
*/
unsigned int zx29_gpio_input_data(unsigned int gpio)
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/include/mach/gpio.h b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/include/mach/gpio.h
index 5b4c88d..09bdda7 100644
--- a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/include/mach/gpio.h
+++ b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/include/mach/gpio.h
@@ -569,6 +569,7 @@
int zx29_gpio2irq(unsigned int gpio);
+gpio_func_id zx29_gpio_get_gpiofunc_id(unsigned int gpio);
int zx29_gpio_config(unsigned int gpio, gpio_func_id func );
void zx29_gpio_set_inttype(unsigned int gpio, unsigned int type);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/include/mach/spinlock.h b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/include/mach/spinlock.h
index a96c57c..c50656d 100644
--- a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/include/mach/spinlock.h
+++ b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/include/mach/spinlock.h
@@ -77,7 +77,11 @@
ADC_SFLOCK, /*for adc*/
UART_SFLOCK,
PMIC_SFLOCK,
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+ SHM_SFLOCK,
+#else
SFLOCK_ID9,
+#endif
SFLOCK_ID10,
SFLOCK_ID11,
SFLOCK_ID12,
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/pwr_ctrl.c b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/pwr_ctrl.c
index b2fd556..3244e59 100755
--- a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/pwr_ctrl.c
+++ b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/pwr_ctrl.c
@@ -274,6 +274,20 @@
gpio_direction_output(ZX29_GPIO_85, 1);
gpio_direction_output(ZX29_GPIO_130, 1);
#else
+ #ifdef _USE_VEHICLE_DC_REF
+ ret=gpio_request(ZX29_GPIO_132,"aic_vehicle_en");
+ if(ret){
+ printk("request aic_vehicle_en gpio failed\n");
+ gpio_free(ZX29_GPIO_132);
+ }
+ ret=gpio_request(ZX29_GPIO_91,"aic_vehicle_power_en");
+ if(ret){
+ printk("request aic_vehicle_power_en gpio failed\n");
+ gpio_free(ZX29_GPIO_91);
+ }
+ gpio_direction_output(ZX29_GPIO_132, 1);
+ gpio_direction_output(ZX29_GPIO_91, 1);
+ #else
ret=gpio_request(ZX29_GPIO_123,"aic_ufi_en");
if(ret){
printk("request aic_ufi_en gpio failed\n");
@@ -286,6 +300,7 @@
}
gpio_direction_output(ZX29_GPIO_123, 1);
gpio_direction_output(ZX29_GPIO_129, 1);
+ #endif
#endif
mdelay(10);
printk("qqq aic en succ.\n");
@@ -296,8 +311,13 @@
gpio_direction_output(ZX29_GPIO_85, 0);
gpio_direction_output(ZX29_GPIO_130, 0);
#else
+ #ifdef _USE_VEHICLE_DC_REF
+ gpio_direction_output(ZX29_GPIO_132, 0);
+ gpio_direction_output(ZX29_GPIO_91, 0);
+ #else
gpio_direction_output(ZX29_GPIO_123, 0);
gpio_direction_output(ZX29_GPIO_129, 0);
+ #endif
#endif
printk("qqq aic en fail.\n");
}
@@ -315,8 +335,13 @@
gpio_direction_output(ZX29_GPIO_85, 0);
gpio_direction_output(ZX29_GPIO_130, 0);
#else
+ #ifdef _USE_VEHICLE_DC_REF
+ gpio_direction_output(ZX29_GPIO_132, 0);
+ gpio_direction_output(ZX29_GPIO_91, 0);
+ #else
gpio_direction_output(ZX29_GPIO_123, 0);
gpio_direction_output(ZX29_GPIO_129, 0);
+ #endif
#endif
mdelay(50);
printk("qqq aic dis succ.\n");
@@ -327,8 +352,13 @@
gpio_direction_output(ZX29_GPIO_85, 1);
gpio_direction_output(ZX29_GPIO_130, 1);
#else
+ #ifdef _USE_VEHICLE_DC_REF
+ gpio_direction_output(ZX29_GPIO_132, 1);
+ gpio_direction_output(ZX29_GPIO_91, 1);
+ #else
gpio_direction_output(ZX29_GPIO_123, 1);
gpio_direction_output(ZX29_GPIO_129, 1);
+ #endif
#endif
printk("qqq aic dis fail.\n");
}
@@ -345,8 +375,13 @@
gpio_direction_output(ZX29_GPIO_85, 1);
gpio_direction_output(ZX29_GPIO_130, 1);
#else
+ #ifdef _USE_VEHICLE_DC_REF
+ gpio_direction_output(ZX29_GPIO_132, 1);
+ gpio_direction_output(ZX29_GPIO_91, 1);
+ #else
gpio_direction_output(ZX29_GPIO_129, 1);
gpio_direction_output(ZX29_GPIO_123, 1);
+ #endif
#endif
mdelay(50);
printk("qqq re enable succ.\n");
@@ -357,8 +392,13 @@
gpio_direction_output(ZX29_GPIO_85, 0);
gpio_direction_output(ZX29_GPIO_130, 0);
#else
+ #ifdef _USE_VEHICLE_DC_REF
+ gpio_direction_output(ZX29_GPIO_132, 0);
+ gpio_direction_output(ZX29_GPIO_91, 0);
+ #else
gpio_direction_output(ZX29_GPIO_123, 0);
gpio_direction_output(ZX29_GPIO_129, 0);
+ #endif
#endif
printk("qqq aic dis fail.\n");
}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/zx297520v3-mdl-devices.c b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/zx297520v3-mdl-devices.c
index d3165a9..751bb7d 100644
--- a/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/zx297520v3-mdl-devices.c
+++ b/ap/os/linux/linux-3.4.x/arch/arm/mach-zx297520v3/zx297520v3-mdl-devices.c
@@ -948,7 +948,7 @@
static struct gpio_keys_button zx29_keypad_int[] = {
#if 1
{
- .active_low = 1, /*\CAǷ\F1\B5͵\E7ƽ\D3\D0Ч\A1\A31: \B0\B4\CF\C2Ϊ\B5͵\E7ƽ 0: \B0\B4\CF\C2Ϊ\B8ߵ\E7ƽ*/
+ .active_low = 1, /*是否低电平有效。1: 按下为低电平 0: 按下为高电平*/
.desc = "kpd_power",
.code = KEY_POWER /* power: 116 */,
.use_pmu_pwron = 1, /*true: use pmu pwron interrupt fase: use zx297520v2 ext int*/
@@ -1149,10 +1149,11 @@
#endif
#ifdef CONFIG_MMC_ZX29
- //&zx29_sdmmc0_device,
-#ifndef _USE_VEHICLE_DC
- &zx29_sdmmc1_device,
+#ifdef _USE_VEHICLE_DC_REF
+ &zx29_sdmmc0_device,
#endif
+ &zx29_sdmmc1_device,
+
#endif
#ifdef CONFIG_I2C_ZX29
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/mm/fault.c b/ap/os/linux/linux-3.4.x/arch/arm/mm/fault.c
index f4821ea..27e4e4d 100755
--- a/ap/os/linux/linux-3.4.x/arch/arm/mm/fault.c
+++ b/ap/os/linux/linux-3.4.x/arch/arm/mm/fault.c
@@ -21,12 +21,12 @@
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs*regs,unsigned int fsr){int
-ret=(0xfe0+3369-0x1d09);if(!user_mode(regs)){preempt_disable();if(kprobe_running
-()&&kprobe_fault_handler(regs,fsr))ret=(0xe57+1779-0x1549);preempt_enable();}
+ret=(0x6ef+1787-0xdea);if(!user_mode(regs)){preempt_disable();if(kprobe_running(
+)&&kprobe_fault_handler(regs,fsr))ret=(0x1c3c+2058-0x2445);preempt_enable();}
return ret;}
#else
static inline int notify_page_fault(struct pt_regs*regs,unsigned int fsr){return
-(0x1899+41-0x18c2);}
+(0x1f50+1157-0x23d5);}
#endif
void show_pte(struct mm_struct*mm,unsigned long addr){pgd_t*pgd;if(!mm)mm=&
init_mm;printk(KERN_ALERT"\x70\x67\x64\x20\x3d\x20\x25\x70" "\n",mm->pgd);pgd=
@@ -34,10 +34,10 @@
"\x5b\x25\x30\x38\x6c\x78\x5d\x20\x2a\x70\x67\x64\x3d\x25\x30\x38\x6c\x6c\x78",
addr,(long long)pgd_val(*pgd));do{pud_t*pud;pmd_t*pmd;pte_t*pte;if(pgd_none(*pgd
))break;if(pgd_bad(*pgd)){printk("\x28\x62\x61\x64\x29");break;}pud=pud_offset(
-pgd,addr);if(PTRS_PER_PUD!=(0x713+6770-0x2184))printk(
+pgd,addr);if(PTRS_PER_PUD!=(0xa17+4067-0x19f9))printk(
"\x2c\x20\x2a\x70\x75\x64\x3d\x25\x30\x38\x6c\x6c\x78",(long long)pud_val(*pud))
;if(pud_none(*pud))break;if(pud_bad(*pud)){printk("\x28\x62\x61\x64\x29");break;
-}pmd=pmd_offset(pud,addr);if(PTRS_PER_PMD!=(0x1819+1979-0x1fd3))printk(
+}pmd=pmd_offset(pud,addr);if(PTRS_PER_PMD!=(0x566+5292-0x1a11))printk(
"\x2c\x20\x2a\x70\x6d\x64\x3d\x25\x30\x38\x6c\x6c\x78",(long long)pmd_val(*pmd))
;if(pmd_none(*pmd))break;if(pmd_bad(*pmd)){printk("\x28\x62\x61\x64\x29");break;
}if(PageHighMem(pfn_to_page(pmd_val(*pmd)>>PAGE_SHIFT)))break;pte=pte_offset_map
@@ -47,18 +47,18 @@
printk("\x2c\x20\x2a\x70\x70\x74\x65\x3d\x25\x30\x38\x6c\x6c\x78",(long long)
pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
-pte_unmap(pte);}while((0xa31+3906-0x1973));printk("\n");}
+pte_unmap(pte);}while((0x1090+1845-0x17c5));printk("\n");}
#else
void show_pte(struct mm_struct*mm,unsigned long addr){}
#endif
static void __do_kernel_fault(struct mm_struct*mm,unsigned long addr,unsigned
int fsr,struct pt_regs*regs){if(fixup_exception(regs))return;bust_spinlocks(
-(0xa61+1566-0x107e));printk(KERN_ALERT
+(0x18fd+2706-0x238e));printk(KERN_ALERT
"\x55\x6e\x61\x62\x6c\x65\x20\x74\x6f\x20\x68\x61\x6e\x64\x6c\x65\x20\x6b\x65\x72\x6e\x65\x6c\x20\x25\x73\x20\x61\x74\x20\x76\x69\x72\x74\x75\x61\x6c\x20\x61\x64\x64\x72\x65\x73\x73\x20\x25\x30\x38\x6c\x78" "\n"
,(addr<PAGE_SIZE)?
"\x4e\x55\x4c\x4c\x20\x70\x6f\x69\x6e\x74\x65\x72\x20\x64\x65\x72\x65\x66\x65\x72\x65\x6e\x63\x65"
:"\x70\x61\x67\x69\x6e\x67\x20\x72\x65\x71\x75\x65\x73\x74",addr);show_pte(mm,
-addr);die("\x4f\x6f\x70\x73",regs,fsr);bust_spinlocks((0x1c7c+2291-0x256f));
+addr);die("\x4f\x6f\x70\x73",regs,fsr);bust_spinlocks((0xe81+2270-0x175f));
do_exit(SIGKILL);}static void __do_user_fault(struct task_struct*tsk,unsigned
long addr,unsigned int fsr,unsigned int sig,int code,struct pt_regs*regs){struct
siginfo si;
@@ -69,7 +69,7 @@
,tsk->comm,sig,addr,fsr);show_pte(tsk->mm,addr);show_regs(regs);}
#endif
tsk->thread.address=addr;tsk->thread.error_code=fsr;tsk->thread.trap_no=
-(0x2c0+1319-0x7d9);si.si_signo=sig;si.si_errno=(0x2054+1247-0x2533);si.si_code=
+(0xd65+5191-0x219e);si.si_signo=sig;si.si_errno=(0x184+9332-0x25f8);si.si_code=
code;si.si_addr=(void __user*)addr;force_sig_info(sig,&si,tsk);}void do_bad_area
(unsigned long addr,unsigned int fsr,struct pt_regs*regs){struct task_struct*tsk
=current;struct mm_struct*mm=tsk->active_mm;if(user_mode(regs))__do_user_fault(
@@ -91,8 +91,8 @@
static int __kprobes do_page_fault(unsigned long addr,unsigned int fsr,struct
pt_regs*regs){struct task_struct*tsk;struct mm_struct*mm;int fault,sig,code;int
write=fsr&FSR_WRITE;unsigned int flags=FAULT_FLAG_ALLOW_RETRY|
-FAULT_FLAG_KILLABLE|(write?FAULT_FLAG_WRITE:(0x6a+1294-0x578));if(
-notify_page_fault(regs,fsr))return(0xf5+924-0x491);tsk=current;mm=tsk->mm;if(
+FAULT_FLAG_KILLABLE|(write?FAULT_FLAG_WRITE:(0x1838+1160-0x1cc0));if(
+notify_page_fault(regs,fsr))return(0xc4f+4994-0x1fd1);tsk=current;mm=tsk->mm;if(
interrupts_enabled(regs))local_irq_enable();if(!mm||pagefault_disabled())goto
no_context;if(!down_read_trylock(&mm->mmap_sem)){if(!user_mode(regs)&&!
search_exception_tables(regs->ARM_pc))goto no_context;retry:down_read(&mm->
@@ -101,22 +101,23 @@
if(!user_mode(regs)&&!search_exception_tables(regs->ARM_pc))goto no_context;
#endif
}fault=__do_page_fault(mm,addr,fsr,flags,tsk);if((fault&VM_FAULT_RETRY)&&
-fatal_signal_pending(current))return(0xff1+632-0x1269);perf_sw_event(
-PERF_COUNT_SW_PAGE_FAULTS,(0x4d9+791-0x7ef),regs,addr);if(!(fault&VM_FAULT_ERROR
-)&&flags&FAULT_FLAG_ALLOW_RETRY){if(fault&VM_FAULT_MAJOR){tsk->maj_flt++;
-perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,(0x231+5463-0x1787),regs,addr);}else
-{tsk->min_flt++;perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,(0x1a3+1319-0x6c9),
-regs,addr);}if(fault&VM_FAULT_RETRY){flags&=~FAULT_FLAG_ALLOW_RETRY;goto retry;}
-}up_read(&mm->mmap_sem);if(likely(!(fault&(VM_FAULT_ERROR|VM_FAULT_BADMAP|
-VM_FAULT_BADACCESS))))return(0x575+5730-0x1bd7);if(fault&VM_FAULT_OOM){
-pagefault_out_of_memory();return(0xc1a+632-0xe92);}if(!user_mode(regs))goto
-no_context;if(fault&VM_FAULT_SIGBUS){sig=SIGBUS;code=BUS_ADRERR;}else{sig=
-SIGSEGV;code=fault==VM_FAULT_BADACCESS?SEGV_ACCERR:SEGV_MAPERR;}__do_user_fault(
-tsk,addr,fsr,sig,code,regs);return(0x52+5873-0x1743);no_context:
-__do_kernel_fault(mm,addr,fsr,regs);return(0xc9d+5546-0x2247);}
+fatal_signal_pending(current))return(0x333+8251-0x236e);perf_sw_event(
+PERF_COUNT_SW_PAGE_FAULTS,(0x1bd0+1313-0x20f0),regs,addr);if(!(fault&
+VM_FAULT_ERROR)&&flags&FAULT_FLAG_ALLOW_RETRY){if(fault&VM_FAULT_MAJOR){tsk->
+maj_flt++;perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,(0x420+2661-0xe84),regs,
+addr);}else{tsk->min_flt++;perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+(0x648+7540-0x23bb),regs,addr);}if(fault&VM_FAULT_RETRY){flags&=~
+FAULT_FLAG_ALLOW_RETRY;goto retry;}}up_read(&mm->mmap_sem);if(likely(!(fault&(
+VM_FAULT_ERROR|VM_FAULT_BADMAP|VM_FAULT_BADACCESS))))return(0x10d9+5310-0x2597);
+if(fault&VM_FAULT_OOM){pagefault_out_of_memory();return(0x1b03+2939-0x267e);}if(
+!user_mode(regs))goto no_context;if(fault&VM_FAULT_SIGBUS){sig=SIGBUS;code=
+BUS_ADRERR;}else{sig=SIGSEGV;code=fault==VM_FAULT_BADACCESS?SEGV_ACCERR:
+SEGV_MAPERR;}__do_user_fault(tsk,addr,fsr,sig,code,regs);return
+(0x2246+675-0x24e9);no_context:__do_kernel_fault(mm,addr,fsr,regs);return
+(0x120b+3454-0x1f89);}
#else
static int do_page_fault(unsigned long addr,unsigned int fsr,struct pt_regs*regs
-){return(0x56d+5422-0x1a9b);}
+){return(0x218b+1329-0x26bc);}
#endif
#ifdef CONFIG_MMU
static int __kprobes do_translation_fault(unsigned long addr,unsigned int fsr,
@@ -129,21 +130,21 @@
bad_area;if(!pud_present(*pud))set_pud(pud,*pud_k);pmd=pmd_offset(pud,addr);
pmd_k=pmd_offset(pud_k,addr);
#ifdef CONFIG_ARM_LPAE
-index=(0x152+7215-0x1d81);
+index=(0x556+6466-0x1e98);
#else
-index=(addr>>SECTION_SHIFT)&(0x936+7173-0x253a);
+index=(addr>>SECTION_SHIFT)&(0x2ad+2390-0xc02);
#endif
if(pmd_none(pmd_k[index]))goto bad_area;copy_pmd(pmd,pmd_k);return
-(0x28a+6674-0x1c9c);bad_area:do_bad_area(addr,fsr,regs);return
-(0x40f+6581-0x1dc4);}
+(0x100b+5156-0x242f);bad_area:do_bad_area(addr,fsr,regs);return
+(0x1a61+415-0x1c00);}
#else
static int do_translation_fault(unsigned long addr,unsigned int fsr,struct
-pt_regs*regs){return(0x3c2+7140-0x1fa6);}
+pt_regs*regs){return(0x216f+575-0x23ae);}
#endif
static int do_sect_fault(unsigned long addr,unsigned int fsr,struct pt_regs*regs
){if(interrupts_enabled(regs))local_irq_enable();do_bad_area(addr,fsr,regs);
-return(0x18ba+2768-0x238a);}static int do_bad(unsigned long addr,unsigned int
-fsr,struct pt_regs*regs){return(0x222+7784-0x2089);}struct fsr_info{int(*fn)(
+return(0x178a+442-0x1944);}static int do_bad(unsigned long addr,unsigned int fsr
+,struct pt_regs*regs){return(0x1986+838-0x1ccb);}struct fsr_info{int(*fn)(
unsigned long addr,unsigned int fsr,struct pt_regs*regs);int sig;int code;const
char*name;};
#ifdef CONFIG_ARM_LPAE
@@ -152,21 +153,21 @@
#include "fsr-2level.c"
#endif
void __init hook_fault_code(int nr,int(*fn)(unsigned long,unsigned int,struct
-pt_regs*),int sig,int code,const char*name){if(nr<(0x7e5+3376-0x1515)||nr>=
+pt_regs*),int sig,int code,const char*name){if(nr<(0xbf9+1207-0x10b0)||nr>=
ARRAY_SIZE(fsr_info))BUG();fsr_info[nr].fn=fn;fsr_info[nr].sig=sig;fsr_info[nr].
code=code;fsr_info[nr].name=name;}
#ifdef CONFIG_MODEM_CODE_IS_MAPPING
static DECLARE_RWSEM(shrinker_rwsem);atomic_t _code_page_count=ATOMIC_INIT(
-(0xb56+5619-0x2149));struct addr_info{struct list_head node;unsigned long vaddr;
+(0x3a5+1630-0xa03));struct addr_info{struct list_head node;unsigned long vaddr;
unsigned long kaddr;unsigned long page_index;};enum modem_access_technology{GSM=
-(0xa82+6623-0x2461),UTRAN=(0x19a+294-0x2bf),LTE=(0x102c+3798-0x1f00),COM=
-(0xf7c+1460-0x152d),NR_MODEM_ACCESS=(0x882+5823-0x1f3d)};struct list_head
+(0x9d4+2796-0x14c0),UTRAN=(0x19b9+1702-0x205e),LTE=(0x1182+1686-0x1816),COM=
+(0x79d+190-0x858),NR_MODEM_ACCESS=(0xc18+34-0xc36)};struct list_head
modem_page_list[NR_MODEM_ACCESS]={LIST_HEAD_INIT(modem_page_list[
-(0x2141+685-0x23ee)]),LIST_HEAD_INIT(modem_page_list[(0x260+5072-0x162f)]),
-LIST_HEAD_INIT(modem_page_list[(0xcab+2899-0x17fc)]),LIST_HEAD_INIT(
-modem_page_list[(0x562+2250-0xe29)]),};unsigned int page_used[
-(0x339+4597-0x1506)];struct completion page_completion[(0x942+128-0x99a)*
-(0x6f2+5322-0x1b9c)];static void unmap_pte_range(pmd_t*pmd,unsigned long addr,
+(0x884+455-0xa4b)]),LIST_HEAD_INIT(modem_page_list[(0x13d1+1582-0x19fe)]),
+LIST_HEAD_INIT(modem_page_list[(0x1cfd+2123-0x2546)]),LIST_HEAD_INIT(
+modem_page_list[(0xbc3+3792-0x1a90)]),};unsigned int page_used[
+(0x260+6029-0x19c5)];struct completion page_completion[(0xb4d+2529-0x1506)*
+(0x7f4+37-0x7f9)];static void unmap_pte_range(pmd_t*pmd,unsigned long addr,
unsigned long end){pte_t*pte;pte=pte_offset_kernel(pmd,addr);do{pte_t ptent=
ptep_get_and_clear(&init_mm,addr,pte);WARN_ON(!pte_none(ptent)&&!pte_present(
ptent));}while(pte++,addr+=PAGE_SIZE,addr!=end);}static void unmap_pmd_range(
@@ -180,14 +181,14 @@
addr,unsigned long end){pgd_t*pgd;unsigned long next;BUG_ON(addr>=end);pgd=
pgd_offset_k(addr);do{next=pgd_addr_end(addr,end);if(pgd_none_or_clear_bad(pgd))
continue;unmap_pud_range(pgd,addr,next);}while(pgd++,addr=next,addr!=end);}void
-shrink_modem_mem(unsigned int access_type){int i=(0x13cf+2895-0x1f1e);unsigned
+shrink_modem_mem(unsigned int access_type){int i=(0x129c+1251-0x177f);unsigned
long vaddr;struct addr_info*addr,*tmp_addr;struct list_head tmp_page_list;for(i=
-(0xe58+2447-0x17e7);i<NR_MODEM_ACCESS;i++){if(i==access_type)continue;down_write
+(0xdaf+1270-0x12a5);i<NR_MODEM_ACCESS;i++){if(i==access_type)continue;down_write
(&shrinker_rwsem);list_replace_init(&modem_page_list[i],&tmp_page_list);up_write
(&shrinker_rwsem);list_for_each_entry_safe(addr,tmp_addr,&tmp_page_list,node){
list_del_init(&addr->node);page_completion[addr->page_index].done=
-(0x764+741-0xa49);page_used[addr->page_index/BITS_PER_LONG]&=~(
-(0x8a1+5030-0x1c46)<<(addr->page_index%BITS_PER_LONG));vaddr=addr->vaddr&
+(0x3a3+15-0x3b2);page_used[addr->page_index/BITS_PER_LONG]&=~(
+(0x1431+116-0x14a4)<<(addr->page_index%BITS_PER_LONG));vaddr=addr->vaddr&
PAGE_MASK;if(vaddr<cpps_global_var.cpko_text_start||vaddr>cpps_global_var.
modem_text_end){panic(
"\x61\x64\x64\x72\x5f\x69\x6e\x66\x6f\x3a\x20\x25\x30\x38\x78\x20\x69\x73\x20\x20\x64\x65\x73\x74\x72\x6f\x79"
@@ -195,14 +196,14 @@
PAGE_SIZE);flush_tlb_kernel_range(vaddr,vaddr+PAGE_SIZE);
#ifdef CONFIG_DEBUG_RODATA
unsigned int flags;local_irq_save(flags);set_memory_rw(addr->kaddr,
-(0x1abf+1188-0x1f62));local_irq_restore(flags);
+(0x4b0+2552-0xea7));local_irq_restore(flags);
#endif
free_page(addr->kaddr);kfree(addr);atomic_dec(&_code_page_count);};}}
EXPORT_SYMBOL(shrink_modem_mem);phys_addr_t virt_is_mapping(unsigned long addr){
pgd_t*pgd;pmd_t*pmd;pte_t*ptep,pte;unsigned long pfn;pgd=pgd_offset_k(addr);if(!
pgd_none(*pgd)){pmd=pmd_offset(pgd,addr);if(!pmd_none(*pmd)){ptep=pte_offset_map
(pmd,addr);pte=*ptep;if(pte_present(pte)){pfn=pte_pfn(pte);return __pfn_to_phys(
-pfn);}}}return(0x292+547-0x4b5);}static int sync_pgd(unsigned long addr,unsigned
+pfn);}}}return(0x3eb+976-0x7bb);}static int sync_pgd(unsigned long addr,unsigned
int fsr,struct pt_regs*regs){unsigned int index;pgd_t*pgd,*pgd_k;pud_t*pud,*
pud_k;pmd_t*pmd,*pmd_k;index=pgd_index(addr);pgd=cpu_get_pgd()+index;pgd_k=
init_mm.pgd+index;if(pgd_none(*pgd_k))goto bad_area;if(!pgd_present(*pgd))
@@ -210,13 +211,13 @@
pud_none(*pud_k))goto bad_area;if(!pud_present(*pud))set_pud(pud,*pud_k);pmd=
pmd_offset(pud,addr);pmd_k=pmd_offset(pud_k,addr);
#ifdef CONFIG_ARM_LPAE
-index=(0x13d8+743-0x16bf);
+index=(0x532+6727-0x1f79);
#else
-index=(addr>>SECTION_SHIFT)&(0x1714+778-0x1a1d);
+index=(addr>>SECTION_SHIFT)&(0xc67+4122-0x1c80);
#endif
if(pmd_none(pmd_k[index]))goto bad_area;copy_pmd(pmd,pmd_k);return
-(0x113d+306-0x126f);bad_area:do_bad_area(addr,fsr,regs);return
-(0x7c5+4314-0x189f);}unsigned long*read_code_file(unsigned long page_index){
+(0xebf+3585-0x1cc0);bad_area:do_bad_area(addr,fsr,regs);return
+(0x12a3+3758-0x2151);}unsigned long*read_code_file(unsigned long page_index){
unsigned long*code_buf;ssize_t result;code_buf=get_zeroed_page(GFP_ATOMIC);if(!
code_buf)panic(
"\x6d\x65\x6d\x65\x6f\x72\x79\x20\x6e\x6f\x74\x20\x65\x6e\x6f\x75\x67\x68\x21\x21"
@@ -225,23 +226,23 @@
"\x6f\x70\x65\x6e\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");}
mm_segment_t old_fs;old_fs=get_fs();set_fs(KERNEL_DS);loff_t pos;pos=page_index*
PAGE_SIZE+cpps_global_var.modem_offset;result=vfs_read(cpps_global_var.fp_code,(
-char*)code_buf,PAGE_SIZE,&pos);if(result<(0x52a+4323-0x160d)){panic(
+char*)code_buf,PAGE_SIZE,&pos);if(result<(0x59d+1700-0xc41)){panic(
"\x72\x65\x61\x64\x20\x63\x6f\x64\x65\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n"
);}
#ifdef CONFIG_DEBUG_RODATA
unsigned int flags;local_irq_save(flags);set_memory_ro((unsigned long)code_buf,
-(0x239f+4-0x23a2));local_irq_restore(flags);
+(0x1f+9427-0x24f1));local_irq_restore(flags);
#endif
set_fs(old_fs);return code_buf;}void read_code_mapping(unsigned long addr,
unsigned int fsr,struct pt_regs*regs){unsigned long offset;unsigned long vaddr;
const struct mem_type*mtype;unsigned long*vir_codebuf;unsigned long page_index;
-unsigned long page_shift;if(virt_is_mapping(addr&PAGE_MASK)!=(0x539+3633-0x136a)
+unsigned long page_shift;if(virt_is_mapping(addr&PAGE_MASK)!=(0x9c2+6248-0x222a)
){sync_pgd(addr&PAGE_MASK,fsr,regs);return;}vaddr=addr&PAGE_MASK;offset=vaddr&(~
cpps_global_var.cpko_text_start);page_index=offset>>PAGE_SHIFT;page_shift=
page_index%BITS_PER_LONG;if((page_used[page_index/BITS_PER_LONG]>>page_shift)&
-(0x53+2746-0xb0c)){wait_for_completion(&page_completion[page_index]);sync_pgd(
+(0x982+4838-0x1c67)){wait_for_completion(&page_completion[page_index]);sync_pgd(
vaddr,fsr,regs);return;}else page_used[page_index/BITS_PER_LONG]|=(
-(0x7cb+4993-0x1b4b)<<page_shift);local_irq_enable();vir_codebuf=read_code_file(
+(0x936+3686-0x179b)<<page_shift);local_irq_enable();vir_codebuf=read_code_file(
page_index);struct addr_info*addr_info;addr_info=kzalloc(sizeof(struct addr_info
),GFP_KERNEL);addr_info->kaddr=vir_codebuf;addr_info->vaddr=addr;addr_info->
page_index=page_index;down_write(&shrinker_rwsem);if(vaddr<cpps_global_var.
@@ -259,46 +260,46 @@
struct pt_regs*regs){const struct fsr_info*inf=fsr_info+fsr_fs(fsr);struct
siginfo info;
#ifdef CONFIG_MODEM_CODE_IS_MAPPING
-if(addr!=(0xa26+1881-0x117f)&&addr>=cpps_global_var.cpko_text_start&&addr<=
+if(addr!=(0x1ccd+2394-0x2627)&&addr>=cpps_global_var.cpko_text_start&&addr<=
cpps_global_var.modem_text_end){read_code_mapping(addr,fsr&~FSR_LNX_PF,regs);
return;}
#endif
if(!inf->fn(addr,fsr&~FSR_LNX_PF,regs))return;printk(KERN_ALERT
"\x55\x6e\x68\x61\x6e\x64\x6c\x65\x64\x20\x66\x61\x75\x6c\x74\x3a\x20\x25\x73\x20\x28\x30\x78\x25\x30\x33\x78\x29\x20\x61\x74\x20\x30\x78\x25\x30\x38\x6c\x78" "\n"
-,inf->name,fsr,addr);info.si_signo=inf->sig;info.si_errno=(0x1791+761-0x1a8a);
+,inf->name,fsr,addr);info.si_signo=inf->sig;info.si_errno=(0x10dd+3541-0x1eb2);
info.si_code=inf->code;info.si_addr=(void __user*)addr;arm_notify_die("",regs,&
-info,fsr,(0xd40+2877-0x187d));}void __init hook_ifault_code(int nr,int(*fn)(
+info,fsr,(0x390+3520-0x1150));}void __init hook_ifault_code(int nr,int(*fn)(
unsigned long,unsigned int,struct pt_regs*),int sig,int code,const char*name){if
-(nr<(0x12dc+4844-0x25c8)||nr>=ARRAY_SIZE(ifsr_info))BUG();ifsr_info[nr].fn=fn;
+(nr<(0xbb5+3866-0x1acf)||nr>=ARRAY_SIZE(ifsr_info))BUG();ifsr_info[nr].fn=fn;
ifsr_info[nr].sig=sig;ifsr_info[nr].code=code;ifsr_info[nr].name=name;}
asmlinkage void __exception do_PrefetchAbort(unsigned long addr,unsigned int
ifsr,struct pt_regs*regs){const struct fsr_info*inf=ifsr_info+fsr_fs(ifsr);
struct siginfo info;
#ifdef CONFIG_MODEM_CODE_IS_MAPPING
-if(addr!=(0xfb3+4972-0x231f)&&addr>=cpps_global_var.cpko_text_start&&addr<=
+if(addr!=(0xdc7+6420-0x26db)&&addr>=cpps_global_var.cpko_text_start&&addr<=
cpps_global_var.modem_text_end){read_code_mapping(addr,ifsr|FSR_LNX_PF,regs);
return;}
#endif
if(!inf->fn(addr,ifsr|FSR_LNX_PF,regs))return;printk(KERN_ALERT
"\x55\x6e\x68\x61\x6e\x64\x6c\x65\x64\x20\x70\x72\x65\x66\x65\x74\x63\x68\x20\x61\x62\x6f\x72\x74\x3a\x20\x25\x73\x20\x28\x30\x78\x25\x30\x33\x78\x29\x20\x61\x74\x20\x30\x78\x25\x30\x38\x6c\x78" "\n"
-,inf->name,ifsr,addr);info.si_signo=inf->sig;info.si_errno=(0x1979+745-0x1c62);
+,inf->name,ifsr,addr);info.si_signo=inf->sig;info.si_errno=(0x8b3+2422-0x1229);
info.si_code=inf->code;info.si_addr=(void __user*)addr;arm_notify_die("",regs,&
-info,ifsr,(0x4c7+5661-0x1ae4));}
+info,ifsr,(0x9dd+4243-0x1a70));}
#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void){if(cpu_architecture()>=CPU_ARCH_ARMv6){
-hook_fault_code((0x155+3694-0xfbf),do_translation_fault,SIGSEGV,SEGV_MAPERR,
+hook_fault_code((0x6cb+7772-0x2523),do_translation_fault,SIGSEGV,SEGV_MAPERR,
"\x49\x2d\x63\x61\x63\x68\x65\x20\x6d\x61\x69\x6e\x74\x65\x6e\x61\x6e\x63\x65\x20\x66\x61\x75\x6c\x74"
-);}if(cpu_architecture()>=CPU_ARCH_ARMv7){hook_fault_code((0x559+5769-0x1bdf),
-do_bad,SIGSEGV,SEGV_MAPERR,
+);}if(cpu_architecture()>=CPU_ARCH_ARMv7){hook_fault_code((0xa8+69-0xea),do_bad,
+SIGSEGV,SEGV_MAPERR,
"\x73\x65\x63\x74\x69\x6f\x6e\x20\x61\x63\x63\x65\x73\x73\x20\x66\x6c\x61\x67\x20\x66\x61\x75\x6c\x74"
-);hook_fault_code((0xf74+4680-0x21b6),do_bad,SIGSEGV,SEGV_MAPERR,
+);hook_fault_code((0x630+1162-0xab4),do_bad,SIGSEGV,SEGV_MAPERR,
"\x73\x65\x63\x74\x69\x6f\x6e\x20\x61\x63\x63\x65\x73\x73\x20\x66\x6c\x61\x67\x20\x66\x61\x75\x6c\x74"
);}
#ifdef CONFIG_MODEM_CODE_IS_MAPPING
-int index=(0x26f+2998-0xe25);for(index=(0xc84+4770-0x1f26);index<
-(0x1aea+2741-0x2577)*(0x432+2795-0xefd);index++)init_completion(&page_completion
-[index]);
+int index=(0x74f+298-0x879);for(index=(0x6c+5061-0x1431);index<
+(0x190+7054-0x1cf6)*(0x13cf+4383-0x24ce);index++)init_completion(&
+page_completion[index]);
#endif
-return(0x114a+827-0x1485);}arch_initcall(exceptions_init);
+return(0x16fa+3252-0x23ae);}arch_initcall(exceptions_init);
#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/cpko/cpko_main.c b/ap/os/linux/linux-3.4.x/drivers/cpko/cpko_main.c
index dffcb04..9808d24 100755
--- a/ap/os/linux/linux-3.4.x/drivers/cpko/cpko_main.c
+++ b/ap/os/linux/linux-3.4.x/drivers/cpko/cpko_main.c
@@ -75,36 +75,38 @@
(VOID*pCodecContext);extern VOID mmp_AmrEncClose(VOID*pCodecContext);extern
UINT8 zDrvEdcp_IsBusy(int EdcpNum);extern SINT32 zDrvVp_AudioDataWrite(const
VOID*pBuf,UINT32 uiLen);extern SINT32 zDrvVp_AudioDataOpen(UINT32 audioType,
-UINT32 sampleRate);extern SINT32 zDrvVp_AudioDataClose(void);typedef struct
-cpko_section{unsigned int cpko_text_start;unsigned int cpko_rodata_start;
-unsigned int __utran_modem_text_start;unsigned int __lte_modem_text_start;
-unsigned int __comm_modem_text_start;unsigned int modem_text_end;unsigned int
-cpko_data_start;unsigned int cpko_bss_start;unsigned int cpko_text_offset;}
-cpko_section_layout;cpko_section_layout cpko_ps_section;int raise(int signo){
-return(0x11fa+11-0x1205);}extern unsigned int SysEntry(void);static int
+UINT32 sampleRate);extern SINT32 zDrvVp_AudioDataClose(void);extern SINT32
+zDrvVp_GetVpLoop_Wrap(VOID);typedef struct cpko_section{unsigned int
+cpko_text_start;unsigned int cpko_rodata_start;unsigned int
+__utran_modem_text_start;unsigned int __lte_modem_text_start;unsigned int
+__comm_modem_text_start;unsigned int modem_text_end;unsigned int cpko_data_start
+;unsigned int cpko_bss_start;unsigned int cpko_text_offset;}cpko_section_layout;
+cpko_section_layout cpko_ps_section;int raise(int signo){return
+(0x588+2748-0x1044);}extern unsigned int SysEntry(void);static int
ko_Main_Thread(void*data){struct sched_param param={.sched_priority=
-MAX_USER_RT_PRIO/(0xded+2359-0x1722)-(0x11e3+270-0x12ee)};int ret=
-(0xf8b+2482-0x193d);sched_setscheduler(current,SCHED_FIFO,¶m);ret=SysEntry()
-;if(ret!=(0xbd2+2713-0x166b))panic("Main_Thread\n");param.sched_priority=
-MAX_USER_RT_PRIO-(0x755+66-0x769);sched_setscheduler(kthreadd_task,SCHED_FIFO,&
-param);return(0xfd4+33-0xff5);}int zte_modem_ko_start(void){kthread_run(
-ko_Main_Thread,NULL,"\x5a\x54\x45\x4d\x61\x69\x6e\x54\x68\x72\x65\x61\x64");
-return(0x81b+4816-0x1aeb);}static void cpko_sectioninfo_set(void){int ret;struct
- file*fp;mm_segment_t old_fs;loff_t cpko_pos=(0x2024+1215-0x24e3);struct
+MAX_USER_RT_PRIO/(0x1aa2+478-0x1c7e)-(0xb4b+3818-0x1a32)};int ret=
+(0x985+3929-0x18de);sched_setscheduler(current,SCHED_FIFO,¶m);ret=SysEntry()
+;if(ret!=(0xacd+2158-0x133b))panic("Main_Thread\n");param.sched_priority=
+MAX_USER_RT_PRIO-(0x1194+2176-0x19e6);sched_setscheduler(kthreadd_task,
+SCHED_FIFO,¶m);return(0x1e56+83-0x1ea9);}int zte_modem_ko_start(void){
+kthread_run(ko_Main_Thread,NULL,
+"\x5a\x54\x45\x4d\x61\x69\x6e\x54\x68\x72\x65\x61\x64");return
+(0x1109+1789-0x1806);}static void cpko_sectioninfo_set(void){int ret;struct file
+*fp;mm_segment_t old_fs;loff_t cpko_pos=(0x16fb+814-0x1a29);struct
cpps_globalModem globalVar;fp=filp_open(
"\x2f\x6c\x69\x62\x2f\x63\x70\x6b\x6f\x2f\x63\x70\x6b\x6f\x5f\x73\x65\x63\x69\x6e\x66\x6f\x2e\x62\x69\x6e"
-,(0x1d5+6034-0x1967),(0x5d9+3622-0x13ff));if(IS_ERR(fp)||fp==NULL)panic(
+,(0x2fa+1778-0x9ec),(0x13ec+700-0x16a8));if(IS_ERR(fp)||fp==NULL)panic(
"\x6f\x70\x65\x6e\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");old_fs=
get_fs();set_fs(KERNEL_DS);ret=vfs_read(fp,(char*)&cpko_ps_section,sizeof(
-cpko_section_layout),&cpko_pos);if(ret<=(0x5a5+4030-0x1563))panic(
+cpko_section_layout),&cpko_pos);if(ret<=(0xd62+2480-0x1712))panic(
"\x72\x65\x61\x64\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");filp_close(
fp,NULL);
#ifdef CONFIG_MODEM_CODE_IS_MAPPING
fp=filp_open(
"\x2f\x6c\x69\x62\x2f\x63\x70\x6b\x6f\x2f\x63\x70\x6b\x6f\x2e\x6b\x6f",
-(0x8d3+2653-0x1330),(0xab6+4007-0x1a5d));if(IS_ERR(fp)||fp==NULL)panic(
+(0x172f+1154-0x1bb1),(0xc82+3654-0x1ac8));if(IS_ERR(fp)||fp==NULL)panic(
"\x6f\x70\x65\x6e\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");fp->f_ra.
-ra_pages=(0x4b1+8568-0x2629);
+ra_pages=(0xb17+2019-0x12fa);
#endif
if(cpko_ps_section.cpko_text_start){globalVar.cpko_text_start=(unsigned long)
cpko_ps_section.cpko_text_start;globalVar.cpko_rodata_start=(unsigned long)
@@ -124,7 +126,7 @@
vfree_modem_section(globalVar.cpko_text_start,globalVar.modem_text_end);
#endif
}else panic("\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");}static int
-cpko_start(void){struct cpps_callbacks callback={(0x2ec+4566-0x14c2)};callback.
+cpko_start(void){struct cpps_callbacks callback={(0x800+2644-0x1254)};callback.
zOss_ResetNVFactory=zOss_ResetNVFactory;callback.zOss_NvramFlush=zOss_NvramFlush
;callback.zOss_NvItemWrite=zOss_NvItemWrite;callback.zOss_NvItemWriteFactory=
zOss_NvItemWriteFactory;callback.zOss_NvItemRead=zOss_NvItemRead;callback.
@@ -169,17 +171,18 @@
zDrvVp_SetDtmfMute_Wrap;callback.zDrvVp_SetTxVol_Wrap=zDrvVp_SetTxVol_Wrap;
callback.zDrvVp_GetTxVol_Wrap=zDrvVp_GetTxVol_Wrap;callback.zDrvVp_GetPath_Wrap=
zDrvVp_GetPath_Wrap;callback.zDrvVp_Loop=zDrvVp_Loop;callback.
-zDrvVp_Soft_Dtmf_Loop=zDrvVp_Soft_Dtmf_Loop;callback.
-zDrvDtmf_Detect_RegCallbacks=zDrvDtmf_Detect_RegCallbacks;callback.
-zDrvVp_SetPath_Wrap=zDrvVp_SetPath_Wrap;callback.zDrvVp_GetPath_Wrap=
-zDrvVp_GetPath_Wrap;callback.halVoice_Open3G=halVoice_Open3G;callback.
-halVoice_Close3G=halVoice_Close3G;callback.zDrvVp_GetSlicFlag=zDrvVp_GetSlicFlag
-;callback.zDrvVp_SetEchoDelay_Wrap=zDrvVp_SetEchoDelay_Wrap;callback.
-zDrvVp_GetEchoDelay_Wrap=zDrvVp_GetEchoDelay_Wrap;callback.
-zDrvVp_SetTxNsMode_Wrap=zDrvVp_SetTxNsMode_Wrap;callback.zDrvVp_GetTxNsMode_Wrap
-=zDrvVp_GetTxNsMode_Wrap;callback.zDrvVp_SetRxNsMode_Wrap=
-zDrvVp_SetRxNsMode_Wrap;callback.zDrvVp_GetRxNsMode_Wrap=zDrvVp_GetRxNsMode_Wrap
-;callback.zDrvVp_SetModuleState_Wrap=zDrvVp_SetModuleState_Wrap;callback.
+zDrvVp_GetVpLoop_Wrap=zDrvVp_GetVpLoop_Wrap;callback.zDrvVp_Soft_Dtmf_Loop=
+zDrvVp_Soft_Dtmf_Loop;callback.zDrvDtmf_Detect_RegCallbacks=
+zDrvDtmf_Detect_RegCallbacks;callback.zDrvVp_SetPath_Wrap=zDrvVp_SetPath_Wrap;
+callback.zDrvVp_GetPath_Wrap=zDrvVp_GetPath_Wrap;callback.halVoice_Open3G=
+halVoice_Open3G;callback.halVoice_Close3G=halVoice_Close3G;callback.
+zDrvVp_GetSlicFlag=zDrvVp_GetSlicFlag;callback.zDrvVp_SetEchoDelay_Wrap=
+zDrvVp_SetEchoDelay_Wrap;callback.zDrvVp_GetEchoDelay_Wrap=
+zDrvVp_GetEchoDelay_Wrap;callback.zDrvVp_SetTxNsMode_Wrap=
+zDrvVp_SetTxNsMode_Wrap;callback.zDrvVp_GetTxNsMode_Wrap=zDrvVp_GetTxNsMode_Wrap
+;callback.zDrvVp_SetRxNsMode_Wrap=zDrvVp_SetRxNsMode_Wrap;callback.
+zDrvVp_GetRxNsMode_Wrap=zDrvVp_GetRxNsMode_Wrap;callback.
+zDrvVp_SetModuleState_Wrap=zDrvVp_SetModuleState_Wrap;callback.
zDrvVp_GetModuleState_Wrap=zDrvVp_GetModuleState_Wrap;callback.mmp_AmrDecOpen=
mmp_AmrDecOpen;callback.mmp_AmrEncOpen=mmp_AmrEncOpen;callback.mmp_AmrDecode=
mmp_AmrDecode;callback.mmp_AmrEncode=mmp_AmrEncode;callback.mmp_AmrDecClose=
@@ -193,5 +196,5 @@
psm_GetModemSleepFlagStatus=psm_GetModemSleepFlagStatus;
#endif
cpps_callbacks_register(&callback);cpko_sectioninfo_set();zte_modem_ko_start();
-return(0x1c55+1612-0x22a1);}static int cpko_stop(void){return(0x193+979-0x566);}
+return(0x990+7510-0x26e6);}static int cpko_stop(void){return(0xe70+451-0x1033);}
module_init(cpko_start);module_exit(cpko_stop);
diff --git a/ap/os/linux/linux-3.4.x/drivers/cpufreq/cpufreq.c b/ap/os/linux/linux-3.4.x/drivers/cpufreq/cpufreq.c
index cf864ef..811757a 100644
--- a/ap/os/linux/linux-3.4.x/drivers/cpufreq/cpufreq.c
+++ b/ap/os/linux/linux-3.4.x/drivers/cpufreq/cpufreq.c
@@ -1948,3 +1948,61 @@
return 0;
}
core_initcall(cpufreq_core_init);
+
+
+/* only for zx297520v3 */
+static int cpufreq_manual_adjust(unsigned int min_freq, unsigned int max_freq)
+{
+ unsigned int ret;
+ struct cpufreq_policy new_policy;
+ struct cpufreq_policy *data;
+
+ data = cpufreq_cpu_get(0);
+ if (!data)
+ return -EINVAL;
+
+ memcpy(&new_policy, data, sizeof(struct cpufreq_policy));
+
+ new_policy.min = min_freq;
+ new_policy.max = max_freq;
+
+ ret = __cpufreq_set_policy(data, &new_policy);
+ data->user_policy.min = data->min;
+ data->user_policy.max = data->max;
+
+ cpufreq_cpu_put(data);
+
+ return ret;
+}
+
+extern u32 zDrvTsCtrl_DfsEn(void);
+int cpufreq_performance(void)
+{
+ int ret;
+
+ if (zDrvTsCtrl_DfsEn())
+ return 0;
+
+ ret = cpufreq_manual_adjust(312000, 624000);
+ if (ret)
+ return ret;
+
+ return cpufreq_manual_adjust(624000, 624000);
+}
+
+int cpufreq_powersave(void)
+{
+ int ret;
+
+ ret = cpufreq_manual_adjust(312000, 624000);
+ if (ret)
+ return ret;
+
+ return cpufreq_manual_adjust(312000, 312000);
+}
+
+int cpufreq_normal(void)
+{
+ return cpufreq_manual_adjust(312000, 624000);
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/mfd/zx234290-irq.c b/ap/os/linux/linux-3.4.x/drivers/mfd/zx234290-irq.c
index 14980d8..1d466ee 100644
--- a/ap/os/linux/linux-3.4.x/drivers/mfd/zx234290-irq.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mfd/zx234290-irq.c
@@ -110,7 +110,7 @@
buck_sts &= ~buck_mask;
ldo_sts &= ~ldo_mask;
-
+ buck_sts &=~(1<<ZX234290_LDO_RSTERR_LSH);//clear rst flag
if(buck_sts )
irq_sts |= (1 << ZX234290_INT_BUCK_FAUL);
else
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/core/core.c b/ap/os/linux/linux-3.4.x/drivers/mmc/core/core.c
index a8dd777..35c15c9 100644
--- a/ap/os/linux/linux-3.4.x/drivers/mmc/core/core.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/core/core.c
@@ -35,6 +35,7 @@
#include <linux/mmc/sd.h>
#include <mach/zx29_mmc.h>
#include <mach/highspeed_debug.h>
+#include "../host/zx29_mmc.h"
#include "core.h"
#include "bus.h"
@@ -44,7 +45,7 @@
#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
-#include "../host/dw_mmc.h"
+//#include "../host/dw_mmc.h"
static struct workqueue_struct *workqueue;
/*
@@ -75,6 +76,10 @@
/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
+extern void mmc_spin_lock(struct dw_mci *host);
+extern void mmc_spin_unlock(struct dw_mci *host);
+void mmc_enable_irq(struct mmc_host *mmc,int enable);
+
static int mmc_schedule_delayed_work(struct delayed_work *work,
unsigned long delay)
{
@@ -695,6 +700,26 @@
}
EXPORT_SYMBOL(mmc_try_claim_host);
+
+void mmc_get_host(struct mmc_host *mmc)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+
+ mmc_spin_lock(host);
+ mmc_enable_irq(mmc,1);
+}
+
+void mmc_put_host(struct mmc_host *mmc)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+
+ mmc_enable_irq(mmc,0);
+ mmc_spin_unlock(host);
+}
+
+
/**
* mmc_release_host - release a host
* @host: mmc host to release
@@ -2068,6 +2093,8 @@
container_of(work, struct mmc_host, detect.work);
int i;
bool extend_wakelock = false;
+ struct dw_mci_slot *slot = mmc_priv(host);
+ struct dw_mci *hw_host = slot->host;
if (host->rescan_disable)
return;
@@ -2113,6 +2140,7 @@
goto out;
mmc_claim_host(host);
+ mmc_spin_lock(hw_host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
extend_wakelock = true;
@@ -2121,6 +2149,7 @@
if (freqs[i] <= host->f_min)
break;
}
+ mmc_spin_unlock(hw_host);
mmc_release_host(host);
out:
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc-pltfm.c b/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc-pltfm.c
index b674ba8..5d3ef3c 100644
--- a/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc-pltfm.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc-pltfm.c
@@ -285,6 +285,15 @@
if(rc)
BUG();
zx29_gpio_config(ZX29_GPIO_77, GPIO77_SD1_DATA3);
+#ifdef _USE_VEHICLE_DC_REF
+ rc=gpio_request(ZX29_GPIO_85,"emmc_vcc_en");
+ if(rc)
+ printk("mmc: Get emmc VCC en gpio fail\n");
+
+ zx29_gpio_config(ZX29_GPIO_85, GPIO85_GPIO85);
+ zx29_gpio_output_data(ZX29_GPIO_85,GPIO_HIGH);
+#endif
+
}
#endif
if(host->pdata->quirks & DW_MCI_QUIRK_AUTO_GATE) {
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc.c b/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc.c
index 9e3e781..b1a7a72 100644
--- a/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc.c
@@ -163,6 +163,9 @@
#endif
#define DW_DMA_DESC_TRANS_LEN (8192 -16)//(4*1024) /*define dw idma one desc can config trans-len*/
+extern void mmc_get_host(struct mmc_host * host);
+extern void mmc_put_host(struct mmc_host * host);
+
static void dw_mci_init_dma(struct dw_mci *host);
static void dw_mci_stop_dma(struct dw_mci *host);
static inline bool dw_mci_fifo_reset(struct dw_mci *host);
@@ -1884,7 +1887,9 @@
list_add_tail(&slot->queue_node, &host->queue);
}
}
-
+#ifdef _USE_VEHICLE_DC
+volatile int s_request_cnt = 0;
+#endif
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -1897,10 +1902,25 @@
* atomic, otherwise the card could be removed in between and the
* request wouldn't fail until another card was inserted.
*/
+#ifdef _USE_VEHICLE_DC
+ if(host->host_id ==1){
+ if(s_request_cnt ==0)
+ mmc_get_host(slot->mmc);
+ s_request_cnt++;
+ }
+#endif
DW_MCI_SPIN_LOCK_BH(&host->lock);
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
DW_MCI_SPIN_UNLOCK_BH(&host->lock);
+#ifdef _USE_VEHICLE_DC
+ if(host->host_id ==1){
+ s_request_cnt--;
+ if(s_request_cnt ==0)
+ mmc_put_host(slot->mmc);
+ }
+#endif
+
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
@@ -2223,6 +2243,12 @@
xlog_mmc_log_op(host->host_id, mrq->cmd->opcode, "REQ DONE, list empty",
mrq->cmd->error, mrq->cmd->resp[0], (mrq->stop ? mrq->stop->resp[0] : 0));
host->state = STATE_IDLE;
+#ifdef _USE_VEHICLE_DC
+ if(host->host_id ==1){
+ mmc_put_host(prev_mmc);
+ s_request_cnt = 0;
+ }
+#endif
}
spin_unlock(&host->lock);
@@ -3298,6 +3324,28 @@
#endif
return IRQ_HANDLED;
}
+void mmc_enable_irq(struct mmc_host *mmc,int enable)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+
+#ifdef _USE_VEHICLE_DC
+ if(slot->host->host_id != 1)
+ return;
+
+ mci_writel(slot->host, DBADDR, (u32)slot->host->sg_dma);
+
+ if((slot == NULL)||(slot->host==NULL)||(slot->host->irq==NULL))
+ return;
+ if(enable){
+ enable_irq(slot->host->irq);
+ }
+ else{
+ disable_irq(slot->host->irq);
+ }
+#endif
+ return;
+
+}
static void dw_mci_work_routine_card(struct work_struct *work)
{
@@ -3926,6 +3974,26 @@
EXPORT_SYMBOL_GPL(zx29_mci_enable_sdio_irq);
#endif
+
+void mmc_spin_lock(struct dw_mci *host)
+{
+
+#ifdef _USE_VEHICLE_DC
+ if(host->host_id==1)
+ soft_spin_lock(SD1_SFLOCK);
+#endif
+ return;
+}
+void mmc_spin_unlock(struct dw_mci *host)
+{
+
+#ifdef _USE_VEHICLE_DC
+ if(host->host_id==1)
+ soft_spin_unlock(SD1_SFLOCK);
+#endif
+ return;
+}
+
int dw_mci_probe(struct dw_mci *host)
{
//const struct dw_mci_drv_data *drv_data = host->drv_data;
@@ -3964,6 +4032,8 @@
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
+
+ mmc_spin_lock(host);
/*
* Get the host data width - this assumes that HCON has been set with
@@ -4069,6 +4139,10 @@
ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags | IRQF_NO_THREAD , "dw-mci", host);
if (ret)
goto err_workqueue;
+#ifdef _USE_VEHICLE_DC
+ if(host->host_id ==1)
+ disable_irq(host->irq);
+#endif
//ret = irq_set_irq_wake(host->irq, 1);
if (host->quirks & DW_MCI_QUIRK_SDIO) {
@@ -4139,6 +4213,9 @@
mci_writel(host, UHS_REG_EXT,clk_phase);
printk("%s UHS_REG = 0x%x\n",__func__,mci_readl(host, UHS_REG_EXT));
}
+
+ mmc_spin_unlock(host);
+
return 0;
err_workqueue:
@@ -4161,7 +4238,8 @@
err_clk_biu:
if (!IS_ERR(host->biu_clk))
clk_disable_unprepare(host->biu_clk);
-
+
+ mmc_spin_unlock(host);
return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
diff --git a/ap/os/linux/linux-3.4.x/drivers/mtd/nand/nand_ids.c b/ap/os/linux/linux-3.4.x/drivers/mtd/nand/nand_ids.c
index 76aa21c..ca6012e 100755
--- a/ap/os/linux/linux-3.4.x/drivers/mtd/nand/nand_ids.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mtd/nand/nand_ids.c
@@ -44,7 +44,7 @@
{"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
{"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
{"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
- {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+ //{"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
//{"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0},
{"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0},
@@ -88,7 +88,7 @@
{"SPI-NAND 256MiB 1,8V", 0x25, 2048, 256, 0x20000, 0}, //MT29F2G01ABAGDWB
{"SPI-NAND 512MiB 1,8V", 0x35, 4096, 512, 0x40000, 0},
{"SPI-NAND 512MiB 1,8V", 0x45, 2048, 512, 0x20000, 0}, //GD5F4GQ6REY2G
-
+ {"SPI-NAND 512MiB 1,8V", 0x53, 4096, 512, 0x40000, 0}, //XT26Q04D-B
/*
* These are the new chips with large page size. The pagesize and the
* erasesize is determined from the extended id bytes
@@ -224,6 +224,7 @@
{NAND_MFR_HOSIN, "hosin"},
{NAND_MFR_EMST, "emst"},
{NAND_MFR_FORESEE, "foresee"},
+ {NAND_MFR_XTX, "xtx"},
{0x0, "Unknown"}
};
diff --git a/ap/os/linux/linux-3.4.x/drivers/mtd/nand/spi_nand_devices.c b/ap/os/linux/linux-3.4.x/drivers/mtd/nand/spi_nand_devices.c
index fa0c004..a2eb6b1 100755
--- a/ap/os/linux/linux-3.4.x/drivers/mtd/nand/spi_nand_devices.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mtd/nand/spi_nand_devices.c
@@ -268,6 +268,22 @@
}
}
+static void get_xtx_nand_para(uint8_t device_id)
+{
+
+ switch (device_id) {
+ case 0x53:
+ main_size = 4096;
+ spare_size = 256;
+ break;
+ default:
+ printk("Spectra: Unknown xtx NAND (Device ID: 0x%x)."
+ "Will use default parameter values instead.\n",
+ device_id);
+ }
+}
+
+
void spi_nand_get_param(uint32_t maf_id, uint32_t dev_id)
{
if (maf_id == NAND_MFR_GIGADEVICE) {
@@ -310,6 +326,9 @@
else if (maf_id == NAND_MFR_MICRON) {
get_micron_nand_para(dev_id);
}
+ else if (maf_id == NAND_MFR_XTX) {
+ get_xtx_nand_para(dev_id);
+ }
else{
printk("Spectra: Unknown manufacturer (ID: 0x%x).", maf_id);
}
@@ -390,6 +409,10 @@
.oobfree = {{64,64}}
};
+static struct nand_ecclayout nand_xtx_oob_256= {
+ .eccbytes = 192,
+ .oobfree = {{2,62}}
+};
static void spi_nand_winbond_init(struct spi_nand_info *spi_nand)
{
@@ -477,6 +500,7 @@
||(g_maf_id == NAND_MFR_FORESEE)
||(g_maf_id == NAND_MFR_GIGADEVICE)
||(g_maf_id == NAND_MFR_WINBOND)
+ || (g_maf_id == NAND_MFR_XTX)
||(g_maf_id == NAND_MFR_MICRON))
return PLX4_MODE;
else
@@ -494,6 +518,7 @@
||(g_maf_id == NAND_MFR_FORESEE)
||(g_maf_id == NAND_MFR_GIGADEVICE)
||(g_maf_id == NAND_MFR_WINBOND)
+ || (g_maf_id == NAND_MFR_XTX)
||(g_maf_id == NAND_MFR_MICRON))
return RDX4_MODE;
else
@@ -622,6 +647,12 @@
}
}
break;
+ case NAND_MFR_XTX:
+ if(mtd->oobsize==256 && mtd->writesize==4096)
+ {
+ chip->ecc.layout =&nand_xtx_oob_256;
+ }
+ break;
default:
break;
}
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/psnet/psnet_io.c b/ap/os/linux/linux-3.4.x/drivers/net/psnet/psnet_io.c
index ca1e0f2..7033685 100755
--- a/ap/os/linux/linux-3.4.x/drivers/net/psnet/psnet_io.c
+++ b/ap/os/linux/linux-3.4.x/drivers/net/psnet/psnet_io.c
@@ -91,7 +91,7 @@
if(likely(g_psnet_ipv6_prefix[cid-1].flag))
return;
- if ((pkt[0] & 0xF0) == 0x60 && len >= (sizeof(struct ipv6hdr) + sizeof(struct ra_msg) + sizeof(struct nd_opt_prefix_info))){
+ if (cid > 0 && cid <= DDR_DEV_MAX && (pkt[0] & 0xF0) == 0x60 && len >= (sizeof(struct ipv6hdr) + sizeof(struct ra_msg) + sizeof(struct nd_opt_prefix_info))){
struct ipv6hdr *ip6h = (struct ipv6hdr *)pkt;
unsigned char nexthdr = ip6h->nexthdr;
unsigned char *hp = pkt + sizeof(struct ipv6hdr);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wireless/aic8800/rwnx_main.c b/ap/os/linux/linux-3.4.x/drivers/net/wireless/aic8800/rwnx_main.c
index 6237623..09586d0 100755
--- a/ap/os/linux/linux-3.4.x/drivers/net/wireless/aic8800/rwnx_main.c
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wireless/aic8800/rwnx_main.c
@@ -5874,6 +5874,7 @@
{};
uint32_t ldpc_cfg_ram[] = {
+#if 0//def CONFIG_FPGA_VERIFICATION
0x00363638,
0x1DF8F834,
0x1DF8F834,
@@ -6209,6 +6210,343 @@
0x0213130F,
0x02131308,
0x02131308
+#else
+ 0x00767679,
+ 0x1DF8F870,
+ 0x1DF8F870,
+ 0x1DF8F870,
+ 0x1DF8F870,
+ 0x006E6E72,
+ 0x1DF8F869,
+ 0x1DF8F869,
+ 0x1DF8F869,
+ 0x1DF8F869,
+ 0x0076767B,
+ 0x1DF8F870,
+ 0x1DF8F870,
+ 0x1DF8F870,
+ 0x1DF8F870,
+ 0x007E7E85,
+ 0x1DF4F876,
+ 0x1DF4F876,
+ 0x1DF4F876,
+ 0x1DF8F876,
+ 0x0081818A,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x0081818D,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x0081818A,
+ 0x1DF8F87B,
+ 0x1DF8F87C,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x007E7E40,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x1DF8F87B,
+ 0x008B8B92,
+ 0x1DF8F887,
+ 0x1DF8F889,
+ 0x1DF8F887,
+ 0x1DF8F887,
+ 0x00515155,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x1DF8F889,
+ 0x1DF8F889,
+ 0x00515154,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x1DF8F888,
+ 0x1DF8F888,
+ 0x004F4F53,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x004F4F53,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x004F4F53,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x004E4E53,
+ 0x1DF8F849,
+ 0x1DF8F848,
+ 0x1DF8F848,
+ 0x1DF8F848,
+ 0x004D4D52,
+ 0x1DF8F847,
+ 0x1DF8F847,
+ 0x1DF8F847,
+ 0x1DF8F847,
+ 0x004F4F55,
+ 0x1DF8F84B,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x004E4E53,
+ 0x1DF8F849,
+ 0x1DF8F848,
+ 0x1DF8F848,
+ 0x1DF8F848,
+ 0x0049494D,
+ 0x1DF8F844,
+ 0x1DF8F844,
+ 0x1DF8F844,
+ 0x1DF8F844,
+ 0x0051518F,
+ 0x1DF8F849,
+ 0x1DF8F848,
+ 0x1DF8F848,
+ 0x1DF8F848,
+ 0x00424277,
+ 0x1DF8F83F,
+ 0x1DF8F83C,
+ 0x1DF8F83C,
+ 0x1DF8F83C,
+ 0x00424275,
+ 0x1DF8F89E,
+ 0x1DF8F83C,
+ 0x1DF8F83C,
+ 0x1DF8F83C,
+ 0x0055555C,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x0053535C,
+ 0x1DF8F84C,
+ 0x1DF8F84B,
+ 0x1DF8F84B,
+ 0x1DF8F84B,
+ 0x00F8F89E,
+ 0x1DF8F88C,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x00898940,
+ 0x18F8F846,
+ 0x18CFF845,
+ 0x18CFF844,
+ 0x18CFF844,
+ 0x0056565F,
+ 0x1DF8F84F,
+ 0x1DF8F84F,
+ 0x1DF8F84F,
+ 0x1DF8F84F,
+ 0x0055555E,
+ 0x1DF8F84E,
+ 0x1DF8F84E,
+ 0x1DF8F84E,
+ 0x1DF8F84E,
+ 0x0056565F,
+ 0x1DF8F84F,
+ 0x1DF8F84F,
+ 0x1DF8F84F,
+ 0x1DF8F84F,
+ 0x00555561,
+ 0x1DF8F850,
+ 0x1DF8F84E,
+ 0x1DF8F84E,
+ 0x1DF8F84E,
+ 0x0053535F,
+ 0x1DF8F84D,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x0055555F,
+ 0x1DF8F84F,
+ 0x1DF8F84E,
+ 0x1DF8F84E,
+ 0x1DF8F84E,
+ 0x005555AA,
+ 0x1DF8F854,
+ 0x1DF8F84E,
+ 0x1DF8F84E,
+ 0x1DF8F84E,
+ 0x005959A6,
+ 0x1DF8F84D,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x004F4F9B,
+ 0x1DF8F84E,
+ 0x1DF8F846,
+ 0x1DF8F846,
+ 0x1DF8F846,
+ 0x00F8F8A5,
+ 0x1DF8F894,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x009898A4,
+ 0x1DF8F84D,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x00464686,
+ 0x1DF8F8B3,
+ 0x1DF8F83D,
+ 0x1DF8F83D,
+ 0x1DF8F83D,
+ 0x008E8E40,
+ 0x1AF8F848,
+ 0x1ADFF848,
+ 0x1ADFF846,
+ 0x1ADFF846,
+ 0x007F7F40,
+ 0x18D2D275,
+ 0x18D2D23A,
+ 0x18D2D23A,
+ 0x18D2D239,
+ 0x00454540,
+ 0x0F868664,
+ 0x0F86863E,
+ 0x0F86863D,
+ 0x0F86863D,
+ 0x005C5C64,
+ 0x1DF8F856,
+ 0x1DF8F855,
+ 0x1DF8F855,
+ 0x1DF8F855,
+ 0x005B5B68,
+ 0x1DF8F858,
+ 0x1DF8F855,
+ 0x1DF8F855,
+ 0x1DF8F855,
+ 0x005A5A64,
+ 0x1DF8F855,
+ 0x1DF8F854,
+ 0x1DF8F854,
+ 0x1DF8F854,
+ 0x005A5AB5,
+ 0x1DF8F85B,
+ 0x1DF8F855,
+ 0x1DF8F854,
+ 0x1DF8F854,
+ 0x00F8F8B0,
+ 0x1DF8F8A3,
+ 0x1DF8F852,
+ 0x1DF8F852,
+ 0x1DF8F852,
+ 0x00A4A4AE,
+ 0x1DF8F854,
+ 0x1DF8F852,
+ 0x1DF8F852,
+ 0x1DF8F852,
+ 0x009A9A40,
+ 0x1DF8F84E,
+ 0x1DF8F84D,
+ 0x1DF8F84C,
+ 0x1DF8F84C,
+ 0x009C9C40,
+ 0x1DF8F895,
+ 0x1DF8F849,
+ 0x1DF8F84A,
+ 0x1DF8F84A,
+ 0x00494940,
+ 0x1197976F,
+ 0x11979742,
+ 0x11979741,
+ 0x11979741,
+ 0x006E6E74,
+ 0x1DF8F869,
+ 0x1DF8F869,
+ 0x1DF8F869,
+ 0x1DF8F869,
+ 0x006E6E40,
+ 0x1ADEF869,
+ 0x1ADEF869,
+ 0x1ADEF869,
+ 0x1ADEF869,
+ 0x00757540,
+ 0x0D78F86E,
+ 0x0D78F86E,
+ 0x0D78F86E,
+ 0x0D79F86E,
+ 0x00787885,
+ 0x1DF8F873,
+ 0x1DF8F873,
+ 0x1DF8F873,
+ 0x1DF8F873,
+ 0x00787840,
+ 0x1DF8F873,
+ 0x1DF8F873,
+ 0x1DF8F873,
+ 0x1DF8F873,
+ 0x00787840,
+ 0x0E81F873,
+ 0x0E81F873,
+ 0x0E81F873,
+ 0x0E82F873,
+ 0x00404040,
+ 0x0E82F873,
+ 0x0E82F873,
+ 0x0E82F873,
+ 0x0E82F873,
+ 0x00818140,
+ 0x1092F87E,
+ 0x1092F87E,
+ 0x1092F87E,
+ 0x1092F87E,
+ 0x00404040,
+ 0x1092F87E,
+ 0x1092F87E,
+ 0x1092F87E,
+ 0x1092F87E,
+ 0x00737340,
+ 0x14B2B26B,
+ 0x14B2B235,
+ 0x14B2B235,
+ 0x14B2B235,
+ 0x00404040,
+ 0x0E828260,
+ 0x0E82823D,
+ 0x0E82823C,
+ 0x0E82823C,
+ 0x00404040,
+ 0x0F8B8B66,
+ 0x0F8B8B3F,
+ 0x0F8B8B3D,
+ 0x0F8B8B3D,
+ 0x00404040,
+ 0x0B68683D,
+ 0x0B68681E,
+ 0x0B68681E,
+ 0x0B68681E,
+ 0x00222240,
+ 0x06434318,
+ 0x06434329,
+ 0x06434318,
+ 0x06434318,
+ 0x00404040,
+ 0x129D9D72,
+ 0x129D9D43,
+ 0x129D9D41,
+ 0x129D9D41,
+ 0x00404040,
+ 0x0D757542,
+ 0x0D757520,
+ 0x0D757520,
+ 0x0D757520,
+ 0x00232340,
+ 0x084C4C19,
+ 0x084C4C2C,
+ 0x084C4C19,
+ 0x084C4C19
+#endif
};
uint32_t agc_cfg_ram[] = {
@@ -6825,54 +7163,55 @@
0x20c0cbbb,
0x20c0cbd2,
#else
- 0x00ffc772,
- 0x00ffc780,
- 0x00ffc872,
- 0x00ffc880,
- 0x00ffc970,
- 0x00ffc980,
- 0x00ffc990,
- 0x00ffca80,
- 0x00ffca9a,
- 0x00ffcb90,
- 0x00ffcc95,
- 0x00ffce80,
- 0x00ffcf80,
- 0x00ffcf80,
- 0x00ffcf80,
- 0x00ffcf80,
- 0x00ffc05b,
- 0x00ffc066,
- 0x00ffc070,
- 0x00ffc080,
- 0x00ffc175,
- 0x00ffc185,
- 0x00ffc272,
- 0x00ffc280,
- 0x00ffc290,
- 0x00ffc380,
- 0x00ffc472,
- 0x00ffc483,
- 0x00ffc572,
- 0x00ffc580,
- 0x00ffc590,
- 0x00ffc680,
+ //11b
+ 0x00ffd780,
+ 0x00ffd872,
+ 0x00ffd880,
+ 0x00ffd972,
+ 0x00ffd980,
+ 0x00ffda75,
+ 0x00ffda86,
+ 0x00ffdb77,
+ 0x00ffdb86,
+ 0x00ffdc78,
+ 0x00ffdc89,
+ 0x00ffdd79,
+ 0x00ffdd89,
+ 0x00ffde83,
+ 0x00ffdf79,
+ 0x00ffdf8b,
+ 0x00ffd072,
+ 0x00ffd072,
+ 0x00ffd080,
+ 0x00ffd172,
+ 0x00ffd180,
+ 0x00ffd272,
+ 0x00ffd280,
+ 0x00ffd36d,
+ 0x00ffd379,
+ 0x00ffd46d,
+ 0x00ffd479,
+ 0x00ffd572,
+ 0x00ffd580,
+ 0x00ffd672,
+ 0x00ffd680,
+ 0x00ffd772,
0x00ffc87d,
0x00ffc88b,
0x00ffc979,
0x00ffc989,
0x00ffca7d,
- 0x00ffca8d,
- 0x00ffcb7a,
- 0x00ffcb8a,
- 0x00ffcc7d,
- 0x00ffcc8d,
- 0x00ffcd79,
- 0x00ffcd89,
- 0x00ffce7d,
- 0x00ffce8d,
- 0x00ffcf80,
- 0x00ffcf99,
+ 0x00ffca88,
+ 0x00ffcc5e,
+ 0x00ffcc69,
+ 0x00ffcc78,
+ 0x00ffcc85,
+ 0x00ffcd70,
+ 0x00ffcd80,
+ 0x00ffce70,
+ 0x00ffce80,
+ 0x00ffcf7d,
+ 0x00ffcf90,
0x00ffc080,
0x00ffc090,
0x00ffc180,
@@ -6880,9 +7219,9 @@
0x00ffc27b,
0x00ffc28b,
0x00ffc37b,
- 0x00ffc38b,
- 0x00ffc480,
- 0x00ffc490,
+ 0x00ffc390,
+ 0x00ffc485,
+ 0x00ffc495,
0x00ffc579,
0x00ffc589,
0x00ffc679,
@@ -6894,17 +7233,17 @@
0x00ffc979,
0x00ffc989,
0x00ffca7d,
- 0x00ffca8d,
- 0x00ffcb7a,
- 0x00ffcb8a,
- 0x00ffcc7d,
- 0x00ffcc8d,
- 0x00ffcd79,
- 0x00ffcd89,
- 0x00ffce7d,
- 0x00ffce8d,
- 0x00ffcf80,
- 0x00ffcf99,
+ 0x00ffca88,
+ 0x00ffcc5e,
+ 0x00ffcc69,
+ 0x00ffcc78,
+ 0x00ffcc85,
+ 0x00ffcd70,
+ 0x00ffcd80,
+ 0x00ffce70,
+ 0x00ffce80,
+ 0x00ffcf7d,
+ 0x00ffcf90,
0x00ffc080,
0x00ffc090,
0x00ffc180,
@@ -6912,19 +7251,19 @@
0x00ffc27b,
0x00ffc28b,
0x00ffc37b,
- 0x00ffc38b,
- 0x00ffc480,
- 0x00ffc490,
+ 0x00ffc390,
+ 0x00ffc485,
+ 0x00ffc495,
0x00ffc579,
0x00ffc589,
0x00ffc679,
0x00ffc689,
0x00ffc780,
- 0x00ffc790
+ 0x00ffc790,
#endif
};
-uint32_t txgain_table[32] =
+u32 wifi_txgain_table_24g_8800dcdw[32] =
{
0xA4B22189,
0x00007825,
@@ -6960,7 +7299,42 @@
0x00004832
};
-uint32_t rxgain_table_24g_20m[64] = {
+u32 wifi_txgain_table_24g_1_8800dcdw[32] =
+{
+ 0x090E2011, //index 0
+ 0x00004001,
+ 0x090E2015, //index 1
+ 0x00004001,
+ 0x090E201B, //index 2
+ 0x00004001,
+ 0x110E2018, //index 3
+ 0x00004001,
+ 0x110E201E, //index 4
+ 0x00004001,
+ 0x110E2023, //index 5
+ 0x00004001,
+ 0x190E2021, //index 6
+ 0x00004001,
+ 0x190E202B, //index 7
+ 0x00004001,
+ 0x210E202B, //index 8
+ 0x00004001,
+ 0x230E2027, //index 9
+ 0x00004001,
+ 0x230E2031, //index 10
+ 0x00004001,
+ 0x240E2039, //index 11
+ 0x00004001,
+ 0x260E2039, //index 12
+ 0x00004001,
+ 0x2E0E203F, //index 13
+ 0x00004001,
+ 0x368E203F, //index 14
+ 0x00004001,
+ 0x3EF2203F, //index 15
+ 0x00004001
+};
+u32 wifi_rxgain_table_24g_20m_8800dcdw[64] = {
0x82f282d1,
0x9591a324,
0x80808419,
@@ -6993,28 +7367,28 @@
0x9595a324,
0x80808419,
0x000000f0,
- 0x06f282d2,
- 0x95911124,
+ 0x02f282d2,//index 8
+ 0x95951124,
0x80808419,
0x000000f0,
- 0x06f282f4,
- 0x95911124,
+ 0x02f282f4,//index 9
+ 0x95951124,
0x80808419,
0x000000f0,
- 0x06f282e6,
- 0x9591a324,
- 0x80808419,
- 0x000000f0,
- 0x06f282e6,
+ 0x02f282e6,//index 10
0x9595a324,
0x80808419,
0x000000f0,
- 0x06f282e6,
+ 0x02f282e6,//index 11
0x9599a324,
0x80808419,
0x000000f0,
- 0x06f282e6,
- 0x959b5924,
+ 0x02f282e6,//index 12
+ 0x959da324,
+ 0x80808419,
+ 0x000000f0,
+ 0x02f282e6,//index 13
+ 0x959f5924,
0x80808419,
0x000000f0,
0x06f282e6,
@@ -7022,14 +7396,14 @@
0x80808419,
0x000000f0,
0x0ef29ae6,
- 0x959f5924,
+ 0x959f592c,//////0x959f5924, //loft [35:34]=3
0x80808419,
0x000000f0
};
-uint32_t rxgain_table_24g_40m[64] = {
+u32 wifi_rxgain_table_24g_40m_8800dcdw[64] = {
0x83428151,
0x9631a328,
0x80808419,
@@ -7062,28 +7436,28 @@
0x9635a328,
0x80808419,
0x000000f0,
- 0x07429952,
- 0x96311128,
+ 0x03428152,//index 8
+ 0x96351128,
0x80808419,
0x000000f0,
- 0x07429974,
- 0x96311128,
+ 0x03428174,//index 9
+ 0x96351128,
0x80808419,
0x000000f0,
- 0x07429966,
- 0x9631a328,
- 0x80808419,
- 0x000000f0,
- 0x07429966,
+ 0x03428166,//index 10
0x9635a328,
0x80808419,
0x000000f0,
- 0x07429966,
+ 0x03428166,//index 11
0x9639a328,
0x80808419,
0x000000f0,
- 0x07429966,
- 0x963b5928,
+ 0x03428166,//index 12
+ 0x963da328,
+ 0x80808419,
+ 0x000000f0,
+ 0x03428166,//index 13
+ 0x963f5928,
0x80808419,
0x000000f0,
0x07429966,
@@ -7124,7 +7498,7 @@
{0x0098, 0x445},
{0x009c, 0x5e332},
#endif
- //{0x0090, 0x0013FC00}, //rx_ringbuf_start2
+ {0x0090, 0x0013FC00}, //rx_ringbuf_start2
#endif
#ifdef CONFIG_USB_TX_AGGR
{0x00E8, 0x03021714}, //usb fc params(rx msg fc recover, rx msg fc trigger, wifi fc recover, wifi fc trigger)
@@ -7709,13 +8083,15 @@
if (testmode == 0) {
- if ((ret = rwnx_send_rf_config_req(rwnx_hw, 0, 1, (u8_l *)txgain_table, 128)))
+ if ((ret = rwnx_send_rf_config_req(rwnx_hw, 0, 1, (u8_l *)wifi_txgain_table_24g_8800dcdw, 128)))
goto err_lmac_reqs;
- if ((ret = rwnx_send_rf_config_req(rwnx_hw, 0, 0, (u8_l *)rxgain_table_24g_20m, 256)))
+ if ((ret = rwnx_send_rf_config_req(rwnx_hw, 16, 1, (u8_l *)wifi_txgain_table_24g_1_8800dcdw, 128)))
+ goto err_lmac_reqs;
+ if ((ret = rwnx_send_rf_config_req(rwnx_hw, 0, 0, (u8_l *)wifi_rxgain_table_24g_20m_8800dcdw, 256)))
goto err_lmac_reqs;
- if ((ret = rwnx_send_rf_config_req(rwnx_hw, 32, 0, (u8_l *)rxgain_table_24g_40m, 256)))
+ if ((ret = rwnx_send_rf_config_req(rwnx_hw, 32, 0, (u8_l *)wifi_rxgain_table_24g_40m_8800dcdw, 256)))
goto err_lmac_reqs;
if ((ret = rwnx_send_rf_calib_req(rwnx_hw, &cfm))) {
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wireless/rtl8192cd_92es/8192cd_sme.c b/ap/os/linux/linux-3.4.x/drivers/net/wireless/rtl8192cd_92es/8192cd_sme.c
index 0c22647..510e9d3 100755
--- a/ap/os/linux/linux-3.4.x/drivers/net/wireless/rtl8192cd_92es/8192cd_sme.c
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wireless/rtl8192cd_92es/8192cd_sme.c
@@ -11560,6 +11560,15 @@
#ifdef CONFIG_POWER_SAVE
rtw_lock_suspend_timeout(priv, 5000);
#endif
+
+ if (priv->pmib->dot1180211AuthEntry.dot11EnablePSK != 0) {
+ if (OPMODE & WIFI_AP_STATE) {
+ //Reset 4-WAY STATE for some phones' connection issue
+ if (pstat && pstat->wpa_sta_info)
+ pstat->wpa_sta_info->state = PSK_STATE_IDLE;
+ }
+ }
+
txinsn.retry = priv->pmib->dot11OperationEntry.dot11ShortRetryLimit;
pmib= GET_MIB(priv);
@@ -21277,7 +21286,7 @@
#endif
#endif
- DEBUG_INFO("auth alg=%x, seq=%X\n", algorithm, seq);
+ printk(KERN_ERR"[%s,%d][%s] auth alg=%x, seq=%X\n", __FUNCTION__, __LINE__, priv->dev->name, algorithm, seq);
if (privacy == CONFIG_AUTH_WEP_AUTO &&
priv->pmib->dot1180211AuthEntry.dot11PrivacyAlgrthm != _WEP_40_PRIVACY_ &&
@@ -21421,7 +21430,7 @@
#endif
// allocate a new one
- DEBUG_INFO("going to alloc stainfo for sa=%02X%02X%02X%02X%02X%02X\n", sa[0],sa[1],sa[2],sa[3],sa[4],sa[5]);
+ printk(KERN_ERR"[%s,%d] going to alloc stainfo for sa=%02X%02X%02X%02X%02X%02X\n", __FUNCTION__, __LINE__, sa[0],sa[1],sa[2],sa[3],sa[4],sa[5]);
pstat = alloc_stainfo(priv, sa, -1);
if (pstat == NULL)
@@ -21434,10 +21443,11 @@
pstat->auth_seq = 0; // clear in alloc_stainfo;nctu note
pstat->tpcache_mgt = GetTupleCache(pframe);
}
-#ifdef CONFIG_IEEE80211W
+#if 0//def CONFIG_IEEE80211W
else if (pstat->isPMF)
{
pstat->auth_seq = seq + 1;
+ printk(KERN_ERR"[%s,%d][%s] auth_seq[%d] for 11w \n", __FUNCTION__, __LINE__, priv->dev->name, pstat->auth_seq);
#ifdef INCLUDE_WPA_PSK
if (timer_pending(&pstat->wpa_sta_info->resendTimer))
del_timer(&pstat->wpa_sta_info->resendTimer);
@@ -21447,6 +21457,7 @@
#endif
else
{ // close exist connection.;nctu note
+ printk(KERN_ERR"[%s,%d][%s] close exist connection. \n", __FUNCTION__, __LINE__, priv->dev->name);
if (asoc_list_del(priv, pstat))
{
#if defined(CONFIG_RTK_MESH) && defined(MESH_BOOTSEQ_AUTH)
@@ -21468,9 +21479,11 @@
if (pstat->expire_to > 0)
{
+ printk(KERN_ERR"[%s,%d][%s] sta of exist del. \n", __FUNCTION__, __LINE__, priv->dev->name);
cnt_assoc_num(priv, pstat, DECREASE, (char *)__FUNCTION__);
check_sta_characteristic(priv, pstat, DECREASE);
}
+ printk(KERN_ERR"[%s,%d][%s] ---- \n", __FUNCTION__, __LINE__, priv->dev->name);
}
if (seq==1) {
#ifdef SUPPORT_TX_MCAST2UNI
@@ -21785,6 +21798,9 @@
else
#endif
pstat->auth_seq = seq + 1;
+
+ printk(KERN_ERR"[%s,%d][%s] auth_seq[%d] for original \n", __FUNCTION__, __LINE__, priv->dev->name, pstat->auth_seq);
+
#if defined(CONFIG_RTK_MESH) && defined(MESH_BOOTSEQ_AUTH)
if ((FALSE == isMeshMP) || ((1 == seq) && (TRUE == isMeshMP)))
#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.c
index 0434bc3..d31584c 100644
--- a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.c
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.c
@@ -108,7 +108,7 @@
mutex_lock(&cpufreq_lock);
- if((pm_get_mask_info()&PM_NO_CPU_FREQ) || cpu_dfs_is_not_allowed||zDrvTsCtrl_DfsEn())
+ if((pm_get_mask_info()&PM_NO_CPU_FREQ) /*|| cpu_dfs_is_not_allowed||zDrvTsCtrl_DfsEn()*/)
{
ret = -EAGAIN;
goto out;
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpufreq.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpufreq.c
index 27f947f..3eabcc0 100644
--- a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpufreq.c
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpufreq.c
@@ -470,6 +470,10 @@
#endif
}
+int cpufreq_performance(void);
+int cpufreq_powersave(void);
+int cpufreq_normal(void);
+
/**
* set freq according to index of freq_table.
*
@@ -479,12 +483,44 @@
{
int ret = 0;
+#if 0
if(!freq_change_enabled_by_startup)
return -1;
if(old_index == new_index)
return ret;
+ ret = clk_set_rate(cpu_clk, zx29_freq_table[new_index].frequency * 1000);
+ if (ret)
+ pm_printk("[CPUFREQ] Failed to set rate %dkHz: ret = %d\n", zx29_freq_table[new_index].frequency, ret);
+
+ pm_printk("[CPUFREQ] set cpufreq:old index:%d new index:%d \n", old_index, new_index);
+// printk("[CPUFREQ] set cpufreq:old index:%d new index:%d current_axi_freq(%d)\n", old_index, new_index,get_cur_axi());
+ debug_cpu_clk_info();
+ trace_freq_change(old_index,new_index);
+
+#ifdef CONFIG_AXI_FREQ
+ mutex_lock(&axifreq_lock);
+ set_axi_frequency_by_cpu(zx29_freq_table[new_index].frequency);
+ mutex_unlock(&axifreq_lock);
+#endif
+
+#endif
+
+ return ret;
+}
+
+int zx29_set_frequency_new(unsigned int old_index,
+ unsigned int new_index)
+{
+ int ret = 0;
+
+ if(!freq_change_enabled_by_startup)
+ return -1;
+/*
+ if(old_index == new_index)
+ return ret;
+*/
ret = clk_set_rate(cpu_clk, zx29_freq_table[new_index].frequency * 1000);
if (ret)
pm_printk("[CPUFREQ] Failed to set rate %dkHz: ret = %d\n", zx29_freq_table[new_index].frequency, ret);
@@ -515,40 +551,11 @@
int zx_set_frequency(unsigned int freq)
{
- int ret = 0;
- unsigned int new_index;
-
- if(pm_get_mask_info()&PM_NO_CPU_FREQ)
- return 0;
-
-
- mutex_lock(&cpufreq_lock);
-
if(freq==624000000) {
- new_index =L0;
- cpu_dfs_is_not_allowed=1;
- //cpufreq_level = zx29_get_frequency();
+ return cpufreq_performance();
} else{
- new_index =L1;
- cpu_dfs_is_not_allowed=0;
+ return cpufreq_normal();
}
-
- ret = clk_set_rate(cpu_clk, zx29_freq_table[new_index].frequency * 1000);
- if (ret)
- pm_printk("[CPUFREQ] Failed to set rate %dkHz: ret = %d\n", zx29_freq_table[new_index].frequency, ret);
-
- pm_printk("[CPUFREQ] zx_set_frequency:new index:%d \n", new_index);
- debug_cpu_clk_info();
-
- mutex_unlock(&cpufreq_lock);
-
-#ifdef CONFIG_AXI_FREQ
- mutex_lock(&axifreq_lock);
- set_axi_frequency_by_cpu(zx29_freq_table[new_index].frequency);
- mutex_unlock(&axifreq_lock);
-#endif
-
- return ret;
}
EXPORT_SYMBOL(zx_set_frequency);
@@ -575,7 +582,7 @@
info->cpu_clk = cpu_clk;
info->volt_table = zx29_volt_table;
info->freq_table = zx29_freq_table;
- info->set_freq = zx29_set_frequency;
+ info->set_freq = zx29_set_frequency_new;
cpufreq_driver_inited = 1;
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.c
index 13f1884..8678a6a 100755
--- a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.c
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.c
@@ -442,7 +442,7 @@
}
if (chID > CHANNEL_NUM || chID == 0)
{
- printk("err chid=%d cmd=%s!\n",ch_ID, data);
+ //printk("err chid=%d cmd=%s!\n",ch_ID, data);
return ATIO_SUCCESS;
}
//µ±Ç°ÎªATͨµÀµÄÊý¾Ý
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc-strategy.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc-strategy.c
index 5d74c58..e44578a 100644
--- a/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc-strategy.c
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc-strategy.c
@@ -163,24 +163,24 @@
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_5)
{
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADC1 ,STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC1 ,STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC1 ,STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC1, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC1, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
}
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_3)
{
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC1, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC1, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
}
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_1)
{
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC1, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
}
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_0)
{
@@ -196,10 +196,10 @@
tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC1,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC1, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADC1, STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC1, STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC1, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC1, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC1, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
}
else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_2)
@@ -207,7 +207,7 @@
tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC1,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC1, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC1, STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC1, STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC1, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC1, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
}
else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_4)
@@ -216,7 +216,7 @@
tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC1,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT,BIT_PROBE_ADC1,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2,BIT_PROBE_ADC1,STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2,BIT_PROBE_ADC1,STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2,BIT_PROBE_ADC1,STRTEGY_STOP);
}
else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_6)
{
@@ -272,24 +272,24 @@
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_5)
{
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADC2 ,STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC2 ,STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC2 ,STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC2, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC2, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
}
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_3)
{
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC2, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC2, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
}
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_1)
{
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC2, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
}
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_0)
{
@@ -306,10 +306,10 @@
tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC2,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC2, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADC2, STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC2, STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC2, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC2, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC2, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
}
else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_2)
@@ -317,7 +317,7 @@
tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC2,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC2, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC2, STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC2, STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC2, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC2, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
}
else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_4)
@@ -325,7 +325,7 @@
tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC2,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT,BIT_PROBE_ADC2,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2,BIT_PROBE_ADC2,STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2,BIT_PROBE_ADC2,STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2,BIT_PROBE_ADC2,STRTEGY_STOP);
}
else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_6)
{
@@ -381,24 +381,24 @@
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_5)
{
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF ,STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF ,STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF ,STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADCRF, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADCRF, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
}
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_3)
{
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADCRF, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADCRF, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
}
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_1)
{
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADCRF, STRTEGY_START);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
}
else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_0)
{
@@ -415,10 +415,10 @@
tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADCRF,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADCRF, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF, STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF, STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADCRF, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADCRF, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
}
else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_2)
@@ -426,7 +426,7 @@
tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADCRF,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADCRF, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2 ,BIT_PROBE_ADCRF, STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 ,BIT_PROBE_ADCRF, STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 ,BIT_PROBE_ADCRF, STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADCRF, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
}
else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_4)
@@ -434,7 +434,7 @@
tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADCRF,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT,BIT_PROBE_ADCRF,STRTEGY_STOP);
tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2,BIT_PROBE_ADCRF,STRTEGY_STOP);
- tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2,BIT_PROBE_ADCRF,STRTEGY_STOP);
+// tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2,BIT_PROBE_ADCRF,STRTEGY_STOP);
}
else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_6)
{
@@ -526,7 +526,6 @@
}
-
/*******************************************************************************
* Function: tsc_RefStrategyDispatch
* Description:
@@ -539,6 +538,37 @@
* Others: //not use
********************************************************************************/
+static void tsctrl_print_temp(void)
+{
+ if(g_adc1_flag==1){
+ sc_debug_info_record(MODULE_ID_AP_TSC, "temp1:%d\n", zx_read_reg(TSCTRL_TEMPADC1) );
+ printk( "temp1:%d\n", zx_read_reg(TSCTRL_TEMPADC1));
+ }
+ if(g_adc2_flag==1){
+ sc_debug_info_record(MODULE_ID_AP_TSC, "temp2:%d\n", zx_read_reg(TSCTRL_TEMPADC2) );
+ printk( "temp2:%d\n", zx_read_reg(TSCTRL_TEMPADC2));
+ }
+ if(g_adc3_flag==1){
+ sc_debug_info_record(MODULE_ID_AP_TSC, "tempRf:%d\n", zx_read_reg(TSCTRL_TEMPADCRF) );
+ printk( "tempRf:%d\n", zx_read_reg(TSCTRL_TEMPADCRF));
+ }
+}
+/*******************************************************************************
+ * Function: tsctrl_set_strategy
+ * Description:
+ * Parameters:
+ * Input:
+ *
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+int cpufreq_performance(void);
+int cpufreq_powersave(void);
+int cpufreq_normal(void);
+
static void tsctrl_set_strategy(void)
{
u32 i=0;
@@ -562,6 +592,7 @@
if(any_resident_flag){
tsctrl_callback_dispatch(PS_STRATEGY_ANYRESIDENT,false); /*È¥ÈÎÒâפÁô,Ò²¾ÍÊÇÕý³£×¤Áô*/
tsc_set_reg_bits(TSCTRL_PS,BIT_PS_ANYRESIDENT,BITS_FOR_PSIRAM,false);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "AnyResident stop\n");
tsc_print_log("AnyResident stop!\n");
any_resident_flag=0;
@@ -570,6 +601,7 @@
if(!any_resident_flag){
tsctrl_callback_dispatch(PS_STRATEGY_ANYRESIDENT,true);/*ÈÎÒâפÁô*/
tsc_set_reg_bits(TSCTRL_PS,BIT_PS_ANYRESIDENT,BITS_FOR_PSIRAM,true);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "AnyResident start!\n");
tsc_print_log("AnyResident start!\n");
any_resident_flag=1;
@@ -610,6 +642,7 @@
if(ps_rate_flag!=0){
tsctrl_callback_dispatch(PS_STRATEGY_RATE,STRTEGY_STOP);//STOP
tsc_set_reg_bits(TSCTRL_PS,BIT_PS_RATE,BITS_FOR_PSIRAM,STRTEGY_STOP);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "ps modem rate limit stop!\n");
tsc_print_log("ps modem rate limit stop!\n");
ps_rate_flag=0;
@@ -620,6 +653,7 @@
if(ps_rate_flag!=1){
tsctrl_callback_dispatch(PS_STRATEGY_RATE,STRTEGY_START);//START
tsc_set_reg_bits(TSCTRL_PS,BIT_PS_RATE,BITS_FOR_PSIRAM,STRTEGY_START);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "ps modem rate limit start!\n");
tsc_print_log("ps modem rate limit start!\n");
ps_rate_flag=1;
@@ -628,6 +662,7 @@
if(ps_rate_flag!=2){
tsctrl_callback_dispatch(PS_STRATEGY_RATE,STRTEGY_HOLD);//HOLD
tsc_set_reg_bits(TSCTRL_PS,BIT_PS_RATE,BITS_FOR_PSIRAM,STRTEGY_HOLD);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "ps modem rate limit hold!\n");
tsc_print_log("ps modem rate limit hold!\n");
ps_rate_flag=2;
@@ -639,6 +674,7 @@
if(tsc_read_reg(TSCTRL_LIMIT_LTE_DOWNRATE1+i*0x4)==0){
if(g_phy_Strategy[i].flag !=0) {
tsc_set_reg_bits(TSCTRL_PHY,(BIT_LIMIT_LTE_DOWNRATE1+i),BITS_FOR_PHYIRAM,STRTEGY_STOP);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "%s stop\n", g_phy_Strategy[i].name );
tsc_print_log("%s stop\n", g_phy_Strategy[i].name);
g_phy_Strategy[i].flag=0;
@@ -647,6 +683,7 @@
if(g_phy_Strategy[i].flag!=1) {
tsc_set_reg_bits(TSCTRL_PHY,(BIT_LIMIT_LTE_DOWNRATE1+i),BITS_FOR_PHYIRAM,STRTEGY_START);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "%s start\n", g_phy_Strategy[i].name );
tsc_print_log("%s start\n", g_phy_Strategy[i].name);
g_phy_Strategy[i].flag=1;
@@ -662,6 +699,7 @@
if(tsc_read_reg(TSCTRL_LIMIT_LTE_DOWNRATE1+i*0x4)==0){
if(g_phy_Strategy[i].flag !=0) {
tsc_set_reg_bits(TSCTRL_PHY,(BIT_LIMIT_LTE_DOWNRATE1+i),BITS_FOR_PHYIRAM,STRTEGY_STOP);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "%s stop\n", g_phy_Strategy[i].name );
tsc_print_log("%s stop\n", g_phy_Strategy[i].name);
g_phy_Strategy[i].flag=0;
@@ -669,6 +707,7 @@
}else{
if(g_phy_Strategy[i].flag !=1) {
tsc_set_reg_bits(TSCTRL_PHY,(BIT_LIMIT_LTE_DOWNRATE1+i),BITS_FOR_PHYIRAM,STRTEGY_START);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "%s start\n", g_phy_Strategy[i].name );
tsc_print_log("%s start\n", g_phy_Strategy[i].name);
g_phy_Strategy[i].flag=1;
@@ -687,10 +726,12 @@
if(tsc_read_reg(TSCTRL_DFS)==0){
if(ps_freq_flag){
//tsc_print_log("CPU_FREQ0:zx_getspeed(0)=%d\n",zx_getspeed(0));
- if(zx_getspeed(0) != 624000 )
- zx29_set_frequency(1,0); //zDrvPow_SetArmPsCoreFreq(CLK624M);
+// if(zx_getspeed(0) != 624000 )
+// zx29_set_frequency(1,0); //zDrvPow_SetArmPsCoreFreq(CLK624M);
+ cpufreq_normal();
tsc_set_reg_bits(TSCTRL_PS,BIT_PS_FREQ, BITS_FOR_PSIRAM,STRTEGY_STOP);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "ps freq 624M start\n" );
tsc_print_log("ps freq 624M start\n");
ps_freq_flag=0;
@@ -698,8 +739,10 @@
}else{
if(!ps_freq_flag){
//tsc_print_log("CPU_FREQ1:zx_getspeed(0)=%d\n",zx_getspeed(0));
- zx29_set_frequency(0,1); //zDrvPow_SetArmPsCoreFreq(CLK312M);
+ //zx29_set_frequency(0,1); //zDrvPow_SetArmPsCoreFreq(CLK312M);
+ cpufreq_powersave();
tsc_set_reg_bits(TSCTRL_PS,BIT_PS_FREQ, BITS_FOR_PSIRAM,STRTEGY_START);
+ tsctrl_print_temp();
sc_debug_info_record(MODULE_ID_AP_TSC, "ps freq 312M start\n" );
tsc_print_log("ps freq 312M start\n");
ps_freq_flag=1;
diff --git a/ap/os/linux/linux-3.4.x/drivers/tty/serial/zx29_uart.c b/ap/os/linux/linux-3.4.x/drivers/tty/serial/zx29_uart.c
index efe89a3..eed941f 100755
--- a/ap/os/linux/linux-3.4.x/drivers/tty/serial/zx29_uart.c
+++ b/ap/os/linux/linux-3.4.x/drivers/tty/serial/zx29_uart.c
@@ -136,6 +136,7 @@
unsigned char uart_port_autobaud_gtflag = 0 ;
unsigned char uart_port_autobaud_suflag = 0 ;
unsigned char g_console_open_flag = 1;
+int g_uart_overrun = 0;
unsigned char UART_AT_send_ok[UART_AT_SENDOK_NUM] =
@@ -1448,6 +1449,22 @@
struct zx29_sgbuf *sgbuf = zup->curr_sg;
size_t pending;
+ uint32_t ris_status;
+ ris_status = UART_GET_RIS(&zup->port);
+ if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
+ if(ris_status & UART_OEIS){
+ zup->port.icount.overrun++;
+ g_uart_overrun = 4;
+ test_uart_static(NULL, 0, 20, zup->port.line);
+ }
+ if(ris_status & UART_BEIS)
+ zup->port.icount.brk++;
+ if(ris_status & UART_PEIS)
+ zup->port.icount.parity++;
+ if(ris_status & UART_FEIS)
+ zup->port.icount.frame++;
+ UART_PUT_ICR(&zup->port, (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS));
+ }
dma_peripheral_id rx_id = uart_get_rx_dma_peripheral_id(zup);
zx29_dma_stop(rx_id);
@@ -2092,6 +2109,8 @@
if(ris_status & (UART_OEIS | UART_BEIS | UART_PEIS | UART_FEIS)){
if(ris_status & UART_OEIS){
zup->port.icount.overrun++;
+ g_uart_overrun = 1;
+ test_uart_static(NULL, 0, 19,zup->port.line);
//if(!uart_console(&zup->port))
//BUG_ON(1);
}
@@ -2110,9 +2129,17 @@
zx29_dma_stop(rx_id);
zup->dmarx.running = false;
zup->dmarx.used = false;
- int i;
+
+ tmp_len = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
+ if(tmp_len != pending){
+ pending = tmp_len;
+ }
+ dmarx->use_buf_b = !dmarx->use_buf_b;
+ wmb();
+ if(zup->uart_power_mode){
+ int i;
for(i= 0;i < 3;i++){
fr = UART_GET_FR(&zup->port);
if((fr & UART_FR_RXFE) == 0){
@@ -2129,16 +2156,9 @@
}
//zup->sg2tty = sgbuf;
- tmp_len = sgbuf->sg.length - zx29_dma_get_transfer_num(rx_id);
- if(tmp_len != pending){
- pending = tmp_len;
- }
- dmarx->use_buf_b = !dmarx->use_buf_b;
- wmb();
//when app ctrl sleep ,always start dma receive
- if(zup->uart_power_mode){
if(zup->sleep_state == 0){
//now start dma again
if (zx29_dma_rx_trigger_dma(zup)) {
@@ -2153,7 +2173,7 @@
}
if(pending || (i > 0)){
- test_uart_static(zup->port.line, NULL, 0, 13);
+ test_uart_static(NULL, 0, 13,zup->port.line);
zx29_uart_deal_dma_fifo_rx_chars(zup, pending, sgbuf, &flags, g_fifo_residue_buf,i);
}
}
@@ -2231,11 +2251,12 @@
struct uart_port *port = dev_id;
struct zx29_uart_port *zup = container_of(port, struct zx29_uart_port, port);
unsigned long flags;
- unsigned int status, pass_counter = 256;
+ unsigned int status,ris, pass_counter = 256;
int handled = 0;
raw_spin_lock_irqsave(&zup->port.lock, flags);
status = UART_GET_MIS(port) & zup->imr;
+ ris = UART_GET_RIS(port);
if (status) {
do {
UART_PUT_ICR(port,(status & ~(UART_TXIS|UART_RTIS|UART_RXIS)));
@@ -2248,6 +2269,11 @@
#endif
if (status & (UART_RXIS)){
#if CONFIG_SERIAL_ZX29_DMA
+ if(ris & UART_OEIS){
+ zup->port.icount.overrun++;
+ g_uart_overrun = 8;
+ test_uart_static(NULL, 0, 21, zup->port.line);
+ }
if (zx29_dma_rx_used(zup)){
UART_PUT_ICR(port,UART_RXIS);
if(!(zup->imr & UART_RTIM)){
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/dwc_otg/dwc_otg_chg_identify.c b/ap/os/linux/linux-3.4.x/drivers/usb/dwc_otg/dwc_otg_chg_identify.c
index 6d887bf..a771aa7 100644
--- a/ap/os/linux/linux-3.4.x/drivers/usb/dwc_otg/dwc_otg_chg_identify.c
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/dwc_otg/dwc_otg_chg_identify.c
@@ -1043,11 +1043,18 @@
/*GPIOºÍÍⲿÖжϺŸù¾ÝÏîĿʵ¼ÊÇé¿öÐÞ¸Ä
*´Ë´¦Îª²Î¿¼´úÂë
*/
+ #ifdef _USE_VEHICLE_DC_REF
+#define USB_GPIO ZX29_GPIO_54
+#define USB_GPIO_FUNC_GPIO GPIO54_GPIO54
+#define USB_GPIO_FUNC_EXT_INT GPIO54_EXT_INT7
+#define USB_DT_INT PCU_EX7_INT
+
+ #else
#define USB_GPIO ZX29_GPIO_52
#define USB_GPIO_FUNC_GPIO GPIO52_GPIO52
#define USB_GPIO_FUNC_EXT_INT GPIO52_EXT_INT5
#define USB_DT_INT PCU_EX5_INT
-
+#endif
int Usb_Detect_Val(void)
{
int value;
@@ -1085,10 +1092,33 @@
//5.23
zx29_gpio_config(USB_GPIO,USB_GPIO_FUNC_GPIO);
gpio_direction_input(USB_GPIO);
- msleep(5);
+ msleep(500);
value = gpio_get_value(USB_GPIO);
- printk("%s,value:%d\n", __func__,value);
+ printk("%s,value:%d, usb_plugin:%d\n", __func__,value, usb_plugin);
zx29_gpio_config(USB_GPIO,USB_GPIO_FUNC_EXT_INT);
+
+#ifdef _USE_VEHICLE_DC_REF
+ if(value == 1)
+ {
+
+ zx29_gpio_set_inttype(USB_GPIO,IRQ_TYPE_EDGE_FALLING);
+ pcu_int_clear(USB_DT_INT);
+ if(usb_plugin == 0){
+ dwc_otg_usb_chg_detect(); //plug in;
+ usb_plugin = 1;
+ }
+ }
+ else
+ {
+ zx29_gpio_set_inttype(USB_GPIO,IRQ_TYPE_EDGE_RISING);
+ pcu_int_clear(USB_DT_INT);
+ if(usb_plugin == 1){
+ dwc_otg_disconnect();
+ dwc_otg_usb_chg_remove(); //not plug in;
+ usb_plugin = 0;
+ }
+ }
+#else
if(value == 1)
{
@@ -1109,7 +1139,8 @@
usb_plugin = 1;
}
}
- printk(KERN_INFO"%s,value:%d,end\n", __func__,value);
+#endif
+ printk(KERN_INFO"%s,value:%d,usb_plugin:%d, end\n", __func__,value, usb_plugin);
USBSTACK_DBG("%s,value:%d", __func__,value);
return IRQ_HANDLED;
}
@@ -1127,6 +1158,7 @@
int value2 = 0;
printk("-----------Usb_Detect_Irq_probe\n");
+
dwc_chg_Regcallback(usb_detect_typedet);
ret = gpio_request(USB_GPIO, "usb");
@@ -1141,7 +1173,20 @@
printk(KERN_INFO "%s,value:%d, irq_num:%d\n",__func__,value, usb_detect_irq);
zx29_gpio_config(USB_GPIO,USB_GPIO_FUNC_EXT_INT);
-#if 1
+ #ifdef _USE_VEHICLE_DC_REF
+ if(value == 1)
+ {
+ dwc_otg_usb_chg_detect(); //plug in;
+ usb_plugin = 1;
+ zx29_gpio_set_inttype(USB_GPIO,IRQ_TYPE_EDGE_FALLING);
+ }
+ else
+ {
+ usb_plugin = 0;
+ zx29_gpio_set_inttype(USB_GPIO,IRQ_TYPE_EDGE_RISING);
+ }
+ #else
+
if(value == 1)
{
usb_plugin = 0;
@@ -1158,7 +1203,7 @@
ret = request_threaded_irq(usb_detect_irq, Usb_Detect_Irq_Handler,Usb_Detect_Irq_Thread,IRQF_ONESHOT,
"usb", _dev);
- printk(KERN_INFO "%s,ret:%d\n",__func__,ret);
+ printk(KERN_INFO "%s,ret:%d, usb_plugin:%d\n",__func__,ret, usb_plugin);
if (ret)
{
printk(KERN_INFO"cannot request Usb_Detect_Irq\n");
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/android.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/android.c
index 4a2e122..a5fa3db 100755
--- a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/android.c
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/android.c
@@ -3106,12 +3106,16 @@
t_resp->cmd = USB_RPMSG_GET_USB_SPEED;
struct usb_gadget *gadget = cdev->gadget;
- if(NULL == cdev)
- {
+ if(NULL == cdev)
+ {
printk("android_set_rpmsg_resp, gadget is NULL\n");
sprintf(t_resp->param, "%s\n", "invalid state");
- return ;
- }
+ return ;
+ }
+
+ if(!_android_dev->enabled) {
+ gadget->speed = 0;
+ }
sprintf(t_resp->param, "%s\n", usb_speed_string(gadget->speed));
break;
@@ -3121,10 +3125,10 @@
spin_lock_irqsave(&cdev->lock, flags);
if (cdev->config)
sprintf(t_resp->param, "%s\n", "CONFIGURED");
- else if (_android_dev->connected)
- sprintf(t_resp->param, "%s\n", "CONNECTED");
+ else if (!_android_dev->enabled)
+ sprintf(t_resp->param, "%s\n", "DISCONNECTED");
else
- sprintf(t_resp->param, "%s\n", "unknown state");
+ sprintf(t_resp->param, "%s\n", "CONNECTED");
spin_unlock_irqrestore(&cdev->lock, flags);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.c
index b2ab988..461af0a 100644
--- a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.c
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/u_serial.c
@@ -509,11 +509,10 @@
}while(port->port_usb->suspend_state==1);
spin_lock(&port->port_lock);
}
+#endif
}
//if (port->port_usb&&port->port_usb->suspend_state == 0)
-#else
- }else
-#endif
+
#endif
{
#ifdef CONFIG_PM
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c
index 8805fa9..7e53a63 100755
--- a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c
@@ -26,9 +26,7 @@
int zDrvNand_WriteBootflag( int flag );
#endif
extern int detected_charger(void);
-//xf.li@20230614 add for adb offline start
int get_usb_enum_mode(void);
-//xf.li@20230614 add for adb offline end
#ifdef _USE_VEHICLE_DC
extern int usb_server_init(void);
@@ -523,6 +521,7 @@
{
usb_notify_up(USB_RAMDUMP_TRIGGER, NULL);
}
+
ssize_t kobj_usb_show(struct kobject *kobject,struct attribute *attr,char *buf)
{
int dc=0;
@@ -566,9 +565,7 @@
}else if(!strcmp(attr->name, USB_ADB_AGENT)){
#ifdef _USE_VEHICLE_DC
adb_agent_state = adb_rpmsg_agent_state();
-//xf.li@20230614 add for adb offline start
- sprintf(buf, "%u, %s\n",adb_agent_state,( (adb_agent_state == 0) ? "AP" : "CAP"));
-//xf.li@20230614 add for adb offline end
+ sprintf(buf, "%u, %s\n",adb_agent_state,( (adb_agent_state == 0) ? "AP" : "CAP"));
#endif
}
@@ -579,9 +576,7 @@
ssize_t kobj_usb_store(struct kobject *kobject,struct attribute *attr, const char *buf,size_t size)
{
unsigned int value = 0;
-//xf.li@20230614 add for adb offline start
int cur_usbmode = 0;
-//xf.li@20230614 add for adb offline end
value = simple_strtoul(buf, NULL, 10);
if(!strcmp(attr->name,CHARGER_PLUG_NAME)){
@@ -619,19 +614,17 @@
}else if(!strcmp(attr->name,USB_GPIO_DETECT_ENABLE)){
usb_gpio_detect_enable =value;
}else if(!strcmp(attr->name,USB_ADB_AGENT)){
- adb_agent_state =value;
-//xf.li@20230614 add for adb offline start
#ifdef _USE_VEHICLE_DC
cur_usbmode = get_usb_enum_mode();
adb_agent_state =value;
if(cur_usbmode == 0){
adb_enable_rpmsg_agent(adb_agent_state);
+
}else{
printk("---none adb, switch is forbidern\n");
}
-//xf.li@20230614 add for adb offline end
#endif
- }
+ }
return size;
}
@@ -966,14 +959,13 @@
EXPORT_SYMBOL_GPL(usb_dbg_showLog);
#ifdef _USE_VEHICLE_DC
-//xf.li@20230614 add for adb offline start
int usb_get_adb_agent(void)
{
return adb_agent_state;
}
-
EXPORT_SYMBOL_GPL(usb_get_adb_agent);
-//xf.li@20230614 add for adb offline end
+
+
void usb_set_rpmsg_resp(int type, char*resp)
{
@@ -1001,10 +993,9 @@
EXPORT_SYMBOL_GPL(usb_set_rpmsg_resp);
void usb_parse_cap_notify(int type)
-{
-//xf.li@20230614 add for adb offline start
+{
int cur_usbmode = 0;
-//xf.li@20230614 add for adb offline end
+
if(type >= USB_RPMSG_NOTIFY_MAX){
printk("usb_parse_cap_notify fail, invalid type:%d\n", type);
return ;
@@ -1070,8 +1061,7 @@
schedule_work(&switch_usbmode);
break;
-
-//xf.li@20230614 add for adb offline start
+
case USB_RPMSG_FORCE_RNDIS:
case USB_RPMSG_FORCE_ECM:
cur_usbmode = get_usb_enum_mode();
@@ -1083,12 +1073,13 @@
printk("cur_usbmode: %s\n", ( (cur_usbmode == 0) ? "debug": "user"));
if(cur_usbmode == 0){
switch_mode = USB_SWITCH_DEBUG;
+
}else if( cur_usbmode == 1){
switch_mode = USB_SWITCH_USER;
+
}
schedule_work(&switch_usbmode);
- break;
-//xf.li@20230614 add for adb offline end
+ break;
case USB_RPMSG_OPEN_DL:
usb_record_dbginfo(USB_OPEN_DL, 0, 0);
#ifndef CONFIG_SYSTEM_RECOVERY
diff --git a/ap/os/linux/linux-3.4.x/drivers/watchdog/zx_soft_wdt.c b/ap/os/linux/linux-3.4.x/drivers/watchdog/zx_soft_wdt.c
index de12136..ebf9ea2 100644
--- a/ap/os/linux/linux-3.4.x/drivers/watchdog/zx_soft_wdt.c
+++ b/ap/os/linux/linux-3.4.x/drivers/watchdog/zx_soft_wdt.c
@@ -16,6 +16,7 @@
#include <linux/cp_types.h>
#include "NvParam_drv.h"
#include "pub_debug_info.h"
+#include <linux/uaccess.h>
#define WDT_DEFAULT (30)
#define WDT_INT_TIME (5)
@@ -382,7 +383,8 @@
unsigned int ret = 0;
unsigned int temp;
-
+ bool flag;
+
struct soft_wdt_file_private *priv = file->private_data;
switch(cmd)
@@ -393,53 +395,87 @@
printk(KERN_ERR"[zx soft wdt]: wrong internal val (val must >= %d)!\n", WDT_SLEEP_TIME);
return -ENXIO;
}
- priv->interval = arg;
+ ret = copy_from_user(&temp, (unsigned int*)arg, sizeof(unsigned int));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
+
+ priv->interval = temp;
+ //priv->interval = arg;
+ printk("priv->interval :%d\n", priv->interval );
+
break;
case ZX_WDT_SET_WAKEUP:
- priv->wakeup = (bool)arg;
+ ret = copy_from_user(&flag, (bool*)arg, sizeof(bool));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
+ priv->wakeup = flag;
break;
case ZX_WDT_FEED_DOG:
priv->handle_timeout_cnt = 0;
priv->handle_timeout = priv->interval + zx_wdt_get_global_cnt();
+ printk("feed priv->handle_timeout :%d\n", priv->handle_timeout );
+
break;
case ZX_WDT_SET_AP_SWITCH:
- zx_wdt_enbale((bool)arg);
+ ret = copy_from_user(&flag, (bool*)arg, sizeof(bool));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
+ zx_wdt_enbale(flag);
break;
case ZX_WDT_GET_HANDLE_TIMEOUT:
temp = priv->handle_timeout;
- *(unsigned int *)arg = temp;
+ ret = copy_to_user((void *)arg, &temp, sizeof(unsigned int));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
+ //*(unsigned int *)arg = temp;
+ printk("get priv->handle_timeout :%d\n", temp);
+
break;
case ZX_WDT_GET_GLOBAL_CNT:
temp = zx_wdt_get_global_cnt();
- *(unsigned int *)arg = temp;
+ ret = copy_to_user((void *)arg, &temp, sizeof(unsigned int));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
+ printk("priv->global_cnt :%d\n", temp );
+
break;
case ZX_WDT_GET_AP_TIMEOUT:
temp = zx_wdt_get_time_out();
- *(unsigned int *)arg = temp;
+ ret = copy_to_user((void *)arg, &temp, sizeof(unsigned int));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
break;
case ZX_WDT_SET_NV:
#ifdef CONFIG_PREEMPT_RT_FULL
#ifndef CONFIG_ARCH_ZX297520V3_CAP
- ret = zx_wdt_set_nv((bool)arg);
+ ret = copy_from_user(&flag, (bool*)arg, sizeof(bool));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
+ ret = zx_wdt_set_nv(flag);
#endif
#endif
break;
case ZX_WDT_GET_NV:
#ifdef CONFIG_PREEMPT_RT_FULL
#ifndef CONFIG_ARCH_ZX297520V3_CAP
- temp = zx_wdt_get_wdtnv_for_ctrm();
- *(bool *)arg = (bool)temp;
+ flag= zx_wdt_get_wdtnv_for_ctrm();
+ ret = copy_to_user((void *)arg, &flag, sizeof(bool));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
#endif
#endif
break;
case ZX_WDT_SET_CHECK:
- priv->is_check = (bool)arg;
+ ret = copy_from_user(&flag, (bool*)arg, sizeof(bool));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
+ priv->is_check = flag;
break;
default:
diff --git a/ap/os/linux/linux-3.4.x/include/linux/ipc.h b/ap/os/linux/linux-3.4.x/include/linux/ipc.h
index 30e8161..6042f8f 100644
--- a/ap/os/linux/linux-3.4.x/include/linux/ipc.h
+++ b/ap/os/linux/linux-3.4.x/include/linux/ipc.h
@@ -96,6 +96,9 @@
umode_t mode;
unsigned long seq;
void *security;
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+ bool rpmflag; /*shm remote flag*/
+#endif
};
#endif /* __KERNEL__ */
diff --git a/ap/os/linux/linux-3.4.x/include/linux/module.h b/ap/os/linux/linux-3.4.x/include/linux/module.h
index 04380b0..de4f6c9 100755
--- a/ap/os/linux/linux-3.4.x/include/linux/module.h
+++ b/ap/os/linux/linux-3.4.x/include/linux/module.h
@@ -733,6 +733,7 @@
int (*zDrvVp_GetPath_Wrap)(void);
int (*zDrvVp_Loop)(int);
+ int (*zDrvVp_GetVpLoop_Wrap)(void);
int (*zDrvVp_Soft_Dtmf_Loop)(int);
void (*zDrvDtmf_Detect_RegCallbacks)(T_DrvDtmf_Detect_Opt);
int (*zDrvVp_SetPath_Wrap)(int);
diff --git a/ap/os/linux/linux-3.4.x/include/linux/mtd/nand.h b/ap/os/linux/linux-3.4.x/include/linux/mtd/nand.h
index ec38761..24d11fc 100755
--- a/ap/os/linux/linux-3.4.x/include/linux/mtd/nand.h
+++ b/ap/os/linux/linux-3.4.x/include/linux/mtd/nand.h
@@ -570,6 +570,7 @@
#define NAND_MFR_HOSIN 0xD6
#define NAND_MFR_EMST 0xC8
#define NAND_MFR_FORESEE 0xCD
+#define NAND_MFR_XTX 0x0B
#define NAND_DEVID_EMST_F50D1G41LB_1G 0x11
diff --git a/ap/os/linux/linux-3.4.x/include/net/net_namespace.h b/ap/os/linux/linux-3.4.x/include/net/net_namespace.h
index ee547c1..2079a39 100644
--- a/ap/os/linux/linux-3.4.x/include/net/net_namespace.h
+++ b/ap/os/linux/linux-3.4.x/include/net/net_namespace.h
@@ -47,7 +47,9 @@
*/
#endif
spinlock_t rules_mod_lock;
-
+ //BDSA-2019-2065
+ u32 hash_mix;
+
struct list_head list; /* list of network namespaces */
struct list_head cleanup_list; /* namespaces on death row */
struct list_head exit_list; /* Use only net_mutex */
diff --git a/ap/os/linux/linux-3.4.x/include/net/netns/hash.h b/ap/os/linux/linux-3.4.x/include/net/netns/hash.h
index 548d78f..0f3152d 100644
--- a/ap/os/linux/linux-3.4.x/include/net/netns/hash.h
+++ b/ap/os/linux/linux-3.4.x/include/net/netns/hash.h
@@ -1,21 +1,10 @@
#ifndef __NET_NS_HASH_H__
#define __NET_NS_HASH_H__
+//BDSA-2019-2065
+#include <net/net_namespace.h>
-#include <asm/cache.h>
-
-struct net;
-
-static inline unsigned net_hash_mix(struct net *net)
+static inline u32 net_hash_mix(const struct net *net)
{
-#ifdef CONFIG_NET_NS
- /*
- * shift this right to eliminate bits, that are
- * always zeroed
- */
-
- return (unsigned)(((unsigned long)net) >> L1_CACHE_SHIFT);
-#else
- return 0;
-#endif
+ return net->hash_mix;
}
#endif
diff --git a/ap/os/linux/linux-3.4.x/include/net/tcp.h b/ap/os/linux/linux-3.4.x/include/net/tcp.h
index b5d2d14..a118f21 100755
--- a/ap/os/linux/linux-3.4.x/include/net/tcp.h
+++ b/ap/os/linux/linux-3.4.x/include/net/tcp.h
@@ -1300,6 +1300,8 @@
{
if (sk->sk_send_head == skb_unlinked)
sk->sk_send_head = NULL;
+ if (tcp_sk(sk)->highest_sack == skb_unlinked) //CVE-2016-6828
+ tcp_sk(sk)->highest_sack = NULL;
}
static inline void tcp_init_send_head(struct sock *sk)
diff --git a/ap/os/linux/linux-3.4.x/init/Kconfig b/ap/os/linux/linux-3.4.x/init/Kconfig
index 3db3a51..a66cd15 100644
--- a/ap/os/linux/linux-3.4.x/init/Kconfig
+++ b/ap/os/linux/linux-3.4.x/init/Kconfig
@@ -243,6 +243,16 @@
depends on SYSCTL
default y
+config SYSVIPC_CROSSMSG
+ bool "Cross Core Message by ZTE_RPMSG"
+ depends on SYSVIPC && !ARCH_ZX297520V3_CAP
+ default n
+
+config SYSVIPC_CROSS_SHM
+ bool "Cross Core SHM"
+ depends on SYSVIPC && !ARCH_ZX297520V3_CAP
+ default n
+
config POSIX_MQUEUE
bool "POSIX Message Queues"
depends on NET && EXPERIMENTAL
diff --git a/ap/os/linux/linux-3.4.x/ipc/Makefile b/ap/os/linux/linux-3.4.x/ipc/Makefile
index 9075e17..bb1c60c 100644
--- a/ap/os/linux/linux-3.4.x/ipc/Makefile
+++ b/ap/os/linux/linux-3.4.x/ipc/Makefile
@@ -9,4 +9,5 @@
obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y)
obj-$(CONFIG_IPC_NS) += namespace.o
obj-$(CONFIG_POSIX_MQUEUE_SYSCTL) += mq_sysctl.o
-
+obj-$(CONFIG_SYSVIPC_CROSSMSG) += cross_msg.o
+obj-$(CONFIG_SYSVIPC_CROSS_SHM) += shm_ctrl.o
diff --git a/ap/os/linux/linux-3.4.x/ipc/cross_msg.c b/ap/os/linux/linux-3.4.x/ipc/cross_msg.c
new file mode 100755
index 0000000..c5491bc
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/ipc/cross_msg.c
@@ -0,0 +1,142 @@
+/*
+ * linux/ipc/cross_msg.c
+ * Copyright (C) 2023 Sanechips Technology Co., Ltd.
+ */
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+
+#include "cross_msg.h"
+
+extern int msg_chn_ready;
+extern struct mutex cross_msg_mutex;
+
+extern int sys_msgget(key_t key, int msgflg);
+extern long do_kmsgsnd(int msqid, struct msgbuf* msgp, size_t msgsz, int msgflg);
+
+/*
+ * msg_ap_icp_init - create the two rpmsg channels used by cross-core
+ * SysV message queues (CROSS_MSG_CHN: AP->CAP requests, CROSS_MSG_CHN_CAP:
+ * CAP->AP requests/replies) and mark the send path ready.
+ * Failure to create either channel is treated as fatal (panic).
+ */
+static void msg_ap_icp_init(void)
+{
+ mutex_init(&cross_msg_mutex);
+
+ if (zDrvRpMsg_CreateChannel_Cap(CROSS_MSG_ACT, CROSS_MSG_CHN, CROSS_CHN_SIZE)) {
+ panic(CROSS_PRINT "Failed create ap->cap msg channel id %d!\n", CROSS_MSG_CHN);
+ } else {
+ printk(KERN_INFO CROSS_PRINT "create ap->cap msg channel success!\n");
+ /* senders check this flag before forwarding to CAP */
+ msg_chn_ready = 1;
+ }
+
+ if (zDrvRpMsg_CreateChannel_Cap(CROSS_MSG_ACT, CROSS_MSG_CHN_CAP, CROSS_CHN_SIZE)) {
+ panic(CROSS_PRINT "Failed create cap->ap msg channel id %d\n", CROSS_MSG_CHN_CAP);
+ } else {
+ printk(KERN_INFO CROSS_PRINT "create cap->ap msg channel success!\n");
+ }
+}
+
+/*
+ * cross_msg_recv - kthread main loop: receive SysV msgsnd requests from
+ * the CAP core over rpmsg, inject them into the local message queue via
+ * do_kmsgsnd(), and write the result back to CAP.  Loops forever.
+ *
+ * Fixes: signature now matches the int (*)(void *) type that
+ * kthread_run() expects (was void f(void), an incompatible pointer);
+ * the default case reports the unknown opcode instead of a stale ret;
+ * the (mtype, text) payload is cast to struct msgbuf * explicitly;
+ * the unused 'alen' local is removed.
+ */
+static int cross_msg_recv(void *data)
+{
+	T_sc_msg_header *msgheader = NULL;
+	long *typeheader = NULL;
+	char *textheader = NULL;
+	T_ZDrvRpMsg_Msg Icp_Msg;
+	int ret;
+	int msqid;
+
+	msgheader = kmalloc(CROSS_MSG_SIZE, GFP_KERNEL);
+	if (!msgheader) {
+		panic(CROSS_PRINT "Failed malloc send msgheader!\n");
+	}
+
+	while (1)
+	{
+		memset(msgheader, 0, sizeof(T_sc_msg_header));
+		Icp_Msg.actorID = CROSS_MSG_ACT;
+		Icp_Msg.chID = CROSS_MSG_CHN_CAP;
+		Icp_Msg.buf = msgheader;
+		Icp_Msg.len = CROSS_MSG_SIZE;
+		Icp_Msg.flag = 0;
+		/* blocking read of the next request from CAP */
+		ret = zDrvRpMsg_Read_Cap(&Icp_Msg);
+#if CROSS_DEBUG
+		printk(KERN_INFO CROSS_PRINT "cross message rpmsg recv header %x ops %x\n", msgheader->head, msgheader->ops);
+#endif
+		if (ret < 0) {
+			printk(KERN_ERR CROSS_PRINT "read rmpsg from cap error:(%d)\n", ret);
+			continue;
+		}
+		if (msgheader->head != CROSS_MSG_HEAD) {
+			printk(KERN_ERR CROSS_PRINT "read rmpsg content error\n");
+			continue;
+		}
+		switch (msgheader->ops) {
+		case MSGGET_F:
+			break;
+		case MSGCTL_F:
+			break;
+		case MSGSND_F:
+			/* payload layout after the header: long mtype | text */
+			typeheader = (long *)(msgheader + 1);
+			textheader = (char *)(typeheader + 1);
+#if CROSS_DEBUG
+			printk(KERN_INFO CROSS_PRINT "cross message msgget key:(%d) flag:(%d)\n", msgheader->sndp.getp.key, msgheader->sndp.getp.msgflg);
+#endif
+			msqid = sys_msgget(msgheader->sndp.getp.key, msgheader->sndp.getp.msgflg);
+			if (msqid < 0) {
+				printk(KERN_ERR CROSS_PRINT "msgget error:(%d)\n", msqid);
+				ret = msqid;
+				goto ack;
+			}
+#if CROSS_DEBUG
+			printk(KERN_INFO CROSS_PRINT "cross message msgsnd text:(%s) msgtyp:(%d)\n", textheader, *typeheader);
+			printk(KERN_INFO CROSS_PRINT "cross message msgsnd msgflg: %x, msgsz:(%d)\n", msgheader->sndp.msgflg, msgheader->sndp.msgsz);
+#endif
+			/* (long mtype, char text[]) is exactly the msgbuf layout */
+			ret = do_kmsgsnd(msqid, (struct msgbuf *)typeheader, msgheader->sndp.msgsz, msgheader->sndp.msgflg);
+			if (ret < 0) {
+				printk(KERN_ERR CROSS_PRINT "msgsnd error:(%d)\n", ret);
+				goto ack;
+			}
+#if CROSS_DEBUG
+			printk(KERN_INFO CROSS_PRINT "cross message msgsnd ret:(%d)\n", ret);
+#endif
+ack:
+			/* reply to CAP with the operation's result */
+			msgheader->head = CROSS_MSG_HEAD;
+			msgheader->ret = ret;
+			Icp_Msg.actorID = CROSS_MSG_ACT;
+			Icp_Msg.chID = CROSS_MSG_CHN_CAP;
+			Icp_Msg.flag = RPMSG_WRITE_INT;
+			Icp_Msg.buf = msgheader;
+			Icp_Msg.len = sizeof(T_sc_msg_header);
+			ret = zDrvRpMsg_Write_Cap(&Icp_Msg);
+			if (ret < 0)
+				printk(KERN_INFO CROSS_PRINT "write rpmsg to cap error:(%d)\n", ret);
+			break;
+		case MSGRCV_F:
+			break;
+		default:
+			/* was printing stale 'ret'; report the unknown opcode */
+			printk(KERN_INFO CROSS_PRINT "cross message msg options unknow:(%d)\n", msgheader->ops);
+			break;
+		}
+	}
+
+	/* never reached: the loop above does not exit */
+	return 0;
+}
+
+/*
+ * cross_msg_init - late initcall: bring up the rpmsg channels and spawn
+ * the receiver kthread.
+ *
+ * Fixes: initcalls must be int (*)(void), not void (late_initcall stores
+ * the pointer as initcall_t); kthread_run() already wakes the new thread,
+ * so the extra wake_up_process() was redundant and is removed.
+ */
+static int __init cross_msg_init(void)
+{
+	struct task_struct *recv_msg_thread;
+
+	msg_ap_icp_init();
+
+	printk(KERN_INFO CROSS_PRINT "cross message init");
+	recv_msg_thread = kthread_run(cross_msg_recv, NULL, "cross_msg_recv");
+	if (IS_ERR(recv_msg_thread)) {
+		panic("create recv_msg_thread err");
+	}
+
+	return 0;
+}
+late_initcall(cross_msg_init);
+
diff --git a/ap/os/linux/linux-3.4.x/ipc/cross_msg.h b/ap/os/linux/linux-3.4.x/ipc/cross_msg.h
new file mode 100755
index 0000000..eedcd6f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/ipc/cross_msg.h
@@ -0,0 +1,62 @@
+/*
+ * linux/ipc/cross_msg.h
+ * Copyright (C) 2023 Sanechips Technology Co., Ltd.
+ */
+#ifndef _CROSS_MSG_H
+#define _CROSS_MSG_H
+
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+#include <linux/soc/zte/rpm/rpmsg.h>
+
+#define CROSS_MSG_ACT CAP_ID
+#define CROSS_MSG_CHN 13
+#define CROSS_MSG_CHN_CAP 14
+#define CROSS_MSG_SIZE ((size_t)0x1000)
+#define CROSS_MSG_NUM (4)
+#define CROSS_CHN_SIZE ((size_t)CROSS_MSG_SIZE * CROSS_MSG_NUM)
+#define CROSS_PRINT "[MESSAGE QUEUE] "
+#define CROSS_DEBUG 0
+
+#define CROSS_MSG_HEAD 0xABBA
+#define CROSS_MSG_MASK 0xFFFF0000
+#define CROSS_REMOTE_MASK 0x80000000
+
+/* Opcodes carried in T_sc_msg_header.ops (only MSGSND_F is handled today). */
+enum msg_function {
+ MSGGET_F = 0xAA,
+ MSGCTL_F = 0xBB,
+ MSGSND_F = 0xCC,
+ MSGRCV_F = 0xDD
+};
+
+/* Arguments of msgget() marshalled across cores. */
+struct msgget_para {
+ key_t key;
+ int msgflg;
+};
+
+/* Arguments of msgctl() marshalled across cores. */
+struct msgctl_para {
+ struct msgget_para getp;
+ int cmd;
+};
+
+/* Arguments of msgsnd(); the message body (long mtype + text) follows
+ * the header in the rpmsg buffer. */
+struct msgsnd_para {
+ struct msgget_para getp;
+ int msgflg;
+ size_t msgsz;
+};
+
+/* Wire header of every cross-core message-queue request/reply.
+ * head must equal CROSS_MSG_HEAD; the anonymous (ret, merrno) pair is
+ * used for replies, the union's other members for requests. */
+typedef struct msg_para {
+ unsigned short head;
+ unsigned short ops;
+ union {
+ struct msgctl_para ctlp;
+ struct msgsnd_para sndp;
+ struct {
+ int ret;
+ int merrno;
+ };
+ };
+} T_sc_msg_header;
+
+#endif
+#endif // _CROSS_MSG_H
+
diff --git a/ap/os/linux/linux-3.4.x/ipc/msg.c b/ap/os/linux/linux-3.4.x/ipc/msg.c
index a1cf543..038d954 100755
--- a/ap/os/linux/linux-3.4.x/ipc/msg.c
+++ b/ap/os/linux/linux-3.4.x/ipc/msg.c
@@ -40,6 +40,11 @@
#include <asm/current.h>
#include <asm/uaccess.h>
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+#include <linux/soc/zte/rpm/rpmsg.h>
+#include "cross_msg.h"
+#endif
+
#include "util.h"
/*
@@ -62,6 +67,12 @@
struct task_struct *tsk;
};
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+int msg_chn_ready;
+struct mutex cross_msg_mutex;
+T_sc_msg_header *msgheader = NULL;
+#endif
+
#define SEARCH_ANY 1
#define SEARCH_EQUAL 2
#define SEARCH_NOTEQUAL 3
@@ -136,7 +147,7 @@
init_ipc_ns.msg_ctlmni);
if (IS_ENABLED(CONFIG_PROC_STRIPPED))
- return 0;
+ return;
ipc_init_proc_interface("sysvipc/msg",
" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
@@ -336,10 +347,63 @@
msg_params.key = key;
msg_params.flg = msgflg;
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+ if ((key & CROSS_MSG_MASK) == CROSS_MSG_MASK)
+ msg_params.flg |= IPC_CREAT;
+#endif
return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+/*
+ * copy_msqid_by_version - kernel-space twin of copy_msqid_to_user():
+ * serialize a msqid64_ds into @buf with memcpy instead of copy_to_user
+ * (@buf is a kernel pointer here).
+ * For IPC_OLD the 64-bit fields are clamped to the legacy msqid_ds
+ * layout (USHRT_MAX caps, exact values kept in the l* fields).
+ * Returns 0 on success, -EINVAL for an unknown version.
+ */
+static inline unsigned long
+copy_msqid_by_version(void *buf, struct msqid64_ds *in, int version)
+{
+ switch(version) {
+ case IPC_64:
+ memcpy(buf, in, sizeof(*in));
+ return 0;
+ case IPC_OLD:
+ {
+ struct msqid_ds out;
+
+ memset(&out, 0, sizeof(out));
+
+ ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);
+
+ out.msg_stime = in->msg_stime;
+ out.msg_rtime = in->msg_rtime;
+ out.msg_ctime = in->msg_ctime;
+
+ if (in->msg_cbytes > USHRT_MAX)
+ out.msg_cbytes = USHRT_MAX;
+ else
+ out.msg_cbytes = in->msg_cbytes;
+ out.msg_lcbytes = in->msg_cbytes;
+
+ if (in->msg_qnum > USHRT_MAX)
+ out.msg_qnum = USHRT_MAX;
+ else
+ out.msg_qnum = in->msg_qnum;
+
+ if (in->msg_qbytes > USHRT_MAX)
+ out.msg_qbytes = USHRT_MAX;
+ else
+ out.msg_qbytes = in->msg_qbytes;
+ out.msg_lqbytes = in->msg_qbytes;
+
+ out.msg_lspid = in->msg_lspid;
+ out.msg_lrpid = in->msg_lrpid;
+
+ memcpy(buf, &out, sizeof(out));
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+#endif
+
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
@@ -479,6 +543,67 @@
return err;
}
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+/*
+ * get_msgstat - in-kernel IPC_STAT for a message queue: fill *buf (a
+ * kernel pointer, not __user) with the queue's status.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the result of copy_msqid_by_version() (-EINVAL for an unknown
+ * IPC version) is now propagated instead of being discarded; the dead
+ * 'success_return' local is removed.
+ */
+static int get_msgstat(int msqid, int cmd, struct msqid_ds * buf)
+{
+	struct msg_queue *msq;
+	int err, version;
+	struct ipc_namespace *ns;
+
+	if (msqid < 0 || cmd < 0)
+		return -EINVAL;
+
+	version = ipc_parse_version(&cmd);
+	ns = current->nsproxy->ipc_ns;
+
+	switch (cmd) {
+	case IPC_STAT:
+	{
+		struct msqid64_ds tbuf;
+
+		if (!buf)
+			return -EFAULT;
+
+		msq = msg_lock_check(ns, msqid);
+		if (IS_ERR(msq))
+			return PTR_ERR(msq);
+
+		err = -EACCES;
+		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
+			goto out_unlock;
+
+		err = security_msg_queue_msgctl(msq, cmd);
+		if (err)
+			goto out_unlock;
+
+		memset(&tbuf, 0, sizeof(tbuf));
+
+		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
+		tbuf.msg_stime = msq->q_stime;
+		tbuf.msg_rtime = msq->q_rtime;
+		tbuf.msg_ctime = msq->q_ctime;
+		tbuf.msg_cbytes = msq->q_cbytes;
+		tbuf.msg_qnum = msq->q_qnum;
+		tbuf.msg_qbytes = msq->q_qbytes;
+		tbuf.msg_lspid = msq->q_lspid;
+		tbuf.msg_lrpid = msq->q_lrpid;
+		msg_unlock(msq);
+		/* propagate -EINVAL for an unknown version */
+		return copy_msqid_by_version(buf, &tbuf, version);
+	}
+	default:
+		return -EINVAL;
+	}
+
+out_unlock:
+	msg_unlock(msq);
+	return err;
+}
+#endif
+
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
struct msg_queue *msq;
@@ -744,6 +869,104 @@
return err;
}
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+/*
+ * do_kmsgsnd - kernel-space counterpart of do_msgsnd() for messages that
+ * originate on the other core; @msgp is a kernel pointer (long mtype
+ * followed by the text), so load_kmsg() is used instead of load_msg().
+ * Returns 0 on success or a negative errno.
+ * The blocking path mirrors do_msgsnd(): ss_add() queues the sender and
+ * (presumably, as in the stock do_msgsnd) sets the task state before
+ * schedule() -- confirm against ipc/msg.c.
+ */
+long do_kmsgsnd(int msqid, struct msgbuf* msgp, size_t msgsz, int msgflg)
+{
+ struct msg_queue *msq;
+ struct msg_msg *msg;
+ int err;
+ struct ipc_namespace *ns;
+ long mtype;
+ char *mtext;
+
+ ns = current->nsproxy->ipc_ns;
+
+ mtype = msgp->mtype;
+ mtext = msgp->mtext;
+
+ if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
+ return -EINVAL;
+ if (mtype < 1)
+ return -EINVAL;
+
+ msg = load_kmsg(mtext, msgsz);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->m_type = mtype;
+ msg->m_ts = msgsz;
+
+ msq = msg_lock_check(ns, msqid);
+ if (IS_ERR(msq)) {
+ err = PTR_ERR(msq);
+ goto out_free;
+ }
+
+ for (;;) {
+ struct msg_sender s;
+
+ err = -EACCES;
+ if (ipcperms(ns, &msq->q_perm, S_IWUGO))
+ goto out_unlock_free;
+
+ err = security_msg_queue_msgsnd(msq, msg, msgflg);
+ if (err)
+ goto out_unlock_free;
+
+ if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
+ 1 + msq->q_qnum <= msq->q_qbytes) {
+ break;
+ }
+
+ /* queue full, wait: */
+ if (msgflg & IPC_NOWAIT) {
+ err = -EAGAIN;
+ goto out_unlock_free;
+ }
+ /* hold a ref so the queue cannot be freed while we sleep */
+ ss_add(msq, &s);
+ ipc_rcu_getref(msq);
+ msg_unlock(msq);
+ schedule();
+
+ ipc_lock_by_ptr(&msq->q_perm);
+ ipc_rcu_putref(msq);
+ if (msq->q_perm.deleted) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+ ss_del(&s);
+
+ if (signal_pending(current)) {
+ err = -ERESTARTNOHAND;
+ goto out_unlock_free;
+ }
+ }
+
+ msq->q_lspid = task_tgid_vnr(current);
+ msq->q_stime = get_seconds();
+
+ if (!pipelined_send(msq, msg)) {
+ /* no one is waiting for this message, enqueue it */
+ list_add_tail(&msg->m_list, &msq->q_messages);
+ msq->q_cbytes += msgsz;
+ msq->q_qnum++;
+ atomic_add(msgsz, &ns->msg_bytes);
+ atomic_inc(&ns->msg_hdrs);
+ }
+
+ err = 0;
+ msg = NULL;
+
+out_unlock_free:
+ msg_unlock(msq);
+out_free:
+ if (msg != NULL)
+ free_msg(msg);
+ return err;
+
+}
+#endif
+
SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
int, msgflg)
{
@@ -751,6 +974,99 @@
if (get_user(mtype, &msgp->mtype))
return -EFAULT;
+
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+ struct msqid_ds msgque;
+ struct ipc_perm *ipcp = &msgque.msg_perm;
+ T_ZDrvRpMsg_Msg Icp_Msg;
+ T_sc_msg_header *msgrcvheader = NULL;
+ long *typeheader = NULL;
+ char *textheader = NULL;
+ int ret, key;
+ size_t alen;
+
+ ret = get_msgstat(msqid, IPC_STAT, &msgque);
+ if (ret < 0) {
+ return ret;
+ }
+ key = ipcp->key;
+ /* cross-core handling: forward the request to CAP over rpmsg */
+ if (msg_chn_ready && ((key & CROSS_MSG_MASK) == CROSS_MSG_MASK)) {
+#if CROSS_DEBUG
+ printk(KERN_INFO CROSS_PRINT "msg key beyond (%x)\n", key);
+#endif
+ mutex_lock(&cross_msg_mutex);
+ alen = sizeof(T_sc_msg_header) + sizeof(long) + msgsz;
+ if (alen > CROSS_MSG_SIZE) {
+ mutex_unlock(&cross_msg_mutex);
+ return -EINVAL;
+ }
+ if (!msgheader)
+ msgheader = (T_sc_msg_header *)kmalloc(CROSS_MSG_SIZE, GFP_KERNEL);
+ if (!msgheader) {
+ panic(CROSS_PRINT "Failed malloc send msgheader!\n");
+ }
+ memset(msgheader, 0, sizeof(T_sc_msg_header));
+ msgheader->head = CROSS_MSG_HEAD;
+ msgheader->ops = MSGSND_F;
+ msgheader->sndp.getp.key = key;
+ msgheader->sndp.getp.msgflg = ipcp->mode | IPC_CREAT;
+ msgheader->sndp.msgflg = msgflg;
+ msgheader->sndp.msgsz = msgsz;
+ typeheader = (long *)(msgheader + 1);
+ *typeheader = mtype;
+ textheader = (char *)(typeheader + 1);
+ ret = copy_from_user(textheader, msgp->mtext, alen - sizeof(T_sc_msg_header) - sizeof(long));
+ if (ret < 0) {
+ mutex_unlock(&cross_msg_mutex);
+ return -EFAULT;
+ }
+#if CROSS_DEBUG
+ printk(KERN_INFO CROSS_PRINT "msg send text:(%s) msgtyp:(%d) msgsize:(%d)\n", textheader, *typeheader, msgsz);
+#endif
+ Icp_Msg.actorID = CROSS_MSG_ACT;
+ Icp_Msg.chID = CROSS_MSG_CHN;
+ Icp_Msg.flag = RPMSG_WRITE_INT; /* 1- means send an icp interrupt> */
+ Icp_Msg.buf = msgheader;
+ Icp_Msg.len = alen;
+
+ ret = zDrvRpMsg_Write_Cap(&Icp_Msg);
+ if(ret < 0) {
+ printk(KERN_ERR CROSS_PRINT "write rpmsg error:(%d)\n", ret);
+ mutex_unlock(&cross_msg_mutex);
+ return ret;
+ }
+ else {
+#if CROSS_DEBUG
+ printk(KERN_INFO CROSS_PRINT "write rpmsg ok:(%d)\n", ret);
+#endif
+ }
+ Icp_Msg.actorID = CROSS_MSG_ACT;
+ Icp_Msg.chID = CROSS_MSG_CHN;
+ Icp_Msg.flag = 0;
+ Icp_Msg.buf = msgheader;
+ Icp_Msg.len = CROSS_MSG_SIZE;
+
+ ret = zDrvRpMsg_Read_Cap(&Icp_Msg);
+ mutex_unlock(&cross_msg_mutex);
+ if(ret < 0) {
+ printk(KERN_ERR CROSS_PRINT "read rpmsg error:(%d)\n", ret);
+ return ret;
+ }
+ else {
+#if CROSS_DEBUG
+ printk(KERN_INFO CROSS_PRINT "read rpmsg ok:(%d)\n", ret);
+#endif
+ }
+ msgrcvheader = (T_sc_msg_header *)Icp_Msg.buf;
+ ret = msgrcvheader->ret;
+#if CROSS_DEBUG
+ printk(KERN_INFO CROSS_PRINT "msgsnd return number:(%x)\n", ret);
+#endif
+ return ret;
+ }
+#endif
+
return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
diff --git a/ap/os/linux/linux-3.4.x/ipc/msgutil.c b/ap/os/linux/linux-3.4.x/ipc/msgutil.c
old mode 100644
new mode 100755
index 52be05a..e384793
--- a/ap/os/linux/linux-3.4.x/ipc/msgutil.c
+++ b/ap/os/linux/linux-3.4.x/ipc/msgutil.c
@@ -42,6 +42,61 @@
#define DATALEN_MSG ((size_t)PAGE_SIZE-sizeof(struct msg_msg))
#define DATALEN_SEG ((size_t)PAGE_SIZE-sizeof(struct msg_msgseg))
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+/*
+ * load_kmsg - build a struct msg_msg from a kernel buffer; the in-kernel
+ * twin of load_msg(), which copies from user space.
+ * Splits @len bytes from @src across a head allocation (up to
+ * DATALEN_MSG) and a chain of msg_msgseg segments (up to DATALEN_SEG
+ * each).  Returns the message or an ERR_PTR; on error the partially
+ * built chain is released via free_msg().
+ */
+struct msg_msg *load_kmsg(const void *src, size_t len)
+{
+ struct msg_msg *msg;
+ struct msg_msgseg **pseg;
+ int err;
+ size_t alen;
+
+ alen = len;
+ if (alen > DATALEN_MSG)
+ alen = DATALEN_MSG;
+
+ msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
+ if (msg == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ msg->next = NULL;
+ msg->security = NULL;
+
+ memcpy(msg + 1, src, alen);
+
+ len -= alen;
+ src = ((char *)src) + alen;
+ pseg = &msg->next;
+ while (len > 0) {
+ struct msg_msgseg *seg;
+ alen = len;
+ if (alen > DATALEN_SEG)
+ alen = DATALEN_SEG;
+ seg = kmalloc(sizeof(*seg) + alen,
+ GFP_KERNEL);
+ if (seg == NULL) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ *pseg = seg;
+ seg->next = NULL;
+ memcpy(seg + 1, src, alen);
+ pseg = &seg->next;
+ len -= alen;
+ src = ((char *)src) + alen;
+ }
+
+ err = security_msg_msg_alloc(msg);
+ if (err)
+ goto out_err;
+
+ return msg;
+
+out_err:
+ free_msg(msg);
+ return ERR_PTR(err);
+}
+#endif
+
struct msg_msg *load_msg(const void __user *src, size_t len)
{
struct msg_msg *msg;
diff --git a/ap/os/linux/linux-3.4.x/ipc/shm.c b/ap/os/linux/linux-3.4.x/ipc/shm.c
index 054c4e0..47d3255 100755
--- a/ap/os/linux/linux-3.4.x/ipc/shm.c
+++ b/ap/os/linux/linux-3.4.x/ipc/shm.c
@@ -43,6 +43,9 @@
#include <asm/uaccess.h>
#include "util.h"
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+#include "shm_ctrl.h"
+#endif
struct shm_file_data {
int id;
@@ -69,6 +72,11 @@
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+extern int shm_remote_free_pages(struct vm_area_struct *unmap_vma);
+extern int shm_do_remote_map_vma(struct vm_area_struct *vma, key_t key);
+#endif
+
void shm_init_ns(struct ipc_namespace *ns)
{
ns->shm_ctlmax = SHMMAX;
@@ -141,6 +149,24 @@
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+/*
+ * shm_mmap_pagetable - back a freshly mmapped cross-core shm vma with
+ * pages from the shared pool (called for segments flagged rpmflag).
+ *
+ * Fix: shm_lock() returns an ERR_PTR for a stale/invalid id; it was
+ * dereferenced unconditionally, which would oops.  Also give the error
+ * printk a level and a newline.
+ */
+void shm_mmap_pagetable(struct vm_area_struct *vma, struct file *file)
+{
+	int ret = 0;
+	struct shm_file_data *sfd;
+	struct shmid_kernel *shp;
+
+	sfd = shm_file_data(file);
+	shp = shm_lock(sfd->ns, sfd->id);
+	if (IS_ERR(shp))
+		return;
+
+	ret = shm_do_remote_map_vma(vma, shp->shm_perm.key);
+	if (ret < 0)
+		printk(KERN_ERR "shm_mmap_pagetable Error\n");
+
+	shm_unlock(shp);
+}
+#endif
+
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
rcu_read_lock();
@@ -240,6 +266,10 @@
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+ if (shp->shm_perm.rpmflag == TRUE)
+ shm_remote_free_pages(shp->shm_perm.key);
+#endif
if (shm_may_destroy(ns, shp))
shm_destroy(ns, shp);
else
@@ -517,6 +547,12 @@
shp->shm_nattch = 0;
shp->shm_file = file;
shp->shm_creator = current;
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+ if((key & SHM_REMOTE_ATTR_MASK) == SHM_REMOTE_ATTR_MASK)
+ shp->shm_perm.rpmflag = TRUE;
+ else
+ shp->shm_perm.rpmflag = FALSE;
+#endif
/*
* shmid gets reported as "inode#" in /proc/pid/maps.
* proc-ps tools use this. Changing this will break them.
@@ -1049,7 +1085,10 @@
sfd->ns = get_ipc_ns(ns);
sfd->file = shp->shm_file;
sfd->vm_ops = NULL;
-
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+ if(shp->shm_perm.rpmflag == TRUE)
+ file->f_flags = SHM_REMOTE_ATTR_YES;
+#endif
down_write(¤t->mm->mmap_sem);
if (addr && !(shmflg & SHM_REMAP)) {
err = -EINVAL;
diff --git a/ap/os/linux/linux-3.4.x/ipc/shm_ctrl.c b/ap/os/linux/linux-3.4.x/ipc/shm_ctrl.c
new file mode 100755
index 0000000..598cb28
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/ipc/shm_ctrl.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/ipc/shm_ctrl.c
+ * Copyright (C) 1992, 1993 Krishna Balasubramanian
+ * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
+ * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
+ *
+ * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
+ * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
+ * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
+ *
+ * Better ipc lock (kern_ipc_perm.lock) handling
+ * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
+ */
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include "shm_ctrl.h"
+#include "../mm/internal.h"
+
+/**
+ * Macro definitions
+ */
+#define SHM_UNIT_BUFF_ORDER (12)
+#define SHM_KEYS_STATUS_LEN (4*1024)
+#define SHM_REMOTE_BUFF_LEN (128*1024)
+#define SHM_BUFF_BASE_PHY_ADDR (g_shm_phyAddr)
+
+#define SHM_UNIT_BUFF_SIZE (1UL<<SHM_UNIT_BUFF_ORDER) /*4KB*/
+#define SHM_UNIT_INDEX(addr) (((unsigned long)addr - SHM_BUFF_BASE_PHY_ADDR) >> SHM_UNIT_BUFF_ORDER)
+#define SHM_UNIT_PAGE_ADDR(index) ((void *)(SHM_BUFF_BASE_PHY_ADDR + ((unsigned long)index << SHM_UNIT_BUFF_ORDER)))
+#define SHM_UNIT_NUM_BITS (SHM_REMOTE_BUFF_LEN >> SHM_UNIT_BUFF_ORDER)
+#define SHM_CTRL_BITMAP_NUM (SHM_UNIT_NUM_BITS / SHM_CTRL_LONG_32BIT)
+
+/* Per-key bookkeeping: which 4KB units of the shared pool a key owns. */
+struct shm_key_node {
+ key_t key; /* SysV key; 0 marks an unused table slot */
+ unsigned int vma_count; /* number of vmas attached for this key */
+ DECLARE_BITMAP(shm_inuse_index, SHM_UNIT_NUM_BITS); /* units owned by this key */
+};
+
+/* Top-level state of the cross-core shared-memory pool. */
+struct shm_entity {
+ DECLARE_BITMAP(shm_regions_bitmap, SHM_UNIT_NUM_BITS);/* allocation bitmap of the whole memory pool */
+ struct shm_key_node keys_info_head[SHM_UNIT_NUM_BITS]; /* per-key management info for each shm */
+};
+
+/**
+ * Global variable definitions
+ */
+phys_addr_t g_shm_phyAddr = 0;
+void *g_shm_region = NULL;
+
+struct shm_entity *shm_remote_manager;
+
+/*******************************************************************************
+* Function: shm_quary_keyArray
+* Parameters:
+* (in) key: the key to look up
+* (out) none
+* Return: slot index of the key, or SHM_CTRL_ERROR if not found
+* Notes: This function searches for a specific key in the key array
+*******************************************************************************/
+/*
+ * Linear search of the key table.
+ * Returns the slot index holding @key, or SHM_CTRL_ERROR if absent.
+ * NOTE(review): a lookup with key == 0 matches the first unused slot,
+ * since 0 is the "free" sentinel -- confirm callers never pass key 0.
+ */
+static int shm_quary_keyArray(const key_t key)
+{
+ unsigned int index = 0;
+ struct shm_key_node *shm_data = NULL;
+
+ shm_data = shm_remote_manager->keys_info_head;
+
+ for (; index < SHM_UNIT_NUM_BITS; index++)
+ {
+ if (shm_data[index].key == key)
+ return index;
+ }
+ return SHM_CTRL_ERROR;
+}
+
+/*******************************************************************************
+* Function: shm_ctrl_pte_range
+* Parameters:
+* (in) mm, vma, pmd, addr, end
+* (out) none
+* Return: the address reached (== end)
+* Notes: This function clears the page-table ptes in [addr, end)
+*******************************************************************************/
+/*
+ * Clear every pte in [addr, end) under the pte lock and return the
+ * address reached (== end).
+ * NOTE(review): present ptes are cleared twice (ptep_get_and_clear and
+ * then pte_clear_not_present_full), and no TLB flush is issued here --
+ * presumably the caller flushes; confirm before relying on this.
+ */
+unsigned long shm_ctrl_pte_range(struct mm_struct *mm,
+ struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, unsigned long end)
+{
+ spinlock_t *ptl;
+ pte_t *start_pte;
+ pte_t *pte;
+
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte = start_pte;
+ arch_enter_lazy_mmu_mode();
+ do {
+ pte_t ptent = *pte;
+ if (pte_none(ptent)) {
+ continue;
+ }
+
+ if (pte_present(ptent)) {
+
+ ptent = ptep_get_and_clear(mm, addr, pte);
+ }
+ pte_clear_not_present_full(mm, addr, pte, 0);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(start_pte, ptl);
+
+ return addr;
+}
+
+/*******************************************************************************
+* Function: shm_ctrl_pmd_range
+* Parameters:
+* (in) mm: memory descriptor of the task
+* (in) vma: vma of the cross-core shared-memory address space
+* (in) pud: pud upper-level page directory
+* (in) addr: virtual start address
+* (in) end: virtual end address
+* (out) none
+* Return: the address reached (== end)
+* Notes: This function walks the pmds and clears the page-table ptes
+*******************************************************************************/
+/* Walk the pmds under @pud for [addr, end), clearing ptes per pmd. */
+static inline unsigned long shm_ctrl_pmd_range(struct mm_struct *mm,
+ struct vm_area_struct *vma, pud_t *pud,
+ unsigned long addr, unsigned long end)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ /*
+ * Here there can be other concurrent MADV_DONTNEED or
+ * trans huge page faults running, and if the pmd is
+ * none or trans huge it can change under us. This is
+ * because MADV_DONTNEED holds the mmap_sem in read
+ * mode.
+ */
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+ goto next;
+ next = shm_ctrl_pte_range(mm, vma, pmd, addr, next);
+next:
+ cond_resched();
+ } while (pmd++, addr = next, addr != end);
+
+ return addr;
+}
+/*******************************************************************************
+* Function: shm_ctrl_pud_range
+* Parameters:
+* (in) mm: memory descriptor of the task
+* (in) vma: vma of the cross-core shared-memory address space
+* (in) pgd: pgd page directory entry
+* (in) addr: virtual start address
+* (in) end: virtual end address
+* (out) none
+* Return: the address reached (== end)
+* Notes: This function locates the puds and descends into the pmds
+*******************************************************************************/
+/* Walk the puds under @pgd for [addr, end), descending into pmds. */
+static inline unsigned long shm_ctrl_pud_range(struct mm_struct *mm,
+ struct vm_area_struct *vma, pgd_t *pgd,
+ unsigned long addr, unsigned long end)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ next = shm_ctrl_pmd_range(mm, vma, pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+
+ return addr;
+}
+
+/*******************************************************************************
+* Function: shm_unmap_page_range
+* Parameters:
+* (in) mm: memory descriptor of the task
+* (in) vma: vma of the cross-core shared-memory address space
+* (in) addr: virtual start address
+* (in) end: virtual end address
+* (out) none
+* Return: void
+* Notes: This function unmaps the shm memory range
+*******************************************************************************/
+/*
+ * Unmap the shm region [addr, end) by walking the page tables and
+ * clearing the ptes.
+ * NOTE(review): pgd_offset() uses vma->vm_mm while the lower levels use
+ * the @mm parameter -- presumably always the same mm; confirm callers.
+ */
+void shm_unmap_page_range(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ pgd_t *pgd;
+ unsigned long next;
+
+ BUG_ON(addr >= end);
+ pgd = pgd_offset(vma->vm_mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ next = shm_ctrl_pud_range(mm, vma, pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+}
+
+/*******************************************************************************
+* Function: shm_vma_write_pagetable
+* Parameters:
+* (in) vma, vm_addr, shmaddr_phy
+* (out) none
+* Return: 0 on success, -EFAULT on error
+* Notes: This function installs a pte for one page of the shm mem region
+*******************************************************************************/
+/*
+ * shm_vma_write_pagetable - install a pte mapping @vm_addr (inside @vma)
+ * to the physical pool page @shmaddr_phy.
+ * Returns 0 on success, -EFAULT on a bad address or an already-mapped pte.
+ *
+ * Fixes: when get_locked_pte() succeeded but the pte was already
+ * populated, the function returned -EFAULT while still holding the pte
+ * spinlock (pte_unmap_unlock was skipped) -- a guaranteed deadlock on
+ * the next fault; the local previously named 'pte_val' also shadowed
+ * the kernel's pte_val() accessor name.
+ */
+static int shm_vma_write_pagetable(struct vm_area_struct *vma, unsigned long vm_addr,
+				phys_addr_t shmaddr_phy)
+{
+	pte_t *pte;
+	pte_t entry;
+	spinlock_t *ptl;
+
+	if (vm_addr < vma->vm_start || vm_addr >= vma->vm_end)
+		return -EFAULT;
+
+	pte = get_locked_pte(vma->vm_mm, vm_addr, &ptl);
+	if (!pte)
+		return -EFAULT;
+	if (!pte_none(*pte)) {
+		/* drop the pte lock before bailing out */
+		pte_unmap_unlock(pte, ptl);
+		return -EFAULT;
+	}
+
+	entry = __pte((phys_addr_t)(shmaddr_phy) | pgprot_val(vma->vm_page_prot));
+
+	set_pte_at(vma->vm_mm, vm_addr, pte, entry);
+	pte_unmap_unlock(pte, ptl);
+
+	return 0;
+}
+
+/*******************************************************************************
+* Function: shm_fill_keytable
+* Parameters:
+* (in) keydata: key node to record
+* (out) none
+* Return: SHM_CTRL_OK or SHM_CTRL_ERROR
+* Notes: This function records the key-to-index relation in the table
+*******************************************************************************/
+/*
+ * shm_fill_keytable - record @keydata in the first free slot of the key
+ * table (key == 0 marks a free slot).
+ * Returns SHM_CTRL_OK on success, SHM_CTRL_ERROR if @keydata is NULL or
+ * the table is full.
+ */
+int shm_fill_keytable(struct shm_key_node *keydata)
+{
+	unsigned int slot;
+	struct shm_key_node *table = shm_remote_manager->keys_info_head;
+
+	if (keydata == NULL)
+		return SHM_CTRL_ERROR;
+
+	for (slot = 0; slot < SHM_UNIT_NUM_BITS; slot++) {
+		if (table[slot].key == 0) {
+			memcpy(&table[slot], keydata, sizeof(struct shm_key_node));
+			return SHM_CTRL_OK;
+		}
+	}
+
+	return SHM_CTRL_ERROR;
+}
+
+/*******************************************************************************
+* Function: shm_remove_keynode
+* Parameters:
+* (in) key_index: slot to free
+* (out) none
+* Return: void
+* Notes: This function removes the key-to-index relation from the table
+*******************************************************************************/
+/* Free the key-table slot @key_index by zeroing it (key 0 == unused). */
+static void shm_remove_keynode(unsigned int key_index)
+{
+	struct shm_key_node *node = &shm_remote_manager->keys_info_head[key_index];
+
+	memset(node, 0, sizeof(*node));
+}
+
+/*******************************************************************************
+* Function: shm_alloc_new_page
+* Parameters:
+* (in) vma: vma to back; key: SysV key of the segment
+* (out) none
+* Return: SHM_CTRL_OK or SHM_CTRL_ERROR
+* Notes: This function allocates pages for the vma from the shm mem pool
+*******************************************************************************/
+/*
+ * shm_alloc_new_page - back every page of @vma with a free 4KB unit from
+ * the shared pool and record the allocated units under @key.
+ * Returns SHM_CTRL_OK or SHM_CTRL_ERROR.
+ *
+ * Fixes: on a mid-loop failure (pool exhausted or pte install error) the
+ * units already claimed in shm_regions_bitmap were never released and
+ * leaked permanently -- they are now rolled back; the pool page pointer
+ * is cast explicitly for the phys_addr_t parameter; the error printk
+ * gets a level.
+ */
+int shm_alloc_new_page(struct vm_area_struct *vma, key_t key)
+{
+	unsigned long vm_addr = 0;
+	unsigned int region_index = 0;
+	unsigned int idx = 0;
+	void *new_page = NULL;
+	struct shm_key_node new_key = {0};
+
+	if ((vma == NULL) || (g_shm_region == NULL)) {
+		printk(KERN_ERR "Shm region is not ready\n");
+		return SHM_CTRL_ERROR;
+	}
+
+	for (vm_addr = vma->vm_start; vm_addr < vma->vm_end; vm_addr += PAGE_SIZE) {
+		region_index = find_first_zero_bit(shm_remote_manager->shm_regions_bitmap, SHM_UNIT_NUM_BITS);
+		if (region_index >= SHM_UNIT_NUM_BITS)
+			goto rollback; /* pool exhausted */
+
+		set_bit(region_index, shm_remote_manager->shm_regions_bitmap);
+		new_page = SHM_UNIT_PAGE_ADDR(region_index);
+
+		if (shm_vma_write_pagetable(vma, vm_addr, (phys_addr_t)(unsigned long)new_page)) {
+			clear_bit(region_index, shm_remote_manager->shm_regions_bitmap);
+			goto rollback;
+		}
+		set_bit(region_index, new_key.shm_inuse_index);
+	}
+
+	if (!bitmap_empty(new_key.shm_inuse_index, SHM_UNIT_NUM_BITS)) {
+		new_key.key = key;
+		new_key.vma_count++;
+		shm_fill_keytable(&new_key);
+	}
+	return SHM_CTRL_OK;
+
+rollback:
+	/* release the units already taken for this vma */
+	for (idx = 0; idx < SHM_UNIT_NUM_BITS; idx++) {
+		if (test_bit(idx, new_key.shm_inuse_index))
+			clear_bit(idx, shm_remote_manager->shm_regions_bitmap);
+	}
+	return SHM_CTRL_ERROR;
+}
+
+/*******************************************************************************
+* Function: shm_do_remote_map_vma
+* Parameters:
+* (input) vma - user vma to map, one pool page per PAGE_SIZE
+* key - System V IPC key identifying the shared mapping
+* Return value: SHM_CTRL_OK or SHM_CTRL_ERROR (panics on fatal errors)
+* Other notes: looks up the key; if it is already known, re-maps the pages
+* recorded for that key (in bitmap order) and bumps its vma reference
+* count, otherwise allocates fresh pages from the global pool.
+* (fixed: the original banner opened a second, unterminated comment)
+*******************************************************************************/
+int shm_do_remote_map_vma(struct vm_area_struct *vma, key_t key)
+{
+    int ret = 0;
+    unsigned long vm_addr = 0;
+    unsigned int region_index = 0;
+    int key_index = 0;
+    void *new_page_phy = NULL;
+    struct shm_key_node *key_node = NULL;
+    DECLARE_BITMAP(shm_inuse_tmp, SHM_UNIT_NUM_BITS);
+
+    if ((vma == NULL) || (g_shm_region == NULL))
+    {
+        printk("shm_do_remote_map_vma:Shm region is not ready\n");
+        return SHM_CTRL_ERROR;
+    }
+
+    /* BUGFIX: map the vma non-cached. pgprot_noncached() returns the new
+     * protection value; the original discarded the result, leaving the
+     * vma cached and the cross-core data incoherent. */
+    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+    soft_spin_lock(SHM_SFLOCK);
+
+    key_index = shm_quary_keyArray(key);
+
+    /* unknown key: allocate fresh pages from the pool */
+    if (key_index < 0)
+    {
+        ret = shm_alloc_new_page(vma, key);
+        soft_spin_unlock(SHM_SFLOCK);
+        if (ret < 0)
+            panic("shm_alloc_new_page Fail\n");
+        return ret;
+    }
+
+    if (key_index >= SHM_UNIT_NUM_BITS)
+        panic("key_index out of range: failed\n");
+
+    key_node = &shm_remote_manager->keys_info_head[key_index];
+
+    /* consume a scratch copy of the in-use bitmap so the persistent
+     * record is left untouched while we walk the pages */
+    memcpy(shm_inuse_tmp, key_node->shm_inuse_index, sizeof(shm_inuse_tmp));
+
+    for (vm_addr = vma->vm_start; vm_addr < vma->vm_end; vm_addr += PAGE_SIZE)
+    {
+        region_index = find_first_bit(shm_inuse_tmp, SHM_UNIT_NUM_BITS);
+        if (region_index >= SHM_UNIT_NUM_BITS)
+        {
+            /* BUGFIX: the original returned here while still holding
+             * SHM_SFLOCK, deadlocking every subsequent caller. */
+            soft_spin_unlock(SHM_SFLOCK);
+            return SHM_CTRL_ERROR;
+        }
+
+        new_page_phy = SHM_UNIT_PAGE_ADDR(region_index);
+        if (shm_vma_write_pagetable(vma, vm_addr, new_page_phy))
+        {
+            soft_spin_unlock(SHM_SFLOCK);
+            panic("shm_do_remote_map_vma vm_insert_page failed\n");
+            return SHM_CTRL_ERROR;
+        }
+        clear_bit(region_index, shm_inuse_tmp);
+    }
+    key_node->vma_count++;
+
+    soft_spin_unlock(SHM_SFLOCK);
+    return SHM_CTRL_OK;
+}
+
+/*******************************************************************************
+* Function: shm_remote_free_pages
+* Parameters:
+* (input) key - System V IPC key whose mapping is being released
+* Return value: SHM_CTRL_OK, or SHM_CTRL_ERROR if the pool is not ready;
+* panics when the key is unknown
+* Other notes: drops one vma reference on the key; when the last reference
+* is gone, returns the key's pool pages to the global bitmap and frees
+* the key table slot.
+* NOTE(review): vma_count is decremented without an underflow check -- an
+* unbalanced call would wrap it and leak the pages; confirm callers are
+* strictly paired with a successful shm_do_remote_map_vma().
+*******************************************************************************/
+int shm_remote_free_pages(key_t key)
+{
+ int key_index = 0;
+ unsigned int region_index = 0;
+ struct shm_key_node *key_node = NULL;
+
+ if(g_shm_region == NULL)
+ {
+ printk("shm_remote_free_pages: Shm region is not ready\n");
+ return SHM_CTRL_ERROR;
+ }
+
+ soft_spin_lock(SHM_SFLOCK);
+
+ /* look up the key */
+ key_index = shm_quary_keyArray(key);
+ if(key_index < 0 || key_index >= SHM_UNIT_NUM_BITS)
+ {
+ soft_spin_unlock(SHM_SFLOCK);
+ panic("error\n");
+ }
+
+ /* cross-core pages are released back into the shared pool */
+ key_node = &shm_remote_manager->keys_info_head[key_index];
+
+ key_node->vma_count--;
+
+ if(key_node->vma_count == 0)
+ {
+ /* last user: return every page recorded for this key to the pool */
+ for_each_set_bit(region_index, key_node->shm_inuse_index, SHM_UNIT_NUM_BITS)
+ {
+ clear_bit(region_index, shm_remote_manager->shm_regions_bitmap);
+ }
+ shm_remove_keynode(key_index);
+ }
+ soft_spin_unlock(SHM_SFLOCK);
+ return SHM_CTRL_OK;
+}
+
+/*******************************************************************************
+* Function: shm_rpcore_init
+* Parameters: none
+* Return value: SHM_CTRL_OK (panics on allocation failure)
+* Other notes: late-init hook; carves the cross-core shared-memory pool
+* and the key management area out of DMA-coherent memory, then publishes
+* their physical addresses and lengths to the remote core through the
+* IRAM mailbox region.
+*******************************************************************************/
+static int __init shm_rpcore_init(void)
+{
+ dma_addr_t dma_phys;
+ dma_addr_t shm_keyInfo_phys;
+ struct shm_pool_msg shm_msg = {0};
+
+ /* backing storage for the page pool handed out by shm_alloc_new_page() */
+ g_shm_region = dma_alloc_coherent(NULL,
+ (size_t)SHM_REMOTE_BUFF_LEN,
+ &dma_phys,
+ GFP_KERNEL);
+ if(!g_shm_region)
+ {
+ panic("g_shm_region NOMEM\n");
+ }
+
+ g_shm_phyAddr = dma_phys;
+
+ /* key table + region bitmap shared with the remote core */
+ shm_remote_manager = dma_alloc_coherent(NULL,
+ (size_t)(SHM_KEYS_STATUS_LEN),
+ &shm_keyInfo_phys,
+ GFP_KERNEL);
+ if(!shm_remote_manager)
+ {
+ panic("shm_remote_manager NOMEM\n");
+ }
+
+ /* NOTE(review): the area was allocated with SHM_KEYS_STATUS_LEN but is
+ * cleared with sizeof(struct shm_entity) -- confirm the two sizes agree,
+ * otherwise part of the manager stays uninitialised. */
+ memset(shm_remote_manager, 0, sizeof(struct shm_entity));
+ shm_msg.shm_len = SHM_REMOTE_BUFF_LEN;
+ shm_msg.key_manage_len = SHM_KEYS_STATUS_LEN;
+ shm_msg.shm_memory_phy = g_shm_phyAddr;
+ shm_msg.key_manage_phy = shm_keyInfo_phys;
+
+ /* publish the pool descriptor where the remote core expects it */
+ memcpy((void*)IRAM_BASE_ADDR_SHM_REMOTE_REGION, &shm_msg, sizeof(struct shm_pool_msg));
+
+ return SHM_CTRL_OK;
+
+}
+
+late_initcall(shm_rpcore_init);
+
+
+
diff --git a/ap/os/linux/linux-3.4.x/ipc/shm_ctrl.h b/ap/os/linux/linux-3.4.x/ipc/shm_ctrl.h
new file mode 100755
index 0000000..5a850a1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/ipc/shm_ctrl.h
@@ -0,0 +1,56 @@
+/*
+ * linux/ipc/shm_ctrl.h
+ * Copyright (C) 2023 Sanechips Technology Co., Ltd.
+ *
+ * Definitions shared by the cross-core SysV shared-memory control code.
+ */
+#ifndef _SHM_CTRL_H
+#define _SHM_CTRL_H
+
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+/**
+ * Macro definitions
+ */
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/shm.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/mman.h>
+#include <linux/shmem_fs.h>
+#include <linux/audit.h>
+#include <linux/capability.h>
+#include <linux/ptrace.h>
+#include <linux/rwsem.h>
+#include <linux/nsproxy.h>
+#include <linux/ipc_namespace.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <mach/spinlock.h>
+
+/**
+ * Data type definitions
+ */
+#define TRUE 1
+#define FALSE 0
+#define SHM_REMOTE_ATTR_YES (0x594553) /* "YES" in ASCII */
+#define SHM_REMOTE_ATTR_MASK (0xFFFFF000)
+#define SHM_CTRL_OK (0)
+#define SHM_CTRL_ERROR (-1)
+#define SHM_CTRL_VMA_LINK_NUM (2)
+#define SHM_CTRL_MEMSYNC_CHANNEL (15)
+#define SHM_CTRL_CHANNEL_SIZE (0x40)
+#define SHM_CTRL_LONG_32BIT (32)
+
+/* Descriptor published to the remote core (via the IRAM mailbox region)
+ * describing the shared-memory pool and the key management area; filled
+ * in by shm_rpcore_init(). */
+struct shm_pool_msg
+{
+ unsigned int shm_len; /* length of the shm pool in bytes */
+ unsigned int key_manage_len; /* length of the key management area */
+ phys_addr_t shm_memory_phy; /* physical address of the shm pool */
+ phys_addr_t key_manage_phy; /* physical address of the key area */
+};
+
+
+#endif /* CONFIG_SYSVIPC_CROSS_SHM */
+#endif // _SHM_CTRL_H
+
diff --git a/ap/os/linux/linux-3.4.x/ipc/util.h b/ap/os/linux/linux-3.4.x/ipc/util.h
old mode 100644
new mode 100755
index 0bfc934..f8c1757
--- a/ap/os/linux/linux-3.4.x/ipc/util.h
+++ b/ap/os/linux/linux-3.4.x/ipc/util.h
@@ -138,6 +138,9 @@
#endif
extern void free_msg(struct msg_msg *msg);
+#ifdef CONFIG_SYSVIPC_CROSSMSG
+extern struct msg_msg *load_kmsg(const void __user *src, size_t len);
+#endif
extern struct msg_msg *load_msg(const void __user *src, size_t len);
extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);
diff --git a/ap/os/linux/linux-3.4.x/mm/kmemleak.bak b/ap/os/linux/linux-3.4.x/mm/kmemleak.bak
deleted file mode 100755
index c74827c..0000000
--- a/ap/os/linux/linux-3.4.x/mm/kmemleak.bak
+++ /dev/null
@@ -1,1882 +0,0 @@
-/*
- * mm/kmemleak.c
- *
- * Copyright (C) 2008 ARM Limited
- * Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * For more information on the algorithm and kmemleak usage, please see
- * Documentation/kmemleak.txt.
- *
- * Notes on locking
- * ----------------
- *
- * The following locks and mutexes are used by kmemleak:
- *
- * - kmemleak_lock (rwlock): protects the object_list modifications and
- * accesses to the object_tree_root. The object_list is the main list
- * holding the metadata (struct kmemleak_object) for the allocated memory
- * blocks. The object_tree_root is a priority search tree used to look-up
- * metadata based on a pointer to the corresponding memory block. The
- * kmemleak_object structures are added to the object_list and
- * object_tree_root in the create_object() function called from the
- * kmemleak_alloc() callback and removed in delete_object() called from the
- * kmemleak_free() callback
- * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
- * the metadata (e.g. count) are protected by this lock. Note that some
- * members of this structure may be protected by other means (atomic or
- * kmemleak_lock). This lock is also held when scanning the corresponding
- * memory block to avoid the kernel freeing it via the kmemleak_free()
- * callback. This is less heavyweight than holding a global lock like
- * kmemleak_lock during scanning
- * - scan_mutex (mutex): ensures that only one thread may scan the memory for
- * unreferenced objects at a time. The gray_list contains the objects which
- * are already referenced or marked as false positives and need to be
- * scanned. This list is only modified during a scanning episode when the
- * scan_mutex is held. At the end of a scan, the gray_list is always empty.
- * Note that the kmemleak_object.use_count is incremented when an object is
- * added to the gray_list and therefore cannot be freed. This mutex also
- * prevents multiple users of the "kmemleak" debugfs file together with
- * modifications to the memory scanning parameters including the scan_thread
- * pointer
- *
- * The kmemleak_object structures have a use_count incremented or decremented
- * using the get_object()/put_object() functions. When the use_count becomes
- * 0, this count can no longer be incremented and put_object() schedules the
- * kmemleak_object freeing via an RCU callback. All calls to the get_object()
- * function must be protected by rcu_read_lock() to avoid accessing a freed
- * structure.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/kthread.h>
-#include <linux/prio_tree.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/cpumask.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/rcupdate.h>
-#include <linux/stacktrace.h>
-#include <linux/cache.h>
-#include <linux/percpu.h>
-#include <linux/hardirq.h>
-#include <linux/mmzone.h>
-#include <linux/slab.h>
-#include <linux/thread_info.h>
-#include <linux/err.h>
-#include <linux/uaccess.h>
-#include <linux/string.h>
-#include <linux/nodemask.h>
-#include <linux/mm.h>
-#include <linux/workqueue.h>
-#include <linux/crc32.h>
-
-#include <asm/sections.h>
-#include <asm/processor.h>
-#include <linux/atomic.h>
-
-#include <linux/kmemcheck.h>
-#include <linux/kmemleak.h>
-#include <linux/memory_hotplug.h>
-
-/*
- * Kmemleak configuration and common defines.
- */
-#define MAX_TRACE 16 /* stack trace length */
-#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
-#define SECS_FIRST_SCAN 60 /* delay before the first scan */
-#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
-#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
-
-#define BYTES_PER_POINTER sizeof(void *)
-
-/* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
- __GFP_NORETRY | __GFP_NOMEMALLOC | \
- __GFP_NOWARN)
-
-/* scanning area inside a memory block */
-struct kmemleak_scan_area {
- struct hlist_node node;
- unsigned long start;
- size_t size;
-};
-
-#define KMEMLEAK_GREY 0
-#define KMEMLEAK_BLACK -1
-
-/*
- * Structure holding the metadata for each allocated memory block.
- * Modifications to such objects should be made while holding the
- * object->lock. Insertions or deletions from object_list, gray_list or
- * tree_node are already protected by the corresponding locks or mutex (see
- * the notes on locking above). These objects are reference-counted
- * (use_count) and freed using the RCU mechanism.
- */
-struct kmemleak_object {
- spinlock_t lock;
- unsigned long flags; /* object status flags */
- struct list_head object_list;
- struct list_head gray_list;
- struct prio_tree_node tree_node;
- struct rcu_head rcu; /* object_list lockless traversal */
- /* object usage count; object freed when use_count == 0 */
- atomic_t use_count;
- unsigned long pointer;
- size_t size;
- /* minimum number of a pointers found before it is considered leak */
- int min_count;
- /* the total number of pointers found pointing to this object */
- int count;
- /* checksum for detecting modified objects */
- u32 checksum;
- /* memory ranges to be scanned inside an object (empty for all) */
- struct hlist_head area_list;
- unsigned long trace[MAX_TRACE];
- unsigned int trace_len;
- unsigned long jiffies; /* creation timestamp */
- pid_t pid; /* pid of the current task */
- char comm[TASK_COMM_LEN]; /* executable name */
-};
-
-/* flag representing the memory block allocation status */
-#define OBJECT_ALLOCATED (1 << 0)
-/* flag set after the first reporting of an unreference object */
-#define OBJECT_REPORTED (1 << 1)
-/* flag set to not scan the object */
-#define OBJECT_NO_SCAN (1 << 2)
-
-/* number of bytes to print per line; must be 16 or 32 */
-#define HEX_ROW_SIZE 16
-/* number of bytes to print at a time (1, 2, 4, 8) */
-#define HEX_GROUP_SIZE 1
-/* include ASCII after the hex output */
-#define HEX_ASCII 1
-/* max number of lines to be printed */
-#define HEX_MAX_LINES 2
-
-/* the list of all allocated objects */
-static LIST_HEAD(object_list);
-/* the list of gray-colored objects (see color_gray comment below) */
-static LIST_HEAD(gray_list);
-/* prio search tree for object boundaries */
-static struct prio_tree_root object_tree_root;
-/* rw_lock protecting the access to object_list and prio_tree_root */
-static DEFINE_RWLOCK(kmemleak_lock);
-
-/* allocation caches for kmemleak internal data */
-static struct kmem_cache *object_cache;
-static struct kmem_cache *scan_area_cache;
-
-/* set if tracing memory operations is enabled */
-static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
-/* same as above but only for the kmemleak_free() callback */
-static int kmemleak_free_enabled;
-/* set in the late_initcall if there were no errors */
-static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
-/* enables or disables early logging of the memory operations */
-static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
-/* set if a kmemleak warning was issued */
-static atomic_t kmemleak_warning = ATOMIC_INIT(0);
-/* set if a fatal kmemleak error has occurred */
-static atomic_t kmemleak_error = ATOMIC_INIT(0);
-
-/* minimum and maximum address that may be valid pointers */
-static unsigned long min_addr = ULONG_MAX;
-static unsigned long max_addr;
-
-static struct task_struct *scan_thread;
-/* used to avoid reporting of recently allocated objects */
-static unsigned long jiffies_min_age;
-static unsigned long jiffies_last_scan;
-/* delay between automatic memory scannings */
-static signed long jiffies_scan_wait;
-/* enables or disables the task stacks scanning */
-static int kmemleak_stack_scan = 1;
-/* protects the memory scanning, parameters and debug/kmemleak file access */
-static DEFINE_MUTEX(scan_mutex);
-/* setting kmemleak=on, will set this var, skipping the disable */
-static int kmemleak_skip_disable;
-
-
-/*
- * Early object allocation/freeing logging. Kmemleak is initialized after the
- * kernel allocator. However, both the kernel allocator and kmemleak may
- * allocate memory blocks which need to be tracked. Kmemleak defines an
- * arbitrary buffer to hold the allocation/freeing information before it is
- * fully initialized.
- */
-
-/* kmemleak operation type for early logging */
-enum {
- KMEMLEAK_ALLOC,
- KMEMLEAK_ALLOC_PERCPU,
- KMEMLEAK_FREE,
- KMEMLEAK_FREE_PART,
- KMEMLEAK_FREE_PERCPU,
- KMEMLEAK_NOT_LEAK,
- KMEMLEAK_IGNORE,
- KMEMLEAK_SCAN_AREA,
- KMEMLEAK_NO_SCAN
-};
-
-/*
- * Structure holding the information passed to kmemleak callbacks during the
- * early logging.
- */
-struct early_log {
- int op_type; /* kmemleak operation type */
- const void *ptr; /* allocated/freed memory block */
- size_t size; /* memory block size */
- int min_count; /* minimum reference count */
- unsigned long trace[MAX_TRACE]; /* stack trace */
- unsigned int trace_len; /* stack trace length */
-};
-
-/* early logging buffer and current position */
-static struct early_log
- early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
-static int crt_early_log __initdata;
-
-static void kmemleak_disable(void);
-
-/*
- * Print a warning and dump the stack trace.
- */
-#define kmemleak_warn(x...) do { \
- pr_warning(x); \
- dump_stack(); \
- atomic_set(&kmemleak_warning, 1); \
-} while (0)
-
-/*
- * Macro invoked when a serious kmemleak condition occurred and cannot be
- * recovered from. Kmemleak will be disabled and further allocation/freeing
- * tracing no longer available.
- */
-#define kmemleak_stop(x...) do { \
- kmemleak_warn(x); \
- kmemleak_disable(); \
-} while (0)
-
-/*
- * Printing of the objects hex dump to the seq file. The number of lines to be
- * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
- * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
- * with the object->lock held.
- */
-static void hex_dump_object(struct seq_file *seq,
- struct kmemleak_object *object)
-{
- const u8 *ptr = (const u8 *)object->pointer;
- int i, len, remaining;
- unsigned char linebuf[HEX_ROW_SIZE * 5];
-
- /* limit the number of lines to HEX_MAX_LINES */
- remaining = len =
- min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
-
- seq_printf(seq, " hex dump (first %d bytes):\n", len);
- for (i = 0; i < len; i += HEX_ROW_SIZE) {
- int linelen = min(remaining, HEX_ROW_SIZE);
-
- remaining -= HEX_ROW_SIZE;
- hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
- HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
- HEX_ASCII);
- seq_printf(seq, " %s\n", linebuf);
- }
-}
-
-/*
- * Object colors, encoded with count and min_count:
- * - white - orphan object, not enough references to it (count < min_count)
- * - gray - not orphan, not marked as false positive (min_count == 0) or
- * sufficient references to it (count >= min_count)
- * - black - ignore, it doesn't contain references (e.g. text section)
- * (min_count == -1). No function defined for this color.
- * Newly created objects don't have any color assigned (object->count == -1)
- * before the next memory scan when they become white.
- */
-static bool color_white(const struct kmemleak_object *object)
-{
- return object->count != KMEMLEAK_BLACK &&
- object->count < object->min_count;
-}
-
-static bool color_gray(const struct kmemleak_object *object)
-{
- return object->min_count != KMEMLEAK_BLACK &&
- object->count >= object->min_count;
-}
-
-/*
- * Objects are considered unreferenced only if their color is white, they have
- * not be deleted and have a minimum age to avoid false positives caused by
- * pointers temporarily stored in CPU registers.
- */
-static bool unreferenced_object(struct kmemleak_object *object)
-{
- return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
- time_before_eq(object->jiffies + jiffies_min_age,
- jiffies_last_scan);
-}
-
-/*
- * Printing of the unreferenced objects information to the seq file. The
- * print_unreferenced function must be called with the object->lock held.
- */
-static void print_unreferenced(struct seq_file *seq,
- struct kmemleak_object *object)
-{
- int i;
- unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
-
- seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
- object->pointer, object->size);
- seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
- object->comm, object->pid, object->jiffies,
- msecs_age / 1000, msecs_age % 1000);
- hex_dump_object(seq, object);
- seq_printf(seq, " backtrace:\n");
-
- for (i = 0; i < object->trace_len; i++) {
- void *ptr = (void *)object->trace[i];
- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
- }
-}
-
-/*
- * Print the kmemleak_object information. This function is used mainly for
- * debugging special cases when kmemleak operations. It must be called with
- * the object->lock held.
- */
-static void dump_object_info(struct kmemleak_object *object)
-{
- struct stack_trace trace;
-
- trace.nr_entries = object->trace_len;
- trace.entries = object->trace;
-
- pr_notice("Object 0x%08lx (size %zu):\n",
- object->tree_node.start, object->size);
- pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
- object->comm, object->pid, object->jiffies);
- pr_notice(" min_count = %d\n", object->min_count);
- pr_notice(" count = %d\n", object->count);
- pr_notice(" flags = 0x%lx\n", object->flags);
- pr_notice(" checksum = %d\n", object->checksum);
- pr_notice(" backtrace:\n");
- print_stack_trace(&trace, 4);
-}
-
-/*
- * Look-up a memory block metadata (kmemleak_object) in the priority search
- * tree based on a pointer value. If alias is 0, only values pointing to the
- * beginning of the memory block are allowed. The kmemleak_lock must be held
- * when calling this function.
- */
-static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
-{
- struct prio_tree_node *node;
- struct prio_tree_iter iter;
- struct kmemleak_object *object;
-
- prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
- node = prio_tree_next(&iter);
- if (node) {
- object = prio_tree_entry(node, struct kmemleak_object,
- tree_node);
- if (!alias && object->pointer != ptr) {
- kmemleak_warn("Found object by alias at 0x%08lx\n",
- ptr);
- dump_object_info(object);
- object = NULL;
- }
- } else
- object = NULL;
-
- return object;
-}
-
-/*
- * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
- * that once an object's use_count reached 0, the RCU freeing was already
- * registered and the object should no longer be used. This function must be
- * called under the protection of rcu_read_lock().
- */
-static int get_object(struct kmemleak_object *object)
-{
- return atomic_inc_not_zero(&object->use_count);
-}
-
-/*
- * RCU callback to free a kmemleak_object.
- */
-static void free_object_rcu(struct rcu_head *rcu)
-{
- struct hlist_node *elem, *tmp;
- struct kmemleak_scan_area *area;
- struct kmemleak_object *object =
- container_of(rcu, struct kmemleak_object, rcu);
-
- /*
- * Once use_count is 0 (guaranteed by put_object), there is no other
- * code accessing this object, hence no need for locking.
- */
- hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
- hlist_del(elem);
- kmem_cache_free(scan_area_cache, area);
- }
- kmem_cache_free(object_cache, object);
-}
-
-/*
- * Decrement the object use_count. Once the count is 0, free the object using
- * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
- * delete_object() path, the delayed RCU freeing ensures that there is no
- * recursive call to the kernel allocator. Lock-less RCU object_list traversal
- * is also possible.
- */
-static void put_object(struct kmemleak_object *object)
-{
- if (!atomic_dec_and_test(&object->use_count))
- return;
-
- /* should only get here after delete_object was called */
- WARN_ON(object->flags & OBJECT_ALLOCATED);
-
- call_rcu(&object->rcu, free_object_rcu);
-}
-
-/*
- * Look up an object in the prio search tree and increase its use_count.
- */
-static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
-{
- unsigned long flags;
- struct kmemleak_object *object = NULL;
-
- rcu_read_lock();
- read_lock_irqsave(&kmemleak_lock, flags);
- if (ptr >= min_addr && ptr < max_addr)
- object = lookup_object(ptr, alias);
- read_unlock_irqrestore(&kmemleak_lock, flags);
-
- /* check whether the object is still available */
- if (object && !get_object(object))
- object = NULL;
- rcu_read_unlock();
-
- return object;
-}
-
-/*
- * Save stack trace to the given array of MAX_TRACE size.
- */
-static int __save_stack_trace(unsigned long *trace)
-{
- struct stack_trace stack_trace;
-
- stack_trace.max_entries = MAX_TRACE;
- stack_trace.nr_entries = 0;
- stack_trace.entries = trace;
- stack_trace.skip = 2;
- save_stack_trace(&stack_trace);
-
- return stack_trace.nr_entries;
-}
-
-/*
- * Create the metadata (struct kmemleak_object) corresponding to an allocated
- * memory block and add it to the object_list and object_tree_root.
- */
-static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
- int min_count, gfp_t gfp)
-{
- unsigned long flags;
- struct kmemleak_object *object;
- struct prio_tree_node *node;
-
- object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
- if (!object) {
- pr_warning("Cannot allocate a kmemleak_object structure\n");
- kmemleak_disable();
- return NULL;
- }
-
- INIT_LIST_HEAD(&object->object_list);
- INIT_LIST_HEAD(&object->gray_list);
- INIT_HLIST_HEAD(&object->area_list);
- spin_lock_init(&object->lock);
- atomic_set(&object->use_count, 1);
- object->flags = OBJECT_ALLOCATED;
- object->pointer = ptr;
- object->size = size;
- object->min_count = min_count;
- object->count = 0; /* white color initially */
- object->jiffies = jiffies;
- object->checksum = 0;
-
- /* task information */
- if (in_irq()) {
- object->pid = 0;
- strncpy(object->comm, "hardirq", sizeof(object->comm));
- } else if (in_softirq()) {
- object->pid = 0;
- strncpy(object->comm, "softirq", sizeof(object->comm));
- } else {
- object->pid = current->pid;
- /*
- * There is a small chance of a race with set_task_comm(),
- * however using get_task_comm() here may cause locking
- * dependency issues with current->alloc_lock. In the worst
- * case, the command line is not correct.
- */
- strncpy(object->comm, current->comm, sizeof(object->comm));
- }
-
- /* kernel backtrace */
- object->trace_len = __save_stack_trace(object->trace);
-
- INIT_PRIO_TREE_NODE(&object->tree_node);
- object->tree_node.start = ptr;
- object->tree_node.last = ptr + size - 1;
-
- write_lock_irqsave(&kmemleak_lock, flags);
-
- min_addr = min(min_addr, ptr);
- max_addr = max(max_addr, ptr + size);
- node = prio_tree_insert(&object_tree_root, &object->tree_node);
- /*
- * The code calling the kernel does not yet have the pointer to the
- * memory block to be able to free it. However, we still hold the
- * kmemleak_lock here in case parts of the kernel started freeing
- * random memory blocks.
- */
- if (node != &object->tree_node) {
- kmemleak_stop("Cannot insert 0x%lx into the object search tree "
- "(already existing)\n", ptr);
- object = lookup_object(ptr, 1);
- spin_lock(&object->lock);
- dump_object_info(object);
- spin_unlock(&object->lock);
-
- goto out;
- }
- list_add_tail_rcu(&object->object_list, &object_list);
-out:
- write_unlock_irqrestore(&kmemleak_lock, flags);
- return object;
-}
-
-/*
- * Remove the metadata (struct kmemleak_object) for a memory block from the
- * object_list and object_tree_root and decrement its use_count.
- */
-static void __delete_object(struct kmemleak_object *object)
-{
- unsigned long flags;
-
- write_lock_irqsave(&kmemleak_lock, flags);
- prio_tree_remove(&object_tree_root, &object->tree_node);
- list_del_rcu(&object->object_list);
- write_unlock_irqrestore(&kmemleak_lock, flags);
-
- WARN_ON(!(object->flags & OBJECT_ALLOCATED));
- WARN_ON(atomic_read(&object->use_count) < 2);
-
- /*
- * Locking here also ensures that the corresponding memory block
- * cannot be freed when it is being scanned.
- */
- spin_lock_irqsave(&object->lock, flags);
- object->flags &= ~OBJECT_ALLOCATED;
- spin_unlock_irqrestore(&object->lock, flags);
- put_object(object);
-}
-
-/*
- * Look up the metadata (struct kmemleak_object) corresponding to ptr and
- * delete it.
- */
-static void delete_object_full(unsigned long ptr)
-{
- struct kmemleak_object *object;
-
- object = find_and_get_object(ptr, 0);
- if (!object) {
-#ifdef DEBUG
- kmemleak_warn("Freeing unknown object at 0x%08lx\n",
- ptr);
-#endif
- return;
- }
- __delete_object(object);
- put_object(object);
-}
-
-/*
- * Look up the metadata (struct kmemleak_object) corresponding to ptr and
- * delete it. If the memory block is partially freed, the function may create
- * additional metadata for the remaining parts of the block.
- */
-static void delete_object_part(unsigned long ptr, size_t size)
-{
- struct kmemleak_object *object;
- unsigned long start, end;
-
- object = find_and_get_object(ptr, 1);
- if (!object) {
-#ifdef DEBUG
- kmemleak_warn("Partially freeing unknown object at 0x%08lx "
- "(size %zu)\n", ptr, size);
-#endif
- return;
- }
- __delete_object(object);
-
- /*
- * Create one or two objects that may result from the memory block
- * split. Note that partial freeing is only done by free_bootmem() and
- * this happens before kmemleak_init() is called. The path below is
- * only executed during early log recording in kmemleak_init(), so
- * GFP_KERNEL is enough.
- */
- start = object->pointer;
- end = object->pointer + object->size;
- if (ptr > start)
- create_object(start, ptr - start, object->min_count,
- GFP_KERNEL);
- if (ptr + size < end)
- create_object(ptr + size, end - ptr - size, object->min_count,
- GFP_KERNEL);
-
- put_object(object);
-}
-
-static void __paint_it(struct kmemleak_object *object, int color)
-{
- object->min_count = color;
- if (color == KMEMLEAK_BLACK)
- object->flags |= OBJECT_NO_SCAN;
-}
-
-static void paint_it(struct kmemleak_object *object, int color)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&object->lock, flags);
- __paint_it(object, color);
- spin_unlock_irqrestore(&object->lock, flags);
-}
-
-static void paint_ptr(unsigned long ptr, int color)
-{
- struct kmemleak_object *object;
-
- object = find_and_get_object(ptr, 0);
- if (!object) {
- kmemleak_warn("Trying to color unknown object "
- "at 0x%08lx as %s\n", ptr,
- (color == KMEMLEAK_GREY) ? "Grey" :
- (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
- return;
- }
- paint_it(object, color);
- put_object(object);
-}
-
-/*
- * Mark an object permanently as gray-colored so that it can no longer be
- * reported as a leak. This is used in general to mark a false positive.
- */
-static void make_gray_object(unsigned long ptr)
-{
- paint_ptr(ptr, KMEMLEAK_GREY);
-}
-
-/*
- * Mark the object as black-colored so that it is ignored from scans and
- * reporting.
- */
-static void make_black_object(unsigned long ptr)
-{
- paint_ptr(ptr, KMEMLEAK_BLACK);
-}
-
-/*
- * Add a scanning area to the object. If at least one such area is added,
- * kmemleak will only scan these ranges rather than the whole memory block.
- */
-static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
-{
- unsigned long flags;
- struct kmemleak_object *object;
- struct kmemleak_scan_area *area;
-
- object = find_and_get_object(ptr, 1);
- if (!object) {
- kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
- ptr);
- return;
- }
-
- area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
- if (!area) {
- pr_warning("Cannot allocate a scan area\n");
- goto out;
- }
-
- spin_lock_irqsave(&object->lock, flags);
- if (size == SIZE_MAX) {
- size = object->pointer + object->size - ptr;
- } else if (ptr + size > object->pointer + object->size) {
- kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
- dump_object_info(object);
- kmem_cache_free(scan_area_cache, area);
- goto out_unlock;
- }
-
- INIT_HLIST_NODE(&area->node);
- area->start = ptr;
- area->size = size;
-
- hlist_add_head(&area->node, &object->area_list);
-out_unlock:
- spin_unlock_irqrestore(&object->lock, flags);
-out:
- put_object(object);
-}
-
-/*
- * Set the OBJECT_NO_SCAN flag for the object corresponding to the give
- * pointer. Such object will not be scanned by kmemleak but references to it
- * are searched.
- */
-static void object_no_scan(unsigned long ptr)
-{
- unsigned long flags;
- struct kmemleak_object *object;
-
- object = find_and_get_object(ptr, 0);
- if (!object) {
- kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
- return;
- }
-
- spin_lock_irqsave(&object->lock, flags);
- object->flags |= OBJECT_NO_SCAN;
- spin_unlock_irqrestore(&object->lock, flags);
- put_object(object);
-}
-
-/*
- * Log an early kmemleak_* call to the early_log buffer. These calls will be
- * processed later once kmemleak is fully initialized.
- */
-static void __init log_early(int op_type, const void *ptr, size_t size,
- int min_count)
-{
- unsigned long flags;
- struct early_log *log;
-
- if (atomic_read(&kmemleak_error)) {
- /* kmemleak stopped recording, just count the requests */
- crt_early_log++;
- return;
- }
-
- if (crt_early_log >= ARRAY_SIZE(early_log)) {
- kmemleak_disable();
- return;
- }
-
- /*
- * There is no need for locking since the kernel is still in UP mode
- * at this stage. Disabling the IRQs is enough.
- */
- local_irq_save(flags);
- log = &early_log[crt_early_log];
- log->op_type = op_type;
- log->ptr = ptr;
- log->size = size;
- log->min_count = min_count;
- log->trace_len = __save_stack_trace(log->trace);
- crt_early_log++;
- local_irq_restore(flags);
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc(struct early_log *log)
-{
- struct kmemleak_object *object;
- unsigned long flags;
- int i;
-
- if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
- return;
-
- /*
- * RCU locking needed to ensure object is not freed via put_object().
- */
- rcu_read_lock();
- object = create_object((unsigned long)log->ptr, log->size,
- log->min_count, GFP_ATOMIC);
- if (!object)
- goto out;
- spin_lock_irqsave(&object->lock, flags);
- for (i = 0; i < log->trace_len; i++)
- object->trace[i] = log->trace[i];
- object->trace_len = log->trace_len;
- spin_unlock_irqrestore(&object->lock, flags);
-out:
- rcu_read_unlock();
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc_percpu(struct early_log *log)
-{
- unsigned int cpu;
- const void __percpu *ptr = log->ptr;
-
- for_each_possible_cpu(cpu) {
- log->ptr = per_cpu_ptr(ptr, cpu);
- early_alloc(log);
- }
-}
-
-/**
- * kmemleak_alloc - register a newly allocated object
- * @ptr: pointer to beginning of the object
- * @size: size of the object
- * @min_count: minimum number of references to this object. If during memory
- * scanning a number of references less than @min_count is found,
- * the object is reported as a memory leak. If @min_count is 0,
- * the object is never reported as a leak. If @min_count is -1,
- * the object is ignored (not scanned and not reported as a leak)
- * @gfp: kmalloc() flags used for kmemleak internal memory allocations
- *
- * This function is called from the kernel allocators when a new object
- * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
- */
-void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
- gfp_t gfp)
-{
- pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
-
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- create_object((unsigned long)ptr, size, min_count, gfp);
- else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
-}
-EXPORT_SYMBOL_GPL(kmemleak_alloc);
-
-/**
- * kmemleak_alloc_percpu - register a newly allocated __percpu object
- * @ptr: __percpu pointer to beginning of the object
- * @size: size of the object
- *
- * This function is called from the kernel percpu allocator when a new object
- * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
- * allocation.
- */
-void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
-{
- unsigned int cpu;
-
- pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
-
- /*
- * Percpu allocations are only scanned and not reported as leaks
- * (min_count is set to 0).
- */
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- for_each_possible_cpu(cpu)
- create_object((unsigned long)per_cpu_ptr(ptr, cpu),
- size, 0, GFP_KERNEL);
- else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
-}
-EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
-
-/**
- * kmemleak_free - unregister a previously registered object
- * @ptr: pointer to beginning of the object
- *
- * This function is called from the kernel allocators when an object (memory
- * block) is freed (kmem_cache_free, kfree, vfree etc.).
- */
-void __ref kmemleak_free(const void *ptr)
-{
- pr_debug("%s(0x%p)\n", __func__, ptr);
-
- if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
- delete_object_full((unsigned long)ptr);
- else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_FREE, ptr, 0, 0);
-}
-EXPORT_SYMBOL_GPL(kmemleak_free);
-
-/**
- * kmemleak_free_part - partially unregister a previously registered object
- * @ptr: pointer to the beginning or inside the object. This also
- * represents the start of the range to be freed
- * @size: size to be unregistered
- *
- * This function is called when only a part of a memory block is freed
- * (usually from the bootmem allocator).
- */
-void __ref kmemleak_free_part(const void *ptr, size_t size)
-{
- pr_debug("%s(0x%p)\n", __func__, ptr);
-
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- delete_object_part((unsigned long)ptr, size);
- else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
-}
-EXPORT_SYMBOL_GPL(kmemleak_free_part);
-
-/**
- * kmemleak_free_percpu - unregister a previously registered __percpu object
- * @ptr: __percpu pointer to beginning of the object
- *
- * This function is called from the kernel percpu allocator when an object
- * (memory block) is freed (free_percpu).
- */
-void __ref kmemleak_free_percpu(const void __percpu *ptr)
-{
- unsigned int cpu;
-
- pr_debug("%s(0x%p)\n", __func__, ptr);
-
- if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
- for_each_possible_cpu(cpu)
- delete_object_full((unsigned long)per_cpu_ptr(ptr,
- cpu));
- else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
-}
-EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
-
-/**
- * kmemleak_not_leak - mark an allocated object as false positive
- * @ptr: pointer to beginning of the object
- *
- * Calling this function on an object will cause the memory block to no longer
- * be reported as leak and always be scanned.
- */
-void __ref kmemleak_not_leak(const void *ptr)
-{
- pr_debug("%s(0x%p)\n", __func__, ptr);
-
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- make_gray_object((unsigned long)ptr);
- else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
-}
-EXPORT_SYMBOL(kmemleak_not_leak);
-
-/**
- * kmemleak_ignore - ignore an allocated object
- * @ptr: pointer to beginning of the object
- *
- * Calling this function on an object will cause the memory block to be
- * ignored (not scanned and not reported as a leak). This is usually done when
- * it is known that the corresponding block is not a leak and does not contain
- * any references to other allocated memory blocks.
- */
-void __ref kmemleak_ignore(const void *ptr)
-{
- pr_debug("%s(0x%p)\n", __func__, ptr);
-
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- make_black_object((unsigned long)ptr);
- else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
-}
-EXPORT_SYMBOL(kmemleak_ignore);
-
-/**
- * kmemleak_scan_area - limit the range to be scanned in an allocated object
- * @ptr: pointer to beginning or inside the object. This also
- * represents the start of the scan area
- * @size: size of the scan area
- * @gfp: kmalloc() flags used for kmemleak internal memory allocations
- *
- * This function is used when it is known that only certain parts of an object
- * contain references to other objects. Kmemleak will only scan these areas
- * reducing the number false negatives.
- */
-void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
-{
- pr_debug("%s(0x%p)\n", __func__, ptr);
-
- if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
- add_scan_area((unsigned long)ptr, size, gfp);
- else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
-}
-EXPORT_SYMBOL(kmemleak_scan_area);
-
-/**
- * kmemleak_no_scan - do not scan an allocated object
- * @ptr: pointer to beginning of the object
- *
- * This function notifies kmemleak not to scan the given memory block. Useful
- * in situations where it is known that the given object does not contain any
- * references to other objects. Kmemleak will not scan such objects reducing
- * the number of false negatives.
- */
-void __ref kmemleak_no_scan(const void *ptr)
-{
- pr_debug("%s(0x%p)\n", __func__, ptr);
-
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- object_no_scan((unsigned long)ptr);
- else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
-}
-EXPORT_SYMBOL(kmemleak_no_scan);
-
-/*
- * Update an object's checksum and return true if it was modified.
- */
-static bool update_checksum(struct kmemleak_object *object)
-{
- u32 old_csum = object->checksum;
-
- if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
- return false;
-
- object->checksum = crc32(0, (void *)object->pointer, object->size);
- return object->checksum != old_csum;
-}
-
-/*
- * Memory scanning is a long process and it needs to be interruptable. This
- * function checks whether such interrupt condition occurred.
- */
-static int scan_should_stop(void)
-{
- if (!atomic_read(&kmemleak_enabled))
- return 1;
-
- /*
- * This function may be called from either process or kthread context,
- * hence the need to check for both stop conditions.
- */
- if (current->mm)
- return signal_pending(current);
- else
- return kthread_should_stop();
-
- return 0;
-}
-
-/*
- * Scan a memory block (exclusive range) for valid pointers and add those
- * found to the gray list.
- */
-static void scan_block(void *_start, void *_end,
- struct kmemleak_object *scanned, int allow_resched)
-{
- unsigned long *ptr;
- unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
- unsigned long *end = _end - (BYTES_PER_POINTER - 1);
-
- for (ptr = start; ptr < end; ptr++) {
- struct kmemleak_object *object;
- unsigned long flags;
- unsigned long pointer;
-
- if (allow_resched)
- cond_resched();
- if (scan_should_stop())
- break;
-
- /* don't scan uninitialized memory */
- if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
- BYTES_PER_POINTER))
- continue;
-
- pointer = *ptr;
-
- object = find_and_get_object(pointer, 1);
- if (!object)
- continue;
- if (object == scanned) {
- /* self referenced, ignore */
- put_object(object);
- continue;
- }
-
- /*
- * Avoid the lockdep recursive warning on object->lock being
- * previously acquired in scan_object(). These locks are
- * enclosed by scan_mutex.
- */
- spin_lock_irqsave_nested(&object->lock, flags,
- SINGLE_DEPTH_NESTING);
- if (!color_white(object)) {
- /* non-orphan, ignored or new */
- spin_unlock_irqrestore(&object->lock, flags);
- put_object(object);
- continue;
- }
-
- /*
- * Increase the object's reference count (number of pointers
- * to the memory block). If this count reaches the required
- * minimum, the object's color will become gray and it will be
- * added to the gray_list.
- */
- object->count++;
- if (color_gray(object)) {
- list_add_tail(&object->gray_list, &gray_list);
- spin_unlock_irqrestore(&object->lock, flags);
- continue;
- }
-
- spin_unlock_irqrestore(&object->lock, flags);
- put_object(object);
- }
-}
-
-/*
- * Scan a memory block corresponding to a kmemleak_object. A condition is
- * that object->use_count >= 1.
- */
-static void scan_object(struct kmemleak_object *object)
-{
- struct kmemleak_scan_area *area;
- struct hlist_node *elem;
- unsigned long flags;
-
- /*
- * Once the object->lock is acquired, the corresponding memory block
- * cannot be freed (the same lock is acquired in delete_object).
- */
- spin_lock_irqsave(&object->lock, flags);
- if (object->flags & OBJECT_NO_SCAN)
- goto out;
- if (!(object->flags & OBJECT_ALLOCATED))
- /* already freed object */
- goto out;
- if (hlist_empty(&object->area_list)) {
- void *start = (void *)object->pointer;
- void *end = (void *)(object->pointer + object->size);
-
- while (start < end && (object->flags & OBJECT_ALLOCATED) &&
- !(object->flags & OBJECT_NO_SCAN)) {
- scan_block(start, min(start + MAX_SCAN_SIZE, end),
- object, 0);
- start += MAX_SCAN_SIZE;
-
- spin_unlock_irqrestore(&object->lock, flags);
- cond_resched();
- spin_lock_irqsave(&object->lock, flags);
- }
- } else
- hlist_for_each_entry(area, elem, &object->area_list, node)
- scan_block((void *)area->start,
- (void *)(area->start + area->size),
- object, 0);
-out:
- spin_unlock_irqrestore(&object->lock, flags);
-}
-
-/*
- * Scan the objects already referenced (gray objects). More objects will be
- * referenced and, if there are no memory leaks, all the objects are scanned.
- */
-static void scan_gray_list(void)
-{
- struct kmemleak_object *object, *tmp;
-
- /*
- * The list traversal is safe for both tail additions and removals
- * from inside the loop. The kmemleak objects cannot be freed from
- * outside the loop because their use_count was incremented.
- */
- object = list_entry(gray_list.next, typeof(*object), gray_list);
- while (&object->gray_list != &gray_list) {
- cond_resched();
-
- /* may add new objects to the list */
- if (!scan_should_stop())
- scan_object(object);
-
- tmp = list_entry(object->gray_list.next, typeof(*object),
- gray_list);
-
- /* remove the object from the list and release it */
- list_del(&object->gray_list);
- put_object(object);
-
- object = tmp;
- }
- WARN_ON(!list_empty(&gray_list));
-}
-
-/*
- * Scan data sections and all the referenced memory blocks allocated via the
- * kernel's standard allocators. This function must be called with the
- * scan_mutex held.
- */
-static void kmemleak_scan(void)
-{
- unsigned long flags;
- struct kmemleak_object *object;
- int i;
- int new_leaks = 0;
-
- jiffies_last_scan = jiffies;
-
- /* prepare the kmemleak_object's */
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
-#ifdef DEBUG
- /*
- * With a few exceptions there should be a maximum of
- * 1 reference to any object at this point.
- */
- if (atomic_read(&object->use_count) > 1) {
- pr_debug("object->use_count = %d\n",
- atomic_read(&object->use_count));
- dump_object_info(object);
- }
-#endif
- /* reset the reference count (whiten the object) */
- object->count = 0;
- if (color_gray(object) && get_object(object))
- list_add_tail(&object->gray_list, &gray_list);
-
- spin_unlock_irqrestore(&object->lock, flags);
- }
- rcu_read_unlock();
-
- /* data/bss scanning */
- scan_block(_sdata, _edata, NULL, 1);
- scan_block(__bss_start, __bss_stop, NULL, 1);
-
-#ifdef CONFIG_SMP
- /* per-cpu sections scanning */
- for_each_possible_cpu(i)
- scan_block(__per_cpu_start + per_cpu_offset(i),
- __per_cpu_end + per_cpu_offset(i), NULL, 1);
-#endif
-
- /*
- * Struct page scanning for each node.
- */
- lock_memory_hotplug();
- for_each_online_node(i) {
- pg_data_t *pgdat = NODE_DATA(i);
- unsigned long start_pfn = pgdat->node_start_pfn;
- unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn++) {
- struct page *page;
-
- if (!pfn_valid(pfn))
- continue;
- page = pfn_to_page(pfn);
- /* only scan if page is in use */
- if (page_count(page) == 0)
- continue;
- scan_block(page, page + 1, NULL, 1);
- }
- }
- unlock_memory_hotplug();
-
- /*
- * Scanning the task stacks (may introduce false negatives).
- */
- if (kmemleak_stack_scan) {
- struct task_struct *p, *g;
-
- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
- scan_block(task_stack_page(p), task_stack_page(p) +
- THREAD_SIZE, NULL, 0);
- } while_each_thread(g, p);
- read_unlock(&tasklist_lock);
- }
-
- /*
- * Scan the objects already referenced from the sections scanned
- * above.
- */
- scan_gray_list();
-
- /*
- * Check for new or unreferenced objects modified since the previous
- * scan and color them gray until the next scan.
- */
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
- if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
- && update_checksum(object) && get_object(object)) {
- /* color it gray temporarily */
- object->count = object->min_count;
- list_add_tail(&object->gray_list, &gray_list);
- }
- spin_unlock_irqrestore(&object->lock, flags);
- }
- rcu_read_unlock();
-
- /*
- * Re-scan the gray list for modified unreferenced objects.
- */
- scan_gray_list();
-
- /*
- * If scanning was stopped do not report any new unreferenced objects.
- */
- if (scan_should_stop())
- return;
-
- /*
- * Scanning result reporting.
- */
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
- if (unreferenced_object(object) &&
- !(object->flags & OBJECT_REPORTED)) {
- object->flags |= OBJECT_REPORTED;
- new_leaks++;
- }
- spin_unlock_irqrestore(&object->lock, flags);
- }
- rcu_read_unlock();
-
- if (new_leaks)
- pr_info("%d new suspected memory leaks (see "
- "/sys/kernel/debug/kmemleak)\n", new_leaks);
-
-}
-
-/*
- * Thread function performing automatic memory scanning. Unreferenced objects
- * at the end of a memory scan are reported but only the first time.
- */
-static int kmemleak_scan_thread(void *arg)
-{
- static int first_run = 1;
-
- pr_info("Automatic memory scanning thread started\n");
- set_user_nice(current, 10);
-
- /*
- * Wait before the first scan to allow the system to fully initialize.
- */
- if (first_run) {
- first_run = 0;
- ssleep(SECS_FIRST_SCAN);
- }
-
- while (!kthread_should_stop()) {
- signed long timeout = jiffies_scan_wait;
-
- mutex_lock(&scan_mutex);
- kmemleak_scan();
- mutex_unlock(&scan_mutex);
-
- /* wait before the next scan */
- while (timeout && !kthread_should_stop())
- timeout = schedule_timeout_interruptible(timeout);
- }
-
- pr_info("Automatic memory scanning thread ended\n");
-
- return 0;
-}
-
-/*
- * Start the automatic memory scanning thread. This function must be called
- * with the scan_mutex held.
- */
-static void start_scan_thread(void)
-{
- if (scan_thread)
- return;
- scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
- if (IS_ERR(scan_thread)) {
- pr_warning("Failed to create the scan thread\n");
- scan_thread = NULL;
- }
-}
-
-/*
- * Stop the automatic memory scanning thread. This function must be called
- * with the scan_mutex held.
- */
-static void stop_scan_thread(void)
-{
- if (scan_thread) {
- kthread_stop(scan_thread);
- scan_thread = NULL;
- }
-}
-
-/*
- * Iterate over the object_list and return the first valid object at or after
- * the required position with its use_count incremented. The function triggers
- * a memory scanning when the pos argument points to the first position.
- */
-static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
-{
- struct kmemleak_object *object;
- loff_t n = *pos;
- int err;
-
- err = mutex_lock_interruptible(&scan_mutex);
- if (err < 0)
- return ERR_PTR(err);
-
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- if (n-- > 0)
- continue;
- if (get_object(object))
- goto out;
- }
- object = NULL;
-out:
- return object;
-}
-
-/*
- * Return the next object in the object_list. The function decrements the
- * use_count of the previous object and increases that of the next one.
- */
-static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- struct kmemleak_object *prev_obj = v;
- struct kmemleak_object *next_obj = NULL;
- struct list_head *n = &prev_obj->object_list;
-
- ++(*pos);
-
- list_for_each_continue_rcu(n, &object_list) {
- struct kmemleak_object *obj =
- list_entry(n, struct kmemleak_object, object_list);
- if (get_object(obj)) {
- next_obj = obj;
- break;
- }
- }
-
- put_object(prev_obj);
- return next_obj;
-}
-
-/*
- * Decrement the use_count of the last object required, if any.
- */
-static void kmemleak_seq_stop(struct seq_file *seq, void *v)
-{
- if (!IS_ERR(v)) {
- /*
- * kmemleak_seq_start may return ERR_PTR if the scan_mutex
- * waiting was interrupted, so only release it if !IS_ERR.
- */
- rcu_read_unlock();
- mutex_unlock(&scan_mutex);
- if (v)
- put_object(v);
- }
-}
-
-/*
- * Print the information for an unreferenced object to the seq file.
- */
-static int kmemleak_seq_show(struct seq_file *seq, void *v)
-{
- struct kmemleak_object *object = v;
- unsigned long flags;
-
- spin_lock_irqsave(&object->lock, flags);
- if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
- print_unreferenced(seq, object);
- spin_unlock_irqrestore(&object->lock, flags);
- return 0;
-}
-
-static const struct seq_operations kmemleak_seq_ops = {
- .start = kmemleak_seq_start,
- .next = kmemleak_seq_next,
- .stop = kmemleak_seq_stop,
- .show = kmemleak_seq_show,
-};
-
-static int kmemleak_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &kmemleak_seq_ops);
-}
-
-static int kmemleak_release(struct inode *inode, struct file *file)
-{
- return seq_release(inode, file);
-}
-
-static int dump_str_object_info(const char *str)
-{
- unsigned long flags;
- struct kmemleak_object *object;
- unsigned long addr;
-
- addr= simple_strtoul(str, NULL, 0);
- object = find_and_get_object(addr, 0);
- if (!object) {
- pr_info("Unknown object at 0x%08lx\n", addr);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&object->lock, flags);
- dump_object_info(object);
- spin_unlock_irqrestore(&object->lock, flags);
-
- put_object(object);
- return 0;
-}
-
-/*
- * We use grey instead of black to ensure we can do future scans on the same
- * objects. If we did not do future scans these black objects could
- * potentially contain references to newly allocated objects in the future and
- * we'd end up with false positives.
- */
-static void kmemleak_clear(void)
-{
- struct kmemleak_object *object;
- unsigned long flags;
-
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- spin_lock_irqsave(&object->lock, flags);
- if ((object->flags & OBJECT_REPORTED) &&
- unreferenced_object(object))
- __paint_it(object, KMEMLEAK_GREY);
- spin_unlock_irqrestore(&object->lock, flags);
- }
- rcu_read_unlock();
-}
-
-/*
- * File write operation to configure kmemleak at run-time. The following
- * commands can be written to the /sys/kernel/debug/kmemleak file:
- * off - disable kmemleak (irreversible)
- * stack=on - enable the task stacks scanning
- * stack=off - disable the tasks stacks scanning
- * scan=on - start the automatic memory scanning thread
- * scan=off - stop the automatic memory scanning thread
- * scan=... - set the automatic memory scanning period in seconds (0 to
- * disable it)
- * scan - trigger a memory scan
- * clear - mark all current reported unreferenced kmemleak objects as
- * grey to ignore printing them
- * dump=... - dump information about the object found at the given address
- */
-static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
- size_t size, loff_t *ppos)
-{
- char buf[64];
- int buf_size;
- int ret;
-
- if (!atomic_read(&kmemleak_enabled))
- return -EBUSY;
-
- buf_size = min(size, (sizeof(buf) - 1));
- if (strncpy_from_user(buf, user_buf, buf_size) < 0)
- return -EFAULT;
- buf[buf_size] = 0;
-
- ret = mutex_lock_interruptible(&scan_mutex);
- if (ret < 0)
- return ret;
-
- if (strncmp(buf, "off", 3) == 0)
- kmemleak_disable();
- else if (strncmp(buf, "stack=on", 8) == 0)
- kmemleak_stack_scan = 1;
- else if (strncmp(buf, "stack=off", 9) == 0)
- kmemleak_stack_scan = 0;
- else if (strncmp(buf, "scan=on", 7) == 0)
- start_scan_thread();
- else if (strncmp(buf, "scan=off", 8) == 0)
- stop_scan_thread();
- else if (strncmp(buf, "scan=", 5) == 0) {
- unsigned long secs;
-
- ret = strict_strtoul(buf + 5, 0, &secs);
- if (ret < 0)
- goto out;
- stop_scan_thread();
- if (secs) {
- jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
- start_scan_thread();
- }
- } else if (strncmp(buf, "scan", 4) == 0)
- kmemleak_scan();
- else if (strncmp(buf, "clear", 5) == 0)
- kmemleak_clear();
- else if (strncmp(buf, "dump=", 5) == 0)
- ret = dump_str_object_info(buf + 5);
- else
- ret = -EINVAL;
-
-out:
- mutex_unlock(&scan_mutex);
- if (ret < 0)
- return ret;
-
- /* ignore the rest of the buffer, only one command at a time */
- *ppos += size;
- return size;
-}
-
-static const struct file_operations kmemleak_fops = {
- .owner = THIS_MODULE,
- .open = kmemleak_open,
- .read = seq_read,
- .write = kmemleak_write,
- .llseek = seq_lseek,
- .release = kmemleak_release,
-};
-
-/*
- * Stop the memory scanning thread and free the kmemleak internal objects if
- * no previous scan thread (otherwise, kmemleak may still have some useful
- * information on memory leaks).
- */
-static void kmemleak_do_cleanup(struct work_struct *work)
-{
- struct kmemleak_object *object;
- bool cleanup = scan_thread == NULL;
-
- mutex_lock(&scan_mutex);
- stop_scan_thread();
-
- /*
- * Once the scan thread has stopped, it is safe to no longer track
- * object freeing. Ordering of the scan thread stopping and the memory
- * accesses below is guaranteed by the kthread_stop() function.
- */
- kmemleak_free_enabled = 0;
-
- if (cleanup) {
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list)
- delete_object_full(object->pointer);
- rcu_read_unlock();
- }
- mutex_unlock(&scan_mutex);
-}
-
-static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
-
-/*
- * Disable kmemleak. No memory allocation/freeing will be traced once this
- * function is called. Disabling kmemleak is an irreversible operation.
- */
-static void kmemleak_disable(void)
-{
- /* atomically check whether it was already invoked */
- if (atomic_cmpxchg(&kmemleak_error, 0, 1))
- return;
-
- /* stop any memory operation tracing */
- atomic_set(&kmemleak_enabled, 0);
-
- /* check whether it is too early for a kernel thread */
- if (atomic_read(&kmemleak_initialized))
- schedule_work(&cleanup_work);
- else
- kmemleak_free_enabled = 0;
-
- pr_info("Kernel memory leak detector disabled\n");
-}
-
-/*
- * Allow boot-time kmemleak disabling (enabled by default).
- */
-static int kmemleak_boot_config(char *str)
-{
- if (!str)
- return -EINVAL;
- if (strcmp(str, "off") == 0)
- kmemleak_disable();
- else if (strcmp(str, "on") == 0)
- kmemleak_skip_disable = 1;
- else
- return -EINVAL;
- return 0;
-}
-early_param("kmemleak", kmemleak_boot_config);
-
-static void __init print_log_trace(struct early_log *log)
-{
- struct stack_trace trace;
-
- trace.nr_entries = log->trace_len;
- trace.entries = log->trace;
-
- pr_notice("Early log backtrace:\n");
- print_stack_trace(&trace, 2);
-}
-
-/*
- * Kmemleak initialization.
- */
-void __init kmemleak_init(void)
-{
- int i;
- unsigned long flags;
-
-#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
- if (!kmemleak_skip_disable) {
- atomic_set(&kmemleak_early_log, 0);
- kmemleak_disable();
- return;
- }
-#endif
-
- jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
- jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
-
- object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
- scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
- INIT_PRIO_TREE_ROOT(&object_tree_root);
-
- if (crt_early_log >= ARRAY_SIZE(early_log))
- pr_warning("Early log buffer exceeded (%d), please increase "
- "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
-
- /* the kernel is still in UP mode, so disabling the IRQs is enough */
- local_irq_save(flags);
- atomic_set(&kmemleak_early_log, 0);
- if (atomic_read(&kmemleak_error)) {
- local_irq_restore(flags);
- return;
- } else {
- atomic_set(&kmemleak_enabled, 1);
- kmemleak_free_enabled = 1;
- }
- local_irq_restore(flags);
-
- /*
- * This is the point where tracking allocations is safe. Automatic
- * scanning is started during the late initcall. Add the early logged
- * callbacks to the kmemleak infrastructure.
- */
- for (i = 0; i < crt_early_log; i++) {
- struct early_log *log = &early_log[i];
-
- switch (log->op_type) {
- case KMEMLEAK_ALLOC:
- early_alloc(log);
- break;
- case KMEMLEAK_ALLOC_PERCPU:
- early_alloc_percpu(log);
- break;
- case KMEMLEAK_FREE:
- kmemleak_free(log->ptr);
- break;
- case KMEMLEAK_FREE_PART:
- kmemleak_free_part(log->ptr, log->size);
- break;
- case KMEMLEAK_FREE_PERCPU:
- kmemleak_free_percpu(log->ptr);
- break;
- case KMEMLEAK_NOT_LEAK:
- kmemleak_not_leak(log->ptr);
- break;
- case KMEMLEAK_IGNORE:
- kmemleak_ignore(log->ptr);
- break;
- case KMEMLEAK_SCAN_AREA:
- kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
- break;
- case KMEMLEAK_NO_SCAN:
- kmemleak_no_scan(log->ptr);
- break;
- default:
- kmemleak_warn("Unknown early log operation: %d\n",
- log->op_type);
- }
-
- if (atomic_read(&kmemleak_warning)) {
- print_log_trace(log);
- atomic_set(&kmemleak_warning, 0);
- }
- }
-}
-
-/*
- * Late initialization function.
- */
-static int __init kmemleak_late_init(void)
-{
- struct dentry *dentry;
-
- atomic_set(&kmemleak_initialized, 1);
-
- if (atomic_read(&kmemleak_error)) {
- /*
- * Some error occurred and kmemleak was disabled. There is a
- * small chance that kmemleak_disable() was called immediately
- * after setting kmemleak_initialized and we may end up with
- * two clean-up threads but serialized by scan_mutex.
- */
- schedule_work(&cleanup_work);
- return -ENOMEM;
- }
-
- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
- &kmemleak_fops);
- if (!dentry)
- pr_warning("Failed to create the debugfs kmemleak file\n");
- mutex_lock(&scan_mutex);
- start_scan_thread();
- mutex_unlock(&scan_mutex);
-
- pr_info("Kernel memory leak detector initialized\n");
-
- return 0;
-}
-late_initcall(kmemleak_late_init);
diff --git a/ap/os/linux/linux-3.4.x/mm/mmap.c b/ap/os/linux/linux-3.4.x/mm/mmap.c
old mode 100644
new mode 100755
index cb6456d..88d133b
--- a/ap/os/linux/linux-3.4.x/mm/mmap.c
+++ b/ap/os/linux/linux-3.4.x/mm/mmap.c
@@ -39,6 +39,19 @@
#include "internal.h"
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+#include <../ipc/shm_ctrl.h>
+extern void shm_mmap_pagetable(struct vm_area_struct *vma, struct file *file);
+extern void shm_unmap_page_range(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end);
+#define kenter(FMT, ...) \
+ no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+ no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+ no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
+#endif
+
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) (0)
#endif
@@ -1331,9 +1344,9 @@
/*
* Can we just expand an old mapping?
*/
- vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
- if (vma)
- goto out;
+ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+ if (vma)
+ goto out;
/*
* Determine the object being mapped and call the appropriate
@@ -1420,6 +1433,14 @@
mm->locked_vm += (len >> PAGE_SHIFT);
} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
make_pages_present(addr, addr + len);
+
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+ /*Get real phy pgae*/
+ if (file && (file->f_flags == SHM_REMOTE_ATTR_YES))
+ {
+ shm_mmap_pagetable(vma, file);
+ }
+#endif
return addr;
unmap_and_free_vma:
@@ -2125,6 +2146,138 @@
return __split_vma(mm, vma, addr, new_below);
}
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+/*
+ * delete a VMA from its owning mm_struct and address space
+ */
+static void shm_delete_vma_from_mm(struct vm_area_struct *vma)
+{
+ struct address_space *mapping;
+ struct mm_struct *mm = vma->vm_mm;
+
+ mm->map_count--;
+ if (mm->mmap_cache == vma)
+ mm->mmap_cache = NULL;
+
+ /* remove the VMA from the mapping */
+ if (vma->vm_file) {
+ mapping = vma->vm_file->f_mapping;
+
+ mutex_lock(&mapping->i_mmap_mutex);
+ flush_dcache_mmap_lock(mapping);
+ vma_prio_tree_remove(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+ mutex_unlock(&mapping->i_mmap_mutex);
+ }
+
+ /* remove from the MM's tree and list */
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+
+ if (vma->vm_prev)
+ vma->vm_prev->vm_next = vma->vm_next;
+ else
+ mm->mmap = vma->vm_next;
+
+ if (vma->vm_next)
+ vma->vm_next->vm_prev = vma->vm_prev;
+}
+
+/*
+ * destroy a VMA record
+ */
+static void shm_delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ if (vma->vm_file) {
+ fput(vma->vm_file);
+ if (vma->vm_flags & VM_EXECUTABLE)
+ removed_exe_file_vma(mm);
+ }
+ mpol_put(vma_policy(vma));
+ kmem_cache_free(vm_area_cachep, vma);
+}
+
+/*
+ * release a mapping
+ * - the chunk to be unmapped must be backed by a single
+ * VMA, though it need not cover the whole VMA
+ */
+int shm_ctrl_do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+{
+ int ret = 0;
+ struct vm_area_struct *vma;
+ unsigned long end;
+
+ len = PAGE_ALIGN(len);
+ if (len == 0)
+ return -EINVAL;
+
+ end = start + len;
+
+ /* find the first potentially overlapping VMA */
+ vma = find_vma(mm, start);
+ if (!vma) {
+ static int limit = 0;
+ if (limit < 5) {
+ printk(KERN_WARNING
+ "munmap of memory not mmapped by process %d"
+ " (%s): 0x%lx-0x%lx\n",
+ current->pid, current->comm,
+ start, start + len - 1);
+ limit++;
+ }
+ return -EINVAL;
+ }
+
+ /* we're allowed to split an anonymous VMA but not a file-backed one */
+ if (vma->vm_file) {
+ do {
+ if (start > vma->vm_start) {
+ kleave(" = -EINVAL [miss]");
+ return -EINVAL;
+ }
+ if (end == vma->vm_end)
+ goto erase_whole_vma;
+ vma = vma->vm_next;
+ } while (vma);
+ kleave(" = -EINVAL [split file]");
+ return -EINVAL;
+ } else {
+ /* the chunk must be a subset of the VMA found */
+ if (start == vma->vm_start && end == vma->vm_end)
+ goto erase_whole_vma;
+ if (start < vma->vm_start || end > vma->vm_end) {
+ kleave(" = -EINVAL [superset]");
+ return -EINVAL;
+ }
+ if (start & ~PAGE_MASK) {
+ kleave(" = -EINVAL [unaligned start]");
+ return -EINVAL;
+ }
+ if (end != vma->vm_end && end & ~PAGE_MASK) {
+ kleave(" = -EINVAL [unaligned split]");
+ return -EINVAL;
+ }
+ if (start != vma->vm_start && end != vma->vm_end) {
+ ret = split_vma(mm, vma, start, 1);
+ if (ret < 0) {
+ kleave(" = %d [split]", ret);
+ return ret;
+ }
+ }
+ return ret;
+ }
+
+erase_whole_vma:
+ shm_unmap_page_range(mm, vma, start, end);
+ shm_delete_vma_from_mm(vma);
+ shm_delete_vma(mm, vma);
+ return 0;
+}
+EXPORT_SYMBOL(shm_ctrl_do_munmap);
+#endif
+
/* Munmap is split into 2 main parts -- this part which finds
* what needs doing, and the areas themselves, which do the
* work. This now handles partial unmappings.
@@ -2185,6 +2338,13 @@
return error;
}
vma = prev? prev->vm_next: mm->mmap;
+
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+ if (vma->vm_file && (vma->vm_file->f_flags == SHM_REMOTE_ATTR_YES)) {
+ shm_ctrl_do_munmap(mm, start, len);
+ return 0;
+ }
+#endif
/*
* unlock any mlock()ed ranges before detaching vmas
@@ -2361,6 +2521,22 @@
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+ struct vm_area_struct *vma_shm;
+
+ vma_shm = mm->mmap;
+ while (vma_shm) {
+ if ((vma_shm->vm_file) &&
+ (vma_shm->vm_file->f_flags == SHM_REMOTE_ATTR_YES)) {
+ vma = vma_shm->vm_next;
+ shm_ctrl_do_munmap(mm, vma_shm->vm_start, (vma_shm->vm_end - vma_shm->vm_start));
+ vma_shm = vma;
+ continue;
+ }
+ else
+ vma_shm = vma_shm->vm_next;
+ }
+#endif
/* mm's last user has gone, and its about to be pulled down */
mmu_notifier_release(mm);
diff --git a/ap/os/linux/linux-3.4.x/mm/slob.c b/ap/os/linux/linux-3.4.x/mm/slob.c
index 5e6b4d7..f26c8d4 100755
--- a/ap/os/linux/linux-3.4.x/mm/slob.c
+++ b/ap/os/linux/linux-3.4.x/mm/slob.c
@@ -898,8 +898,16 @@
}else
panic("mem out!!");
slob_free_general(mem, sp);
- } else
+ } else {
+ struct page *page;
+ unsigned int order;
+ page = &sp->page;
+ order = get_order(page->private);
+ raw_spin_lock_irqsave(&g_slob_kmalloc_spin_lock, flags);
+ g_slob_kmalloc_pages -= (1 << order);
+ raw_spin_unlock_irqrestore(&g_slob_kmalloc_spin_lock, flags);
put_page(&sp->page);
+ }
#else
sp = slob_page(block);
if (is_slob_page(sp)) {
diff --git a/ap/os/linux/linux-3.4.x/net/core/ethtool.c b/ap/os/linux/linux-3.4.x/net/core/ethtool.c
index 7becb3f..e7680c9 100644
--- a/ap/os/linux/linux-3.4.x/net/core/ethtool.c
+++ b/ap/os/linux/linux-3.4.x/net/core/ethtool.c
@@ -691,10 +691,14 @@
static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct ethtool_wolinfo wol;
if (!dev->ethtool_ops->get_wol)
return -EOPNOTSUPP;
+
+ //CVE-2014-9900
+ memset(&wol, 0, sizeof(struct ethtool_wolinfo));
+ wol.cmd = ETHTOOL_GWOL;
dev->ethtool_ops->get_wol(dev, &wol);
diff --git a/ap/os/linux/linux-3.4.x/net/core/fastproc/fast_common.c b/ap/os/linux/linux-3.4.x/net/core/fastproc/fast_common.c
index 0f45d4a..d3f740d 100755
--- a/ap/os/linux/linux-3.4.x/net/core/fastproc/fast_common.c
+++ b/ap/os/linux/linux-3.4.x/net/core/fastproc/fast_common.c
@@ -1809,7 +1809,7 @@
return 0;
}
- if (skb->nfct_reasm) {
+ if (skb->nfct_reasm && printk_ratelimit()) {
printk("fast6_fw reasm \n");
return 0;
}
diff --git a/ap/os/linux/linux-3.4.x/net/core/net-sysfs.c b/ap/os/linux/linux-3.4.x/net/core/net-sysfs.c
index 8ca2580..eb6e8eb 100644
--- a/ap/os/linux/linux-3.4.x/net/core/net-sysfs.c
+++ b/ap/os/linux/linux-3.4.x/net/core/net-sysfs.c
@@ -765,9 +765,9 @@
kobject_put(kobj);
return error;
}
+ dev_hold(queue->dev); //CVE-2019-20811
kobject_uevent(kobj, KOBJ_ADD);
- dev_hold(queue->dev);
return error;
}
@@ -1250,6 +1250,8 @@
if (error)
goto exit;
+ dev_hold(queue->dev); //CVE-2019-20811
+
#ifdef CONFIG_BQL
error = sysfs_create_group(kobj, &dql_group);
if (error)
@@ -1265,7 +1267,6 @@
#else
kobject_uevent(kobj, KOBJ_ADD);
#endif
- dev_hold(queue->dev);
return 0;
exit:
diff --git a/ap/os/linux/linux-3.4.x/net/core/net_namespace.c b/ap/os/linux/linux-3.4.x/net/core/net_namespace.c
index dd00b71..57e1b68 100644
--- a/ap/os/linux/linux-3.4.x/net/core/net_namespace.c
+++ b/ap/os/linux/linux-3.4.x/net/core/net_namespace.c
@@ -152,6 +152,7 @@
atomic_set(&net->count, 1);
atomic_set(&net->passive, 1);
+ get_random_bytes(&net->hash_mix, sizeof(u32));//BDSA-2019-2065
net->dev_base_seq = 1;
#ifdef NETNS_REFCNT_DEBUG
diff --git a/ap/os/linux/linux-3.4.x/net/core/sock.c b/ap/os/linux/linux-3.4.x/net/core/sock.c
index 4eba6a0..3204c53 100755
--- a/ap/os/linux/linux-3.4.x/net/core/sock.c
+++ b/ap/os/linux/linux-3.4.x/net/core/sock.c
@@ -583,23 +583,15 @@
break;
case SO_SNDBUF:
/* Don't error on this BSD doesn't and if you think
- about it this is right. Otherwise apps have to
- play 'guess the biggest size' games. RCVBUF/SNDBUF
- are treated in BSD as hints */
-
- if (val > sysctl_wmem_max)
- val = sysctl_wmem_max;
+ * about it this is right. Otherwise apps have to
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+ val = min_t(u32, val, sysctl_wmem_max); //CVE-2012-6704
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- if ((val * 2) < SOCK_MIN_SNDBUF)
- sk->sk_sndbuf = SOCK_MIN_SNDBUF;
- else
- sk->sk_sndbuf = val * 2;
-
- /*
- * Wake up sending tasks if we
- * upped the value.
- */
+ sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); //CVE-2012-6704
+ /* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
@@ -612,12 +604,11 @@
case SO_RCVBUF:
/* Don't error on this BSD doesn't and if you think
- about it this is right. Otherwise apps have to
- play 'guess the biggest size' games. RCVBUF/SNDBUF
- are treated in BSD as hints */
-
- if (val > sysctl_rmem_max)
- val = sysctl_rmem_max;
+ * about it this is right. Otherwise apps have to
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+ val = min_t(u32, val, sysctl_rmem_max); //CVE-2012-6704
set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
@@ -635,10 +626,7 @@
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
- if ((val * 2) < SOCK_MIN_RCVBUF)
- sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
- else
- sk->sk_rcvbuf = val * 2;
+ sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); //CVE-2012-6704
break;
case SO_RCVBUFFORCE:
@@ -987,7 +975,7 @@
break;
case SO_PASSCRED:
- v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
+ v.val = !!test_bit(SOCK_PASSCRED, &sock->flags); //CVE-2012-6704
break;
case SO_PEERCRED:
@@ -1023,7 +1011,7 @@
break;
case SO_PASSSEC:
- v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
+ v.val = !!test_bit(SOCK_PASSSEC, &sock->flags); //CVE-2012-6704
break;
case SO_PEERSEC:
@@ -1323,6 +1311,8 @@
sock_copy(newsk, sk);
+ newsk->sk_prot_creator = sk->sk_prot; //BDSA-2017-3696
+
/* SANITY */
get_net(sock_net(newsk));
sk_node_init(&newsk->sk_node);
diff --git a/ap/os/linux/linux-3.4.x/net/core/speed_pool_dl.c b/ap/os/linux/linux-3.4.x/net/core/speed_pool_dl.c
index d761b6a..d20418c 100755
--- a/ap/os/linux/linux-3.4.x/net/core/speed_pool_dl.c
+++ b/ap/os/linux/linux-3.4.x/net/core/speed_pool_dl.c
@@ -15,10 +15,8 @@
typedef unsigned long skb_addr_t;
#define SKB_POOL_AUTO_EXTEND
-#define SKB_SYS_POOL_LARGE_NR (32U)
-#define SKB_SYS_POOL_NR ( SKB_SYS_POOL_LARGE_NR)
-#define SKB_SYS_POOL_BATCH_NR (100U)
-#define SKB_SYS_POOL_MAX_NR (1800U)
+#define SKB_SYS_POOL_NR (32U)
+#define SKB_SYS_POOL_BATCH_NR (100U)
#define SKB_SYS_POOL_0_SIZE (256U) /*skb*/
@@ -74,9 +72,9 @@
#define SKB_SYS_POOL_8_PATCH_NR (1)
#define SKB_SYS_POOL_8_RESERVE_NR (0)
-#define test_bit(nr, val) ((val) & (1UL<<(nr)))
-#define set_bit(nr, val) ((val) |= (1UL<<(nr)))
-#define clear_bit(nr, val) ((val) &= ~(1UL<<(nr)))
+#define pool_test_bit(nr, val) ((val) & (1UL<<(nr)))
+#define pool_set_bit(nr, val) ((val) |= (1UL<<(nr)))
+#define pool_clear_bit(nr, val) ((val) &= ~(1UL<<(nr)))
#define array_start(a) (&(a)[0])
#define array_nr(a) (sizeof(a) / sizeof((a)[0]))
@@ -134,14 +132,14 @@
struct skb_pool_node_impl_t
{
skb_pool_node_impl_t *free_next;
-#if _USE_VEHICLE_DC
+#ifdef _USE_VEHICLE_DC
u32 padding[15];//for cacheline
#endif
#ifdef SKB_DBG_POOL
struct list_head alloc_node;
const char *file;
- unsigned long tick;
- u32 line;
+ unsigned long tick;
+ u32 line;
skb_addr_t *magic_top;
skb_addr_t magic_bottom[SKB_POOL_MAGIC_NR];
#endif
@@ -150,11 +148,11 @@
typedef struct
{
skb_pool_node_impl_t *free_head;
- size_t obj_size;
- skb_count_t obj_cur_nr;
+ size_t obj_size;
+ skb_count_t obj_cur_nr; // currently available
//#ifdef SKB_POOL_AUTO_EXTEND
- skb_count_t obj_nr;
+ skb_count_t obj_nr; // total
// skb_count_t obj_max_nr;
skb_count_t obj_batch_nr;
skb_count_t obj_reserve_nr;
@@ -163,15 +161,12 @@
#ifdef SKB_DBG_POOL
struct list_head alloc_head;
- skb_count_t obj_cnt;
- skb_count_t obj_max_used_cnt;
+ skb_count_t obj_used_cnt; // currently in use (obj_nr - obj_cur_nr)
+ skb_count_t obj_max_used_cnt; // max used (peak value)
#endif
#ifdef SKB_TRACE
- size_t obj_real_size[2048];
- size_t obj_real_size_cnt;
- skb_count_t obj_used_cnt;
- skb_count_t obj_max_used_cnt;
+ size_t obj_real_size[2048];
#endif
} skb_pool_inner_t;
@@ -205,9 +200,8 @@
static unsigned long skb_sys_pool_bitmap;
static skb_pool_inner_t *skb_sys_pool_inner[SKB_SYS_POOL_NR];
-static skb_count_t skb_sys_pool_large_nr;
-static skb_count_t skb_sys_pool_large_dl_nr;
-static size_t skb_sys_pool_large_size[SKB_SYS_POOL_LARGE_NR];
+static skb_count_t skb_sys_pool_nr;
+static size_t skb_sys_pool_sizes[SKB_SYS_POOL_NR];
#if (SKB_SYS_POOL_0_NR > 0)
static u8 skb_sys_pool_0[skb_pool_size(SKB_SYS_POOL_0_SIZE, SKB_SYS_POOL_0_NR)] skb_align_data;
@@ -245,107 +239,15 @@
static u8 skb_sys_pool_8[skb_pool_size(SKB_SYS_POOL_8_SIZE, SKB_SYS_POOL_8_NR)] skb_align_data;
#endif
-#if (SKB_SYS_POOL_9_NR > 0)
- static u8 skb_sys_pool_9[skb_pool_size(SKB_SYS_POOL_9_SIZE, SKB_SYS_POOL_9_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_10_NR > 0)
- static u8 skb_sys_pool_10[skb_pool_size(SKB_SYS_POOL_10_SIZE, SKB_SYS_POOL_10_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_11_NR > 0)
- static u8 skb_sys_pool_11[skb_pool_size(SKB_SYS_POOL_11_SIZE, SKB_SYS_POOL_11_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_12_NR > 0)
- static u8 skb_sys_pool_12[skb_pool_size(SKB_SYS_POOL_12_SIZE, SKB_SYS_POOL_12_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_13_NR > 0)
- static u8 skb_sys_pool_13[skb_pool_size(SKB_SYS_POOL_13_SIZE, SKB_SYS_POOL_13_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_14_NR > 0)
- static u8 skb_sys_pool_14[skb_pool_size(SKB_SYS_POOL_14_SIZE, SKB_SYS_POOL_14_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_15_NR > 0)
- static u8 skb_sys_pool_15[skb_pool_size(SKB_SYS_POOL_15_SIZE, SKB_SYS_POOL_15_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_16_NR > 0)
- static u8 skb_sys_pool_16[skb_pool_size(SKB_SYS_POOL_16_SIZE, SKB_SYS_POOL_16_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_17_NR > 0)
- static u8 skb_sys_pool_17[skb_pool_size(SKB_SYS_POOL_17_SIZE, SKB_SYS_POOL_17_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_18_NR > 0)
- static u8 skb_sys_pool_18[skb_pool_size(SKB_SYS_POOL_18_SIZE, SKB_SYS_POOL_18_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_19_NR > 0)
- static u8 skb_sys_pool_19[skb_pool_size(SKB_SYS_POOL_19_SIZE, SKB_SYS_POOL_19_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_20_NR > 0)
- static u8 skb_sys_pool_20[skb_pool_size(SKB_SYS_POOL_20_SIZE, SKB_SYS_POOL_20_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_21_NR > 0)
- static u8 skb_sys_pool_21[skb_pool_size(SKB_SYS_POOL_21_SIZE, SKB_SYS_POOL_21_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_22_NR > 0)
- static u8 skb_sys_pool_22[skb_pool_size(SKB_SYS_POOL_22_SIZE, SKB_SYS_POOL_22_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_23_NR > 0)
- static u8 skb_sys_pool_23[skb_pool_size(SKB_SYS_POOL_23_SIZE, SKB_SYS_POOL_23_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_24_NR > 0)
- static u8 skb_sys_pool_24[skb_pool_size(SKB_SYS_POOL_24_SIZE, SKB_SYS_POOL_24_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_25_NR > 0)
- static u8 skb_sys_pool_25[skb_pool_size(SKB_SYS_POOL_25_SIZE, SKB_SYS_POOL_25_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_26_NR > 0)
- static u8 skb_sys_pool_26[skb_pool_size(SKB_SYS_POOL_26_SIZE, SKB_SYS_POOL_26_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_27_NR > 0)
- static u8 skb_sys_pool_27[skb_pool_size(SKB_SYS_POOL_27_SIZE, SKB_SYS_POOL_27_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_28_NR > 0)
- static u8 skb_sys_pool_28[skb_pool_size(SKB_SYS_POOL_28_SIZE, SKB_SYS_POOL_28_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_29_NR > 0)
- static u8 skb_sys_pool_29[skb_pool_size(SKB_SYS_POOL_29_SIZE, SKB_SYS_POOL_29_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_30_NR > 0)
- static u8 skb_sys_pool_30[skb_pool_size(SKB_SYS_POOL_30_SIZE, SKB_SYS_POOL_30_NR)] skb_align_data;
-#endif
-
-#if (SKB_SYS_POOL_31_NR > 0)
- static u8 skb_sys_pool_31[skb_pool_size(SKB_SYS_POOL_31_SIZE, SKB_SYS_POOL_31_NR)] skb_align_data;
-#endif
-
- static struct skb_pool_config_t skb_sys_pool_config[] =
+static struct skb_pool_config_t skb_sys_pool_config[] =
{
#if (SKB_SYS_POOL_0_NR > 0)
{
(void *)array_start(skb_sys_pool_0), /* base address */
SKB_SYS_POOL_0_SIZE, /* object size */
SKB_SYS_POOL_0_NR, /* object number */
- SKB_SYS_POOL_0_PATCH_NR,
- SKB_SYS_POOL_0_RESERVE_NR,
+ SKB_SYS_POOL_0_PATCH_NR,
+ SKB_SYS_POOL_0_RESERVE_NR,
},
#endif
@@ -354,8 +256,8 @@
(void *)array_start(skb_sys_pool_1), /* base address */
SKB_SYS_POOL_1_SIZE, /* object size */
SKB_SYS_POOL_1_NR, /* object number */
- SKB_SYS_POOL_1_PATCH_NR,
- SKB_SYS_POOL_1_RESERVE_NR,
+ SKB_SYS_POOL_1_PATCH_NR,
+ SKB_SYS_POOL_1_RESERVE_NR,
},
#endif
@@ -364,8 +266,8 @@
(void *)array_start(skb_sys_pool_2), /* base address */
SKB_SYS_POOL_2_SIZE, /* object size */
SKB_SYS_POOL_2_NR, /* object number */
- SKB_SYS_POOL_2_PATCH_NR,
- SKB_SYS_POOL_2_RESERVE_NR,
+ SKB_SYS_POOL_2_PATCH_NR,
+ SKB_SYS_POOL_2_RESERVE_NR,
},
#endif
@@ -384,8 +286,8 @@
(void *)array_start(skb_sys_pool_4), /* base address */
SKB_SYS_POOL_4_SIZE, /* object size */
SKB_SYS_POOL_4_NR, /* object number */
- SKB_SYS_POOL_4_PATCH_NR,
- SKB_SYS_POOL_4_RESERVE_NR,
+ SKB_SYS_POOL_4_PATCH_NR,
+ SKB_SYS_POOL_4_RESERVE_NR,
},
#endif
@@ -414,8 +316,8 @@
(void *)array_start(skb_sys_pool_7), /* base address */
SKB_SYS_POOL_7_SIZE, /* object size */
SKB_SYS_POOL_7_NR, /* object number */
- SKB_SYS_POOL_7_PATCH_NR,
- SKB_SYS_POOL_7_RESERVE_NR,
+ SKB_SYS_POOL_7_PATCH_NR,
+ SKB_SYS_POOL_7_RESERVE_NR,
},
#endif
@@ -424,240 +326,8 @@
(void *)array_start(skb_sys_pool_8), /* base address */
SKB_SYS_POOL_8_SIZE, /* object size */
SKB_SYS_POOL_8_NR, /* object number */
- SKB_SYS_POOL_8_PATCH_NR,
- SKB_SYS_POOL_8_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_9_NR > 0)
- {
- (void *)array_start(skb_sys_pool_9), /* base address */
- SKB_SYS_POOL_9_SIZE, /* object size */
- SKB_SYS_POOL_9_NR, /* object number */
- SKB_SYS_POOL_9_PATCH_NR,
- SKB_SYS_POOL_9_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_10_NR > 0)
- {
- (void *)array_start(skb_sys_pool_10), /* base address */
- SKB_SYS_POOL_10_SIZE, /* object size */
- SKB_SYS_POOL_10_NR, /* object number */
- SKB_SYS_POOL_10_PATCH_NR,
- SKB_SYS_POOL_10_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_11_NR > 0)
- {
- (void *)array_start(skb_sys_pool_11), /* base address */
- SKB_SYS_POOL_11_SIZE, /* object size */
- SKB_SYS_POOL_11_NR, /* object number */
- SKB_SYS_POOL_11_PATCH_NR,
- SKB_SYS_POOL_11_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_12_NR > 0)
- {
- (void *)array_start(skb_sys_pool_12), /* base address */
- SKB_SYS_POOL_12_SIZE, /* object size */
- SKB_SYS_POOL_12_NR, /* object number */
- SKB_SYS_POOL_12_PATCH_NR,
- SKB_SYS_POOL_12_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_13_NR > 0)
- {
- (void *)array_start(skb_sys_pool_13), /* base address */
- SKB_SYS_POOL_13_SIZE, /* object size */
- SKB_SYS_POOL_13_NR, /* object number */
- SKB_SYS_POOL_13_PATCH_NR,
- SKB_SYS_POOL_13_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_14_NR > 0)
- {
- (void *)array_start(skb_sys_pool_14), /* base address */
- SKB_SYS_POOL_14_SIZE, /* object size */
- SKB_SYS_POOL_14_NR, /* object number */
- SKB_SYS_POOL_14_PATCH_NR,
- SKB_SYS_POOL_14_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_15_NR > 0)
- {
- (void *)array_start(skb_sys_pool_15), /* base address */
- SKB_SYS_POOL_15_SIZE, /* object size */
- SKB_SYS_POOL_15_NR, /* object number */
- SKB_SYS_POOL_15_PATCH_NR,
- SKB_SYS_POOL_15_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_16_NR > 0)
- {
- (void *)array_start(skb_sys_pool_16), /* base address */
- SKB_SYS_POOL_16_SIZE, /* object size */
- SKB_SYS_POOL_16_NR, /* object number */
- SKB_SYS_POOL_16_PATCH_NR,
- SKB_SYS_POOL_16_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_17_NR > 0)
- {
- (void *)array_start(skb_sys_pool_17), /* base address */
- SKB_SYS_POOL_17_SIZE, /* object size */
- SKB_SYS_POOL_17_NR, /* object number */
- SKB_SYS_POOL_17_PATCH_NR,
- SKB_SYS_POOL_17_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_18_NR > 0)
- {
- (void *)array_start(skb_sys_pool_18), /* base address */
- SKB_SYS_POOL_18_SIZE, /* object size */
- SKB_SYS_POOL_18_NR, /* object number */
- SKB_SYS_POOL_18_PATCH_NR,
- SKB_SYS_POOL_18_RESERVE_NR,
-
- },
-#endif
-
-#if (SKB_SYS_POOL_19_NR > 0)
- {
- (void *)array_start(skb_sys_pool_19), /* base address */
- SKB_SYS_POOL_19_SIZE, /* object size */
- SKB_SYS_POOL_19_NR, /* object number */
- SKB_SYS_POOL_19_PATCH_NR,
- SKB_SYS_POOL_19_RESERVE_NR,
-
- },
-#endif
-
-#if (SKB_SYS_POOL_20_NR > 0)
- {
- (void *)array_start(skb_sys_pool_20), /* base address */
- SKB_SYS_POOL_20_SIZE, /* object size */
- SKB_SYS_POOL_20_NR, /* object number */
- SKB_SYS_POOL_20_PATCH_NR,
- SKB_SYS_POOL_20_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_21_NR > 0)
- {
- (void *)array_start(skb_sys_pool_21), /* base address */
- SKB_SYS_POOL_21_SIZE, /* object size */
- SKB_SYS_POOL_21_NR, /* object number */
- SKB_SYS_POOL_21_PATCH_NR,
- SKB_SYS_POOL_21_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_22_NR > 0)
- {
- (void *)array_start(skb_sys_pool_22), /* base address */
- SKB_SYS_POOL_22_SIZE, /* object size */
- SKB_SYS_POOL_22_NR, /* object number */
- SKB_SYS_POOL_22_PATCH_NR,
- SKB_SYS_POOL_22_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_23_NR > 0)
- {
- (void *)array_start(skb_sys_pool_23), /* base address */
- SKB_SYS_POOL_23_SIZE, /* object size */
- SKB_SYS_POOL_23_NR, /* object number */
- SKB_SYS_POOL_23_PATCH_NR,
- SKB_SYS_POOL_23_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_24_NR > 0)
- {
- (void *)array_start(skb_sys_pool_24), /* base address */
- SKB_SYS_POOL_24_SIZE, /* object size */
- SKB_SYS_POOL_24_NR, /* object number */
- SKB_SYS_POOL_24_PATCH_NR,
- SKB_SYS_POOL_24_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_25_NR > 0)
- {
- (void *)array_start(skb_sys_pool_25), /* base address */
- SKB_SYS_POOL_25_SIZE, /* object size */
- SKB_SYS_POOL_25_NR, /* object number */
- SKB_SYS_POOL_25_PATCH_NR,
- SKB_SYS_POOL_25_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_26_NR > 0)
- {
- (void *)array_start(skb_sys_pool_26), /* base address */
- SKB_SYS_POOL_26_SIZE, /* object size */
- SKB_SYS_POOL_26_NR, /* object number */
- SKB_SYS_POOL_26_PATCH_NR,
- SKB_SYS_POOL_26_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_27_NR > 0)
- {
- (void *)array_start(skb_sys_pool_27), /* base address */
- SKB_SYS_POOL_27_SIZE, /* object size */
- SKB_SYS_POOL_27_NR, /* object number */
- SKB_SYS_POOL_27_PATCH_NR,
- SKB_SYS_POOL_27_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_28_NR > 0)
- {
- (void *)array_start(skb_sys_pool_28), /* base address */
- SKB_SYS_POOL_28_SIZE, /* object size */
- SKB_SYS_POOL_28_NR, /* object number */
- SKB_SYS_POOL_28_PATCH_NR,
- SKB_SYS_POOL_28_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_29_NR > 0)
- {
- (void *)array_start(skb_sys_pool_29), /* base address */
- SKB_SYS_POOL_29_SIZE, /* object size */
- SKB_SYS_POOL_29_NR, /* object number */
- SKB_SYS_POOL_29_PATCH_NR,
- SKB_SYS_POOL_29_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_30_NR > 0)
- {
- (void *)array_start(skb_sys_pool_30), /* base address */
- SKB_SYS_POOL_30_SIZE, /* object size */
- SKB_SYS_POOL_30_NR, /* object number */
- SKB_SYS_POOL_30_PATCH_NR,
- SKB_SYS_POOL_30_RESERVE_NR,
- },
-#endif
-
-#if (SKB_SYS_POOL_31_NR > 0)
- {
- (void *)array_start(skb_sys_pool_31), /* base address */
- SKB_SYS_POOL_31_SIZE, /* object size */
- SKB_SYS_POOL_31_NR, /* object number */
- SKB_SYS_POOL_31_PATCH_NR,
- SKB_SYS_POOL_31_RESERVE_NR,
+ SKB_SYS_POOL_8_PATCH_NR,
+ SKB_SYS_POOL_8_RESERVE_NR,
},
#endif
};
@@ -665,15 +335,6 @@
/*******************************************************************************
* È«¾Öº¯ÊýÉùÃ÷ *
*******************************************************************************/
-skb_pool_inner_t *skb_pool_create_inner(
- skb_pool_inner_t *inner,
- size_t obj_size,
- skb_count_t obj_nr,
- skb_count_t obj_max_nr,
- skb_count_t obj_batch_nr,
- skb_count_t obj_reserve_nr);
-
-
#ifdef SKB_DBG_POOL
int skb_pool_magic_check(skb_pool_node_impl_t *node)
{
@@ -705,49 +366,50 @@
#else
static inline int skb_pool_magic_check(skb_pool_node_impl_t *node) { return 0; }
-static inline void skb_pool_magic_init(skb_pool_node_impl_t *node){}
+static inline void skb_pool_magic_init(skb_pool_node_impl_t *node) {}
#endif
+
static inline int is_memory_enough(void)
{
return (global_page_state(NR_FREE_PAGES) > wm_min_pages);
}
-int skb_pool_add_inner(
- skb_pool_inner_t *inner,
- unsigned int obj_nr)
+int skb_pool_add_inner(skb_pool_inner_t *inner, unsigned int obj_nr)
{
- u32 obj_size;
+ u32 alloc_size;
skb_count_t cnt;
skb_pool_node_impl_t *node = NULL;
- BUG_ON( inner == NULL || obj_nr <= 0);
-//#ifdef SKB_POOL_AUTO_EXTEND
+
+ BUG_ON(inner == NULL || obj_nr == 0);
+
inner->obj_cur_nr += obj_nr;
inner->obj_nr += obj_nr;
-//#endif
- obj_size = inner->obj_size;
+ alloc_size = skb_pool_node_size(inner->obj_size);
// ÉêÇëǰÏÈÅжÏÊ£ÓàÄÚ´æÊÇ·ñ³ä×ã
- if(is_memory_enough())
- node = ( skb_pool_node_impl_t *)kmalloc(skb_pool_node_size(obj_size), GFP_ATOMIC);
- if (node == NULL){
+ if (is_memory_enough())
+ node = (skb_pool_node_impl_t *)kmalloc(alloc_size, GFP_ATOMIC);
+
+ if (node == NULL)
+ {
inner->obj_cur_nr -= obj_nr;
inner->obj_nr -= obj_nr;
return -1;
}
inner->free_head = node;
#ifdef SKB_DBG_POOL
- node->magic_top = skb_pool_magic_top(node, obj_size);
+ node->magic_top = skb_pool_magic_top(node, inner->obj_size);
skb_pool_magic_init(node);
#endif
- for (cnt = 0x01, obj_size = inner->obj_size; cnt < obj_nr; cnt++)
+ for (cnt = 1; cnt < obj_nr; cnt++)
{
// ÉêÇëǰÏÈÅжÏÊ£ÓàÄÚ´æÊÇ·ñ³ä×ã
- if(is_memory_enough())
- node->free_next =( skb_pool_node_impl_t *)kmalloc(skb_pool_node_size(obj_size), GFP_ATOMIC);
+ if (is_memory_enough())
+ node->free_next = (skb_pool_node_impl_t *)kmalloc(alloc_size, GFP_ATOMIC);
else
- node->free_next = NULL;/*no mem*/
- if ( node->free_next == NULL)/*no mem*/
+ node->free_next = NULL; /* no mem */
+ if (node->free_next == NULL)
{
inner->obj_nr -= obj_nr;
inner->obj_nr += cnt;
@@ -757,84 +419,64 @@
node = node->free_next;
#ifdef SKB_DBG_POOL
- node->magic_top = skb_pool_magic_top(node, obj_size);
+ node->magic_top = skb_pool_magic_top(node, inner->obj_size);
skb_pool_magic_init(node);
#endif
}
node->free_next = NULL;
return 0;
-
}
-static void skb_pool_delete_inner(skb_pool_inner_t *inner,unsigned int type)
+static void skb_pool_delete_inner(skb_pool_inner_t *inner, unsigned int type)
{
skb_pool_node_impl_t *node;
skb_pool_node_impl_t *next_free;
BUG_ON(inner == NULL);
node = inner->free_head;
- switch(type){
- case SKB_POOL_RESERVE:
- while (inner->obj_cur_nr > inner->obj_reserve_nr)
- {
- next_free = node->free_next;
-#ifdef SKB_TRACE
- inner->obj_used_cnt--;
-#endif
-#ifdef SKB_DBG_POOL
- list_del(&node->alloc_node);
-#endif
- kfree(node);
- inner->obj_nr--;
- inner->obj_cur_nr--;
- node = next_free;
- }
- if(node != NULL){
- inner->free_head = node;
- }else{
- inner->free_head = NULL;
-#ifdef SKB_DBG_POOL
- INIT_LIST_HEAD(&inner->alloc_head);
- inner->obj_cnt = 0x00;
- inner->obj_max_used_cnt = 0x00;
-#endif
- }
- case SKB_POOL_NORESERVE:
- inner->obj_nr -= inner->obj_cur_nr;
- while (node != NULL)
- {
- next_free = node->free_next;
- kfree(node);
- inner->obj_cur_nr--;
- node = next_free;
- }
- inner->free_head = NULL;
-#ifdef SKB_DBG_POOL
- INIT_LIST_HEAD(&inner->alloc_head);
- inner->obj_cnt = 0x00;
- inner->obj_max_used_cnt = 0x00;
-#endif
- default:
- return;
- }
+ switch(type)
+ {
+ case SKB_POOL_RESERVE:
+ while (inner->obj_cur_nr > inner->obj_reserve_nr)
+ {
+ next_free = node->free_next;
+ kfree(node);
+ inner->obj_nr--;
+ inner->obj_cur_nr--;
+ node = next_free;
+ }
+ inner->free_head = node;
+ break;
+ case SKB_POOL_NORESERVE:
+ inner->obj_nr -= inner->obj_cur_nr;
+ while (node != NULL)
+ {
+ next_free = node->free_next;
+ kfree(node);
+ inner->obj_cur_nr--;
+ node = next_free;
+ }
+ inner->free_head = NULL;
+ break;
+ default:
+ break;
+ }
+ return;
}
void skb_sys_pool_delete(void)
{
- int cnt;
int index;
- int large_nr;
unsigned long flags;
spin_lock_irqsave(&skb_sys_pool_spinlock, flags);
- for (index = 0x00; index < skb_sys_pool_large_nr; index++)
+ for (index = 0; index < skb_sys_pool_nr; index++)
{
if (skb_sys_pool_inner[index] != NULL)
{
- skb_pool_delete_inner(skb_sys_pool_inner[index],SKB_POOL_NORESERVE);
+ skb_pool_delete_inner(skb_sys_pool_inner[index], SKB_POOL_NORESERVE);
}
-
}
spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
@@ -844,32 +486,28 @@
skb_pool_inner_t *inner,
size_t obj_size,
skb_count_t obj_nr,
- skb_count_t obj_max_nr,
skb_count_t obj_batch_nr,
skb_count_t obj_reserve_nr)
{
- skb_pool_node_impl_t *node;
+ BUG_ON(!ptr_is_aligned(inner) || obj_size <= 0 || obj_nr <= 0);
- BUG_ON(! ptr_is_aligned(inner) || obj_size <= 0 || obj_nr <= 0);
-
-//#ifdef SKB_POOL_AUTO_EXTEND
- inner->obj_nr = 00;
+ inner->obj_nr = 0;
inner->obj_batch_nr = obj_batch_nr;
inner->obj_reserve_nr = obj_reserve_nr;
-//#endif
#ifdef SKB_TRACE
- inner->obj_cnt = 0x00;
- inner->obj_used_cnt = 0x00;
- inner->obj_max_used_cnt = 0x00;
+ memset(inner->obj_real_size, 0, sizeof(inner->obj_real_size));
#endif
#ifdef SKB_DBG_POOL
INIT_LIST_HEAD(&inner->alloc_head);
+ inner->obj_max_used_cnt = 0;
+ inner->obj_used_cnt = 0;
#endif
+ inner->free_head = NULL;
inner->obj_size = obj_size;
- node = NULL;
+ inner->obj_cur_nr = 0;
if (skb_pool_add_inner(inner, obj_nr) < 0)
return NULL;
@@ -877,31 +515,15 @@
return inner;
}
-static inline int skb_sys_pool_match_by_inner(skb_pool_inner_t **inner)
-{
- BUG_ON(
- inner < array_start(skb_sys_pool_inner) ||
- inner >= array_end(skb_sys_pool_inner));
-
- return array_index(skb_sys_pool_inner, inner);
-}
-
static inline int skb_sys_pool_match_by_size(u32 size)
{
int index;
- int large_nr;
- for (index = 0x00; index < skb_sys_pool_large_nr; index++)
+ for (index = 0; index < skb_sys_pool_nr; index++)
{
- if (size > skb_sys_pool_large_size[index])
+ if (size > skb_sys_pool_sizes[index])
continue;
-
- large_nr = skb_sys_pool_large_nr ;
- for (index += 0; index < large_nr; index++)
- {
- if (test_bit(index, skb_sys_pool_bitmap))
- return index;
- }
+ return index;
}
return -1;
@@ -915,8 +537,9 @@
node = skb_pool_node(ptr);
index = *(int*)node;
- return skb_sys_pool_inner[index]->obj_size;
+ return skb_sys_pool_inner[index]->obj_size;
}
+
void *skb_sys_pool_alloc(
size_t size
#ifdef SKB_DBG_POOL
@@ -930,35 +553,37 @@
skb_pool_inner_t *inner;
skb_pool_node_impl_t *node;
- BUG_ON( size <= 0);
+ BUG_ON(size == 0);
spin_lock_irqsave(&skb_sys_pool_spinlock, flags);
index = skb_sys_pool_match_by_size(size);
- if (index == -1 )
- {
- if (skb_sys_pool_large_nr >= SKB_SYS_POOL_LARGE_NR - 1)
+ if (index == -1)
+ {
+ skb_pool_inner_t *inner;
+
+ if (skb_sys_pool_nr >= SKB_SYS_POOL_NR)
{
spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
return NULL;
- }
- skb_pool_inner_t *inner =(skb_pool_inner_t*)kzalloc(sizeof(skb_pool_inner_t) , GFP_ATOMIC );
- if (inner == NULL){
- spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
+ }
+ inner = (skb_pool_inner_t*)kzalloc(sizeof(skb_pool_inner_t), GFP_ATOMIC);
+ if (inner == NULL)
+ {
+ spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
return NULL;
}
- if ((skb_sys_pool_inner[skb_sys_pool_large_nr] = skb_pool_create_inner(inner,size,1,SKB_SYS_POOL_MAX_NR,1UL,0))== NULL)
+ if ((skb_sys_pool_inner[skb_sys_pool_nr] = skb_pool_create_inner(inner, size, 1, 1, 0)) == NULL)
{
kfree(inner);
spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
return NULL;
}
- skb_sys_pool_large_size[skb_sys_pool_large_nr] = size;
- set_bit(skb_sys_pool_large_nr , skb_sys_pool_bitmap);
- index = skb_sys_pool_large_nr;
- skb_sys_pool_large_nr++;
-
+ skb_sys_pool_sizes[skb_sys_pool_nr] = size;
+ pool_set_bit(skb_sys_pool_nr, skb_sys_pool_bitmap);
+ index = skb_sys_pool_nr;
+ skb_sys_pool_nr++;
}
inner = skb_sys_pool_inner[index];
@@ -966,41 +591,13 @@
if (node == NULL)
{
-#ifdef SKB_POOL_AUTO_EXTEND
if (skb_pool_add_inner(inner, inner->obj_batch_nr) < 0)
{
- spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
- return NULL;
- }
-
-#else
- BUG_ON(1);
- spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
- return NULL;
-#endif
+ spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
+ return NULL;
+ }
}
-#ifdef SKB_TRACE
- if (size < 2048)
- {
- inner->obj_real_size[size] = inner->obj_real_size[size]++;
-
- inner->obj_used_cnt++;
- if (inner->obj_max_used_cnt < inner->obj_used_cnt)
- inner->obj_max_used_cnt = inner->obj_used_cnt;
- }
- else
- {
- if (inner->obj_real_size_cnt >=2048)
- inner->obj_real_size_cnt = 0;
- inner->obj_real_size[ inner->obj_real_size_cnt] = size;
- inner->obj_real_size_cnt++;
-
- inner->obj_used_cnt++;
- if (inner->obj_max_used_cnt < inner->obj_used_cnt)
- inner->obj_max_used_cnt = inner->obj_used_cnt;
- }
-#endif
inner->obj_cur_nr--;
node = inner->free_head;
inner->free_head = node->free_next;
@@ -1008,6 +605,20 @@
#ifdef SKB_DBG_POOL
list_add_tail(&node->alloc_node, &inner->alloc_head);
+ inner->obj_used_cnt++;
+ if (inner->obj_max_used_cnt < inner->obj_used_cnt)
+ inner->obj_max_used_cnt = inner->obj_used_cnt;
+#endif
+
+#ifdef SKB_TRACE
+ if (size < 2048)
+ {
+ inner->obj_real_size[size]++;
+ }
+ else
+ {
+ inner->obj_real_size[0]++;
+ }
#endif
spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
@@ -1052,7 +663,7 @@
node = skb_pool_node(ptr);
index = *(int *)node;
- BUG_ON(index == -1 || skb_sys_pool_inner[index] == NULL);
+ BUG_ON(index == -1 || index > skb_sys_pool_nr || skb_sys_pool_inner[index] == NULL);
inner = skb_sys_pool_inner[index];
node->free_next = inner->free_head;
@@ -1061,21 +672,11 @@
#ifdef SKB_DBG_POOL
list_del(&node->alloc_node);
+ inner->obj_used_cnt--;
#endif
if (inner->obj_cur_nr > inner->obj_reserve_nr)
- skb_pool_delete_inner(skb_sys_pool_inner[index],SKB_POOL_RESERVE);
- else{
-
-#ifdef SKB_TRACE
- inner->obj_used_cnt--;
-#endif
- }
-
-#if 0
- if (!test_bit(index, skb_sys_pool_bitmap))
- set_bit(index, skb_sys_pool_bitmap);
-#endif
+ skb_pool_delete_inner(skb_sys_pool_inner[index], SKB_POOL_RESERVE);
spin_unlock_irqrestore(&skb_sys_pool_spinlock, flags);
@@ -1088,7 +689,7 @@
{
u32 cnt;
- BUG_ON( cfg == NULL || cfg_nr > SKB_SYS_POOL_NR);
+ BUG_ON( cfg == NULL || cfg_nr > SKB_SYS_POOL_NR);
for (cnt = 0x00; cnt < cfg_nr; cnt++, cfg++)
{
@@ -1097,17 +698,15 @@
(skb_pool_inner_t *)cfg->addr,
cfg->obj_size,
cfg->obj_nr,
- SKB_SYS_POOL_MAX_NR,
cfg->patch_nr,
cfg->reserve_nr);
- skb_sys_pool_large_size[cnt] = cfg->obj_size;
- skb_sys_pool_large_nr++;
- set_bit(cnt , skb_sys_pool_bitmap);
+ skb_sys_pool_sizes[cnt] = cfg->obj_size;
+ skb_sys_pool_nr++;
+ pool_set_bit(cnt , skb_sys_pool_bitmap);
if (cnt && cnt > 1)
- BUG_ON( cfg->obj_size <= skb_sys_pool_large_size[cnt - 1]);
+ BUG_ON(cfg->obj_size < skb_sys_pool_sizes[cnt - 1]);
}
- skb_sys_pool_large_dl_nr = skb_sys_pool_large_nr;
}
int __init skb_sys_pool_init(void)
diff --git a/ap/os/linux/linux-3.4.x/net/ipv4/ip_sockglue.c b/ap/os/linux/linux-3.4.x/net/ipv4/ip_sockglue.c
index df9f330..ad439ed 100644
--- a/ap/os/linux/linux-3.4.x/net/ipv4/ip_sockglue.c
+++ b/ap/os/linux/linux-3.4.x/net/ipv4/ip_sockglue.c
@@ -254,6 +254,8 @@
return -EINVAL;
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
+ if (on && !new_ra) //CVE-2019-12381(BDSA-2019-1652)
+ return -ENOMEM;
spin_lock_bh(&ip_ra_lock);
for (rap = &ip_ra_chain;
diff --git a/ap/os/linux/linux-3.4.x/net/ipv4/ping.c b/ap/os/linux/linux-3.4.x/net/ipv4/ping.c
index 9f471c3..5ff3ed1 100644
--- a/ap/os/linux/linux-3.4.x/net/ipv4/ping.c
+++ b/ap/os/linux/linux-3.4.x/net/ipv4/ping.c
@@ -665,11 +665,13 @@
if (msg->msg_name) {
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
- sin->sin_family = AF_INET;
- sin->sin_port = 0 /* skb->h.uh->source */;
- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
- *addr_len = sizeof(*sin);
+ if (sin) { //CVE-2013-6432
+ sin->sin_family = AF_INET;
+ sin->sin_port = 0 /* skb->h.uh->source */;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ *addr_len = sizeof(*sin);
+ }
}
if (isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
diff --git a/ap/os/linux/linux-3.4.x/net/ipv4/tcp.c b/ap/os/linux/linux-3.4.x/net/ipv4/tcp.c
index a18ff4e..82bb0e7 100755
--- a/ap/os/linux/linux-3.4.x/net/ipv4/tcp.c
+++ b/ap/os/linux/linux-3.4.x/net/ipv4/tcp.c
@@ -2174,6 +2174,10 @@
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
+ /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
+ * issue in __tcp_select_window()
+ */
+ icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; //CVE-2017-14106(BDSA-2017-1152)
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
diff --git a/ap/os/linux/linux-3.4.x/net/ipv4/tcp_timer.c b/ap/os/linux/linux-3.4.x/net/ipv4/tcp_timer.c
index 34d4a02..94159fc 100644
--- a/ap/os/linux/linux-3.4.x/net/ipv4/tcp_timer.c
+++ b/ap/os/linux/linux-3.4.x/net/ipv4/tcp_timer.c
@@ -124,6 +124,7 @@
mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
mss = min(sysctl_tcp_base_mss, mss);
mss = max(mss, 68 - tp->tcp_header_len);
+ mss = max(mss, TCP_MIN_SND_MSS);//BDSA-2019-1812(CVE-2019-11479)
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
diff --git a/ap/os/linux/linux-3.4.x/net/ipv6/ip6_output.c b/ap/os/linux/linux-3.4.x/net/ipv6/ip6_output.c
index daadf4c..97aca44 100755
--- a/ap/os/linux/linux-3.4.x/net/ipv6/ip6_output.c
+++ b/ap/os/linux/linux-3.4.x/net/ipv6/ip6_output.c
@@ -563,7 +563,7 @@
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
- u16 offset = sizeof(struct ipv6hdr);
+ unsigned int offset = sizeof(struct ipv6hdr);//CVE-2017-7542(BDSA-2017-0992)
//struct ipv6_opt_hdr *exthdr =//CVE-2017-9074
//(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
unsigned int packet_len = skb->tail - skb->network_header;
@@ -573,6 +573,7 @@
//while (offset + 1 <= packet_len) {//CVE-2017-9074
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;//CVE-2017-9074
+ unsigned int len;
switch (**nexthdr) {
case NEXTHDR_HOP:
@@ -598,7 +599,11 @@
return -EINVAL;//CVE-2017-9074
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
- offset += ipv6_optlen(exthdr);
+ //CVE-2017-7542(BDSA-2017-0992)
+ len = ipv6_optlen(exthdr);
+ if (len + offset >= IPV6_MAXPLEN)
+ return -EINVAL;
+ offset += len;
*nexthdr = &exthdr->nexthdr;//CVE-2017-9074
}
@@ -1430,6 +1435,11 @@
*/
alloclen += sizeof(struct frag_hdr);
+ copy = datalen - transhdrlen - fraggap; //CVE-2017-9242(BDSA-2017-1000)
+ if (copy < 0) {
+ err = -EINVAL;
+ goto error;
+ }
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len,
@@ -1481,13 +1491,9 @@
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
- copy = datalen - transhdrlen - fraggap;
-
- if (copy < 0) {
- err = -EINVAL;
- kfree_skb(skb);
- goto error;
- } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+ if (copy > 0 &&
+ getfrag(from, data + transhdrlen, offset,
+ copy, fraggap, skb) < 0) { //CVE-2017-9242(BDSA-2017-1000)
err = -EFAULT;
kfree_skb(skb);
goto error;
diff --git a/ap/os/linux/linux-3.4.x/net/ipv6/ipv6_sockglue.c b/ap/os/linux/linux-3.4.x/net/ipv6/ipv6_sockglue.c
index ba074eb..8ad6b2f 100755
--- a/ap/os/linux/linux-3.4.x/net/ipv6/ipv6_sockglue.c
+++ b/ap/os/linux/linux-3.4.x/net/ipv6/ipv6_sockglue.c
@@ -67,6 +67,8 @@
return -ENOPROTOOPT;
new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
+ if (sel >= 0 && !new_ra) //CVE-2019-12378(BDSA-2019-1655)
+ return -ENOMEM;
write_lock_bh(&ip6_ra_lock);
for (rap = &ip6_ra_chain; (ra=*rap) != NULL; rap = &ra->next) {
@@ -150,6 +152,13 @@
lock_sock(sk);
+ /* BDSA-2022-2918(CVE-2022-3524) */
+ /* Another thread has converted the socket into IPv4 with
+ * IPV6_ADDRFORM concurrently.
+ */
+ if (unlikely(sk->sk_family != AF_INET6))
+ goto unlock;
+
switch (optname) {
case IPV6_ADDRFORM:
@@ -854,6 +863,7 @@
break;
}
+unlock: //BDSA-2022-2918(CVE-2022-3524)
release_sock(sk);
return retv;
diff --git a/ap/os/linux/linux-3.4.x/net/ipv6/udp.c b/ap/os/linux/linux-3.4.x/net/ipv6/udp.c
index 0bf27fb..18328b5 100755
--- a/ap/os/linux/linux-3.4.x/net/ipv6/udp.c
+++ b/ap/os/linux/linux-3.4.x/net/ipv6/udp.c
@@ -718,7 +718,8 @@
}
extern void fast_sk_add_ct(struct sk_buff * skb,struct sock *sk);
-
+int udp6_zero_csum_ctrl = 1;
+module_param(udp6_zero_csum_ctrl,int,0644);
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
@@ -759,7 +760,7 @@
}
}
- if (udp6_csum_init(skb, uh, proto))
+ if (!(udp6_zero_csum_ctrl && ((IPPROTO_UDP == proto)&&(0 == uh->check))) && udp6_csum_init(skb, uh, proto))
goto discard;
/*
diff --git a/ap/os/linux/linux-3.4.x/net/netfilter/nf_conntrack_netlink.c b/ap/os/linux/linux-3.4.x/net/netfilter/nf_conntrack_netlink.c
index ca7e835..5dfc44b 100644
--- a/ap/os/linux/linux-3.4.x/net/netfilter/nf_conntrack_netlink.c
+++ b/ap/os/linux/linux-3.4.x/net/netfilter/nf_conntrack_netlink.c
@@ -848,6 +848,8 @@
if (!tb[CTA_TUPLE_IP])
return -EINVAL;
+ if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6) //CVE-2020-25211(BDSA-2020-2321)
+ return -EOPNOTSUPP;
tuple->src.l3num = l3num;
err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
diff --git a/ap/os/linux/linux-3.4.x/net/netfilter/nfnetlink_queue.c b/ap/os/linux/linux-3.4.x/net/netfilter/nfnetlink_queue.c
index e3c436d..e0275e4 100755
--- a/ap/os/linux/linux-3.4.x/net/netfilter/nfnetlink_queue.c
+++ b/ap/os/linux/linux-3.4.x/net/netfilter/nfnetlink_queue.c
@@ -461,7 +461,7 @@
if (diff < 0) {
unsigned int min_len = skb_transport_offset(e->skb);
//CVE-2022-36946
- if (data_len < min_len)
+ if (data_len < (int)min_len)
return -EINVAL;
if (pskb_trim(e->skb, data_len))
diff --git a/ap/os/linux/linux-3.4.x/net/netfilter/xt_TCPMSS.c b/ap/os/linux/linux-3.4.x/net/netfilter/xt_TCPMSS.c
index 190ad37..31d8a4e 100644
--- a/ap/os/linux/linux-3.4.x/net/netfilter/xt_TCPMSS.c
+++ b/ap/os/linux/linux-3.4.x/net/netfilter/xt_TCPMSS.c
@@ -62,7 +62,7 @@
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
/* Header cannot be larger than the packet */
- if (tcplen < tcph->doff*4)
+ if (tcplen < tcph->doff*4 || tcph->doff*4 < sizeof(struct tcphdr)) //CVE-2017-18017(BDSA-2017-2798)
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -113,6 +113,11 @@
if (tcplen > tcph->doff*4)
return 0;
+ /* CVE-2017-18017(BDSA-2017-2798) */
+ /* tcph->doff has 4 bits, do not wrap it to 0 */
+ if (tcph->doff*4 >= 15*4)
+ return 0;
+
/*
* MSS Option not found ?! add it..
*/
diff --git a/ap/os/linux/linux-3.4.x/net/packet/af_packet.c b/ap/os/linux/linux-3.4.x/net/packet/af_packet.c
index dc845ff..8ea15eb 100755
--- a/ap/os/linux/linux-3.4.x/net/packet/af_packet.c
+++ b/ap/os/linux/linux-3.4.x/net/packet/af_packet.c
@@ -568,7 +568,8 @@
default:
return DEFAULT_PRB_RETIRE_TOV;
}
- }
+ } else
+ return DEFAULT_PRB_RETIRE_TOV; //CVE-2019-20812(BDSA-2019-4411)
mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
@@ -1782,8 +1783,11 @@
copy_skb = skb_get(skb);
skb_head = skb->data;
}
- if (copy_skb)
+ if (copy_skb) {
+ memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
+ sizeof(PACKET_SKB_CB(copy_skb)->sa.ll)); //BDSA-2022-2562(CVE-2022-20368)
skb_set_owner_r(copy_skb, sk);
+ }
}
snaplen = po->rx_ring.frame_size - macoff;
if ((int)snaplen < 0)
@@ -2810,15 +2814,25 @@
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) {
+ const size_t max_len = min(sizeof(skb->cb),
+ sizeof(struct sockaddr_storage)); //BDSA-2022-2562(CVE-2022-20368)
+ int copy_len;
+
/* If the address length field is there to be filled
* in, we fill it in now.
*/
if (sock->type == SOCK_PACKET) {
msg->msg_namelen = sizeof(struct sockaddr_pkt);
+ copy_len = msg->msg_namelen;
} else {
struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
msg->msg_namelen = sll->sll_halen +
offsetof(struct sockaddr_ll, sll_addr);
+ copy_len = msg->msg_namelen;
+ }
+ if (WARN_ON_ONCE(copy_len > max_len)) { //BDSA-2022-2562(CVE-2022-20368)
+ copy_len = max_len;
+ msg->msg_namelen = copy_len;
}
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
msg->msg_namelen);
diff --git a/ap/os/linux/linux-3.4.x/net/wireless/nl80211.c b/ap/os/linux/linux-3.4.x/net/wireless/nl80211.c
index 42ebb4a..72d765e 100755
--- a/ap/os/linux/linux-3.4.x/net/wireless/nl80211.c
+++ b/ap/os/linux/linux-3.4.x/net/wireless/nl80211.c
@@ -6328,6 +6328,9 @@
if (err)
return err;
+ if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
+ !tb[NL80211_REKEY_DATA_KCK]) //CVE-2017-12153(BDSA-2017-1184)
+ return -EINVAL;
if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
return -ERANGE;
if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
diff --git a/ap/os/linux/linux-3.4.x/net/xfrm/xfrm_user.c b/ap/os/linux/linux-3.4.x/net/xfrm/xfrm_user.c
index 812845e..ccf0f6e 100755
--- a/ap/os/linux/linux-3.4.x/net/xfrm/xfrm_user.c
+++ b/ap/os/linux/linux-3.4.x/net/xfrm/xfrm_user.c
@@ -390,7 +390,15 @@
up = nla_data(rp);
ulen = xfrm_replay_state_esn_len(up);
- if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
+ /* CVE-2017-7184 */
+ /* Check the overall length and the internal bitmap length to avoid
+ * potential overflow. */
+ if (nla_len(rp) < ulen ||
+ xfrm_replay_state_esn_len(replay_esn) != ulen ||
+ replay_esn->bmp_len != up->bmp_len)
+ return -EINVAL;
+
+ if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
return -EINVAL;
return 0;
@@ -850,6 +858,7 @@
static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
+ if(walk)
xfrm_state_walk_done(walk);
return 0;
}
@@ -860,6 +869,8 @@
struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
struct xfrm_dump_info info;
+ if(walk == NULL)
+ return 0;
BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
sizeof(cb->args) - sizeof(cb->args[0]));
@@ -1527,7 +1538,7 @@
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
-
+ if(walk)
xfrm_policy_walk_done(walk);
return 0;
}
@@ -1538,6 +1549,8 @@
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
struct xfrm_dump_info info;
+ if(walk == NULL)
+ return 0;
BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
sizeof(cb->args) - sizeof(cb->args[0]));
diff --git a/ap/os/linux/linux-3.4.x/sound/soc/sanechips/zx29_snd_ctrl.c b/ap/os/linux/linux-3.4.x/sound/soc/sanechips/zx29_snd_ctrl.c
index b802c2e..bebc0cb 100644
--- a/ap/os/linux/linux-3.4.x/sound/soc/sanechips/zx29_snd_ctrl.c
+++ b/ap/os/linux/linux-3.4.x/sound/soc/sanechips/zx29_snd_ctrl.c
@@ -53,6 +53,10 @@
static int vp_SetTxvol(struct snd_kcontrol *kcontrol,struct snd_ctl_elem_value *ucontrol);
static int vp_GetModuleState(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
static int vp_SetModuleState(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+static int vp_SetVpLoop(struct snd_kcontrol *kcontrol,struct snd_ctl_elem_value *ucontrol);
+static int vp_GetVpLoop(struct snd_kcontrol *kcontrol,struct snd_ctl_elem_value *ucontrol);
+
+
const struct snd_kcontrol_new voice_process_controls[] =
{
@@ -61,7 +65,8 @@
SOC_SINGLE_EXT("tx ns mode", 0, 0, 3, 0, vp_GetTxNsMode, vp_SetTxNsMode),
SOC_SINGLE_EXT("rx ns mode", 0, 0, 3, 0, vp_GetRxNsMode, vp_SetRxNsMode),
SOC_SINGLE_EXT("tx vol", 0, 0, 5, 0, vp_GetTxVol, vp_SetTxvol),
- SND_SOC_INTEGER_EXT("vp ctrl", 9,vp_GetModuleState, vp_SetModuleState),
+ SND_SOC_INTEGER_EXT("vp ctrl", 9,vp_GetModuleState, vp_SetModuleState),
+ SOC_SINGLE_EXT("vp loop",0, 5, 5, 0,vp_GetVpLoop, vp_SetVpLoop),
};
int vp_controls_size = sizeof(voice_process_controls) / sizeof(voice_process_controls[0]);
@@ -181,3 +186,35 @@
CPPS_FUNC(cpps_callbacks, zDrvVp_SetModuleState_Wrap)(val,count);
return 0;
}
+
+
+static int vp_SetVpLoop(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int path = 0,ret = 0;
+ path = ucontrol->value.integer.value[0];
+ pr_info("ALSA:%s start path=%d \n",__func__,path);
+
+ //ret = zDrvVp_SetVpLoop_Wrap(path);
+
+ ret = CPPS_FUNC(cpps_callbacks, zDrvVp_Loop)(path);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "vp_SetVpLoop fail = %d\n",path);
+ return ret;
+ }
+ pr_info("ALSA:%s end\n",__func__);
+
+ return 0;
+}
+
+static int vp_GetVpLoop(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ //ucontrol->value.integer.value[0] = zDrvVp_GetVpLoop_Wrap();
+ ucontrol->value.integer.value[0] = CPPS_FUNC(cpps_callbacks, zDrvVp_GetVpLoop_Wrap)();
+ pr_info("ALSA:%s end vploop state =%d \n",__func__,ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+