[T106][ZXW-22]7520V3SCV2.01.01.02P42U09_VEC_V0.8_AP_VEC origin source commit
Change-Id: Ic6e05d89ecd62fc34f82b23dcf306c93764aec4b
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/Kconfig b/ap/os/linux/linux-3.4.x/drivers/soc/Kconfig
new file mode 100644
index 0000000..af1395a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/Kconfig
@@ -0,0 +1 @@
+source "drivers/soc/zte/Kconfig"
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/Makefile b/ap/os/linux/linux-3.4.x/drivers/soc/Makefile
new file mode 100644
index 0000000..290794b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PLAT_ZTE) += zte/
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/Kconfig b/ap/os/linux/linux-3.4.x/drivers/soc/zte/Kconfig
new file mode 100644
index 0000000..c50070f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/Kconfig
@@ -0,0 +1,51 @@
+# When adding new entries keep the list in alphabetical order
+
+config ZX_RAM_CONSOLE
+ bool "Write log to ram when suspend"
+ depends on SUSPEND
+ default y
+ ---help---
+ Write the log to RAM while the system is in suspend or sleep state,
+ when the UART may not be able to print normally.
+
+config ZX_PM_DEBUG
+ bool "PM debug"
+ depends on SUSPEND
+ default y
+ ---help---
+ Debug interface for power management!
+
+config ZX_PM_DEBUG_TIME
+ bool "debug sleep time"
+ depends on SUSPEND
+ default n
+ ---help---
+ Used to debug the sleep time.
+
+config AXI_FREQ
+ bool "change axi clock"
+ depends on CPU_FREQ
+ default y
+ ---help---
+ Used to change the AXI clock.
+
+config RPM_ZX29
+ bool "icp and rpmsg driver"
+ depends on PLAT_ZTE
+ default n
+ ---help---
+ ICP and rpmsg driver for communication between cores.
+
+config TSC_ZX29
+ bool "tsc driver"
+ depends on PLAT_ZTE
+ default n
+ ---help---
+ Support for the ZX29 TSC driver.
+
+config DDR_ZX29
+ tristate "DDR driver"
+ depends on PLAT_ZTE
+ ---help---
+ DDR test driver for the ZX29 platform.
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/Makefile b/ap/os/linux/linux-3.4.x/drivers/soc/zte/Makefile
new file mode 100644
index 0000000..27c1a01
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/Makefile
@@ -0,0 +1,13 @@
+# When adding new entries keep the list in alphabetical order
+
+obj-y += power/
+ifndef CONFIG_SYSTEM_RECOVERY
+ifneq ($(USE_TestHarness),yes)
+obj-y += rpm/
+endif
+ifndef CONFIG_SYSTEM_CAP
+obj-y += efuse/
+endif
+endif
+obj-y += tsc/
+obj-y += ddr/
\ No newline at end of file
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/ddr/Makefile b/ap/os/linux/linux-3.4.x/drivers/soc/zte/ddr/Makefile
new file mode 100644
index 0000000..43e213f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/ddr/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for ddr driver.
+#
+
+obj-$(CONFIG_DDR_ZX29) += zx-ddr-test.o
+obj-y += zx-ddr.o
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/ddr/zx-ddr-test.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/ddr/zx-ddr-test.c
new file mode 100644
index 0000000..8841596
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/ddr/zx-ddr-test.c
@@ -0,0 +1,69 @@
+/*
+ * ZTE ddr driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by tsp
+ *
+ */
+#ifdef CONFIG_DDR_ZX29_MODULE
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <linux/dev_info.h>
+#include <linux/slab.h>
+#include <mach/timex.h>
+#include <linux/soc/zte/ddr/drv_ddr.h>
+#include <linux/syscalls.h>
+#include <asm/uaccess.h>
+#include <linux/miscdevice.h> /* For handling misc devices */
+
+
+
+//#define DDR_TEST_SIZE (0x8000)//32k
+
+#if 0
+static const struct file_operations zx_ddr_fops = {
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice zx_ddr_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "zx_ddr",
+ .fops = &zx_ddr_fops,
+};
+#endif
+
+extern void zx_ddr_test(void);
+static int __init zx_ddr_init(void)
+{
+ #if 0
+ int err;
+ err = misc_register(&zx_ddr_miscdev);
+ if (err != 0) {
+ pr_info("[zx ddr]: cannot register miscdev (err=%d)\n", err);
+ return err;
+ }
+ pr_info("[zx ddr test]: Success to register miscdev!!! \n");
+ #endif
+ pr_info("[zx ddr test]: zx_ddr_init!!! \n");
+
+ zx_ddr_test();
+ return 0;
+}
+
+static void __exit zx_ddr_exit(void)
+{
+ // misc_deregister(&zx_ddr_miscdev);
+}
+
+
+module_init(zx_ddr_init);
+module_exit(zx_ddr_exit);
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/ddr/zx-ddr.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/ddr/zx-ddr.c
new file mode 100644
index 0000000..c15e1dd
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/ddr/zx-ddr.c
@@ -0,0 +1,587 @@
+/*
+ * ZTE ddr driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by tsp
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <mach/iomap.h>
+#include <linux/dev_info.h>
+#include <linux/dmaengine.h>
+#include <mach/dma.h>
+#include <linux/dma-mapping.h>
+#include <asm/outercache.h>
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <mach/timex.h>
+#include <linux/soc/zte/ddr/drv_ddr.h>
+#include <linux/syscalls.h>
+#include <asm/uaccess.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_DDR_ZX29_MODULE
+
+#define DDR_TEST_SIZE (0x8000)//32k
+
+extern int ddr_get_info(struct flash_ddr_info *info);
+extern bool zx29_dma_filter_fn(struct dma_chan *chan, void *param);
+
+static volatile unsigned int zx_noncache_dma_flag=0 ;
+static volatile unsigned int zx_cacheable_dma_flag=0 ;
+static struct dma_chan * ddrtest_chan = NULL;
+static unsigned char * ddrtest_buffer = NULL;
+static dma_addr_t ddrtest_phy_addr;
+static void * vir_addr=NULL;
+
+struct zx_ddr {
+ unsigned int size;
+ struct ddr_test_result *result;
+};
+
+#if 0
+static void ddr_clean_range(unsigned int addr, unsigned int size)
+{
+ dmac_map_area(addr, size,DMA_BIDIRECTIONAL);
+ outer_clean_range(addr, addr + size);
+}
+
+static void ddr_invalide_range(unsigned int addr, unsigned int size)
+{
+ outer_inv_range(ddrtest_phy_addr+size , ddrtest_phy_addr+size + size);
+ dmac_unmap_area(addr, size,DMA_BIDIRECTIONAL);
+}
+#endif
+static void ddr_dma_config(unsigned int src_addr, unsigned int dst_addr, unsigned int size)
+{
+ dma_channel_def temp_config;
+
+ temp_config.src_addr = src_addr;
+ temp_config.dest_addr = dst_addr;
+ temp_config.count = size;
+ temp_config.dma_control.tran_mode = TRAN_MEM_TO_MEM;
+ temp_config.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+ temp_config.dma_control.src_burst_len = DMA_BURST_LEN_16;
+ temp_config.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+ temp_config.dma_control.dest_burst_len = DMA_BURST_LEN_16;
+ temp_config.dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+
+ temp_config.link_addr = 0;
+
+ dmaengine_slave_config(ddrtest_chan,(struct dma_slave_config*)&temp_config);
+
+}
+
+static void ddr_dma_start(struct zx_ddr *ddr ,dma_async_tx_callback callbk)
+{
+ struct dma_async_tx_descriptor *desc =NULL;
+
+ /* start transfer */
+ desc = ddrtest_chan->device->device_prep_interleaved_dma(ddrtest_chan,NULL,0);
+ desc->callback = callbk;
+ desc->callback_param = ddr;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(ddrtest_chan);
+}
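+
+/*
+ * Editor's illustrative sketch (not part of the driver flow): the transfer
+ * pattern the tests below follow. A copy is set up with ddr_dma_config(),
+ * kicked off with ddr_dma_start(), and completion is signalled by the
+ * callback setting a flag the caller busy-waits on. The flag, callback and
+ * helper names here are hypothetical.
+ */
+#if 0
+static volatile unsigned int copy_done;
+
+static void copy_done_cb(void *data)
+{
+	copy_done = 1;			/* signal completion to the busy-wait below */
+}
+
+static void ddr_dma_copy(struct zx_ddr *ddr, unsigned int src,
+			 unsigned int dst, unsigned int len)
+{
+	copy_done = 0;
+	ddr_dma_config(src, dst, len);		/* mem-to-mem, 16-beat bursts */
+	ddr_dma_start(ddr, copy_done_cb);	/* submit and issue the descriptor */
+	while (!copy_done)
+		;				/* wait for the DMA IRQ callback */
+}
+#endif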
+
+static int ddr_noncache_cpu_test(unsigned int size, struct ddr_test_result *result_info)
+{
+ unsigned int addr;
+
+ if( NULL==result_info )
+ return 1;
+
+ vir_addr = dma_alloc_coherent(NULL, size, &ddrtest_phy_addr, GFP_KERNEL);
+ if(!vir_addr){
+ pr_info("ddr_noncache_cpu_test alloc ddr failed . \n");
+ return 1;
+ }
+
+ //pr_info("ddr_noncache_cpu_test alloc buffer (0x%x). \n", (unsigned int)vir_addr);
+
+ for(addr=(unsigned int)vir_addr; addr<(unsigned int)vir_addr+size; addr+=4) {
+ zx_write_reg(addr, addr);
+ if(zx_read_reg(addr) !=addr) {
+
+ pr_info("\n addr 0x%x ddr_noncache_cpu_test failed!\n", addr);
+ sprintf(result_info->noncache_cpu, "addr:0x%x ddr_noncache_cpu failed", addr);
+ dma_free_coherent(NULL, size,vir_addr, ddrtest_phy_addr);
+ return 1;
+ }
+ }
+
+ dma_free_coherent(NULL, size,vir_addr, ddrtest_phy_addr);
+
+ pr_info("ddr_noncache_cpu_test succeeded . \n");
+ sprintf(result_info->noncache_cpu, "ddr_noncache_cpu_test succeeded");
+ return 0;
+}
+
+
+static int ddr_cacheable_cpu_test(unsigned int size, struct ddr_test_result *result_info)
+{
+ unsigned int addr;
+
+ if( NULL==result_info )
+ return 1;
+
+ /* alloc buffer */
+ ddrtest_buffer = kzalloc(size, GFP_KERNEL);
+ if (!ddrtest_buffer) {
+ pr_info("ddr_cacheable_cpu_test alloc ddr failed . \n");
+ return 1;
+ }
+ //pr_info("ddr_cacheable_cpu_test alloc buffer (0x%x). \n", (unsigned int)ddrtest_buffer);
+
+ memset(ddrtest_buffer, 0, size);
+
+ for(addr=(unsigned int)ddrtest_buffer; addr<(unsigned int)ddrtest_buffer+size; addr+=4 )
+ zx_write_reg(addr, addr);
+
+ //addr=(unsigned int)ddrtest_buffer;
+ //ddr_clean_range(addr, size);
+ //ddr_invalide_range(addr, size);
+ dma_sync_single_for_device(NULL,ddrtest_phy_addr , size,DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(NULL,ddrtest_phy_addr, size,DMA_BIDIRECTIONAL);
+
+ for(addr=(unsigned int)ddrtest_buffer; addr<(unsigned int)ddrtest_buffer+size; addr+=4 ){
+ if(zx_read_reg(addr) !=addr) {
+ pr_info("\n ddr_cacheable_cpu_test error!\n");
+ sprintf(result_info->cacheable_cpu, "addr:0x%x ddr_cacheable_cpu failed", addr);
+ kfree(ddrtest_buffer);
+ return 1;
+ }
+ }
+ kfree(ddrtest_buffer);
+
+ pr_info("ddr_cacheable_cpu_test succeeded. \n");
+ sprintf(result_info->cacheable_cpu, "ddr_cacheable_cpu_test succeeded");
+ return 0;
+}
+
+void noncache_dma_cb(void *data)
+{
+ #if 0
+ unsigned int addr1,addr2;
+ struct zx_ddr *ddr=(struct zx_ddr *)data;
+ unsigned int size=ddr->size;
+ struct ddr_test_result *result =ddr->result;
+
+ for(addr1=(unsigned int)vir_addr,addr2=(unsigned int)vir_addr+size; addr1<(unsigned int)vir_addr+size; addr1+=4,addr2+=4) {
+ if(zx_read_reg(addr1) != zx_read_reg(addr2)) {
+
+ pr_info("addr:0x%x cacheable_dma failed. \n", addr1);
+ sprintf(result->noncache_dma, "addr:0x%x noncache_dma failed", addr1);
+ zx_noncache_dma_flag=1;
+ return ;
+ }
+ }
+
+ pr_info("ddr_noncache_dma_test succeeded . \n");
+ sprintf(result->noncache_dma, "ddr_noncache_dma_test succeeded");
+ #endif
+ zx_noncache_dma_flag=1;
+}
+
+static int ddr_noncache_dma_test(unsigned int size, struct ddr_test_result *result_info)
+{
+ dma_cap_mask_t mask;
+ unsigned int addr,addr1,addr2;
+ struct zx_ddr ddr;
+
+ if( NULL==result_info )
+ return 1;
+
+ zx_noncache_dma_flag=0;
+
+ vir_addr = dma_alloc_coherent(NULL, 2*size, &ddrtest_phy_addr, GFP_KERNEL);
+ if(!vir_addr){
+ pr_info("ddr_noncache_cpu_test alloc ddr failed . \n");
+ return 1;
+ }
+ //pr_info("ddr_noncache_dma_test alloc vir_addr (0x%x), phy_addr (0x%x) \n", (unsigned int)vir_addr, ddrtest_phy_addr);
+
+ for(addr=(unsigned int)vir_addr; addr<(unsigned int)vir_addr+size; addr+=4) {
+ zx_write_reg(addr, addr);
+ }
+
+ //config dma
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ ddrtest_chan=dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_MEMORY);
+ if (!ddrtest_chan){
+ pr_info("[DMA]test request channel failed \n");
+ dma_free_coherent(NULL, 2*size, vir_addr, ddrtest_phy_addr );
+ return 1;
+ }
+ ddr.result =result_info;
+ ddr.size = size;
+ ddr_dma_config(ddrtest_phy_addr, ddrtest_phy_addr+size, size);
+ ddr_dma_start(&ddr, noncache_dma_cb);
+
+ while(!zx_noncache_dma_flag);
+
+ for(addr1=(unsigned int)vir_addr,addr2=(unsigned int)vir_addr+size; addr1<(unsigned int)vir_addr+size; addr1+=4,addr2+=4) {
+ if(zx_read_reg(addr1) != zx_read_reg(addr2)) {
+
+ pr_info("addr:0x%x cacheable_dma failed. \n", addr1);
+ sprintf(result_info->noncache_dma, "addr:0x%x noncache_dma failed", addr1);
+ dma_free_coherent(NULL, 2*size, vir_addr, ddrtest_phy_addr );
+ dma_release_channel(ddrtest_chan);
+ return 1;
+ }
+ }
+ zx_noncache_dma_flag=0;
+
+ pr_info("ddr_noncache_dma_test succeeded . \n");
+ sprintf(result_info->noncache_dma, "ddr_noncache_dma_test succeeded");
+
+ dma_free_coherent(NULL, 2*size, vir_addr, ddrtest_phy_addr );
+ dma_release_channel(ddrtest_chan);
+
+ return 0;
+}
+
+void cacheable_dma_cb(void *data)
+{
+ #if 0
+ unsigned int addr, addr1 ,addr2;
+ struct zx_ddr *ddr=(struct zx_ddr *)data;
+ unsigned int size=ddr->size;
+ struct ddr_test_result *result =ddr->result;
+
+ //addr=(unsigned int)ddrtest_buffer +size;
+ //ddr_invalide_range(addr, size);
+ dma_sync_single_for_cpu(NULL,ddrtest_phy_addr+size , size,DMA_BIDIRECTIONAL);
+
+ for(addr1=(unsigned int)ddrtest_buffer, addr2=(unsigned int)ddrtest_buffer+size; addr1<(unsigned int)ddrtest_buffer+size; addr1+=4, addr2+=4 ){
+ if(zx_read_reg(addr1) != zx_read_reg(addr2)) {
+
+ pr_info("addr:0x%x cacheable_dma failed. \n", addr1);
+ sprintf(result->cacheable_dma, "addr:0x%x cacheable_dma failed", addr1);
+ zx_cacheable_dma_flag=1;
+
+ return;
+ }
+ }
+
+ pr_info("ddr_cacheable_dma_test succeeded. \n");
+ sprintf(result->cacheable_dma, "ddr_cacheable_dma_test succeeded");
+ #endif
+ zx_cacheable_dma_flag=1;
+
+}
+
+static int ddr_cacheable_dma_test(unsigned int size, struct ddr_test_result *result_info)
+{
+ unsigned int addr, addr1, addr2;
+ dma_cap_mask_t mask;
+ volatile unsigned int i=100;
+ struct zx_ddr ddr;
+
+ if( NULL==result_info )
+ return 1;
+
+ zx_cacheable_dma_flag=0;
+
+ /* alloc buffer */
+ ddrtest_buffer = kzalloc(2*size, GFP_KERNEL);
+ if (!ddrtest_buffer) {
+ pr_info("ddr_cacheable_dma_test alloc ddr failed. \n");
+ return 1;
+ }
+ //pr_info("ddr_cacheable_dma_test alloc buffer (0x%x). \n", (unsigned int)ddrtest_buffer);
+
+ memset(ddrtest_buffer, 0, size);
+ for(addr=(unsigned int)ddrtest_buffer; addr<(unsigned int)ddrtest_buffer+size; addr+=4 )
+ zx_write_reg(addr, addr);
+
+ //addr=(unsigned int)ddrtest_buffer;
+ //ddr_clean_range(addr, size);
+ //ddr_invalide_range(addr, size);
+ dma_sync_single_for_device(NULL,ddrtest_phy_addr , size,DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(NULL,ddrtest_phy_addr, size,DMA_BIDIRECTIONAL);
+
+ /* map dma address */
+ ddrtest_phy_addr = dma_map_single(NULL, (void *)ddrtest_buffer, 2*size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(NULL, ddrtest_phy_addr)) {
+ pr_info("dma_mapping_error ddr failed . \n");
+ kfree(ddrtest_buffer);
+ return 1;
+ }
+
+ //config dma
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ ddrtest_chan=dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_MEMORY);
+ if (!ddrtest_chan){
+ pr_info("[DMA]test request channel failed \n");
+ dma_unmap_single(NULL, ddrtest_phy_addr, 2* size, DMA_BIDIRECTIONAL);
+ kfree(ddrtest_buffer);
+ return 1;
+ }
+ ddr.result =result_info;
+ ddr.size = size;
+ ddr_dma_config(ddrtest_phy_addr, ddrtest_phy_addr+size, size);
+ ddr_dma_start(&ddr, cacheable_dma_cb);
+
+ while(!zx_cacheable_dma_flag);
+
+ dma_sync_single_for_cpu(NULL,ddrtest_phy_addr+size , size,DMA_BIDIRECTIONAL);
+ for(addr1=(unsigned int)ddrtest_buffer, addr2=(unsigned int)ddrtest_buffer+size; addr1<(unsigned int)ddrtest_buffer+size; addr1+=4, addr2+=4 ){
+ if(zx_read_reg(addr1) != zx_read_reg(addr2)) {
+
+ pr_info("addr:0x%x cacheable_dma failed. \n", addr1);
+ sprintf(result_info->cacheable_dma, "addr:0x%x cacheable_dma failed", addr1);
+ dma_release_channel(ddrtest_chan);
+ dma_unmap_single(NULL, ddrtest_phy_addr, 2* size, DMA_BIDIRECTIONAL);
+ kfree(ddrtest_buffer);
+ return 1;
+ }
+ }
+ zx_cacheable_dma_flag=0;
+
+ pr_info("ddr_cacheable_dma_test succeeded. \n");
+ sprintf(result_info->cacheable_dma, "ddr_cacheable_dma_test succeeded");
+
+ dma_release_channel(ddrtest_chan);
+ dma_unmap_single(NULL, ddrtest_phy_addr, 2* size, DMA_BIDIRECTIONAL);
+ kfree(ddrtest_buffer);
+
+ return 0;
+}
+
+
+static void cacheable_speed_cb(void *data)
+{
+
+ #if 0
+ unsigned int addr, addr1 ,addr2;
+ struct zx_ddr *ddr=(struct zx_ddr *)data;
+ unsigned int size=ddr->size;
+ struct ddr_test_result *result =ddr->result;
+
+ //addr=(unsigned int)ddrtest_buffer +size;
+ dma_sync_single_for_cpu(NULL,ddrtest_phy_addr+size , size,DMA_BIDIRECTIONAL);
+
+ for(addr1=(unsigned int)ddrtest_buffer, addr2=(unsigned int)ddrtest_buffer+size; addr1<(unsigned int)ddrtest_buffer+size; addr1+=4, addr2+=4 ){
+ if(zx_read_reg(addr1) != zx_read_reg(addr2))
+ {
+ pr_info("cacheable_speed_cb failed failed. \n");
+ sprintf(result->speed, "addr:0x%x speed failed", addr1);
+ zx_cacheable_dma_flag=1;
+ return;
+ }
+ }
+ #endif
+
+ zx_cacheable_dma_flag=1;
+
+}
+
+
+static int ddr_cacheable_speed_test(unsigned int size, struct ddr_test_result *result_info)
+{
+ unsigned int cnt=0;
+ unsigned int addr;
+ dma_cap_mask_t mask;
+ unsigned int time1=0;
+ unsigned int time2=0;
+ unsigned int elapse_time=0;
+ unsigned int speed=0;
+ struct zx_ddr ddr;
+
+ if( NULL==result_info )
+ return 1;
+
+ /* alloc buffer */
+ ddrtest_buffer = kzalloc(2*size, GFP_KERNEL);
+ if (!ddrtest_buffer) {
+ pr_info("ddr_cacheable_speed_test alloc ddr failed. \n");
+ return 1;
+ }
+ //pr_info("ddr_cacheable_speed_test alloc buffer (0x%x). \n", (unsigned int)ddrtest_buffer);
+
+ memset(ddrtest_buffer, 0, size);
+ for(addr=(unsigned int)ddrtest_buffer; addr<(unsigned int)ddrtest_buffer+size; addr+=4 )
+ zx_write_reg(addr, addr);
+
+ //addr=(unsigned int)ddrtest_buffer;
+ //ddr_clean_range(addr, size);
+ //ddr_invalide_range(addr, size);
+ dma_sync_single_for_device(NULL,ddrtest_phy_addr , size,DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(NULL,ddrtest_phy_addr, size,DMA_BIDIRECTIONAL);
+
+ /* map dma address */
+ ddrtest_phy_addr = dma_map_single(NULL, (void *)ddrtest_buffer, 2*size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(NULL, ddrtest_phy_addr)) {
+ pr_info("dma_mapping_error ddr failed . \n");
+ kfree(ddrtest_buffer);
+ return 1;
+ }
+
+ //config dma
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ ddrtest_chan=dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_MEMORY);
+ if (!ddrtest_chan){
+ pr_info("[DMA]test request channel failed \n");
+ dma_unmap_single(NULL, ddrtest_phy_addr, 2* size, DMA_BIDIRECTIONAL);
+ kfree(ddrtest_buffer);
+ return 1;
+ }
+
+ time1= ioread32(CLOCKDELAY_BASE+CUR_VALUE);
+ ddr.result =result_info;
+ ddr.size = size;
+ for(cnt=0; cnt<1000;cnt++){
+ //a to b
+ zx_cacheable_dma_flag=0;
+ ddr_dma_config(ddrtest_phy_addr, ddrtest_phy_addr+size, size);
+ ddr_dma_start(&ddr,cacheable_speed_cb);
+ while(!zx_cacheable_dma_flag);
+
+ //b to a
+ zx_cacheable_dma_flag=0;
+ ddr_dma_config(ddrtest_phy_addr+size, ddrtest_phy_addr, size);
+ ddr_dma_start(&ddr,cacheable_speed_cb);
+ while(!zx_cacheable_dma_flag);
+ zx_cacheable_dma_flag=0;
+
+ }
+
+ time2 = ioread32(CLOCKDELAY_BASE+CUR_VALUE);
+
+ if(time1>=time2)
+ elapse_time=(time1 - time2)/(2*cnt*26);
+ else
+ elapse_time=(0xffffffff -(time2 - time1))/(2*cnt*26);
+
+ speed = (size/1024 *1000000/elapse_time/1024);
+
+
+ dma_release_channel(ddrtest_chan);
+ dma_unmap_single(NULL, ddrtest_phy_addr, 2* size, DMA_BIDIRECTIONAL);
+ kfree(ddrtest_buffer);
+
+ pr_info("zx_ddr_cacheable_speed: %d MByte/s . elapse_time: %d us elapse_time \n", speed , elapse_time);
+ sprintf(result_info->speed, "ddr read_write speed:%d MByte/s", speed);
+
+ return 0;
+}
+
+#endif
+
+
+SYSCALL_DEFINE5(get_ddrtestinfo, char __user *, noncache_cpu,
+ char __user *, cacheable_cpu,
+ char __user *, noncache_dma,
+ char __user *, cacheable_dma,
+ char __user *, speed)
+{
+ int err = 0;
+
+#ifdef CONFIG_DDR_ZX29_MODULE
+
+ struct ddr_test_result result_info ={0};
+
+ //pr_info("\n get_ddrtestinfo begin \n");
+
+
+ if (noncache_cpu) {
+ err =ddr_noncache_cpu_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ return -EINVAL;
+
+ err |= copy_to_user(noncache_cpu, &result_info.noncache_cpu, sizeof(result_info.noncache_cpu));
+ }
+
+ if (cacheable_cpu) {
+ err =ddr_cacheable_cpu_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ return -EINVAL;
+ err |= copy_to_user(cacheable_cpu, &result_info.cacheable_cpu, sizeof(result_info.cacheable_cpu));
+ }
+ if (noncache_dma) {
+ err =ddr_noncache_dma_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ return -EINVAL;
+ err |= copy_to_user(noncache_dma, &result_info.noncache_dma, sizeof(result_info.noncache_dma));
+ }
+ if (cacheable_dma) {
+ err =ddr_cacheable_dma_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ return -EINVAL;
+ err |= copy_to_user(cacheable_dma, &result_info.cacheable_dma, sizeof(result_info.cacheable_dma));
+ }
+ if (speed) {
+ err =ddr_cacheable_speed_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ return -EINVAL;
+ err |= copy_to_user(speed, &result_info.speed, sizeof(result_info.speed));
+ }
+
+ //pr_info("\n get_ddrtestinfo end \n");
+#endif
+
+
+ return err ? -EFAULT : 0;
+
+}
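+
+/*
+ * Editor's illustrative user-space sketch only: calling the syscall above.
+ * It assumes the platform wires up a __NR_get_ddrtestinfo number in its
+ * syscall table and that each buffer is at least as large as the matching
+ * ddr_test_result field (128 bytes is only a guess here).
+ */
+#if 0
+#include <unistd.h>
+#include <stdio.h>
+
+int main(void)
+{
+	char noncache_cpu[128], cacheable_cpu[128];
+	char noncache_dma[128], cacheable_dma[128], speed[128];
+
+	/* each non-NULL pointer triggers the corresponding DDR test */
+	if (syscall(__NR_get_ddrtestinfo, noncache_cpu, cacheable_cpu,
+		    noncache_dma, cacheable_dma, speed))
+		return 1;
+
+	printf("%s\n%s\n%s\n%s\n%s\n", noncache_cpu, cacheable_cpu,
+	       noncache_dma, cacheable_dma, speed);
+	return 0;
+}
+#endif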
+#ifdef CONFIG_DDR_ZX29_MODULE
+
+#define TEST_TIMES 30
+void zx_ddr_test(void)
+{
+ int err = 0;
+ int i =TEST_TIMES;
+
+ struct ddr_test_result result_info ={0};
+
+ pr_info("\n ddr_test begin \n");
+
+ while(i--)
+ {
+
+ pr_info("\n test times =%d\n", TEST_TIMES-i);
+
+ err= ddr_noncache_cpu_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ pr_info("ddr_noncache_cpu_test failed!");
+
+ err=ddr_cacheable_cpu_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ pr_info("ddr_cacheable_cpu_test failed!");
+
+ err=ddr_noncache_dma_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ pr_info("ddr_noncache_dma_test failed!");
+
+ err=ddr_cacheable_dma_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ pr_info("ddr_cacheable_dma_test failed!");
+
+ err =ddr_cacheable_speed_test(DDR_TEST_SIZE, &result_info);
+ if(err)
+ pr_info("ddr_cacheable_speed_test failed!");
+
+ msleep(2);
+
+ }
+
+ //pr_info("\n ddr_test end \n");
+}
+
+EXPORT_SYMBOL(zx_ddr_test);
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/efuse/Makefile b/ap/os/linux/linux-3.4.x/drivers/soc/zte/efuse/Makefile
new file mode 100755
index 0000000..74e643a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/efuse/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for efuse driver.
+#
+
+obj-y += efuse_zx.o
+
+#ccflags-y += -I$(TOPDIR)/cp/ps/modem/com/inc
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/efuse/efuse_zx.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/efuse/efuse_zx.c
new file mode 100644
index 0000000..3c4da88
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/efuse/efuse_zx.c
@@ -0,0 +1,401 @@
+/*
+ * efuse_zx.c
+ *
+ * Copyright (C) 2015 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <mach/board.h>
+#include <mach/iomap.h>
+#include <mach/debug.h>
+#include <linux/io.h>
+#include <mach/spinlock.h>
+#include <mach/pcu.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/soc/zte/efuse/efuse_zx.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+
+#define EFUSE_RAM_BASE (ZX_EFUSE_BASE+0x40)
+
+#define SECURE_EN_OFFSET 0
+#define PUB_KEY_HASH_OFFSET 1
+#define DEVICE_ID_OFFSET 5
+
+#define CHIP_COM_EN_FLAG 0xF86302FF
+#define CHIP_COM_DIS_FLAG 0xF8630200
+
+#define CHIP_SPE_NYB_EN_FLAG 0x1E871EFF
+#define CHIP_SPE_NYB_DIS_FLAG 0x1E871E00
+#define CHIP_SPE_NYC_EN_FLAG 0x1E871FFF
+#define CHIP_SPE_NYC_DIS_FLAG 0x1E871F00
+#define CHIPE_SPE_APM_EN_FLAG 0x1E8720FF
+#define CHIPE_SPE_APM_DIS_FLAG 0x1E872000
+#define CHIPECO_SPE_APM_EN_FLAG 0x1E8721FF
+#define CHIPECO_SPE_APM_DIS_FLAG 0x1E872100
+#define CHIPECO_SPE_NYB_EN_FLAG 0x1E8722FF
+#define CHIPECO_SPE_NYB_DIS_FLAG 0x1E872200
+#define CHIPECO_SPE_NYC_EN_FLAG 0x1E8723FF
+#define CHIPECO_SPE_NYC_DIS_FLAG 0x1E872300
+#define CHIPECO_SPE_256M_EN_FLAG 0x1E8724FF
+#define CHIPECO_SPE_256M_DIS_FLAG 0x1E872400
+
+
+#define EFUSE_PROGRAM_MAX_CNT 5
+
+typedef volatile struct
+{
+ u32 secure_flag;
+ u32 puk_hash[4];
+ u32 secureDevId[3];
+} s_EfuseStruct;
+
+typedef struct
+{
+ u32 uiPubKeyRsaE[32];
+ u32 uiPubKeyRsaN[32];
+} s_ImageHeader;
+
+typedef volatile struct
+{
+ u32 version; /* version register */
+ u32 ctrl; /* control register: [0]start/busy [3:1]op_code [4]chip_sel [5]read_mode [6]int_en */
+ u32 addr; /* operation address register [7:0] */
+ u32 pdata; /* program data register [31:0] */
+ u32 rdata; /* read data register [31:0] */
+ u32 status; /* status register: [0]int_status [1]cache_valid */
+ u32 reserved0[2];
+ u32 timing1; /* timing register 1 */
+ u32 timing2;
+ u32 timing3;
+ u32 timing4;
+ u32 reserved1[4];
+ u32 cache[8]; /* cache registers */
+} s_EfuseRegister;
+
+struct mutex efuseMutex;
+
+/*******************************************************************************
+* Function: efuse_Program
+* Description: program 32bits data
+* For continuous reads the controller generates the operation address
+* internally and the ADDR register is ignored; random reads use ADDR[2:0]
+* and the high bits are ignored; continuous writes use only ADDR[7:5] and
+* the low address bits are generated automatically by the controller;
+* random writes use ADDR[7:0].
+* Parameters:
+* Input:
+* None
+* Output:
+* None
+* Returns:
+* None
+* Others:
+*******************************************************************************/
+static int efuse_Program(u32 cache_offset, u32 data)
+{
+ s_EfuseRegister * pReg = (s_EfuseRegister *)(ZX_EFUSE_BASE);
+ int i = 0;
+
+ for(i = 0; i < EFUSE_PROGRAM_MAX_CNT; i++) {
+ /*********************write***********************/
+ /*step1, wait operation done*/
+ while(pReg->ctrl & 0x1);/*bit0=1 ctl is busy*/
+
+ /*step2, program data & addr*/
+ pReg->addr = (cache_offset<<5);
+ pReg->pdata = data;
+
+ /*step3, select efuse0 32bits program*/
+ pReg->ctrl = (0<<4)|(2<<1)|1;/*cs0 program 32bit*/
+
+ /*step4, wait operation done*/
+ while(pReg->ctrl & 0x1);/* bit0=1 ctl is busy*/
+
+ /*step5, select efuse1 32bits program*/
+ pReg->ctrl = (1<<4)|(2<<1)|1;/*cs1 program 32bit*/
+
+ /*step6, wait operation done*/
+ while(pReg->ctrl & 0x1);/*bit0=1 ctl is busy*/
+
+ /*********************read************************/
+ /*step7, select efuse0 read all*/
+ pReg->ctrl = 1;
+ /*step8, wait cache valid*/
+ while(!(pReg->status & 0x2));/* bit1=1 cache is valid */
+ /********************compare**********************/
+ if(SECURE_EN_OFFSET == cache_offset) {
+ if(data == (pReg->cache[cache_offset] & 0xFF))
+ return 0;
+ }else{
+ if(data == pReg->cache[cache_offset])
+ return 0;
+ }
+
+ }
+
+ return -1;
+}
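+
+/*
+ * Editor's illustrative sketch only: reading one word back from the efuse
+ * cache, following the same "read all" sequence efuse_Program() uses for
+ * its verify step. The helper name is hypothetical; cache_offset must be
+ * within the 8-word cache[] array of s_EfuseRegister.
+ */
+#if 0
+static u32 efuse_read_cached(u32 cache_offset)
+{
+	s_EfuseRegister *pReg = (s_EfuseRegister *)(ZX_EFUSE_BASE);
+
+	while (pReg->ctrl & 0x1)
+		;				/* wait until the controller is idle */
+	pReg->ctrl = 1;				/* select efuse0, read all into cache */
+	while (!(pReg->status & 0x2))
+		;				/* wait until the cache is valid */
+
+	return pReg->cache[cache_offset];
+}
+#endif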
+
+/*******************************************************************************
+* Function: efuse_ReadEn
+* Description:
+* Parameters:
+* Input:
+* None
+* Output:
+* None
+* Returns:
+* None
+* Others:
+*******************************************************************************/
+static void efuse_ReadEn(void)
+{
+ s_EfuseRegister * pReg = (s_EfuseRegister *)(ZX_EFUSE_BASE);
+
+ /*step1, wait operation done*/
+ while(pReg->ctrl & 0x1);/*bit0=1 ctl is busy*/
+
+ /*step2, select efuse0 read all*/
+ pReg->ctrl = 1;
+
+ /*step3, wait cache valid*/
+ while(!(pReg->status & 0x2));/* bit1=1 cache is valid */
+
+}
+
+/*******************************************************************************
+* Function: zDrvEfuse_GetSecureMsg
+* Description: get pub key from iram, load by uboot
+* Parameters:
+* Input:
+* None
+* Output:
+* None
+* Returns:
+* None
+* Others:
+*******************************************************************************/
+void zDrvEfuse_GetSecureMsg(T_ZDrvEfuse_Secure *secure)
+{
+ u32 i = 0;
+ s_ImageHeader *s_header = (s_ImageHeader *)(SECURE_PUK_BASE);
+ s_EfuseStruct *s_efuse = (s_EfuseStruct *)(EFUSE_RAM_BASE);
+
+ mutex_lock(&efuseMutex);
+
+ efuse_ReadEn();
+
+ for(i = 0; i < 32; i ++)
+ {
+ secure->pubKeyRsaE[i] = s_header->uiPubKeyRsaE[i];
+ secure->pubKeyRsaN[i] = s_header->uiPubKeyRsaN[i];
+ }
+
+ secure->secureFlag = s_efuse->secure_flag;
+
+ for(i = 0; i < 4; i ++)
+ {
+ secure->pubKeyHash[i] = s_efuse->puk_hash[i];
+ }
+
+ for(i = 0; i < 3; i ++)
+ {
+ secure->secureDevId[i] = s_efuse->secureDevId[i];
+ }
+
+ mutex_unlock(&efuseMutex);
+}
+EXPORT_SYMBOL(zDrvEfuse_GetSecureMsg);
+/*******************************************************************************
+* Function: zDrvEfuse_SetSecureMsg
+* Description: set efuse value
+* Parameters:
+* Input:
+* None
+* Output:
+* None
+* Returns:
+* None
+* Others:
+*******************************************************************************/
+s32 zDrvEfuse_SetSecureMsg(E_ZDrvEfuse_SecureMsg secure_msg, u32 *secure_buf)
+{
+ u32 i = 0;
+ T_ZDrvEfuse_Secure secure = {0};
+ u32 tmpbuf[4] = {0};
+ int ret = 0;
+
+ if(secure_msg >= MAX_ENUM)
+ {
+ return -EINVAL;
+ }
+
+ zDrvEfuse_GetSecureMsg(&secure);
+
+ if(((secure_msg == SECURE_EN)&&(secure.secureFlag & 0xff))||
+ ((secure_msg == PUB_KEY_HASH)&&(memcmp(secure.pubKeyHash,tmpbuf,sizeof(secure.pubKeyHash)) != 0))||
+ ((secure_msg == DEVICE_ID)&&(memcmp(secure.secureDevId,tmpbuf,sizeof(secure.secureDevId)) != 0)))
+ return -1;
+
+ mutex_lock(&efuseMutex);
+
+ switch(secure_msg)
+ {
+ case SECURE_EN:
+ {
+ ret =efuse_Program(SECURE_EN_OFFSET,*secure_buf);
+ if(ret < 0){
+ mutex_unlock(&efuseMutex);
+ return -1;
+ }
+ break;
+ }
+ case PUB_KEY_HASH:
+ {
+ for(i = 0; i < 4; i ++)
+ {
+ ret = efuse_Program((PUB_KEY_HASH_OFFSET+i),*(secure_buf+i));
+ if(ret < 0){
+ mutex_unlock(&efuseMutex);
+ return -1;
+ }
+ }
+ break;
+ }
+ case DEVICE_ID:
+ {
+ for(i = 0; i < 3; i ++)
+ {
+ ret =efuse_Program((DEVICE_ID_OFFSET+i),*(secure_buf+i));
+ if(ret < 0){
+ mutex_unlock(&efuseMutex);
+ return -1;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ mutex_unlock(&efuseMutex);
+
+ return 0;
+}
+
+int zx29_sfuse_set_secure(E_ZDrvEfuse_SecureMsg secure_msg, u32 *secure_buf)
+{
+ int ret = 0;
+ int len = 0;
+ void *buf=NULL;
+
+ switch(secure_msg)
+ {
+ case SECURE_EN:
+ len = sizeof(u32);
+ break;
+ case PUB_KEY_HASH:
+ len = 4 * sizeof(u32);
+ break;
+ case DEVICE_ID:
+ len = 3 * sizeof(u32);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if(NULL == buf)
+ return -ENOMEM;
+
+ ret = copy_from_user(buf, secure_buf, len);
+ if(ret) {
+ printk("%s: copy from user failed\n",__func__);
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ ret = zDrvEfuse_SetSecureMsg(secure_msg, buf);
+ kfree(buf);
+
+ return ret;
+}
+
+
+static long zx_efuse_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ T_ZDrvEfuse_Secure secure;
+
+ if (!arg)
+ return -EFAULT;
+
+ switch(cmd)
+ {
+ case EFUSE_GET_DATA:
+ zDrvEfuse_GetSecureMsg(&secure);
+ ret = copy_to_user((void *)arg, &secure, sizeof(T_ZDrvEfuse_Secure));
+ if(ret) {
+ printk("%s: copy to user failed\n",__func__);
+ ret = -EFAULT;
+ }
+ break;
+ case EFUSE_SET_SECURE_EN:
+ ret = zx29_sfuse_set_secure(SECURE_EN, (u32 *)arg);
+ break;
+ case EFUSE_SET_PUB_KEY_HASH:
+ ret = zx29_sfuse_set_secure(PUB_KEY_HASH, (u32 *)arg);
+ break;
+ case EFUSE_SET_DEVICE_ID:
+ ret = zx29_sfuse_set_secure(DEVICE_ID, (u32 *)arg);
+ break;
+ default:
+ return -EPERM;
+ }
+
+ return ret;
+}
+
+static const struct file_operations zx_efuse_ops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = zx_efuse_ioctl,
+};
+
+static struct miscdevice zx_efuse_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "efuse",
+ .fops = &zx_efuse_ops,
+};
+
+
+static int __init zx_efuse_init(void)
+{
+ int ret = 0;
+
+ mutex_init(&efuseMutex);
+
+ ret = misc_register(&zx_efuse_miscdev);
+ if (ret) {
+ printk(KERN_ERR"%s: efuse failed to register miscdev (ret = %d)\n", __FILE__, ret);
+ return ret;
+ }
+
+ printk("[xxx] efuse dev inited! \n");
+
+ return ret;
+}
+
+module_init(zx_efuse_init);
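+
+/*
+ * Editor's illustrative user-space sketch only: querying the efuse state
+ * through the misc device registered above. It assumes EFUSE_GET_DATA and
+ * T_ZDrvEfuse_Secure from efuse_zx.h are also visible to user space.
+ */
+#if 0
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+int read_efuse(T_ZDrvEfuse_Secure *secure)
+{
+	int fd = open("/dev/efuse", O_RDWR);
+	int ret;
+
+	if (fd < 0)
+		return -1;
+	ret = ioctl(fd, EFUSE_GET_DATA, secure);	/* fills *secure */
+	close(fd);
+	return ret;
+}
+#endif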
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/Makefile b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/Makefile
new file mode 100644
index 0000000..48beca6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/Makefile
@@ -0,0 +1,26 @@
+# When adding new entries keep the list in alphabetical order
+
+ifeq ($(CONFIG_PM),y)
+obj-y += zx-pm.o zx-pm-suspend.o zx-sleep.o zx-pm-context.o \
+ zx-cpu-sleep.o zx-pm-helpers.o zx-pm-custom.o
+obj-$(CONFIG_ARCH_ZX297520V3) += zx-pm-a53.o
+obj-$(CONFIG_ARCH_ZX297520V3) += zx-pm-v8.o
+obj-$(CONFIG_ARCH_ZX297520V2) += zx-pm-a9.o
+obj-$(CONFIG_ARCH_ZX297520V2) += zx-pm-v7.o
+obj-y += zx29-pm.o
+
+else
+obj-y += zx-pm-null.o
+endif
+
+ifeq ($(CONFIG_CPU_IDLE),y)
+obj-y += zx29-cpuidle.o
+obj-y += zx-cpuidle.o
+ccflags-y += -I/$(CP_ROOT_DIR)/ps/driver/inc/misc
+endif
+
+ifeq ($(CONFIG_CPU_FREQ),y)
+obj-y += zx29-cpufreq.o
+obj-y += zx-cpufreq.o
+endif
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpu-sleep.S b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpu-sleep.S
new file mode 100644
index 0000000..d888ddd
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpu-sleep.S
@@ -0,0 +1,104 @@
+/*
+ * ZTE CPU low power powerdown and powerup code.
+ *
+ * Copyright (C) 2013 ZTE, Inc.
+ * Written by ZXP
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+
+#include <asm/memory.h>
+
+.arm
+/* =======================================================================
+ * The wakeup code will jump to this code in ddr-mem.
+ * This code will build C environment for resume code.
+ *
+ * =======================================================================*/
+ENTRY(cpu_reset_handler)
+ blx invalidate_icache_v7 /* Clear I cache */
+
+ blx enable_icache_v7
+ blx invalidate_dcache_v7_all /* Clear all data cache levels visible to CPU */
+
+ blx cpu_resume
+ENDPROC(cpu_reset_handler)
+
+
+/* =======================================================================
+ * Because IRAM may power down, M0 will copy this code to IRAM(address 0)
+ * after CPU_AP is waked up.
+ *
+ * So this code will run in IRAM.
+ * =======================================================================*/
+ENTRY(cpu_wake_up)
+ nop /* add 16 nops */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ mov r2, #0x3e8 /*ldr r2, =1000 cpu_reset_handler */
+
+
+ ldr r5, [r2]
+ bx r5 /* call cpu_reset_handler */
+ENDPROC(cpu_wake_up)
+
+/* =======================================================================
+ * During DDR DFS, the A9 must run in IRAM and wait until the process is finished.
+ *
+ * r0 -- address for dfs finish flag
+ * =======================================================================*/
+#define DDR_DFS_FINISH 0x2
+
+ENTRY(waiting_ddr_dfs)
+waiting_ddr_dfs:
+ stmfd sp!, {r2, lr}
+
+acquire_again:
+ ldr r2, [r0]
+ cmp r2, #DDR_DFS_FINISH
+ bne acquire_again
+
+ ldmfd sp!, {r2, pc}
+ENDPROC(waiting_ddr_dfs)
+
+/* =======================================================================
+ *
+ * disable ddr port3.
+ * addr assigned 0x200
+ * =======================================================================*/
+ENTRY(do_sleep_cpu)
+
+ isb
+ dsb
+ wfi
+
+ bx lr
+ENDPROC(do_sleep_cpu)
+
+ENTRY(zx_jump_addr)
+ //mov pc, r0
+ B do_sleep_cpu
+ENDPROC(zx_jump_addr)
+
+wake_up_buf: .space 1024
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.c
new file mode 100644
index 0000000..0434bc3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.c
@@ -0,0 +1,367 @@
+/*
+ * drivers/soc/zte/power/zx-cpufreq.c
+ *
+ * Author:
+ * dongjian <dong.jian@zte.com.cn>
+ *
+ * Copyright (C) 2010-2013 ZTE CORPORATION. All rights reserved.
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/cpu.h>
+#include <linux/slab.h>
+
+#include <asm/system.h>
+
+//#include "mach/clock.h"
+#include "zx-pm.h"
+
+zx29xx_cpufreq_init_cb zx29xx_cpufreq_init;
+
+static struct zx_dvfs_info *zx_info;
+
+static struct cpufreq_frequency_table *freq_table;
+static struct cpufreq_freqs freqs;
+
+static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
+DEFINE_MUTEX(cpufreq_lock);
+static unsigned int locking_frequency;
+static bool frequency_locked;
+extern unsigned int freq_change_enabled_by_startup;
+extern unsigned int cpu_dfs_is_not_allowed;
+
+#ifdef CONFIG_PM
+static int zx_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static int zx_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+#endif
+
+int zx_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+unsigned int zx_getspeed(unsigned int cpu)
+{
+ unsigned int rate;
+
+ if (cpu >= CONFIG_NR_CPUS)
+ return 0;
+
+ rate = clk_get_rate(zx_info->cpu_clk) / 1000;
+
+ //pr_info("[CPUFREQ] get_cpu_rate: %d\n", rate);
+
+ return rate;
+}
+
+
+unsigned long zx_cpu_lowest_speed(void)
+{
+ unsigned long rate = ULONG_MAX;
+ int i;
+
+ for_each_online_cpu(i)
+ rate = min(rate, target_cpu_speed[i]);
+
+ return rate;
+}
+
+unsigned long zx_cpu_highest_speed(void)
+{
+ unsigned long rate = 0;
+ int i;
+
+ for_each_online_cpu(i)
+ rate = max(rate, target_cpu_speed[i]);
+
+ return rate;
+}
+
+extern u32 zDrvTsCtrl_DfsEn(void);
+static int zx_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int index, old_index;
+ int ret = 0;
+ unsigned int freq;
+ unsigned int cpu;
+
+ mutex_lock(&cpufreq_lock);
+
+ if((pm_get_mask_info()&PM_NO_CPU_FREQ) || cpu_dfs_is_not_allowed||zDrvTsCtrl_DfsEn())
+ {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if(freq_change_enabled_by_startup == 0)
+ goto out;
+
+ if (frequency_locked && target_freq != locking_frequency)
+ {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ freqs.old = policy->cur;
+ if (cpufreq_frequency_table_target(policy, freq_table,
+ freqs.old, relation, &old_index))
+ {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index))
+ {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ freq = freq_table[index].frequency;
+
+ freqs.new = freq_table[index].frequency;
+ freqs.cpu = policy->cpu;
+
+ for_each_online_cpu(cpu)
+ {
+ freqs.cpu = cpu;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+ if (zx_info->set_freq)
+ {
+ if(!zx_info->set_freq(old_index, index))
+ zx_info->freq_cur_idx = index;
+ }
+
+ for_each_online_cpu(cpu)
+ {
+ freqs.cpu = cpu;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+
+ target_cpu_speed[policy->cpu] = freq;
+
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return ret;
+}
+
+
+static int zx_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu >= CONFIG_NR_CPUS)
+ return -EINVAL;
+
+ cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+ policy->cur = zx_getspeed(policy->cpu);
+ target_cpu_speed[policy->cpu] = policy->cur;
+ locking_frequency = policy->cur;
+ pr_info("[CPUFREQ] %s cpu: %u, target_freq:%u\n", __func__, policy->cpu, policy->cur);
+
+ /* FIXME: what's the actual transition time? */
+ policy->cpuinfo.transition_latency = 300 * 1000;
+
+ if (num_online_cpus() == 1)
+ {
+ cpumask_copy(policy->related_cpus, cpu_possible_mask);
+ cpumask_copy(policy->cpus, cpu_online_mask);
+ }
+ else
+ {
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ cpumask_setall(policy->cpus);
+ }
+
+ return 0;
+}
+
+
+static int zx_cpufreq_policy_notifier(
+ struct notifier_block *nb, unsigned long event, void *data)
+{
+ int i, ret;
+ struct cpufreq_policy *policy = data;
+
+ if (event == CPUFREQ_NOTIFY)
+ {
+ ret = cpufreq_frequency_table_target(policy, freq_table,
+ policy->max, CPUFREQ_RELATION_H, &i);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block zx_cpufreq_policy_nb =
+{
+ .notifier_call = zx_cpufreq_policy_notifier,
+};
+
+/**
+ * zx_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
+ * context
+ * @notifier
+ * @pm_event
+ * @v
+ *
+ * While frequency_locked == true, target() ignores every frequency but
+ * locking_frequency. The locking_frequency value is the initial frequency,
+ * which is set by the bootloader. In order to eliminate possible
+ * inconsistency in clock values, we save and restore frequencies during
+ * suspend and resume and block CPUFREQ activities. Note that the standard
+ * suspend/resume cannot be used as they are too deep (syscore_ops) for
+ * regulator actions.
+ */
+static int zx_cpufreq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *v)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
+ static unsigned int saved_frequency;
+ unsigned int temp;
+
+ mutex_lock(&cpufreq_lock);
+ if(NULL == policy)
+ goto out;
+
+ switch (pm_event)
+ {
+ case PM_SUSPEND_PREPARE:
+ if (frequency_locked)
+ goto out;
+
+ frequency_locked = true;
+
+ if (locking_frequency)
+ {
+ saved_frequency = zx_getspeed(0);
+
+ mutex_unlock(&cpufreq_lock);
+ zx_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+ }
+ break;
+
+ case PM_POST_SUSPEND:
+ if (saved_frequency)
+ {
+ /*
+ * While frequency_locked, only locking_frequency
+ * is valid for target(). In order to use
+ * saved_frequency while keeping frequency_locked,
+ * we temporarily overwrite locking_frequency.
+ */
+ temp = locking_frequency;
+ locking_frequency = saved_frequency;
+
+ mutex_unlock(&cpufreq_lock);
+ zx_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+
+ locking_frequency = temp;
+ }
+ frequency_locked = false;
+ break;
+ }
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block zx_cpufreq_nb =
+{
+ .notifier_call = zx_cpufreq_pm_notifier,
+};
+
+
+static struct freq_attr *zx_cpufreq_attr[] =
+{
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver zx_cpufreq_driver =
+{
+ .flags = CPUFREQ_STICKY,
+ .verify = zx_verify_speed,
+ .target = zx_target,
+ .get = zx_getspeed,
+ .init = zx_cpufreq_cpu_init,
+ .name = "zx_cpufreq",
+ .attr = zx_cpufreq_attr,
+#ifdef CONFIG_PM
+ .suspend = zx_cpufreq_suspend,
+ .resume = zx_cpufreq_resume,
+#endif
+};
+
+static int __init zx_cpufreq_init(void)
+{
+ int ret = -EINVAL;
+
+ /* init dvfs driver */
+ zx_info = kzalloc(sizeof(struct zx_dvfs_info), GFP_KERNEL);
+ if (!zx_info)
+ return -ENOMEM;
+ ret = zx29xx_cpufreq_init(zx_info);
+ if (ret)
+ goto err_mach_init;
+ freq_table = zx_info->freq_table;
+
+ /* cpufreq notify */
+ ret = cpufreq_register_notifier(
+ &zx_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
+ if (ret)
+ goto err_mach_init;
+
+ /* pm notify */
+ register_pm_notifier(&zx_cpufreq_nb);
+
+ /* register driver */
+ if (cpufreq_register_driver(&zx_cpufreq_driver))
+ {
+ pr_info("[CPUFREQ] %s: failed to register cpufreq driver\n", __func__);
+ goto err_cpufreq;
+ }
+
+ pr_info("[CPUFREQ] register cpufreq driver OK!\n");
+
+ return 0;
+
+err_cpufreq:
+ cpufreq_unregister_notifier(&zx_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
+ unregister_pm_notifier(&zx_cpufreq_nb);
+
+err_mach_init:
+ kfree(zx_info);
+ pr_info("[CPUFREQ] %s: failed initialization\n", __func__);
+
+ return -EINVAL;
+}
+
+late_initcall(zx_cpufreq_init);
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.h
new file mode 100644
index 0000000..7e4068b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpufreq.h
@@ -0,0 +1,117 @@
+/*
+ * drivers/soc/zte/power/zx-cpufreq.h
+ *
+ * Copyright (c) 2013, ZTE Corporation.
+ * written by zxp
+ *
+ */
+
+#ifndef __MACH_ZX_CPU_FREQ_H
+#define __MACH_ZX_CPU_FREQ_H
+#ifdef CONFIG_ARCH_ZX297520V2
+#else
+#define SET_AXI_BY_HW
+//#define CONFIG_DDR_FREQ
+//#define SET_DDR_BY_HW
+#endif
+enum cpufreq_level_index {
+ L0, L1, L2, L3, L4,
+ L5, L6, L7, L8, L9,
+ L10, L11, L12, L13, L14,
+ L15, L16, L17, L18, L19,
+ L20,
+};
+
+struct zx_dvfs_info {
+ unsigned int freq_cur_idx;
+ unsigned int pll_safe_idx;
+ unsigned int max_support_idx;
+ unsigned int min_support_idx;
+ struct clk *cpu_clk;
+ unsigned int *volt_table;
+ struct cpufreq_frequency_table *freq_table;
+ int (*set_freq)(unsigned int, unsigned int);
+};
+
+#ifdef CONFIG_AXI_FREQ
+#ifdef CONFIG_ARCH_ZX297520V2
+typedef enum
+{
+ AXI_FREQ_26M = 0,
+ AXI_FREQ_39M = 1,
+ AXI_FREQ_52M = 2,
+ AXI_FREQ_78M = 3,
+ AXI_FREQ_104M = 4,
+ AXI_FREQ_122_88M = 5,
+ AXI_FREQ_156M = 6,
+ AXI_FREQ_NULL,
+ MAX_AXI_FREQ
+}zx29_axi_freq;
+
+#else
+typedef enum
+{
+ AXI_FREQ_26M = 0,
+ AXI_FREQ_156M = 1,
+ AXI_FREQ_124_8M = 2,
+ AXI_FREQ_104M = 3,
+ AXI_FREQ_78M = 4,
+ AXI_FREQ_52M = 5,
+ AXI_FREQ_39M = 6,
+ AXI_FREQ_NULL,
+ MAX_AXI_FREQ
+}zx29_axi_freq;
+#endif
+typedef enum
+{
+ VOL_VO_800 = 0,
+ VOL_VO_825 = 1,
+ VOL_VO_850 = 2,
+ VOL_VO_875 = 3,
+ VOL_VO_900 = 4,
+ MAX_VOL
+}zx29_vol;
+
+#endif
+
+#ifdef CONFIG_DDR_FREQ
+#ifdef CONFIG_ARCH_ZX297520V2
+typedef enum
+{
+ DDR_FREQ_156M = 0,
+ DDR_FREQ_312M = 1,
+ MAX_DDR_FREQ
+}zx29_ddr_freq;
+
+#else
+typedef enum
+{
+ DDR_FREQ_312M = 0,
+ DDR_FREQ_400M = 1,
+ DDR_FREQ_208M = 2,
+ DDR_FREQ_156M = 3,
+ MAX_DDR_FREQ
+}zx29_ddr_freq;
+#endif
+#endif
+
+typedef int (*zx29xx_cpufreq_init_cb)(struct zx_dvfs_info *info);
+extern zx29xx_cpufreq_init_cb zx29xx_cpufreq_init;
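+
+/*
+ * Editor's illustrative sketch only: the shape of the machine-specific
+ * hook that zx-cpufreq.c calls through zx29xx_cpufreq_init at late init.
+ * The clock name, frequency table and set_freq callback are hypothetical
+ * placeholders; error handling is omitted.
+ */
+#if 0
+static int zx29xx_machine_cpufreq_init(struct zx_dvfs_info *info)
+{
+	info->cpu_clk = clk_get(NULL, "cpu");	/* hypothetical clock name */
+	info->freq_table = machine_freq_table;	/* CPUFREQ_TABLE_END terminated */
+	info->set_freq = machine_set_freq;	/* set_freq(old_index, new_index) */
+	return 0;
+}
+/* at machine init: zx29xx_cpufreq_init = zx29xx_machine_cpufreq_init; */
+#endif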
+
+extern unsigned int zx_getspeed(unsigned int cpu);
+extern int zx_update_cpu_speed(unsigned long rate);
+extern int zx_cpu_set_speed_cap(unsigned int *speed_cap);
+extern unsigned int zx_count_slow_cpus(unsigned long speed_limit);
+extern unsigned int zx_get_slowest_cpu_n(void);
+extern unsigned long zx_cpu_lowest_speed(void);
+extern unsigned long zx_cpu_highest_speed(void);
+
+#ifdef CONFIG_ZX_PM_DEBUG
+extern void cpufreq_test(unsigned int old_index, unsigned int new_index);
+#endif
+
+#ifdef CONFIG_AXI_FREQ
+extern int request_axi_freq(zx29_axi_freq axi_freq);
+#endif
+
+#endif /* __MACH_ZX_CPU_FREQ_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpuidle.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpuidle.c
new file mode 100644
index 0000000..6b5f3cf
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpuidle.c
@@ -0,0 +1,1117 @@
+/*
+ * zx297510 CPU idle Routines
+ *
+ * Copyright (C) 2013 ZTE, Ltd.
+ * Shine Yu <yu.xiang5@zte.com.cn>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/soc/zte/pm/drv_idle.h>
+#include <linux/module.h>
+#include "zx-pm.h"
+
+#include <linux/cp_types.h>
+#include "drvs_lpm.h"
+
+#ifdef CONFIG_ZX_PM_DEBUG
+//struct zx_idle_stats idle_stats;
+#endif
+#if 0
+struct cpuidle_driver zx_idle_driver = {
+ .name = "zx_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 0, /* no use cpuidle time keeping */
+};
+#endif
+
+static unsigned int deep_idle_disabled_by_startup = 0;
+static unsigned int deep_idle_disabled_by_suspend = 0;
+static unsigned int deep_idle_disabled_by_debug = 0;
+static unsigned int print_enabled_by_debug = 0;
+DEFINE_PER_CPU(struct cpuidle_device, zx_idle_dev);
+
+static struct delayed_work pm_idle_work;
+#define PM_IDLE_DELAY msecs_to_jiffies(30000)
+
+static unsigned int sleep_mode_flag = 0;
+static unsigned int drv_cpuidle_flag = 0;
+static DEFINE_SPINLOCK(zx_idle_lock);
+
+void zx_cpuidle_set_busy(drv_idle_flag devId)
+{
+ unsigned long flags;
+
+ if( devId >= IDLE_FLAG_MAX)
+ {
+ printk("[zx_cpuidle_set_busy] devId err, devId = %d\n",devId);
+ }
+ else
+ {
+ spin_lock_irqsave(&zx_idle_lock, flags);
+ drv_cpuidle_flag |= (1<<devId);
+ spin_unlock_irqrestore(&zx_idle_lock, flags);
+ }
+
+}
+EXPORT_SYMBOL(zx_cpuidle_set_busy);
+
+void zx_cpuidle_set_free(drv_idle_flag devId)
+{
+ unsigned long flags;
+
+ if( devId >= IDLE_FLAG_MAX)
+ {
+ printk("[zx_cpuidle_set_free] devId err, devId = %d\n",devId);
+ }
+ else
+ {
+ spin_lock_irqsave(&zx_idle_lock, flags);
+ drv_cpuidle_flag &= ~(1<<devId);
+ spin_unlock_irqrestore(&zx_idle_lock, flags);
+ }
+}
+EXPORT_SYMBOL(zx_cpuidle_set_free);
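+
+/*
+ * Editor's illustrative sketch only: how a peripheral driver brackets a
+ * busy period so deep idle is vetoed while it runs. IDLE_FLAG_SPI is a
+ * hypothetical drv_idle_flag value; the real IDs live in drv_idle.h.
+ */
+#if 0
+static void spi_do_transfer(void)
+{
+	zx_cpuidle_set_busy(IDLE_FLAG_SPI);	/* block deep idle while busy */
+	/* ... perform the transfer ... */
+	zx_cpuidle_set_free(IDLE_FLAG_SPI);	/* allow deep idle again */
+}
+#endif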
+
+static void pm_idle_func(struct work_struct *work)
+{
+ deep_idle_disabled_by_startup = 1;
+}
+
+/**
+ * idle_can_enter_deep_sleep - check whether deep sleep can be entered
+ *
+ *
+ */
+static int idle_can_enter_deep_sleep(void)
+{
+ /* can not enter deep sleep now */
+ if (deep_idle_disabled_by_suspend)
+ return false;
+
+ /* This mode can only be entered when the other cores are offline */
+ if(deep_idle_disabled_by_debug || num_online_cpus() > 1)
+ return false;
+
+ /* cannot enter deep sleep right after kernel startup; we delay it by 30s */
+ if(!deep_idle_disabled_by_startup)
+ return false;
+
+ if(pm_dma_used())
+ return false;
+
+ if(pm_get_mask_info()&PM_IDLE_WFI)
+ return false;
+
+ if(drv_cpuidle_flag != 0)
+ return false;
+
+
+ return true;
+}
+
+
+static int zx_pm_idle_prepare(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ int new_index = index;
+
+ if(new_index == drv->safe_state_index)
+ return new_index;
+
+ if(!idle_can_enter_deep_sleep())
+ {
+ new_index = drv->safe_state_index;
+ return new_index;
+ }
+
+ return new_index;
+}
+/****************************************************/
+#define MAX_TIME_PER_CYCLE div64_u64(div64_u64((u64)0x7fffffff*USEC_PER_SEC, (u64)PERSISTENT_TIMER_CLOCK_RATE),(u64)1000)
+
+#define SAVE_RESTORE_TIME (9) //ms
+#define SHUTDOWN_SLEEP_TIME (2) //ms
+#define MAX_PM_CB_CNT (20)
+
+enum {
+ DEBUG_IDLE_MSG = 1U << 0,
+ DEBUG_WAKE_LOCK = 1U << 1,
+};
+
+typedef int (*pm_callback_fn)(void);
+typedef struct
+{
+ pm_callback_fn cb;
+ int is_sucess;
+}pm_cb_t;
+
+static int idle_debug_mask = 0; //DEBUG_IDLE_MSG;
+
+/* /sys/module/zx297520_cpuidle/parameters/debug_mask */
+module_param(idle_debug_mask, int, 0644);
+
+static u32 sleep_count = 0;
+static pm_cb_t pm_enter_cb[MAX_PM_CB_CNT];
+static pm_cb_t pm_exit_cb[MAX_PM_CB_CNT];
+static unsigned int pm_cb_cnt = 0;
+
+extern void pm_idle_sram_start(void);
+extern void pm_idle_sram_end(void);
+extern void (*arm_pm_idle)(void);
+extern void idle_set_sleeptime(s64 sleep_time);
+extern void pm_debug_wakelocks(void);
+extern void zDrvInt_MaskIrq( u32 uiLine );
+extern void zDrvInt_UnmaskIrq( u32 uiLine );
+
+extern bool zPs_IsTdMasterMode(void);
+extern bool zPs_IsLteMasterMode(void);
+extern bool zPs_IsFddMasterMode(void);
+
+int zx_idle_get_debug_flag(void)
+{
+ return idle_debug_mask;
+}
+
+int zx_idle_get_idle_flag(void)
+{
+ return drv_cpuidle_flag;
+}
+/**
+ * zx_pm_register_callback
+ *
+ * Register a pair of callbacks for sleep enter and exit.
+ * enter_cb: called before sleep entry; exit_cb: called after sleep exit.
+ *
+ *
+ */
+int zx_pm_register_callback(pm_callback_fn enter_cb, pm_callback_fn exit_cb)
+{
+ int i = 0;
+ if(pm_cb_cnt >= MAX_PM_CB_CNT)
+ return -ENOMEM;
+
+ if(!enter_cb)
+ return -EINVAL;
+
+ if(!exit_cb)
+ return -EINVAL;
+
+ for(i = 0; i < pm_cb_cnt; i++){
+ if(pm_enter_cb[i].cb == enter_cb ||
+ pm_exit_cb[i].cb == exit_cb)
+ return -EINVAL;
+ }
+ pm_enter_cb[pm_cb_cnt].cb = enter_cb;
+ pm_enter_cb[pm_cb_cnt].is_sucess=0;
+ pm_exit_cb[pm_cb_cnt].cb = exit_cb;
+ pm_exit_cb[pm_cb_cnt].is_sucess=0;
+
+ pm_cb_cnt ++;
+
+ return 0;
+}
+EXPORT_SYMBOL(zx_pm_register_callback);
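+
+/*
+ * Editor's illustrative sketch only: a driver registering a sleep
+ * enter/exit pair. The callback names are hypothetical; a non-zero return
+ * from the enter callback aborts the sleep attempt (see
+ * zx_cpu_dev_idle_enter below).
+ */
+#if 0
+static int my_dev_sleep_enter(void)
+{
+	/* quiesce the device; return non-zero to veto this sleep attempt */
+	return 0;
+}
+
+static int my_dev_sleep_exit(void)
+{
+	/* restore the device state after wakeup */
+	return 0;
+}
+
+/* at probe time: zx_pm_register_callback(my_dev_sleep_enter, my_dev_sleep_exit); */
+#endif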
+
+/**
+ * zx_cpu_dev_idle_enter
+ *
+ * Invoke each registered sleep-enter callback in order and stop at the
+ * first failure; callbacks that succeeded are marked so that
+ * zx_cpu_dev_idle_exit() only calls their matching exit callbacks.
+ *
+ */
+int zx_cpu_dev_idle_enter(void)
+{
+ int i;
+ int ret=0;
+
+ for(i=0;i<pm_cb_cnt; i++){
+ pm_enter_cb[i].is_sucess = 0;
+ }
+
+ for(i=0;i<pm_cb_cnt; i++){
+
+ if(pm_enter_cb[i].cb)
+ ret = pm_enter_cb[i].cb();
+
+ if(ret)
+ return ret;
+
+ pm_enter_cb[i].is_sucess = 1;
+
+ }
+
+ return ret;
+}
+
+/**
+ * zx_cpu_dev_idle_exit
+ *
+ * The exit callback is called only for devices whose enter callback succeeded.
+ *
+ *
+ *
+ */
+int zx_cpu_dev_idle_exit(void)
+{
+ int i;
+ int ret=0;
+
+ for(i=0;i<pm_cb_cnt; i++){
+ if(pm_exit_cb[i].cb && pm_enter_cb[i].is_sucess )
+ ret = pm_exit_cb[i].cb();
+
+ if(ret)
+ return ret;
+ }
+
+ return ret;
+}
+/**
+ * zx_debug_check
+ *
+ * check whether debugging allows sleep.
+ *
+ * Returns "false" when pm_get_mask_info()&PM_IDLE_WFI is set,
+ * otherwise returns "true".
+ */
+static bool zx_debug_check(void)
+{
+ if(pm_get_mask_info()&PM_IDLE_WFI)
+ return false;
+ else
+ return true;
+}
+/**
+ * zx_sleep_wakelock_check
+ *
+ * check whether Linux allows sleep.
+ *
+ * when active wakelock count is 0, return "true",
+ * else return "false"
+ */
+static bool zx_sleep_wakelock_check(void)
+{
+ unsigned int temp_count;
+
+ if (idle_debug_mask & DEBUG_WAKE_LOCK)
+ pm_debug_wakelocks();
+
+ if(pm_get_wakeup_count(&temp_count, false))
+ {
+ if (pm_save_wakeup_count(temp_count))
+ {
+ return true;
+ }
+ else
+ {
+ pr_info("[SLP]: error save wakeup_count: %d ", temp_count);
+ }
+ }
+
+ return false;
+}
+
+/**
+ * zx_sleep_idle_check
+ *
+ * check whether Linux allows idle sleep.
+ *
+ * when idle flag is 0, return "true",
+ * else return "false"
+ */
+static bool zx_sleep_idle_check(void)
+{
+ if(pm_dma_used())
+ return false;
+
+ if(drv_cpuidle_flag != 0)
+ return false;
+
+ return true;
+
+}
+
+/**
+ * zx_cpu_kernel_sleep
+ *
+ *
+ *
+ */
+void zx_cpu_kernel_sleep(void)
+{
+ cpu_do_idle();
+}
+
+/**
+ * zx_sleep_set_timer
+ *
+ * input: expect_time_ms --- the expected sleep time, in ms
+ *
+ * return: timestamp (ms) taken on idle entry
+ */
+volatile u32 tick_cur_cnt =0;
+
+u64 zx_sleep_set_timer(u32 expect_time_ms)
+{
+ u64 tmptime = 0;
+
+ idle_set_sleeptime((s64)expect_time_ms*(s64)1000);
+ pm_stop_tick();
+ tick_cur_cnt=pm_read_tick();
+ tmptime =div64_u64(read_persistent_us(),(u64)1000);// read_persistent_us()/1000; //ms
+ return tmptime;
+}
+
+/**
+ * zx_sleep_read_timer
+ *
+ * return: timestamp (ms) taken on idle exit
+ *
+ *
+ */
+u64 zx_sleep_read_timer(void)
+{
+ u64 tmptime = 0;
+ zx29_stop_wake_timer();
+
+ tmptime =div64_u64(read_persistent_us(),(u64)1000);// read_persistent_us()/1000; //ms
+ pm_restart_tick(tick_cur_cnt);
+
+ return tmptime;
+}
+
+/**
+ * zx_sleep_mask_int
+ *
+ * mask int needed
+ *
+ *
+ */
+void zx_sleep_mask_int(void)
+{
+ /*************** mask modem LPM wakeup interrupts ***************/
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ CPPS_FUNC(cpps_callbacks, zDrvLpm_IrqDisable)(LPM_RAT_TD);
+ CPPS_FUNC(cpps_callbacks, zDrvLpm_IrqDisable)(LPM_RAT_LTE);
+ CPPS_FUNC(cpps_callbacks, zDrvLpm_IrqDisable)(LPM_RAT_W);
+#endif
+ /*********************************************************/
+ // zDrvInt_MaskIrq(TD_FRM_INT);
+ // zDrvInt_MaskIrq(LTE_LPM5_INT);
+ // zDrvInt_MaskIrq(WD_FRM_INT);
+ // CPPS_FUNC(cpps_callbacks, zDrvInt_MaskIrq)(PS_TIMER0_INT);
+ pm_save_gic_wake_enable();
+}
+
+/**
+ * zx_sleep_unmaskInt
+ *
+ * unmask int needed
+ *
+ *
+ */
+static void zx_sleep_unmaskInt(void)
+{
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+
+ if (CPPS_FUNC(cpps_callbacks, zPs_IsTdMasterMode)())
+ CPPS_FUNC(cpps_callbacks, zDrvLpm_IrqEnable)(LPM_RAT_TD);
+ if (CPPS_FUNC(cpps_callbacks, zPs_IsLteMasterMode)())
+ CPPS_FUNC(cpps_callbacks, zDrvLpm_IrqEnable)(LPM_RAT_LTE);
+ if (CPPS_FUNC(cpps_callbacks, zPs_IsFddMasterMode)())
+ CPPS_FUNC(cpps_callbacks, zDrvLpm_IrqEnable)(LPM_RAT_W);
+ #endif
+ // zDrvInt_UnmaskIrq(TD_FRM_INT);
+// zDrvInt_UnmaskIrq(LTE_LPM5_INT);
+// zDrvInt_UnmaskIrq(WD_FRM_INT);
+ //CPPS_FUNC(cpps_callbacks, zDrvInt_UnmaskIrq)(PS_TIMER0_INT);
+ pm_restore_gic_wake_enable();
+}
+
+
+/**
+ * zx_cpu_idle
+ * the deep sleep function, enter and exit WFI, dormant or shutdown sleep
+ *
+ *
+ *
+ */
+extern unsigned int zx_getspeed(unsigned int cpu);
+extern void clock_event_handler(void);
+extern void pm_uart_mod_timer(void);
+extern void pm_uart_del_timer(void);
+
+
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+#define CAPCORE_SLEEP_TIME (18*60*60*1000) //ms
+
+void zx_cpu_idle(void)
+{
+ u32 expect_time = 0;
+ u64 elapsed_time_enter = 0;
+ u64 elapsed_time_exit = 0;
+ u64 idle_time = 0;
+
+ #ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff01);
+ #endif
+
+ if(!zx_sleep_wakelock_check())
+ goto IRQ_LOCK_EXIT;
+
+ if(!zx_debug_check())
+ goto IRQ_LOCK_EXIT;
+
+ expect_time =CAPCORE_SLEEP_TIME;
+
+ elapsed_time_enter=zx_sleep_set_timer(expect_time );
+
+ if(expect_time <= SHUTDOWN_SLEEP_TIME)
+ goto IRQ_LOCK_EXIT;
+
+ sleep_count++;
+
+ zx_sleep_mask_int();
+
+ #ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff03);
+ #endif
+
+ if(expect_time >= SAVE_RESTORE_TIME)
+ zx_enter_sleep(CPU_SLEEP_TYPE_LP1);
+ else
+ zx_enter_sleep(CPU_SLEEP_TYPE_LP3);
+
+ #ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xfffe);
+ #endif
+
+ zx_sleep_unmaskInt();
+
+ elapsed_time_exit = zx_sleep_read_timer();
+ if(elapsed_time_exit>=elapsed_time_enter)
+ idle_time=elapsed_time_exit - elapsed_time_enter;
+ else
+ idle_time=(elapsed_time_exit - elapsed_time_enter)+MAX_TIME_PER_CYCLE;
+
+ clock_event_handler();
+
+ pm_ram_log(" @@sleep exit:sleep_count=%d,real_idle_time=%lld,jiffies:%u\n",sleep_count, idle_time, jiffies);
+
+IRQ_LOCK_EXIT:
+ #ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xffff);
+ #endif
+ zx_cpu_kernel_sleep();
+
+}
+#else
+void zx_cpu_idle(void)
+{
+
+ u32 expect_time = 0;
+ u64 elapsed_time_enter = 0;
+ u64 elapsed_time_exit = 0;
+ u64 idle_time = 0;
+ //s64 request = 0;
+ s64 remainder_timer = 0;
+
+ #ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff01);
+ pm_write_reg(IRAM_PS_SLEEP_FLAG_ADDR,1);
+ #endif
+
+ #ifdef USE_CPPS_KO
+ if(cpps_callbacks.psm_ModemSleepCheck) {
+ if(!CPPS_FUNC(cpps_callbacks,psm_ModemSleepCheck)())
+ goto IRQ_LOCK_EXIT;
+ } else{
+ goto IRQ_LOCK_EXIT;
+ }
+
+ #else
+ if(!psm_ModemSleepCheck())
+ goto IRQ_LOCK_EXIT;
+ #endif
+ if(!zx_sleep_idle_check())
+ goto IRQ_LOCK_EXIT;
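+ /* A held wakelock does not abort the attempt; it only forces light-sleep mode (sleep_mode_flag). */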
+ if(!zx_sleep_wakelock_check())
+ //goto IRQ_LOCK_EXIT;
+ sleep_mode_flag=1;
+ if(!zx_debug_check())
+ goto IRQ_LOCK_EXIT;
+
+ #ifdef USE_CPPS_KO
+ if(!sleep_mode_flag)
+ expect_time=CPPS_FUNC(cpps_callbacks,psm_ModemSleepTimeGet)();
+ else
+ {
+ remainder_timer = pm_get_remainder_time();
+ //request = ktime_to_us(tick_nohz_get_sleep_length());
+ expect_time=CPPS_FUNC(cpps_callbacks,psm_ModemSleepTimeGet)();
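+ /* Clamp the modem's sleep budget to the remaining kernel timer interval (remainder_timer/1000 -> ms). */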
+ if(div64_long(remainder_timer, 1000) < expect_time)
+ expect_time =div64_long(remainder_timer, 1000);
+
+ //if(div64_long(request, 1000) < expect_time)
+ //expect_time =div64_long(request, 1000);
+
+ }
+ #else
+ if(!sleep_mode_flag)
+ expect_time = psm_ModemSleepTimeGet();
+ else
+ {
+ remainder_timer = pm_get_remainder_time();
+ //request = ktime_to_us(tick_nohz_get_sleep_length());
+ expect_time=psm_ModemSleepTimeGet();
+ if(div64_long(remainder_timer, 1000) < expect_time)
+ expect_time =div64_long(remainder_timer, 1000);
+
+ //if(div64_long(request, 1000) < expect_time)
+ //expect_time =div64_long(request, 1000);
+
+ }
+
+ #endif
+ if(expect_time <= SHUTDOWN_SLEEP_TIME)
+ goto IRQ_LOCK_EXIT;
+
+ elapsed_time_enter = zx_sleep_set_timer(expect_time);
+
+ #ifdef CONFIG_ZX_RAM_CONSOLE
+ pm_uart_del_timer();
+ pm_idle_sram_start();
+ #endif
+
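+ /* Sanity check: BUG if the zDrvEdcp hardware (instances 0..2) is still busy at this point. */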
+#ifdef USE_CPPS_KO
+ if(cpps_callbacks.zDrvEdcp_IsBusy) {
+ if(CPPS_FUNC(cpps_callbacks,zDrvEdcp_IsBusy)(0) ||
+ CPPS_FUNC(cpps_callbacks,zDrvEdcp_IsBusy)(1) ||
+ CPPS_FUNC(cpps_callbacks,zDrvEdcp_IsBusy)(2))
+ BUG_ON(1);
+ }
+#endif
+
+ sleep_count++;
+ pm_ram_log(" @@sleep enter,expect_time:%u,jiffies:%u\n", expect_time, jiffies);
+
+ pm_ram_log(" freq_sel=0x%x \n", pm_read_reg(ZX_MATRIX_CRM_BASE+0x158) );
+
+ #ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff02);
+ #endif
+
+
+ if(zx_cpu_dev_idle_enter()) {
+ printk(KERN_WARNING"dev idle enter error\n");
+ if(zx_cpu_dev_idle_exit())
+ printk(KERN_WARNING"dev idle exit error\n");
+ goto IRQ_LOCK_EXIT;
+ }
+
+ zx_sleep_mask_int();
+
+ #ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff03);
+ #endif
+
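+ /* Choose sleep depth: LP1 only when the expected sleep exceeds SAVE_RESTORE_TIME and no wakelock forced light mode; otherwise LP3. */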
+ if((expect_time >= SAVE_RESTORE_TIME) && (!sleep_mode_flag) )
+ zx_enter_sleep(CPU_SLEEP_TYPE_LP1);
+ else
+ zx_enter_sleep(CPU_SLEEP_TYPE_LP3);
+
+ #ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xfffe);
+ #endif
+
+ zx_sleep_unmaskInt();
+
+ if(zx_cpu_dev_idle_exit()) {
+ printk(KERN_WARNING"dev idle exit error\n");
+ goto IRQ_LOCK_EXIT;
+ }
+
+
+
+ elapsed_time_exit = zx_sleep_read_timer();
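+ /* read_persistent_us() based timestamps can wrap; compensate with MAX_TIME_PER_CYCLE when exit < enter. */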
+ if(elapsed_time_exit>=elapsed_time_enter)
+ idle_time=elapsed_time_exit - elapsed_time_enter;
+ else
+ idle_time=(elapsed_time_exit - elapsed_time_enter)+MAX_TIME_PER_CYCLE;
+
+ clock_event_handler();
+ #ifdef USE_CPPS_KO
+ CPPS_FUNC(cpps_callbacks,psm_TimeCompensate)(idle_time);
+ #else
+ psm_TimeCompensate(idle_time);
+ #endif
+
+ pm_ram_log(" @@sleep exit:sleep_count=%d,real_idle_time=%lld,jiffies:%u\n",sleep_count, idle_time, jiffies);
+
+ #ifdef CONFIG_ZX_RAM_CONSOLE
+ pm_uart_mod_timer();
+ pm_idle_sram_end();
+ #endif
+
+
+IRQ_LOCK_EXIT:
+ #ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(IRAM_PS_SLEEP_FLAG_ADDR,0);
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xffff);
+ #endif
+ sleep_mode_flag=0;
+ zx_cpu_kernel_sleep();
+
+}
+#endif
+/****************************************************/
+
+/**
+ * zx_enter_idle
+ * @dev: cpuidle device
+ * @drv: cpuidle driver
+ * @index: index of the target state to be programmed
+ *
+ * Idle function for the C1 state, WFI on a single CPU.
+ * Called with irqs off, returns with irqs on.
+ * Records the time spent in the low power state in dev->last_residency
+ * and returns the index of the state actually entered.
+ */
+int zx_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ ktime_t entry_time, exit_time;
+ s64 idle_time;
+ int new_index = index;
+
+ local_irq_disable();
+
+#ifndef CONFIG_ARCH_ZX297520V2
+ local_fiq_disable();
+#endif
+ entry_time = ktime_get();
+
+/*=================================================================
+ *=======begin enter idle sleep====================================
+ *=================================================================
+ */
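+ /* The prepare step may adjust the requested C-state; the enter step returns the state actually used. */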
+ new_index = zx_pm_idle_prepare(dev, drv, index);
+
+ index = zx_pm_idle_enter(new_index);
+
+/*=================================================================
+ *=======end enter idle sleep======================================
+ *=================================================================
+ */
+ exit_time = ktime_get();
+#ifndef CONFIG_ARCH_ZX297520V2
+ local_fiq_enable();
+#endif
+ local_irq_enable();
+
+ idle_time = ktime_to_us(ktime_sub(exit_time, entry_time));
+
+ dev->last_residency = (int)idle_time;
+
+ if(print_enabled_by_debug != 0)
+ {
+ printk(KERN_INFO "[CPUIDLE] exit idle: idle time= %d , enter level= %d !\n", (u32)idle_time, index);
+ }
+
+ return index;
+}
+
+
+
+static int idle_pm_notify(struct notifier_block *nb,
+ unsigned long event, void *dummy)
+{
+#ifdef CONFIG_PM_SLEEP
+ if (event == PM_SUSPEND_PREPARE)
+ deep_idle_disabled_by_suspend = true;
+ else if (event == PM_POST_SUSPEND)
+ deep_idle_disabled_by_suspend = false;
+#endif
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block idle_pm_notifier =
+{
+ .notifier_call = idle_pm_notify,
+};
+
+
+void zx_apmgclken_set(unsigned en)
+{
+ unsigned tmp;
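+ /* ps_clk_switch sits at bit 2 on the CAP core build and at bit 0 otherwise. */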
+ if(en){
+ //set ps_clk_switch=1
+ tmp = pm_read_reg(CORE_SWITCH_CONFIG_REG);
+ #ifdef CONFIG_ARCH_ZX297520V3_CAP
+ tmp |= (0x1<<2);
+ #else
+ tmp |= (0x1<<0);
+ #endif
+ pm_write_reg(CORE_SWITCH_CONFIG_REG, tmp);
+ } else{
+ //set ps_clk_switch=0
+ tmp = pm_read_reg(CORE_SWITCH_CONFIG_REG);
+ #ifdef CONFIG_ARCH_ZX297520V3_CAP
+ tmp &= ~(0x1<<2);
+ #else
+ tmp &= ~(0x1<<0);
+ #endif
+ pm_write_reg(CORE_SWITCH_CONFIG_REG, tmp);
+ }
+}
+/**
+ * zx_cpuidle_init - Init routine for zx29xx idle
+ *
+ * Registers the cpuidle driver with the cpuidle
+ * framework with the valid set of states.
+ */
+int zx_cpuidle_init(void)
+{
+
+#if 1
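+ /* Production path: hook the platform idle entry directly instead of registering a cpuidle driver (the #else branch). */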
+ arm_pm_idle = zx_cpu_idle;
+ zx_apmgclken_set(0);
+ printk(KERN_INFO "[CPUIDLE] zx_cpu_idle init OK\n,");
+#else
+ int cpu_id;
+ struct cpuidle_device *device;
+ struct cpuidle_driver *drv = &zx_idle_driver;
+
+ /* Setup cpuidle driver */
+ drv->state_count = zx_fill_cpuidle_data(drv);
+ cpuidle_register_driver(drv);
+
+ /* Setup cpuidle device for each cpu */
+ for_each_cpu(cpu_id, cpu_online_mask)
+ {
+ device = &per_cpu(zx_idle_dev, cpu_id);
+ device->cpu = cpu_id;
+
+ if (cpu_id == 0)
+ device->state_count = drv->state_count;
+ else
+ device->state_count = 1; /* None boot cpu Support IDLE only now ! */
+
+ if (cpuidle_register_device(device))
+ {
+ printk(KERN_ERR "[CPUIDLE] register device failed\n");
+ return -EIO;
+ }
+ }
+
+ register_pm_notifier(&idle_pm_notifier);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&pm_idle_work, pm_idle_func);
+ schedule_delayed_work(&pm_idle_work, PM_IDLE_DELAY);
+
+ printk(KERN_INFO "[CPUIDLE] register device OK\n,");
+#endif
+
+ return 0;
+}
+
+#if 0
+static void __exit zx_cpuidle_exit(void)
+{
+ unregister_pm_notifier(&idle_pm_notifier);
+ cpuidle_unregister_driver(&zx_idle_driver);
+}
+
+late_initcall(zx_cpuidle_init);
+module_exit(zx_cpuidle_exit);
+#endif
+
+#ifdef CONFIG_ZX_PM_DEBUG
+static char* lp2_debug_show(char *s)
+{
+ #if 0
+ int i;
+
+ s += sprintf(s, "%-30s%8s %8s %8s %8s\n", " ", "cpu0","cpu1","cpu2","cpu3");
+ s += sprintf(s, "%s\n", "---------------------------------------------------------------");
+ s += sprintf(s, "%-30s%8u %8u %8u %8u\n", "lp3 in count:",
+ idle_stats.lp3_count[0],
+ idle_stats.lp3_count[1],
+ idle_stats.lp3_count[2],
+ idle_stats.lp3_count[3]);
+
+ s += sprintf(s, "%-30s%8u %8u %8u %8u\n", "lp2 in count:",
+ idle_stats.lp2_count[0],
+ idle_stats.lp2_count[1],
+ idle_stats.lp2_count[2],
+ idle_stats.lp2_count[3]);
+
+ s += sprintf(s, "%-30s%8u %8u %8u %8u\n", "lp2 completed:",
+ idle_stats.lp2_completed_count[0],
+ idle_stats.lp2_completed_count[1],
+ idle_stats.lp2_completed_count[2],
+ idle_stats.lp2_completed_count[3]);
+
+ s += sprintf(s, "%-30s%7u%% %7u%% %7u%% %7u%%\n", "lp2 completed%:",
+ idle_stats.lp2_completed_count [0]* 100 / (idle_stats.lp2_count[0] ?: 1),
+ idle_stats.lp2_completed_count [1]* 100 / (idle_stats.lp2_count[1] ?: 1),
+ idle_stats.lp2_completed_count [2]* 100 / (idle_stats.lp2_count[2] ?: 1),
+ idle_stats.lp2_completed_count [3]* 100 / (idle_stats.lp2_count[3] ?: 1));
+ s += sprintf(s, "%-30s%8u\n", "all idle count:", idle_stats.idle_count);
+
+ s += sprintf(s, "\n%-30s%8llu %8llu %8llu %8llu ms\n", "cpu ready time:",
+ div64_u64(idle_stats.cpu_wants_lp2_time[0], 1000),
+ div64_u64(idle_stats.cpu_wants_lp2_time[1], 1000),
+ div64_u64(idle_stats.cpu_wants_lp2_time[2], 1000),
+ div64_u64(idle_stats.cpu_wants_lp2_time[3], 1000));
+
+ s += sprintf(s, "%-30s%8llu %8llu %8llu %8llu ms\n", "lp2 in time:",
+ div64_u64(idle_stats.in_lp2_time[0], 1000),
+ div64_u64(idle_stats.in_lp2_time[1], 1000),
+ div64_u64(idle_stats.in_lp2_time[2], 1000),
+ div64_u64(idle_stats.in_lp2_time[3], 1000));
+
+ s += sprintf(s, "%-30s%7d%% %7d%% %7d%% %7d%%\n", "lp2 in time%:",
+ (int)(idle_stats.cpu_wants_lp2_time[0] ?
+ div64_u64(idle_stats.in_lp2_time[0] * 100,
+ idle_stats.cpu_wants_lp2_time[0]) : 0),
+ (int)(idle_stats.cpu_wants_lp2_time[1] ?
+ div64_u64(idle_stats.in_lp2_time[1] * 100,
+ idle_stats.cpu_wants_lp2_time[1]) : 0),
+ (int)(idle_stats.cpu_wants_lp2_time[2] ?
+ div64_u64(idle_stats.in_lp2_time[2] * 100,
+ idle_stats.cpu_wants_lp2_time[2]) : 0),
+ (int)(idle_stats.cpu_wants_lp2_time[3] ?
+ div64_u64(idle_stats.in_lp2_time[3] * 100,
+ idle_stats.cpu_wants_lp2_time[3]) : 0));
+
+ s += sprintf(s, "\n\n%3s %20s %6s %10s\n",
+ "int", "name", "count", "last count");
+ s += sprintf(s, "%s", "--------------------------------------------\n");
+ for (i = 0; i < NR_IRQS; i++) {
+ if (idle_stats.lp2_int_count[i] == 0)
+ continue;
+ s += sprintf(s, "%3d %20s %6d %10d\n",
+ i - GIC_SPI_START,
+ irq_to_desc(i)->action ? irq_to_desc(i)->action->name ?: "???" : "???",
+ idle_stats.lp2_int_count[i],
+ idle_stats.lp2_int_count[i] - idle_stats.last_lp2_int_count[i]);
+
+ idle_stats.last_lp2_int_count[i] = idle_stats.lp2_int_count[i];
+ };
+ #endif
+
+ return s;
+}
+
+/******************************************************
+ *** 1 -- lp2 ******************************
+ ******************************************************
+ */
+static ssize_t lp2_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ s = lp2_debug_show(s);
+
+
+ return (s - buf);
+}
+
+static ssize_t lp2_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ /* Nothing to configure yet; accept and ignore the input. */
+ return n;
+}
+
+zte_pm_attr(lp2);
+
+
+/*=============================================================================
+ *======== /sys/zte_pm/cpuidle/disable_lp2 ==================================
+ *=============================================================================
+ */
+static ssize_t disable_lp2_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ s += sprintf(s, "%s %d\n", "[CPUIDLE] deep_idle_disabled_by_debug:", deep_idle_disabled_by_debug);
+
+ return (s - buf);
+}
+
+/* usage: "echo 1 > disable_lp2" */
+static ssize_t disable_lp2_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+ long temp;
+
+ if(strict_strtol(buf, 0, &temp))
+ error = -EINVAL;
+ else
+ deep_idle_disabled_by_debug = temp;
+
+ return error ? error : n;
+}
+zte_pm_attr(disable_lp2);
+
+
+/*=============================================================================
+ *======== /sys/zte_pm/cpuidle/enable_print ==================================
+ *=============================================================================
+ */
+static ssize_t enable_print_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ s += sprintf(s, "%s %d\n", "[CPUIDLE] print_enabled_by_debug:", print_enabled_by_debug);
+
+ return (s - buf);
+}
+
+/* usage: "echo 1 > enable_print" */
+static ssize_t enable_print_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+ long temp;
+
+ if(strict_strtol(buf, 0, &temp))
+ error = -EINVAL;
+ else
+ print_enabled_by_debug = temp;
+
+ return error ? error : n;
+}
+
+zte_pm_attr(enable_print);
+
+
+/*=============================================================================
+ *======== /sys/zte_pm/cpuidle/drv_cpuidle_flag ==================================
+ *=============================================================================
+ */
+static ssize_t drv_cpuidle_flag_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ s += sprintf(s, "%s %x %x\n", "[CPUIDLE] drv_cpuidle_flag, pm_dma_used(): ", drv_cpuidle_flag, pm_dma_used());
+
+ return (s - buf);
+}
+
+/* usage: "echo * > drv_cpuidle_flag" */
+static ssize_t drv_cpuidle_flag_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+ long temp;
+
+ if(strict_strtol(buf, 0, &temp))
+ error = -EINVAL;
+ else
+ drv_cpuidle_flag = temp;
+
+ return error ? error : n;
+}
+
+zte_pm_attr(drv_cpuidle_flag);
+
+
+/*=============================================================================
+ *======== /sys/zte_pm/cpuidle/idle_debug_mask ==================================
+ *=============================================================================
+ */
+static ssize_t enable_idle_debug_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ s += sprintf(s, "%s %d\n", "[CPUIDLE] idle_print_enabled_by_debug:", idle_debug_mask);
+
+ return (s - buf);
+}
+
+/* usage: "echo 1 > idle_debug_mask" */
+static ssize_t enable_idle_debug_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+ long temp;
+
+ if(strict_strtol(buf, 0, &temp))
+ error = -EINVAL;
+ else
+ idle_debug_mask = temp;
+
+ return error ? error : n;
+}
+zte_pm_attr(enable_idle_debug);
+
+
+static struct attribute * g[] =
+{
+ &lp2_attr.attr,
+ &disable_lp2_attr.attr,
+ &enable_print_attr.attr,
+ &drv_cpuidle_flag_attr.attr,
+ &enable_idle_debug_attr.attr,
+ NULL,
+};
+
+
+static struct attribute_group idle_attr_group =
+{
+ .attrs = g,
+};
+/**
+ * idle_debug_init
+ *
+ * Create the cpuidle sysfs nodes; use "cat /sys/zte_pm/cpuidle/lp2" to view debug info.
+ */
+static struct kobject *idle_kobj;
+
+int __init idle_debug_init(void)
+{
+ int ret;
+
+ idle_kobj = kobject_create_and_add("cpuidle", pm_debug_kobj);
+ if (!idle_kobj)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(idle_kobj, &idle_attr_group);
+ if (ret)
+ {
+ pr_info("[CPUIDLE] sysfs_create_group ret %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#endif
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpuidle.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpuidle.h
new file mode 100644
index 0000000..10abd7d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-cpuidle.h
@@ -0,0 +1,56 @@
+/*
+ * drivers/soc/zte/power/zx-cpuidle.h
+ *
+ * Copyright (c) 2013, ZTE Corporation.
+ * write by zxp
+ *
+ */
+
+#ifndef __MACH_ZX_CPU_IDLE_H
+#define __MACH_ZX_CPU_IDLE_H
+
+#ifdef CONFIG_ZX_PM_DEBUG
+#define DEBUG_CPU_NUM 4
+/*record idle states info*/
+struct zx_idle_stats
+{
+ unsigned int cpu_ready_count[DEBUG_CPU_NUM];
+ unsigned int tear_down_count[DEBUG_CPU_NUM];
+ unsigned long long cpu_wants_lp2_time[DEBUG_CPU_NUM];
+ unsigned long long in_lp2_time[DEBUG_CPU_NUM];
+ unsigned int lp2_count[DEBUG_CPU_NUM];
+ unsigned int lp2_completed_count[DEBUG_CPU_NUM];
+ unsigned int lp3_count[DEBUG_CPU_NUM];
+ //unsigned int lp2_count_bin[32];
+ //unsigned int lp2_completed_count_bin[32];
+ unsigned int lp2_int_count[NR_IRQS];
+ unsigned int last_lp2_int_count[NR_IRQS];
+ unsigned int idle_count;
+};
+#endif
+
+#define ZX_IDLE_CSTATE_LP3 0
+#define ZX_IDLE_CSTATE_LP2 1
+#define ZX_IDLE_MAX_CSTATE 2
+
+/*
+ *cpuidle functions
+ */
+extern int __init zx_cpuidle_init(void);
+extern s64 idle_get_sleeptime(void);
+#ifdef CONFIG_ZX_PM_DEBUG
+extern int __init idle_debug_init(void);
+#endif
+extern int __init zx_fill_cpuidle_data(struct cpuidle_driver *drv);
+extern int zx_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
+extern int zx_pm_idle_enter(int index);
+
+#if defined CONFIG_SYSTEM_RECOVERY || defined _USE_TestHarness
+static inline int zx_idle_get_debug_flag(void) { return 0; }
+static inline int zx_idle_get_idle_flag(void) { return 0; }
+#endif
+
+#endif /* __MACH_ZX_CPU_IDLE_H */
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-a53.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-a53.c
new file mode 100644
index 0000000..4fbbfc3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-a53.c
@@ -0,0 +1,1349 @@
+/*
+ * ZTE cpu context save&restore driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by zxp
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/suspend.h>
+
+#include "zx-pm.h"
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] timer interface for power management ===========================
+ *=======================================================================
+ *=======================================================================*/
+
+typedef struct
+{
+ /* 0x00 */ volatile unsigned timer_load;
+ /* 0x04 */ volatile unsigned timer_counter;
+ /* 0x08 */ volatile unsigned timer_control;
+ /* 0x0c */ volatile unsigned timer_interrupt_status;
+ char padding1[0x10];
+ /* 0x20 */ volatile unsigned watchdog_load;
+ /* 0x24 */ volatile unsigned watchdog_counter;
+ /* 0x28 */ volatile unsigned watchdog_control;
+ /* 0x2c */ volatile unsigned watchdog_interrupt_status;
+ /* 0x30 */ volatile unsigned watchdog_reset_status;
+ /* 0x34 */ volatile unsigned watchdog_disable;
+} a9_timer_registers;
+
+typedef struct
+{
+ unsigned timer_load;
+ unsigned timer_counter;
+ unsigned timer_control;
+ unsigned timer_interrupt_status;
+ unsigned watchdog_load;
+ unsigned watchdog_counter;
+ unsigned watchdog_control;
+ unsigned watchdog_interrupt_status;
+} a9_timer_context;
+
+
+void save_a9_timers(u32 *pointer, unsigned twd_address)
+{
+ a9_timer_context *context = (a9_timer_context *)pointer;
+ a9_timer_registers *timers = (a9_timer_registers *)twd_address;
+
+ /*
+ * First, stop the timers
+ */
+ context->timer_control = timers->timer_control;
+ timers->timer_control = 0;
+ context->watchdog_control = timers->watchdog_control;
+ timers->watchdog_control = 0;
+
+ context->timer_load = timers->timer_load;
+ context->timer_counter = timers->timer_counter;
+ context->timer_interrupt_status = timers->timer_interrupt_status;
+ context->watchdog_load = timers->watchdog_load;
+ context->watchdog_counter = timers->watchdog_counter;
+ context->watchdog_interrupt_status = timers->watchdog_interrupt_status;
+ /*
+ * We ignore watchdog_reset_status, since it can only clear the status bit.
+ * If the watchdog has reset the system, the OS will want to know about it.
+ * Similarly, we have no use for watchdog_disable - this is only used for
+ * returning to timer mode, which is the default mode after reset.
+ */
+}
+
+void restore_a9_timers(u32 *pointer, unsigned twd_address)
+{
+ a9_timer_context *context = (a9_timer_context *)pointer;
+ a9_timer_registers *timers = (a9_timer_registers *)twd_address;
+
+ timers->timer_control = 0;
+ timers->watchdog_control = 0;
+
+ /*
+ * We restore the load register first, because it also sets the counter register.
+ */
+ timers->timer_load = context->timer_load;
+ timers->watchdog_load = context->watchdog_load;
+
+ /*
+ * If a timer has reached zero (presumably during the context save) and triggered
+ * an interrupt, then we set it to the shortest possible expiry time, to make it
+ * trigger again real soon.
+ * We could fake this up properly, but we would have to wait around until the timer
+ * ticked, which could be some time if PERIPHCLK is slow. This approach should be
+ * good enough in most cases.
+ */
+ if (context->timer_interrupt_status)
+ {
+ timers->timer_counter = 1;
+ }
+ else
+ {
+ timers->timer_counter = context->timer_counter;
+ }
+
+ if (context->watchdog_interrupt_status)
+ {
+ timers->watchdog_counter = 1;
+ }
+ else
+ {
+ timers->watchdog_counter = context->watchdog_counter;
+ }
+
+ timers->timer_control = context->timer_control;
+ timers->watchdog_control = context->watchdog_control;
+}
+
+
+typedef struct
+{
+ /* 0x00 */ volatile unsigned timer_version;
+ /* 0x04 */ volatile unsigned timer_config;
+ /* 0x08 */ volatile unsigned timer_load;
+ /* 0x0c */ volatile unsigned timer_start;
+ /* 0x10 */ volatile unsigned timer_set_en;
+ /* 0x14 */ volatile unsigned timer_ack;
+ /* 0x18 */ volatile unsigned timer_count;
+} a53_global_timer_registers;
+
+typedef struct
+{
+ unsigned timer_version;
+ unsigned timer_config;
+ unsigned timer_load;
+ unsigned timer_start;
+ unsigned timer_set_en;
+ unsigned timer_ack;
+ unsigned timer_count;
+
+} a53_global_timer_context;
+
+#define A53_GT_TIMER_ENABLE (1<<0)
+#define A53_GT_COMPARE_ENABLE (1<<1)
+#define A53_GT_AUTO_INCREMENT_ENABLE (1<<3)
+#define A53_GT_EVENT_FLAG (1<<0)
+
+void save_a53_sys_timer(u32 *pointer, unsigned timer_address)
+{
+ a53_global_timer_registers *timer = (void*)timer_address;
+ a53_global_timer_context *context = (void*)pointer;
+
+ context->timer_config = timer->timer_config;
+
+ context->timer_load = timer->timer_load;
+ context->timer_start = timer->timer_start;
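+ /* Stop the timer once its start/enable state has been captured, so it does not run during suspend. */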
+ timer->timer_start = 0;
+ context->timer_set_en = timer->timer_set_en;
+ context->timer_ack = timer->timer_ack; /* read-only; not restored */
+ context->timer_count = timer->timer_count;
+
+}
+
+void restore_a53_sys_timer(u32 *pointer, unsigned timer_address)
+{
+ a53_global_timer_registers *timer = (void*)timer_address;
+ a53_global_timer_context *context = (void*)pointer;
+
+ timer->timer_config = context->timer_config;
+ timer->timer_load = context->timer_load; /* reloads the initial count value */
+ timer->timer_set_en = context->timer_set_en;
+ timer->timer_start = context->timer_start;
+}
+
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] SCU interface for power management ============================
+ *=======================================================================
+ *=======================================================================*/
+typedef struct
+{
+ /* 0x00 */ volatile unsigned int control;
+ /* 0x04 */ const unsigned int configuration;
+ /* 0x08 */ union
+ {
+ volatile unsigned int w;
+ volatile unsigned char b[4];
+ } power_status;
+ /* 0x0c */ volatile unsigned int invalidate_all;
+ char padding1[48];
+ /* 0x40 */ volatile unsigned int filtering_start;
+ /* 0x44 */ volatile unsigned int filtering_end;
+ char padding2[8];
+ /* 0x50 */ volatile unsigned int access_control;
+ /* 0x54 */ volatile unsigned int ns_access_control;
+} a53_scu_registers;
+
+/*
+ * TODO: we need to use the power status register, not save it!
+ */
+
+void save_a53_scu(u32 *pointer, unsigned scu_address)
+{
+ a53_scu_registers *scu = (a53_scu_registers *)scu_address;
+
+ pointer[0] = scu->control;
+ pointer[1] = scu->power_status.w;
+ pointer[2] = scu->filtering_start;
+ pointer[3] = scu->filtering_end;
+ pointer[4] = scu->access_control;
+ pointer[5] = scu->ns_access_control;
+}
+
+void restore_a53_scu(u32 *pointer, unsigned scu_address)
+{
+ a53_scu_registers *scu = (a53_scu_registers *)scu_address;
+
+ scu->invalidate_all = 0xffff;
+ scu->filtering_start = pointer[2];
+ scu->filtering_end = pointer[3];
+//zxp scu->access_control = pointer[4];
+ scu->ns_access_control = pointer[5];
+ scu->power_status.w = pointer[1];
+ scu->control = pointer[0];
+}
+
+void set_status_a53_scu(unsigned cpu_index, unsigned status, unsigned scu_address)
+{
+ a53_scu_registers *scu = (a53_scu_registers *)scu_address;
+ unsigned power_status;
+
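+ /* Map the requested power mode onto the SCU power status field: 2 = dormant/standby, 3 = power-off, 0 = normal. */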
+ switch(status)
+ {
+ case CPU_POWER_MODE_STANDBY:
+ case CPU_POWER_MODE_DORMANT:
+ power_status = 2;
+ break;
+ case CPU_POWER_MODE_SHUTDOWN:
+ power_status = 3;
+ break;
+ default:
+ power_status = 0;
+ }
+
+ scu->power_status.b[cpu_index] = power_status;
+ dsb();
+}
+
+void init_lp_of_scu(unsigned scu_address)
+{
+ a53_scu_registers *scu = (a53_scu_registers *)scu_address;
+
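+ /* 0x61: SCU enable (bit 0) plus the SCU/IC standby enable bits (5 and 6) for low power. */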
+ scu->control |= 0x61;
+}
+
+
+int num_cpus_from_a53_scu(unsigned scu_address)
+{
+ a53_scu_registers *scu = (a53_scu_registers *)scu_address;
+
+ return ((scu->configuration) & 0x3) + 1;
+}
+
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] PL310 interface for power management ==========================
+ *=======================================================================
+ *=======================================================================*/
+
+
+#define C_BIT 0x01
+
+struct lockdown_regs
+{
+ unsigned int d, i;
+};
+
+typedef struct
+{
+ /* 0x000 */ const unsigned cache_id;
+ /* 0x004 */ const unsigned cache_type;
+ char padding1[0x0F8];
+ /* 0x100 */ volatile unsigned control;
+ /* 0x104 */ volatile unsigned aux_control;
+ /* 0x108 */ volatile unsigned tag_ram_control;
+ /* 0x10C */ volatile unsigned data_ram_control;
+ char padding2[0x0F0];
+ /* 0x200 */ volatile unsigned ev_counter_ctrl;
+ /* 0x204 */ volatile unsigned ev_counter1_cfg;
+ /* 0x208 */ volatile unsigned ev_counter0_cfg;
+ /* 0x20C */ volatile unsigned ev_counter1;
+ /* 0x210 */ volatile unsigned ev_counter0;
+ /* 0x214 */ volatile unsigned int_mask;
+ /* 0x218 */ const volatile unsigned int_mask_status;
+ /* 0x21C */ const volatile unsigned int_raw_status;
+ /* 0x220 */ volatile unsigned int_clear;
+ char padding3[0x50C];
+ /* 0x730 */ volatile unsigned cache_sync;
+ char padding4[0x03C];
+ /* 0x770 */ volatile unsigned inv_pa;
+ char padding5[0x008];
+ /* 0x77C */ volatile unsigned inv_way;
+ char padding6[0x030];
+ /* 0x7B0 */ volatile unsigned clean_pa;
+ char padding7[0x004];
+ /* 0x7B8 */ volatile unsigned clean_index;
+ /* 0x7BC */ volatile unsigned clean_way;
+ char padding8[0x030];
+ /* 0x7F0 */ volatile unsigned clean_inv_pa;
+ char padding9[0x004];
+ /* 0x7F8 */ volatile unsigned clean_inv_index;
+ /* 0x7FC */ volatile unsigned clean_inv_way;
+ char paddinga[0x100];
+ /* 0x900 */ volatile struct lockdown_regs lockdown[8];
+ char paddingb[0x010];
+ /* 0x950 */ volatile unsigned lock_line_en;
+ /* 0x954 */ volatile unsigned unlock_way;
+ char paddingc[0x2A8];
+ /* 0xC00 */ volatile unsigned addr_filtering_start;
+ /* 0xC04 */ volatile unsigned addr_filtering_end;
+ char paddingd[0x338];
+ /* 0xF40 */ volatile unsigned debug_ctrl;
+ char paddinge[0x01C];
+ /* 0xF60 */ volatile unsigned prefetch_ctrl;
+ char paddingf[0x01C];
+ /* 0xF80 */ volatile unsigned power_ctrl;
+} pl310_registers;
+
+
+typedef struct
+{
+ unsigned int aux_control;
+ unsigned int tag_ram_control;
+ unsigned int data_ram_control;
+ unsigned int ev_counter_ctrl;
+ unsigned int ev_counter1_cfg;
+ unsigned int ev_counter0_cfg;
+ unsigned int ev_counter1;
+ unsigned int ev_counter0;
+ unsigned int int_mask;
+ unsigned int lock_line_en;
+ struct lockdown_regs lockdown[8];
+ unsigned int unlock_way;
+ unsigned int addr_filtering_start;
+ unsigned int addr_filtering_end;
+ unsigned int debug_ctrl;
+ unsigned int prefetch_ctrl;
+ unsigned int power_ctrl;
+} pl310_context;
+
+/* TODO: should be determined from cache? */
+static unsigned const cache_line_size = 32;
+
+void clean_inv_range_pl310(void *start, unsigned size, unsigned pl310_address)
+{
+ unsigned addr;
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ /* Align the start address to the start of a cache line */
+ addr = (unsigned)start & ~(cache_line_size - 1);
+
+ /* Wait for any background operations to finish */
+ while(pl310->clean_inv_pa & C_BIT);
+
+ while(addr <= size + (unsigned)start)
+ {
+ pl310->clean_inv_pa = addr;
+ addr += cache_line_size;
+ /* For this to work on L220 we would have to poll the C bit now */
+ }
+ dmb();
+}
+
+void clean_range_pl310(void *start, unsigned size, unsigned pl310_address)
+{
+ unsigned addr;
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ /* Align the start address to the start of a cache line */
+ addr = (unsigned)start & ~(cache_line_size - 1);
+
+ /* Wait for any background operations to finish */
+ while(pl310->clean_pa & C_BIT);
+
+ while(addr <= size + (unsigned)start)
+ {
+ pl310->clean_pa = addr;
+ addr += cache_line_size;
+ /* For this to work on L220 we would have to poll the C bit now */
+ }
+ dmb();
+}
+
+void inv_range_pl310(void *start, unsigned size, unsigned pl310_address)
+{
+ unsigned addr;
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ /* Align the start address to the start of a cache line */
+ addr = (unsigned)start & ~(cache_line_size - 1);
+
+ /* Wait for any background operations to finish */
+ while(pl310->inv_pa & C_BIT);
+
+ while(addr <= size + (unsigned)start)
+ {
+ pl310->inv_pa = addr;
+ addr += cache_line_size;
+ /* For this to work on L220 we would have to poll the C bit now */
+ }
+}
+
+void clean_inv_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ int i;
+
+ pl310->clean_inv_way = 0xffff;
+ while (pl310->clean_inv_way)
+ {
+ /* Spin */
+ for (i=10; i>0; --i)
+ {
+ __nop();
+ }
+ }
+}
+
+void clean_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ int i;
+
+ pl310->clean_way = 0xffff;
+ while (pl310->clean_way)
+ {
+ /* Spin */
+ for (i=10; i>0; --i)
+ {
+ __nop();
+ }
+ }
+}
+
+static void inv_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ int i;
+
+ pl310->inv_way = 0xffff;
+ while (pl310->inv_way)
+ {
+ /* Spin */
+ for (i=10; i>0; --i)
+ {
+ __nop();
+ }
+ }
+}
+
+void clean_disable_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ int i;
+
+ pl310->clean_way = 0xffff;
+ while (pl310->clean_way)
+ {
+ /* Spin */
+ for (i=10; i>0; --i)
+ {
+ __nop();
+ }
+ }
+
+ pl310->cache_sync = 0;
+ dsb();
+ pl310->control = 0;
+}
+
+
+int is_enabled_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ return (pl310->control & 1);
+}
+
+void save_pl310(u32 *pointer, unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ pl310_context *context = (pl310_context *)pointer;
+ int i;
+
+ /* TODO: are all these registers present in earlier PL310 versions? */
+ context->aux_control = pl310->aux_control;
+ context->tag_ram_control = pl310->tag_ram_control;
+ context->data_ram_control = pl310->data_ram_control;
+ context->ev_counter_ctrl = pl310->ev_counter_ctrl;
+ context->ev_counter1_cfg = pl310->ev_counter1_cfg;
+ context->ev_counter0_cfg = pl310->ev_counter0_cfg;
+ context->ev_counter1 = pl310->ev_counter1;
+ context->ev_counter0 = pl310->ev_counter0;
+ context->int_mask = pl310->int_mask;
+ context->lock_line_en = pl310->lock_line_en;
+
+ /*
+ * The lockdown registers repeat 8 times for L310, the L210 has only one
+ * D and one I lockdown register at 0x0900 and 0x0904.
+ */
+ for (i=0; i<8; ++i)
+ {
+ context->lockdown[i].d = pl310->lockdown[i].d;
+ context->lockdown[i].i = pl310->lockdown[i].i;
+ }
+
+ context->addr_filtering_start = pl310->addr_filtering_start;
+ context->addr_filtering_end = pl310->addr_filtering_end;
+ context->debug_ctrl = pl310->debug_ctrl;
+ context->prefetch_ctrl = pl310->prefetch_ctrl;
+ context->power_ctrl = pl310->power_ctrl;
+}
+
+void restore_pl310(u32 *pointer, unsigned pl310_address, int dormant)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ pl310_context *context = (pl310_context *)pointer;
+ int i;
+
+ /* We may need to disable the PL310 if the boot code has turned it on */
+ if (pl310->control)
+ {
+ /* Wait for the cache to be idle, then disable */
+ pl310->cache_sync = 0;
+ dsb();
+ pl310->control = 0;
+ }
+
+ /* TODO: are all these registers present in earlier PL310 versions? */
+ pl310->aux_control = context->aux_control;
+ pl310->tag_ram_control = context->tag_ram_control;
+ pl310->data_ram_control = context->data_ram_control;
+ pl310->ev_counter_ctrl = context->ev_counter_ctrl;
+ pl310->ev_counter1_cfg = context->ev_counter1_cfg;
+ pl310->ev_counter0_cfg = context->ev_counter0_cfg;
+ pl310->ev_counter1 = context->ev_counter1;
+ pl310->ev_counter0 = context->ev_counter0;
+ pl310->int_mask = context->int_mask;
+ pl310->lock_line_en = context->lock_line_en;
+
+ for (i=0; i<8; ++i)
+ {
+ pl310->lockdown[i].d = context->lockdown[i].d;
+ pl310->lockdown[i].i = context->lockdown[i].i;
+ }
+
+ pl310->addr_filtering_start = context->addr_filtering_start;
+ pl310->addr_filtering_end = context->addr_filtering_end;
+ pl310->debug_ctrl = context->debug_ctrl;
+ pl310->prefetch_ctrl = context->prefetch_ctrl;
+ pl310->power_ctrl = context->power_ctrl;
+ dsb();
+
+ /*
+ * If the RAMs were powered off, we need to invalidate the cache
+ */
+ if (!dormant)
+ {
+ inv_pl310(pl310_address);
+ }
+
+ pl310->control = 1;
+ dsb();
+}
+
+void set_enabled_pl310(unsigned enabled, unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ if (enabled)
+ {
+ inv_pl310(pl310_address);
+
+ pl310->control |= 1;
+ pl310->cache_sync = 0;
+ dsb();
+ }
+ else
+ {
+ /* Wait for the cache to be idle */
+ pl310->cache_sync = 0;
+ dsb();
+ pl310->control &= ~1;
+ }
+}
+
+void set_status_pl310(unsigned status, unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ if (status == CPU_POWER_MODE_STANDBY)
+ {
+ /* Wait for the cache to be idle */
+ pl310->cache_sync = 0;
+ dsb();
+ pl310->power_ctrl |= 1;
+ }
+ else
+ {
+ pl310->power_ctrl &= ~1;
+ }
+}
+
+void init_lp_of_l2(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
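+ /* power_ctrl bit 0 enables standby mode, bit 1 enables dynamic clock gating. */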
+ pl310->power_ctrl |= 3;
+}
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] GIC interface for power management ============================
+ *=======================================================================
+ *=======================================================================*/
+
+
+/* This macro sets either the NS or S enable bit in the GIC distributor control register */
+#define GIC_DIST_ENABLE 0x00000001
+
+struct set_and_clear_regs
+{
+ volatile unsigned int set[32], clear[32];
+};
+
+
+typedef struct
+{
+ volatile uint32_t GICD_CTLR; // +0x0000 - RW - Distributor Control Register
+ const volatile uint32_t GICD_TYPRE; // +0x0004 - RO - Interrupt Controller Type Register
+ const volatile uint32_t GICD_IIDR; // +0x0008 - RO - Distributor Implementer Identification Register
+
+ const volatile uint32_t padding0; // +0x000C - RESERVED
+
+ volatile uint32_t GICD_STATUSR; // +0x0010 - RW - Error Reporting Status Register
+
+ const volatile uint32_t padding1[3]; // +0x0014 - RESERVED
+
+ volatile uint32_t IMP_DEF[8]; // +0x0020 - RW - Implementation defined registers
+
+ volatile uint32_t GICD_SETSPI_NSR; // +0x0040 - WO - Non-Secure Set SPI Pending (Used when SPI is signalled using MSI)
+ const volatile uint32_t padding2; // +0x0044 - RESERVED
+ volatile uint32_t GICD_CLRSPI_NSR; // +0x0048 - WO - Non-Secure Clear SPI Pending (Used when SPI is signalled using MSI)
+ const volatile uint32_t padding3; // +0x004C - RESERVED
+ volatile uint32_t GICD_SETSPI_SR; // +0x0050 - WO - Secure Set SPI Pending (Used when SPI is signalled using MSI)
+ const volatile uint32_t padding4; // +0x0054 - RESERVED
+ volatile uint32_t GICD_CLRSPI_SR; // +0x0058 - WO - Secure Clear SPI Pending (Used when SPI is signalled using MSI)
+
+ const volatile uint32_t padding5[3]; // +0x005C - RESERVED
+
+ volatile uint32_t GICD_SEIR; // +0x0068 - WO - System Error Interrupt Register (Note: This was recently removed from the spec)
+
+ const volatile uint32_t padding6[5]; // +0x006C - RESERVED
+
+ volatile uint32_t GICD_IGROUPR[32]; // +0x0080 - RW - Interrupt Group Registers (Security Registers in GICv1)
+
+ volatile uint32_t GICD_ISENABLER[32]; // +0x0100 - RW - Interrupt Set-Enable Registers
+ volatile uint32_t GICD_ICENABLER[32]; // +0x0180 - RW - Interrupt Clear-Enable Registers
+ volatile uint32_t GICD_ISPENDR[32]; // +0x0200 - RW - Interrupt Set-Pending Registers
+ volatile uint32_t GICD_ICPENDR[32]; // +0x0280 - RW - Interrupt Clear-Pending Registers
+ volatile uint32_t GICD_ISACTIVER[32]; // +0x0300 - RW - Interrupt Set-Active Register
+ volatile uint32_t GICD_ICACTIVER[32]; // +0x0380 - RW - Interrupt Clear-Active Register
+
+ volatile uint8_t GICD_IPRIORITYR[1024]; // +0x0400 - RW - Interrupt Priority Registers
+ volatile uint32_t GICD_ITARGETSR[256]; // +0x0800 - RW - Interrupt Processor Targets Registers
+ volatile uint32_t GICD_ICFGR[64]; // +0x0C00 - RW - Interrupt Configuration Registers
+ volatile uint32_t GICD_GRPMODR[32]; // +0x0D00 - RW - Interrupt Group Modifier Registers
+ const volatile uint32_t padding7[32]; // +0x0D80 - RESERVED
+ volatile uint32_t GICD_NSACR[64]; // +0x0E00 - RW - Non-Secure Access Control Register
+
+ volatile uint32_t GICD_SGIR; // +0x0F00 - WO - Software Generated Interrupt Register
+
+ const volatile uint32_t padding8[3]; // +0x0F04 - RESERVED
+
+ volatile uint32_t GICD_CPENDSGIR[4]; // +0x0F10 - RW - SGI Clear-Pending Registers
+ volatile uint32_t GICD_SPENDSGIR[4]; // +0x0F20 - RW - SGI Set-Pending Registers
+
+ const volatile uint32_t padding9[52]; // +0x0F30 - RESERVED
+ const volatile uint32_t padding10[5120]; // +0x1000 - RESERVED
+
+ volatile uint64_t GICD_ROUTER[1024]; // +0x6000 - RW - Controls SPI routing when ARE=1
+}interrupt_distributor;
+
+typedef struct
+{
+ const volatile uint32_t padding1[32]; // +0x0000 - RESERVED
+ volatile uint32_t GICR_IGROUPR; // +0x0080 - RW - Interrupt Group Registers (Security Registers in GICv1)
+ const volatile uint32_t padding2[31]; // +0x0084 - RESERVED
+ volatile uint32_t GICR_ISENABLER; // +0x0100 - RW - Interrupt Set-Enable Registers
+ const volatile uint32_t padding3[31]; // +0x0104 - RESERVED
+ volatile uint32_t GICR_ICENABLER; // +0x0180 - RW - Interrupt Clear-Enable Registers
+ const volatile uint32_t padding4[31]; // +0x0184 - RESERVED
+ volatile uint32_t GICR_ISPENDR; // +0x0200 - RW - Interrupt Set-Pending Registers
+ const volatile uint32_t padding5[31]; // +0x0204 - RESERVED
+ volatile uint32_t GICR_ICPENDR; // +0x0280 - RW - Interrupt Clear-Pending Registers
+ const volatile uint32_t padding6[31]; // +0x0284 - RESERVED
+ volatile uint32_t GICR_ISACTIVER; // +0x0300 - RW - Interrupt Set-Active Register
+ const volatile uint32_t padding7[31]; // +0x0304 - RESERVED
+ volatile uint32_t GICR_ICACTIVER; // +0x0380 - RW - Interrupt Clear-Active Register
+ const volatile uint32_t padding8[31]; // +0x0384 - RESERVED
+ volatile uint8_t GICR_IPRIORITYR[32]; // +0x0400 - RW - Interrupt Priority Registers
+ const volatile uint32_t padding9[504]; // +0x0420 - RESERVED
+ volatile uint32_t GICR_ICFGR[2]; // +0x0C00 - RW - Interrupt Configuration Registers
+ const volatile uint32_t padding10[62]; // +0x0C08 - RESERVED
+ volatile uint32_t GICR_GRPMODR; // +0x0D00 - RW - Interrupt Group Modifier Register
+ const volatile uint32_t padding11[63]; // +0x0D04 - RESERVED
+ volatile uint32_t GICR_NSACR; // +0x0E00 - RW - Non-Secure Access Control Register
+
+}interrupt_redistributor;
+
+
+typedef struct
+{
+ /* 0x00 */ volatile unsigned int GICC_CTLR; /*control*/
+ /* 0x04 */ volatile unsigned int GICC_PMR; /*priority mask register*/
+ /* 0x08 */ volatile unsigned int GICC_BPR; /* binary Point register*/
+ /* 0x0c */ volatile unsigned const int GICC_IAR;
+ /* 0x10 */ volatile unsigned int GICC_EOIR;
+ /* 0x14 */ volatile unsigned const int GICC_RPR;
+ /* 0x18 */ volatile unsigned const int GICC_HPPIR;
+ /* 0x1c */ volatile unsigned int GICC_ABPR;
+ /* 0x20 */ volatile unsigned int GICC_AIAR;
+ /* 0x24 */ volatile unsigned int GICC_AEOIR;
+ /* 0x28 */ volatile unsigned int GICC_AHPPIR;
+ /* 0x2c */ volatile unsigned int GICC_APR0;
+ /* 0x30 */ volatile unsigned int GICC_NSAPR0;
+ /* 0x34 */ volatile unsigned int GICC_IIDR;
+
+} cpu_interface;
+
+
+/*
+ * Saves the GIC CPU interface context
+ * Requires 3 or 4 words of memory
+ */
+void save_gic_interface(u32 *pointer, unsigned gic_interface_address, int is_secure)
+{
+#if 1
+ cpu_interface *ci = (cpu_interface *)gic_interface_address;
+
+ pointer[0] = ci->GICC_CTLR;
+ pointer[1] = ci->GICC_PMR;
+ pointer[2] = ci->GICC_BPR;
+
+ //ci->GICC_PMR = 0;
+ // ci->GICC_BPR = 0;
+ //ci->GICC_CTLR = 0;
+ if (is_secure)
+ {
+ pointer[3] = ci->GICC_ABPR;
+ // ci->GICC_ABPR = 0; /* clear to 0 */
+ }
+#else
+{
+ save_cpu_if(pointer);
+}
+
+#endif
+}
+
+/*
+ * Enables or disables the GIC distributor (for the current security state)
+ * Parameter 'enabled' is boolean.
+ * Return value is boolean, and reports whether GIC was previously enabled.
+ */
+int gic_distributor_set_enabled(int enabled, unsigned gic_distributor_address)
+{
+ unsigned tmp;
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+
+ tmp = id->GICD_CTLR;
+ if (enabled)
+ {
+ id->GICD_CTLR = tmp | GIC_DIST_ENABLE;
+ }
+ else
+ {
+ id->GICD_CTLR = tmp & ~GIC_DIST_ENABLE;
+ }
+ return (tmp & GIC_DIST_ENABLE) != 0;
+}
+
+/*
+ * Saves this CPU's banked parts of the distributor
+ * Returns non-zero if an SGI/PPI interrupt is pending (after saving all required context)
+ * Requires 19 words of memory
+ */
+int save_gic_distributor_private(u32 *pointer, unsigned gic_distributor_address, int is_secure)
+{
+#if 0 //zhangpei
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+
+ *pointer = id->enable.set[0];
+ ++pointer;
+ pointer = copy_words(pointer, id->priority, 8);
+ pointer = copy_words(pointer, id->target, 8);
+ if (is_secure)
+ {
+ *pointer = id->security[0];
+ ++pointer;
+ }
+ /* Save just the PPI configurations (SGIs are not configurable) */
+ *pointer = id->configuration[1];
+ ++pointer;
+ *pointer = id->pending.set[0];
+ if (*pointer)
+ {
+ return -1;
+ }
+ else
+ {
+ return 0;
+ }
+#endif
+
+return 0;
+
+}
+
+/*
+ * Saves the shared parts of the distributor.
+ * Requires 1 word of memory, plus 20 words for each block of 32 SPIs (max 641 words)
+ * Returns non-zero if an SPI interrupt is pending (after saving all required context)
+ */
+int save_gic_distributor_shared(u32 *pointer, unsigned int gic_distributor_address, int is_secure)
+{
+
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+ // interrupt_redistributor *ird =(interrupt_redistributor *)(gic_distributor_address+0x40000)
+ int i, retval = 0;
+ #if 0
+ unsigned num_spis, *saved_pending;
+
+
+
+ /* Calculate how many SPIs the GIC supports */
+ num_spis = 32 * (id->GICD_TYPRE & 0x1f);
+
+ /* TODO: add nonsecure stuff */
+
+ /* Save rest of GIC configuration */
+ if (num_spis)
+ {
+ pointer = copy_words(pointer, id->enable.set + 1, num_spis / 32);
+ pointer = copy_words(pointer, id->priority + 8, num_spis / 4);
+ pointer = copy_words(pointer, id->target + 8, num_spis / 4);
+ pointer = copy_words(pointer, id->configuration + 2, num_spis / 16);
+ if (is_secure)
+ {
+ pointer = copy_words(pointer, id->security + 1, num_spis / 32);
+ }
+ saved_pending = pointer;
+ pointer = copy_words(pointer, id->pending.set + 1, num_spis / 32);
+
+ /* Check interrupt pending bits */
+ /* zxp-- later we will check only useful int line */
+ for (i=0; i<num_spis/32; ++i)
+ {
+ if (saved_pending[i])
+ {
+ retval = -1;
+ break;
+ }
+ }
+ }
+ /* Save control register */
+ *pointer = id->control;
+#else
+#if 1 /* GICv3: save without clearing */
+ pointer = copy_words(pointer, id->GICD_ISENABLER, 32); /* 0x100~0x17C interrupt set-enable regs */
+ pointer = copy_words(pointer, id->GICD_ICFGR, 64); /* 0xC00~0xCFC interrupt config regs */
+ pointer = copy_words(pointer, id->GICD_GRPMODR, 32); /* 0xD00~0xD7C interrupt group modifier regs */
+ pointer = copy_words(pointer, id->GICD_IGROUPR, 32); /* 0x80~0xFC interrupt group regs */
+ pointer = copy_words(pointer, id->GICD_ISPENDR, 32); /* 0x200~0x27C interrupt set-pending regs */
+ pointer = copy_words(pointer, id->GICD_IPRIORITYR, 32); /* 0x400~ interrupt priority regs (first 32 words) */
+ pointer = copy_words(pointer, (volatile unsigned int *)id->GICD_ROUTER, 32); /* 0x6000~ interrupt routing regs (first 32 words) */
+ pointer = copy_words(pointer, &(id->GICD_CTLR), 1); /* 0x0~0x3 distributor control reg */
+ pointer = copy_words(pointer, (gic_distributor_address+0x40000+0x14), 1); /* GICR_WAKER */
+#else /* GICv3: save and clear */
+ pointer = copy_wordsandclear(pointer, id->GICD_ISENABLER, 32);
+ pointer = copy_wordsandclear(pointer, id->GICD_ICFGR, 64);
+ pointer = copy_wordsandclear(pointer, id->GICD_GRPMODR, 32);
+ pointer = copy_wordsandclear(pointer, id->GICD_IGROUPR, 32);
+ pointer = copy_wordsandclear(pointer, id->GICD_ISPENDR, 32);
+ pointer = copy_wordsandclear(pointer, id->GICD_IPRIORITYR, 32);
+ pointer = copy_wordsandclear(pointer, (volatile unsigned int *)id->GICD_ROUTER, 32);
+ pointer = copy_wordsandclear(pointer, &(id->GICD_CTLR), 1);
+ pointer = copy_wordsandclear(pointer, (gic_distributor_address+0x40000+0x14), 1);
+#endif
+
+#endif
+
+ return retval;
+}
+
+void restore_gic_interface(u32 *pointer, unsigned gic_interface_address, int is_secure)
+{
+#if 1
+ cpu_interface *ci = (cpu_interface *)gic_interface_address;
+
+ ci->GICC_PMR = pointer[1];
+ ci->GICC_BPR = pointer[2];
+
+ if (is_secure)
+ {
+ ci->GICC_ABPR = pointer[3];
+ }
+
+ /* Restore control register last */
+ ci->GICC_CTLR = pointer[0];
+#else
+ restore_cpu_if(pointer);
+#endif
+
+}
+
+void restore_gic_distributor_private(u32 *pointer, unsigned gic_distributor_address, int is_secure)
+{
+#if 0 //zhangpei
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+
+ /* We assume the distributor is disabled so we can write to its config registers */
+
+ id->enable.set[0] = *pointer;
+ ++pointer;
+ copy_words(id->priority, pointer, 8);
+ pointer += 8;
+ copy_words(id->target, pointer, 8);
+ pointer += 8;
+ if (is_secure)
+ {
+ id->security[0] = *pointer;
+ ++pointer;
+ }
+ /* Restore just the PPI configurations (SGIs are not configurable) */
+ id->configuration[1] = *pointer;
+ ++pointer;
+ id->pending.set[0] = *pointer;
+#endif
+}
+
+void restore_gic_distributor_shared(u32 *pointer, unsigned gic_distributor_address, int is_secure)
+{
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+ unsigned num_spis;
+
+ /* Make sure the distributor is disabled */
+ // gic_distributor_set_enabled(false, gic_distributor_address);
+
+#if 0
+ /* Calculate how many SPIs the GIC supports */
+ num_spis = 32 * ((id->controller_type) & 0x1f);
+
+ /* TODO: add nonsecure stuff */
+
+ /* Restore rest of GIC configuration */
+ if (num_spis)
+ {
+ copy_words(id->enable.set + 1, pointer, num_spis / 32);
+ pointer += num_spis / 32;
+ copy_words(id->priority + 8, pointer, num_spis / 4);
+ pointer += num_spis / 4;
+ copy_words(id->target + 8, pointer, num_spis / 4);
+ pointer += num_spis / 4;
+ copy_words(id->configuration + 2, pointer, num_spis / 16);
+ pointer += num_spis / 16;
+ if (is_secure)
+ {
+ copy_words(id->security + 1, pointer, num_spis / 32);
+ pointer += num_spis / 32;
+ }
+ copy_words(id->pending.set + 1, pointer, num_spis / 32);
+ pointer += num_spis / 32;
+ }
+
+ /* Restore control register - if the GIC was disabled during save, it will be restored as disabled. */
+ id->control = *pointer;
+#else
+/*GICV3*/
+ copy_words( id->GICD_ISENABLER ,pointer , 32); /*0x100~0x17C interrupt set-enable reg*/
+ pointer+=32;
+ copy_words(id->GICD_ICFGR , pointer , 64); /*0xc00~0xcfc interrupt config reg*/
+ pointer+=64;
+ copy_words(id->GICD_GRPMODR, pointer , 32); /*0xd00~d7c interrupt group modifer gre */
+ pointer+=32;
+ copy_words(id->GICD_IGROUPR, pointer , 32); /*0x80~0xfc interrupt group reg*/
+ pointer+=32;
+ copy_words(id->GICD_ISPENDR, pointer , 32); /*0x200~0x27c interrupt set-pending reg*/
+ pointer+=32;
+ copy_words(id->GICD_IPRIORITYR, pointer, 32); /* 0x400~ interrupt priority regs (first 32 words) */
+ pointer+=32;
+ copy_words((volatile unsigned int *)id->GICD_ROUTER, pointer, 32); /* 0x6000~ interrupt routing regs (first 32 words) */
+ pointer+=32;
+ copy_words(&id->GICD_CTLR, pointer , 1); /*0x0~0x3, ditribrutor control reg*/
+ pointer+=1;
+ copy_words((gic_distributor_address+0x40000+0x14),pointer,1); /* GICR_WAKER*/
+
+#endif
+return;
+}
+/* newly added */
+unsigned int gic_set_processorsleep(bool issleep)
+{
+ if(issleep)
+ {
+ *(volatile unsigned int *)(GIC_REDIST_BASE+0x14) = 0x2; /* GICR_WAKER: set ProcessorSleep */
+ while((*(volatile unsigned int *) (GIC_REDIST_BASE+0x14) &0x4) == 0); /* wait for the GIC to finish handling interrupts (ChildrenAsleep) */
+ }
+ else
+ {
+ *(volatile unsigned int *) (GIC_REDIST_BASE+0x14) = 0x0; /* GICR_WAKER: clear ProcessorSleep */
+ }
+
+
+ return 0;
+}
+
+unsigned int gic_get_cur_pending(unsigned gic_interface_address)
+{
+
+#if 1
+ cpu_interface *ci = (cpu_interface *)gic_interface_address;
+
+ return ci->GICC_HPPIR&0x3ff;
+#else
+ unsigned int value = 0;
+ // unsigned int value_addr = &value;
+ // asm volatile(
+ // "ldr r1,=value_addr\n\t"
+ // "mrc p15, 0, r1, c12, c8, 2\n\t" /* ICC_HPPIR0 */
+
+ // );
+ return value;
+#endif
+}
+
+extern unsigned int gic_wake_enable[3];
+static u32 pm_gic_enable[3] =
+{
+ 0xFFFFFFFF, 0xFFFFFFFF, 0x0FFFFFF,
+};
+void pm_save_gic_wake_enable(void)
+{
+ interrupt_distributor *id = (interrupt_distributor *)GIC_DIST_BASE;
+ pm_gic_enable[0]=id->GICD_ISENABLER[1];
+ pm_gic_enable[1]=id->GICD_ISENABLER[2];
+ pm_gic_enable[2]=id->GICD_ISENABLER[3];
+
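+ /* Writing the complement of the wake mask to ICENABLER disables every interrupt that is not a wake source. */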
+ id->GICD_ICENABLER[1] = ~gic_wake_enable[0];
+ id->GICD_ICENABLER[2] = ~gic_wake_enable[1];
+ id->GICD_ICENABLER[3] = ~gic_wake_enable[2];
+}
+
+void pm_restore_gic_wake_enable(void)
+{
+ interrupt_distributor *id = (interrupt_distributor *)GIC_DIST_BASE;
+ id->GICD_ISENABLER[1] = pm_gic_enable[0];
+ id->GICD_ISENABLER[2] = pm_gic_enable[1];
+ id->GICD_ISENABLER[3]= pm_gic_enable[2];
+
+}
+
+void pm_mask_tick(void)
+{
+// modified for the new GIC register layout
+#if 1
+ interrupt_distributor *id = (interrupt_distributor *)(GIC_DIST_BASE);
+
+ //id->enable.clear[1] = 0x8000;
+ id->GICD_ICENABLER[1] = 0x8000;
+#endif
+}
+
+void pm_unmask_tick(void)
+{
+// modified for the new GIC register layout
+#if 1
+ interrupt_distributor *id = (interrupt_distributor *)(GIC_DIST_BASE);
+
+ id->GICD_ISENABLER[1] |= 0x8000;
+#endif
+}
+
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] V7 debug interface for power management ============================
+ *=======================================================================
+ *=======================================================================*/
+
+
+#define DIDR_VERSION_SHIFT 16
+#define DIDR_VERSION_MASK 0xF
+#define DIDR_VERSION_7_1 5
+#define DIDR_BP_SHIFT 24
+#define DIDR_BP_MASK 0xF
+#define DIDR_WP_SHIFT 28
+#define DIDR_WP_MASK 0xF
+#define CLAIMCLR_CLEAR_ALL 0xff
+
+#define DRAR_VALID_MASK 0x00000003
+#define DSAR_VALID_MASK 0x00000003
+#define DRAR_ADDRESS_MASK 0xFFFFF000
+#define DSAR_ADDRESS_MASK 0xFFFFF000
+#define OSLSR_OSLM_MASK 0x00000009
+#define OSLAR_UNLOCKED 0x00000000
+#define OSLAR_LOCKED 0xC5ACCE55
+#define LAR_UNLOCKED 0xC5ACCE55
+#define LAR_LOCKED 0x00000000
+#define OSDLR_UNLOCKED 0x00000000
+#define OSDLR_LOCKED 0x00000001
+
+typedef volatile struct
+{ /* Registers Save? */
+ u32 const didr; /* 0 Read only */
+ u32 dscr_i; /* 1 ignore - use dscr_e instead */
+ u32 const dummy1[3]; /* 2-4 ignore */
+ u32 dtrrx_dtrtx_i; /* 5 ignore */
+ u32 wfar; /* 6 ignore - transient information */
+ u32 vcr; /* 7 Save */
+ u32 const dummy2; /* 8 ignore */
+ u32 ecr; /* 9 ignore */
+ u32 dsccr; /* 10 ignore */
+ u32 dsmcr; /* 11 ignore */
+ u32 const dummy3[20]; /* 12-31 ignore */
+ u32 dtrrx_e; /* 32 ignore */
+ u32 itr_pcsr; /* 33 ignore */
+ u32 dscr_e; /* 34 Save */
+ u32 dtrtx_e; /* 35 ignore */
+ u32 drcr; /* 36 ignore */
+ u32 eacr; /* 37 Save - V7.1 only */
+ u32 const dummy4[2]; /* 38-39 ignore */
+ u32 pcsr; /* 40 ignore */
+ u32 cidsr; /* 41 ignore */
+ u32 vidsr; /* 42 ignore */
+ u32 const dummy5[21]; /* 43-63 ignore */
+ u32 bvr[16]; /* 64-79 Save */
+ u32 bcr[16]; /* 80-95 Save */
+ u32 wvr[16]; /* 96-111 Save */
+ u32 wcr[16]; /* 112-127 Save */
+ u32 const dummy6[16]; /* 128-143 ignore */
+ u32 bxvr[16]; /* 144-159 Save if have Virtualization extensions */
+ u32 const dummy7[32]; /* 160-191 ignore */
+ u32 oslar; /* 192 If oslsr[0] is 1, unlock before save/restore */
+ u32 const oslsr; /* 193 ignore */
+ u32 ossrr; /* 194 ignore */
+ u32 const dummy8; /* 195 ignore */
+ u32 prcr; /* 196 ignore */
+ u32 prsr; /* 197 clear SPD on restore */
+ u32 const dummy9[762]; /* 198-959 ignore */
+ u32 itctrl; /* 960 ignore */
+ u32 const dummy10[39]; /* 961-999 ignore */
+ u32 claimset; /* 1000 Restore claim bits to here */
+ u32 claimclr; /* 1001 Save claim bits from here */
+ u32 const dummy11[2]; /* 1002-1003 ignore */
+ u32 lar; /* 1004 Unlock before restore */
+ u32 const lsr; /* 1005 ignore */
+ u32 const authstatus; /* 1006 Read only */
+ u32 const dummy12; /* 1007 ignore */
+ u32 const devid2; /* 1008 Read only */
+ u32 const devid1; /* 1009 Read only */
+ u32 const devid; /* 1010 Read only */
+ u32 const devtype; /* 1011 Read only */
+ u32 const pid[8]; /* 1012-1019 Read only */
+ u32 const cid[4]; /* 1020-1023 Read only */
+} debug_registers_t;
+
+typedef struct
+{
+ u32 vcr;
+ u32 dscr_e;
+ u32 eacr;
+ u32 bvr[16];
+ u32 bcr[16];
+ u32 wvr[16];
+ u32 wcr[16];
+ u32 bxvr[16];
+ u32 claim;
+} debug_context_t; /* total size 86 * 4 = 344 bytes */
+
+debug_registers_t *read_debug_address(void)
+{
+ unsigned drar, dsar;
+
+ drar = read_drar();
+ dsar = read_dsar();
+
+ if (!(drar & DRAR_VALID_MASK)
+ || !(dsar & DSAR_VALID_MASK))
+ {
+ return 0; /* No memory-mapped debug on this processor */
+ }
+
+ return (debug_registers_t *)((drar & DRAR_ADDRESS_MASK)
+ + (dsar & DSAR_ADDRESS_MASK));
+}
+
+/*
+ * We assume that before save (and after restore):
+ * - OSLAR is NOT locked, or the debugger would not work properly
+ * - LAR is locked, because the ARM ARM says it must be
+ * - OSDLR is NOT locked, or the debugger would not work properly
+ */
+
+void save_v7_debug(u32 *context)
+{
+ debug_registers_t *dbg = (void*)read_debug_address();
+ debug_context_t *ctx = (void*)context;
+ unsigned v71, num_bps, num_wps, i;
+ u32 didr;
+
+ if (!dbg)
+ {
+ return;
+ }
+
+ didr = dbg->didr;
+ /*
+ * Work out what version of debug we have
+ */
+ v71 = (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) == DIDR_VERSION_7_1);
+
+ /*
+ * Save all context to memory
+ */
+ ctx->vcr = dbg->vcr;
+ ctx->dscr_e = dbg->dscr_e;
+ ctx->claim = dbg->claimclr;
+
+ if (v71)
+ {
+ ctx->eacr = dbg->eacr;
+ }
+
+ num_bps = 1 + ((didr >> DIDR_BP_SHIFT) & DIDR_BP_MASK);
+ for (i=0; i<num_bps; ++i)
+ {
+ ctx->bvr[i] = dbg->bvr[i];
+ ctx->bcr[i] = dbg->bcr[i];
+#ifdef VIRTUALIZATION
+ ctx->bxvr[i] = dbg->bxvr[i]; /* TODO: don't save the ones that don't exist */
+#endif
+ }
+
+ num_wps = 1 + ((didr >> DIDR_WP_SHIFT) & DIDR_WP_MASK);
+ for (i=0; i<num_wps; ++i)
+ {
+ ctx->wvr[i] = dbg->wvr[i];
+ ctx->wcr[i] = dbg->wcr[i];
+ }
+
+ /*
+ * If Debug V7.1, we must set osdlr (by cp14 interface) before power down.
+ * Once we have done this, debug becomes inaccessible.
+ */
+ if (v71)
+ {
+ write_osdlr(OSDLR_LOCKED);
+ }
+}
+
+void restore_v7_debug(u32 *context)
+{
+ debug_registers_t *dbg = (void*)read_debug_address();
+ debug_context_t *ctx = (void*)context;
+ unsigned v71, num_bps, num_wps, i;
+ u32 didr;
+
+ if (!dbg)
+ {
+ return;
+ }
+
+ didr = dbg->didr;
+ /*
+ * Work out what version of debug we have
+ */
+ v71 = (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) == DIDR_VERSION_7_1);
+
+ /* Enable write access to registers */
+ dbg->lar = LAR_UNLOCKED;
+ /*
+ * If Debug V7.1, we must unset osdlr (by cp14 interface) before restoring.
+ * (If the CPU has not actually power-cycled, osdlr may not be reset).
+ */
+ if (v71)
+ {
+ write_osdlr(OSDLR_UNLOCKED);
+ }
+
+ /*
+ * Restore all context from memory
+ */
+ dbg->vcr = ctx->vcr;
+ dbg->claimclr = CLAIMCLR_CLEAR_ALL;
+ dbg->claimset = ctx->claim;
+
+ if (v71)
+ {
+ dbg->eacr = ctx->eacr;
+ }
+
+ num_bps = 1 + ((didr >> DIDR_BP_SHIFT) & DIDR_BP_MASK);
+ for (i=0; i<num_bps; ++i)
+ {
+ dbg->bvr[i] = ctx->bvr[i];
+ dbg->bcr[i] = ctx->bcr[i];
+#ifdef VIRTUALIZATION
+ dbg->bxvr[i] = ctx->bxvr[i]; /* TODO: don't restore the ones that don't exist */
+#endif
+ }
+
+ num_wps = 1 + ((didr >> DIDR_WP_SHIFT) & DIDR_WP_MASK);
+ for (i=0; i<num_wps; ++i)
+ {
+ dbg->wvr[i] = ctx->wvr[i];
+ dbg->wcr[i] = ctx->wcr[i];
+ }
+
+ /* Clear PRSR.SPD by reading PRSR */
+ if (!v71)
+ {
+ (dbg->prsr);
+ }
+
+ /* Re-enable debug */
+ dbg->dscr_e = ctx->dscr_e;
+
+ /* Disable write access to registers */
+ dbg->lar = LAR_LOCKED;
+}
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-a9.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-a9.c
new file mode 100644
index 0000000..10e4ed5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-a9.c
@@ -0,0 +1,1168 @@
+/*
+ * ZTE cpu context save&restore driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by zxp
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/suspend.h>
+
+#include "zx-pm.h"
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] timer interface for power management ===========================
+ *=======================================================================
+ *=======================================================================*/
+
+typedef struct
+{
+ volatile unsigned timer_load;/* 0x00 */
+ volatile unsigned timer_counter;/* 0x04 */
+ volatile unsigned timer_control;/* 0x08 */
+ volatile unsigned timer_interrupt_status;/* 0x0c */
+ char padding1[0x10];
+ volatile unsigned watchdog_load;/* 0x20 */
+ volatile unsigned watchdog_counter;/* 0x24 */
+ volatile unsigned watchdog_control;/* 0x28 */
+ volatile unsigned watchdog_interrupt_status;/* 0x2c */
+ volatile unsigned watchdog_reset_status;/* 0x30 */
+ volatile unsigned watchdog_disable;/* 0x34 */
+} a9_timer_registers;
+
+typedef struct
+{
+ unsigned timer_load;
+ unsigned timer_counter;
+ unsigned timer_control;
+ unsigned timer_interrupt_status;
+ unsigned watchdog_load;
+ unsigned watchdog_counter;
+ unsigned watchdog_control;
+ unsigned watchdog_interrupt_status;
+} a9_timer_context;
+
+typedef struct
+{
+ volatile unsigned counter_lo;
+ volatile unsigned counter_hi;
+ volatile unsigned control;
+ volatile unsigned status;
+ volatile unsigned comparator_lo;
+ volatile unsigned comparator_hi;
+ volatile unsigned auto_increment;
+} a9_global_timer_registers;
+
+typedef struct
+{
+ unsigned counter_lo;
+ unsigned counter_hi;
+ unsigned control;
+ unsigned status;
+ unsigned comparator_lo;
+ unsigned comparator_hi;
+ unsigned auto_increment;
+} a9_global_timer_context;
+
+#define A9_GT_TIMER_ENABLE (1<<0)
+#define A9_GT_COMPARE_ENABLE (1<<1)
+#define A9_GT_AUTO_INCREMENT_ENABLE (1<<3)
+#define A9_GT_EVENT_FLAG (1<<0)
+
+void save_a9_timers(u32 *pointer, unsigned twd_address)
+{
+ a9_timer_context *context = (a9_timer_context *)pointer;
+ a9_timer_registers *timers = (a9_timer_registers *)twd_address;
+
+ context->timer_control = timers->timer_control;
+ timers->timer_control = 0;
+ context->watchdog_control = timers->watchdog_control;
+ timers->watchdog_control = 0;
+
+ context->timer_load = timers->timer_load;
+ context->timer_counter = timers->timer_counter;
+ context->timer_interrupt_status = timers->timer_interrupt_status;
+ context->watchdog_load = timers->watchdog_load;
+ context->watchdog_counter = timers->watchdog_counter;
+ context->watchdog_interrupt_status = timers->watchdog_interrupt_status;
+}
+
+void restore_a9_timers(u32 *pointer, unsigned twd_address)
+{
+ a9_timer_context *context = (a9_timer_context *)pointer;
+ a9_timer_registers *timers = (a9_timer_registers *)twd_address;
+
+ timers->timer_control = 0;
+ timers->watchdog_control = 0;
+
+ timers->timer_load = context->timer_load;
+ timers->watchdog_load = context->watchdog_load;
+
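+	/*
+	 * If an interrupt was pending at save time, reload a count of 1 so it re-fires
+	 * almost immediately after restore; otherwise restore the saved counter value.
+	 */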
+ if (context->timer_interrupt_status)
+ {
+ timers->timer_counter = 1;
+ }
+ else
+ {
+ timers->timer_counter = context->timer_counter;
+ }
+
+ if (context->watchdog_interrupt_status)
+ {
+ timers->watchdog_counter = 1;
+ }
+ else
+ {
+ timers->watchdog_counter = context->watchdog_counter;
+ }
+
+ timers->timer_control = context->timer_control;
+ timers->watchdog_control = context->watchdog_control;
+}
+
+
+void save_a9_global_timer(u32 *pointer, unsigned timer_address)
+{
+ a9_global_timer_registers *timer = (void*)timer_address;
+ a9_global_timer_context *context = (void*)pointer;
+
+ unsigned tmp_lo, tmp_hi, tmp2_hi;
+
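+	/*
+	 * Read the 64-bit counter as hi/lo/hi and retry until the high word is stable,
+	 * so the two halves form a consistent snapshot.
+	 */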
+ do
+ {
+ tmp_hi = timer->counter_hi;
+ tmp_lo = timer->counter_lo;
+ tmp2_hi = timer->counter_hi;
+ } while (tmp_hi != tmp2_hi);
+
+ context->counter_lo = tmp_lo;
+ context->counter_hi = tmp_hi;
+ context->control = timer->control;
+ context->status = timer->status;
+ context->comparator_lo = timer->comparator_lo;
+ context->comparator_hi = timer->comparator_hi;
+ context->auto_increment = timer->auto_increment;
+}
+
+void restore_a9_global_timer(u32 *pointer, unsigned timer_address)
+{
+ a9_global_timer_registers *timer = (void*)timer_address;
+ a9_global_timer_context *context = (void*)pointer;
+
+ unsigned long long comparator_ull, current_ull;
+ unsigned current_hi, current_lo;
+
+ /* Is the timer currently enabled? */
+ if (timer->control & A9_GT_TIMER_ENABLE)
+ {
+ /* Temporarily stop the timer so we can mess with it */
+ timer->control &= ~A9_GT_TIMER_ENABLE;
+ }
+ else /* We must be the first CPU back up, or the timer is not in use */
+ {
+ timer->counter_lo = context->counter_lo;
+ timer->counter_hi = context->counter_hi;
+ }
+
+ current_hi = timer->counter_hi;
+ current_lo = timer->counter_lo;
+ current_ull = ((unsigned long long)current_hi << 32) + current_lo;
+ comparator_ull = ((unsigned long long)context->comparator_hi << 32) + context->comparator_lo;
+
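+	/*
+	 * If an event was already flagged, or the counter has passed the saved comparator
+	 * while both the timer and the comparator were enabled, regenerate the event flag
+	 * by briefly setting the comparator to the current counter value.
+	 */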
+ if ( ((context->status & A9_GT_EVENT_FLAG) ||
+ (current_ull > comparator_ull) ) &&
+ ((context->control & (A9_GT_COMPARE_ENABLE | A9_GT_TIMER_ENABLE))
+ == (A9_GT_COMPARE_ENABLE | A9_GT_TIMER_ENABLE)))
+ {
+ /* Set the comparator to the current counter value */
+ timer->comparator_hi = current_hi;
+ timer->comparator_lo = current_lo;
+
+ /* Start the timer */
+ timer->control = context->control;
+
+ /* Wait for the timer event */
+ while (timer->status == 0)
+ {
+ /* Do nothing */
+ }
+
+ /* Set the comparator to the original value */
+ timer->comparator_lo = context->comparator_lo;
+ timer->comparator_hi = context->comparator_hi;
+ }
+ else /* Event flag does not need to be set */
+ {
+ timer->comparator_lo = context->comparator_lo;
+ timer->comparator_hi = context->comparator_hi;
+ /* Start the timer */
+ timer->control = context->control;
+ }
+
+ timer->auto_increment = context->auto_increment;
+}
+
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] SCU interface for power management ============================
+ *=======================================================================
+ *=======================================================================*/
+typedef struct
+{
+ /* 0x00 */ volatile unsigned int control;
+ /* 0x04 */ const unsigned int configuration;
+ /* 0x08 */ union
+ {
+ volatile unsigned int w;
+ volatile unsigned char b[4];
+ } power_status;
+ /* 0x0c */ volatile unsigned int invalidate_all;
+ char padding1[48];
+ /* 0x40 */ volatile unsigned int filtering_start;
+ /* 0x44 */ volatile unsigned int filtering_end;
+ char padding2[8];
+ /* 0x50 */ volatile unsigned int access_control;
+ /* 0x54 */ volatile unsigned int ns_access_control;
+} a9_scu_registers;
+
+
+void save_a9_scu(u32 *pointer, unsigned scu_address)
+{
+ a9_scu_registers *scu = (a9_scu_registers *)scu_address;
+
+ pointer[0] = scu->control;
+ pointer[1] = scu->power_status.w;
+ pointer[2] = scu->filtering_start;
+ pointer[3] = scu->filtering_end;
+ pointer[4] = scu->access_control;
+ pointer[5] = scu->ns_access_control;
+}
+
+void restore_a9_scu(u32 *pointer, unsigned scu_address)
+{
+ a9_scu_registers *scu = (a9_scu_registers *)scu_address;
+
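+	/* Writing 0xffff invalidates all ways of the SCU tag RAMs for every CPU */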
+ scu->invalidate_all = 0xffff;
+ scu->filtering_start = pointer[2];
+ scu->filtering_end = pointer[3];
+//zxp scu->access_control = pointer[4];
+ scu->ns_access_control = pointer[5];
+ scu->power_status.w = pointer[1];
+ scu->control = pointer[0];
+}
+
+void set_status_a9_scu(unsigned cpu_index, unsigned status, unsigned scu_address)
+{
+ a9_scu_registers *scu = (a9_scu_registers *)scu_address;
+ unsigned power_status;
+
+ switch(status)
+ {
+ case CPU_POWER_MODE_STANDBY:
+ case CPU_POWER_MODE_DORMANT:
+ power_status = 2;
+ break;
+ case CPU_POWER_MODE_SHUTDOWN:
+ power_status = 3;
+ break;
+ default:
+ power_status = 0;
+ }
+
+ scu->power_status.b[cpu_index] = power_status;
+ dsb();
+}
+
+void init_lp_of_scu(unsigned scu_address)
+{
+ a9_scu_registers *scu = (a9_scu_registers *)scu_address;
+
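+	/* Set SCU enable (bit 0) plus the SCU and IC standby enables (bits 5 and 6) */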
+ scu->control |= 0x61;
+}
+
+
+int num_cpus_from_a9_scu(unsigned scu_address)
+{
+ a9_scu_registers *scu = (a9_scu_registers *)scu_address;
+
+ return ((scu->configuration) & 0x3) + 1;
+}
+
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] PL310 interface for power management ==========================
+ *=======================================================================
+ *=======================================================================*/
+
+
+#define C_BIT 0x01
+
+struct lockdown_regs
+{
+ unsigned int d, i;
+};
+
+typedef struct
+{
+ /* 0x000 */ const unsigned cache_id;
+ /* 0x004 */ const unsigned cache_type;
+ char padding1[0x0F8];
+ /* 0x100 */ volatile unsigned control;
+ /* 0x104 */ volatile unsigned aux_control;
+ /* 0x108 */ volatile unsigned tag_ram_control;
+ /* 0x10C */ volatile unsigned data_ram_control;
+ char padding2[0x0F0];
+ /* 0x200 */ volatile unsigned ev_counter_ctrl;
+ /* 0x204 */ volatile unsigned ev_counter1_cfg;
+ /* 0x208 */ volatile unsigned ev_counter0_cfg;
+ /* 0x20C */ volatile unsigned ev_counter1;
+ /* 0x210 */ volatile unsigned ev_counter0;
+ /* 0x214 */ volatile unsigned int_mask;
+ /* 0x218 */ const volatile unsigned int_mask_status;
+ /* 0x21C */ const volatile unsigned int_raw_status;
+ /* 0x220 */ volatile unsigned int_clear;
+ char padding3[0x50C];
+ /* 0x730 */ volatile unsigned cache_sync;
+ char padding4[0x03C];
+ /* 0x770 */ volatile unsigned inv_pa;
+ char padding5[0x008];
+ /* 0x77C */ volatile unsigned inv_way;
+ char padding6[0x030];
+ /* 0x7B0 */ volatile unsigned clean_pa;
+ char padding7[0x004];
+ /* 0x7B8 */ volatile unsigned clean_index;
+ /* 0x7BC */ volatile unsigned clean_way;
+ char padding8[0x030];
+ /* 0x7F0 */ volatile unsigned clean_inv_pa;
+ char padding9[0x004];
+ /* 0x7F8 */ volatile unsigned clean_inv_index;
+ /* 0x7FC */ volatile unsigned clean_inv_way;
+ char paddinga[0x100];
+ /* 0x900 */ volatile struct lockdown_regs lockdown[8];
+ char paddingb[0x010];
+ /* 0x950 */ volatile unsigned lock_line_en;
+ /* 0x954 */ volatile unsigned unlock_way;
+ char paddingc[0x2A8];
+ /* 0xC00 */ volatile unsigned addr_filtering_start;
+ /* 0xC04 */ volatile unsigned addr_filtering_end;
+ char paddingd[0x338];
+ /* 0xF40 */ volatile unsigned debug_ctrl;
+ char paddinge[0x01C];
+ /* 0xF60 */ volatile unsigned prefetch_ctrl;
+ char paddingf[0x01C];
+ /* 0xF80 */ volatile unsigned power_ctrl;
+} pl310_registers;
+
+
+typedef struct
+{
+ unsigned int aux_control;
+ unsigned int tag_ram_control;
+ unsigned int data_ram_control;
+ unsigned int ev_counter_ctrl;
+ unsigned int ev_counter1_cfg;
+ unsigned int ev_counter0_cfg;
+ unsigned int ev_counter1;
+ unsigned int ev_counter0;
+ unsigned int int_mask;
+ unsigned int lock_line_en;
+ struct lockdown_regs lockdown[8];
+ unsigned int unlock_way;
+ unsigned int addr_filtering_start;
+ unsigned int addr_filtering_end;
+ unsigned int debug_ctrl;
+ unsigned int prefetch_ctrl;
+ unsigned int power_ctrl;
+} pl310_context;
+
+/* TODO: should be determined from cache? */
+static unsigned const cache_line_size = 32;
+
+void clean_inv_range_pl310(void *start, unsigned size, unsigned pl310_address)
+{
+ unsigned addr;
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
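+	/* Align the start address to the start of a cache line */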
+ addr = (unsigned)start & ~(cache_line_size - 1);
+
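+	/* Wait for any background operations to finish */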
+ while(pl310->clean_inv_pa & C_BIT);
+
+ while(addr <= size + (unsigned)start)
+ {
+ pl310->clean_inv_pa = addr;
+ addr += cache_line_size;
+ }
+ dmb();
+}
+
+void clean_range_pl310(void *start, unsigned size, unsigned pl310_address)
+{
+ unsigned addr;
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ /* Align the start address to the start of a cache line */
+ addr = (unsigned)start & ~(cache_line_size - 1);
+
+ /* Wait for any background operations to finish */
+ while(pl310->clean_pa & C_BIT);
+
+ while(addr <= size + (unsigned)start)
+ {
+ pl310->clean_pa = addr;
+ addr += cache_line_size;
+ /* For this to work on L220 we would have to poll the C bit now */
+ }
+ dmb();
+}
+
+void inv_range_pl310(void *start, unsigned size, unsigned pl310_address)
+{
+ unsigned addr;
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ /* Align the start address to the start of a cache line */
+ addr = (unsigned)start & ~(cache_line_size - 1);
+
+ /* Wait for any background operations to finish */
+ while(pl310->inv_pa & C_BIT);
+
+ while(addr <= size + (unsigned)start)
+ {
+ pl310->inv_pa = addr;
+ addr += cache_line_size;
+ /* For this to work on L220 we would have to poll the C bit now */
+ }
+}
+
+void clean_inv_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ int i;
+
+ pl310->clean_inv_way = 0xffff;
+ while (pl310->clean_inv_way)
+ {
+ /* Spin */
+ for (i=10; i>0; --i)
+ {
+ __nop();
+ }
+ }
+}
+
+void clean_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ int i;
+
+ pl310->clean_way = 0xffff;
+ while (pl310->clean_way)
+ {
+ /* Spin */
+ for (i=10; i>0; --i)
+ {
+ __nop();
+ }
+ }
+}
+
+static void inv_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ int i;
+
+ pl310->inv_way = 0xffff;
+ while (pl310->inv_way)
+ {
+ /* Spin */
+ for (i=10; i>0; --i)
+ {
+ __nop();
+ }
+ }
+}
+
+void clean_disable_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ int i;
+
+ pl310->clean_way = 0xffff;
+ while (pl310->clean_way)
+ {
+ /* Spin */
+ for (i=10; i>0; --i)
+ {
+ __nop();
+ }
+ }
+
+ pl310->cache_sync = 0;
+ dsb();
+ pl310->control = 0;
+}
+
+
+int is_enabled_pl310(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ return (pl310->control & 1);
+}
+
+void save_pl310(u32 *pointer, unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ pl310_context *context = (pl310_context *)pointer;
+ int i;
+
+	/* TODO: are all these registers present in earlier PL310 versions? */
+ context->aux_control = pl310->aux_control;
+ context->tag_ram_control = pl310->tag_ram_control;
+ context->data_ram_control = pl310->data_ram_control;
+ context->ev_counter_ctrl = pl310->ev_counter_ctrl;
+ context->ev_counter1_cfg = pl310->ev_counter1_cfg;
+ context->ev_counter0_cfg = pl310->ev_counter0_cfg;
+ context->ev_counter1 = pl310->ev_counter1;
+ context->ev_counter0 = pl310->ev_counter0;
+ context->int_mask = pl310->int_mask;
+ context->lock_line_en = pl310->lock_line_en;
+
+ /*
+ * The lockdown registers repeat 8 times for L310, the L210 has only one
+ * D and one I lockdown register at 0x0900 and 0x0904.
+ */
+ for (i=0; i<8; ++i)
+ {
+ context->lockdown[i].d = pl310->lockdown[i].d;
+ context->lockdown[i].i = pl310->lockdown[i].i;
+ }
+
+ context->addr_filtering_start = pl310->addr_filtering_start;
+ context->addr_filtering_end = pl310->addr_filtering_end;
+ context->debug_ctrl = pl310->debug_ctrl;
+ context->prefetch_ctrl = pl310->prefetch_ctrl;
+ context->power_ctrl = pl310->power_ctrl;
+}
+
+void restore_pl310(u32 *pointer, unsigned pl310_address, int dormant)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+ pl310_context *context = (pl310_context *)pointer;
+ int i;
+
+ /* We may need to disable the PL310 if the boot code has turned it on */
+ if (pl310->control)
+ {
+ /* Wait for the cache to be idle, then disable */
+ pl310->cache_sync = 0;
+ dsb();
+ pl310->control = 0;
+ }
+
+ /* TODO: are all these registers present in earlier PL310 versions? */
+ pl310->aux_control = context->aux_control;
+ pl310->tag_ram_control = context->tag_ram_control;
+ pl310->data_ram_control = context->data_ram_control;
+ pl310->ev_counter_ctrl = context->ev_counter_ctrl;
+ pl310->ev_counter1_cfg = context->ev_counter1_cfg;
+ pl310->ev_counter0_cfg = context->ev_counter0_cfg;
+ pl310->ev_counter1 = context->ev_counter1;
+ pl310->ev_counter0 = context->ev_counter0;
+ pl310->int_mask = context->int_mask;
+ pl310->lock_line_en = context->lock_line_en;
+
+ for (i=0; i<8; ++i)
+ {
+ pl310->lockdown[i].d = context->lockdown[i].d;
+ pl310->lockdown[i].i = context->lockdown[i].i;
+ }
+
+ pl310->addr_filtering_start = context->addr_filtering_start;
+ pl310->addr_filtering_end = context->addr_filtering_end;
+ pl310->debug_ctrl = context->debug_ctrl;
+ pl310->prefetch_ctrl = context->prefetch_ctrl;
+ pl310->power_ctrl = context->power_ctrl;
+ dsb();
+
+ /*
+ * If the RAMs were powered off, we need to invalidate the cache
+ */
+ if (!dormant)
+ {
+ inv_pl310(pl310_address);
+ }
+
+ pl310->control = 1;
+ dsb();
+}
+
+void set_enabled_pl310(unsigned enabled, unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ if (enabled)
+ {
+ inv_pl310(pl310_address);
+
+ pl310->control |= 1;
+ pl310->cache_sync = 0;
+ dsb();
+ }
+ else
+ {
+ /* Wait for the cache to be idle */
+ pl310->cache_sync = 0;
+ dsb();
+ pl310->control &= ~1;
+ }
+}
+
+void set_status_pl310(unsigned status, unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
+ if (status == CPU_POWER_MODE_STANDBY)
+ {
+ /* Wait for the cache to be idle */
+ pl310->cache_sync = 0;
+ dsb();
+ pl310->power_ctrl |= 1;
+ }
+ else
+ {
+ pl310->power_ctrl &= ~1;
+ }
+}
+
+void init_lp_of_l2(unsigned pl310_address)
+{
+ pl310_registers *pl310 = (pl310_registers *)pl310_address;
+
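+	/* Enable standby mode (bit 0) and dynamic clock gating (bit 1) in the PL310 power control register */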
+ pl310->power_ctrl |= 3;
+}
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] GIC interface for power management ============================
+ *=======================================================================
+ *=======================================================================*/
+
+
+/* This macro sets either the NS or S enable bit in the GIC distributor control register */
+#define GIC_DIST_ENABLE 0x00000001
+
+struct set_and_clear_regs
+{
+ volatile unsigned int set[32], clear[32];
+};
+
+typedef struct
+{
+ /* 0x000 */ volatile unsigned int control;
+ const unsigned int controller_type;
+ const unsigned int implementer;
+ const char padding1[116];
+ /* 0x080 */ volatile unsigned int security[32];
+ /* 0x100 */ struct set_and_clear_regs enable;
+ /* 0x200 */ struct set_and_clear_regs pending;
+ /* 0x300 */ volatile const unsigned int active[32];
+ const char padding2[128];
+ /* 0x400 */ volatile unsigned int priority[256];
+ /* 0x800 */ volatile unsigned int target[256];
+ /* 0xC00 */ volatile unsigned int configuration[64];
+ /* 0xD00 */ const char padding3[512];
+ /* 0xF00 */ volatile unsigned int software_interrupt;
+ const char padding4[220];
+ /* 0xFE0 */ unsigned const int peripheral_id[4];
+ /* 0xFF0 */ unsigned const int primecell_id[4];
+} interrupt_distributor;
+
+typedef struct
+{
+ /* 0x00 */ volatile unsigned int control;
+ /* 0x04 */ volatile unsigned int priority_mask;
+ /* 0x08 */ volatile unsigned int binary_point;
+ /* 0x0c */ volatile unsigned const int interrupt_ack;
+ /* 0x10 */ volatile unsigned int end_of_interrupt;
+ /* 0x14 */ volatile unsigned const int running_priority;
+ /* 0x18 */ volatile unsigned const int highest_pending;
+ /* 0x1c */ volatile unsigned int aliased_binary_point;
+} cpu_interface;
+
+
+/*
+ * Saves the GIC CPU interface context
+ * Requires 3 or 4 words of memory
+ */
+void save_gic_interface(u32 *pointer, unsigned gic_interface_address, int is_secure)
+{
+ cpu_interface *ci = (cpu_interface *)gic_interface_address;
+
+ pointer[0] = ci->control;
+ pointer[1] = ci->priority_mask;
+ pointer[2] = ci->binary_point;
+
+ if (is_secure)
+ {
+ pointer[3] = ci->aliased_binary_point;
+ }
+}
+
+/*
+ * Enables or disables the GIC distributor (for the current security state)
+ * Parameter 'enabled' is boolean.
+ * Return value is boolean, and reports whether GIC was previously enabled.
+ */
+int gic_distributor_set_enabled(int enabled, unsigned gic_distributor_address)
+{
+ unsigned tmp;
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+
+ tmp = id->control;
+ if (enabled)
+ {
+ id->control = tmp | GIC_DIST_ENABLE;
+ }
+ else
+ {
+ id->control = tmp & ~GIC_DIST_ENABLE;
+ }
+ return (tmp & GIC_DIST_ENABLE) != 0;
+}
+
+/*
+ * Saves this CPU's banked parts of the distributor
+ * Returns non-zero if an SGI/PPI interrupt is pending (after saving all required context)
+ * Requires 19 words of memory
+ */
+int save_gic_distributor_private(u32 *pointer, unsigned gic_distributor_address, int is_secure)
+{
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+
+ *pointer = id->enable.set[0];
+ ++pointer;
+ pointer = copy_words(pointer, id->priority, 8);
+ pointer = copy_words(pointer, id->target, 8);
+ if (is_secure)
+ {
+ *pointer = id->security[0];
+ ++pointer;
+ }
+ /* Save just the PPI configurations (SGIs are not configurable) */
+ *pointer = id->configuration[1];
+ ++pointer;
+ *pointer = id->pending.set[0];
+ if (*pointer)
+ {
+ return -1;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+int save_gic_distributor_shared(u32 *pointer, unsigned gic_distributor_address, int is_secure)
+{
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+ unsigned num_spis, *saved_pending;
+ int i, retval = 0;
+
+
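+	/*
+	 * ITLinesNumber field: the distributor implements 32*(N+1) interrupt IDs, the first
+	 * 32 of which are banked SGIs/PPIs, leaving 32*N shared peripheral interrupts (SPIs).
+	 */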
+ num_spis = 32 * (id->controller_type & 0x1f);
+
+
+ if (num_spis)
+ {
+ pointer = copy_words(pointer, id->enable.set + 1, num_spis / 32);
+ pointer = copy_words(pointer, id->priority + 8, num_spis / 4);
+ pointer = copy_words(pointer, id->target + 8, num_spis / 4);
+ pointer = copy_words(pointer, id->configuration + 2, num_spis / 16);
+ if (is_secure)
+ {
+ pointer = copy_words(pointer, id->security + 1, num_spis / 32);
+ }
+ saved_pending = pointer;
+ pointer = copy_words(pointer, id->pending.set + 1, num_spis / 32);
+
+ for (i=0; i<num_spis/32; ++i)
+ {
+ if (saved_pending[i])
+ {
+ retval = -1;
+ break;
+ }
+ }
+ }
+
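+	/* Finally, save the distributor control register */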
+ *pointer = id->control;
+
+ return retval;
+}
+
+void restore_gic_interface(u32 *pointer, unsigned gic_interface_address, int is_secure)
+{
+ cpu_interface *ci = (cpu_interface *)gic_interface_address;
+
+ ci->priority_mask = pointer[1];
+ ci->binary_point = pointer[2];
+
+ if (is_secure)
+ {
+ ci->aliased_binary_point = pointer[3];
+ }
+
+ /* Restore control register last */
+ ci->control = pointer[0];
+}
+
+void restore_gic_distributor_private(u32 *pointer, unsigned gic_distributor_address, int is_secure)
+{
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+
+
+ id->enable.set[0] = *pointer;
+ ++pointer;
+ copy_words(id->priority, pointer, 8);
+ pointer += 8;
+ copy_words(id->target, pointer, 8);
+ pointer += 8;
+ if (is_secure)
+ {
+ id->security[0] = *pointer;
+ ++pointer;
+ }
+ /* Restore just the PPI configurations (SGIs are not configurable) */
+ id->configuration[1] = *pointer;
+ ++pointer;
+ id->pending.set[0] = *pointer;
+}
+
+void restore_gic_distributor_shared(u32 *pointer, unsigned gic_distributor_address, int is_secure)
+{
+ interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
+ unsigned num_spis;
+
+ /* Make sure the distributor is disabled */
+ gic_distributor_set_enabled(false, gic_distributor_address);
+
+ /* Calculate how many SPIs the GIC supports */
+ num_spis = 32 * ((id->controller_type) & 0x1f);
+
+ /* TODO: add nonsecure stuff */
+
+ /* Restore rest of GIC configuration */
+ if (num_spis)
+ {
+ copy_words(id->enable.set + 1, pointer, num_spis / 32);
+ pointer += num_spis / 32;
+ copy_words(id->priority + 8, pointer, num_spis / 4);
+ pointer += num_spis / 4;
+ copy_words(id->target + 8, pointer, num_spis / 4);
+ pointer += num_spis / 4;
+ copy_words(id->configuration + 2, pointer, num_spis / 16);
+ pointer += num_spis / 16;
+ if (is_secure)
+ {
+ copy_words(id->security + 1, pointer, num_spis / 32);
+ pointer += num_spis / 32;
+ }
+ copy_words(id->pending.set + 1, pointer, num_spis / 32);
+ pointer += num_spis / 32;
+ }
+
+ /* Restore control register - if the GIC was disabled during save, it will be restored as disabled. */
+ id->control = *pointer;
+
+ return;
+}
+
+unsigned int gic_get_cur_pending(unsigned gic_interface_address)
+{
+	cpu_interface *ci = (cpu_interface *)gic_interface_address;
+
+	return ci->highest_pending & 0x3ff;
+}
+
+
+
+void pm_mask_tick(void)
+{
+ interrupt_distributor *id = (interrupt_distributor *)(SCU_ADDRESS+IC_DISTRIBUTOR_OFFSET);
+
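+	/* Bit 15 of clear-enable register 1 disables interrupt ID 47, presumably the tick timer on this SoC */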
+ id->enable.clear[1] = 0x8000;
+}
+
+void pm_unmask_tick(void)
+{
+ interrupt_distributor *id = (interrupt_distributor *)(SCU_ADDRESS+IC_DISTRIBUTOR_OFFSET);
+
+ id->enable.set[1] |= 0x8000;
+}
+
+
+/*=======================================================================
+ *=======================================================================
+ *======= [ZX-PM] V7 debug interface for power management ============================
+ *=======================================================================
+ *=======================================================================*/
+
+
+#define DIDR_VERSION_SHIFT 16
+#define DIDR_VERSION_MASK 0xF
+#define DIDR_VERSION_7_1 5
+#define DIDR_BP_SHIFT 24
+#define DIDR_BP_MASK 0xF
+#define DIDR_WP_SHIFT 28
+#define DIDR_WP_MASK 0xF
+#define CLAIMCLR_CLEAR_ALL 0xff
+
+#define DRAR_VALID_MASK 0x00000003
+#define DSAR_VALID_MASK 0x00000003
+#define DRAR_ADDRESS_MASK 0xFFFFF000
+#define DSAR_ADDRESS_MASK 0xFFFFF000
+#define OSLSR_OSLM_MASK 0x00000009
+#define OSLAR_UNLOCKED 0x00000000
+#define OSLAR_LOCKED 0xC5ACCE55
+#define LAR_UNLOCKED 0xC5ACCE55
+#define LAR_LOCKED 0x00000000
+#define OSDLR_UNLOCKED 0x00000000
+#define OSDLR_LOCKED 0x00000001
+
+typedef volatile struct
+{ /* Registers Save? */
+ u32 const didr; /* 0 Read only */
+ u32 dscr_i; /* 1 ignore - use dscr_e instead */
+ u32 const dummy1[3]; /* 2-4 ignore */
+ u32 dtrrx_dtrtx_i; /* 5 ignore */
+ u32 wfar; /* 6 ignore - transient information */
+ u32 vcr; /* 7 Save */
+ u32 const dummy2; /* 8 ignore */
+ u32 ecr; /* 9 ignore */
+ u32 dsccr; /* 10 ignore */
+ u32 dsmcr; /* 11 ignore */
+ u32 const dummy3[20]; /* 12-31 ignore */
+ u32 dtrrx_e; /* 32 ignore */
+ u32 itr_pcsr; /* 33 ignore */
+ u32 dscr_e; /* 34 Save */
+ u32 dtrtx_e; /* 35 ignore */
+ u32 drcr; /* 36 ignore */
+ u32 eacr; /* 37 Save - V7.1 only */
+ u32 const dummy4[2]; /* 38-39 ignore */
+ u32 pcsr; /* 40 ignore */
+ u32 cidsr; /* 41 ignore */
+ u32 vidsr; /* 42 ignore */
+ u32 const dummy5[21]; /* 43-63 ignore */
+ u32 bvr[16]; /* 64-79 Save */
+ u32 bcr[16]; /* 80-95 Save */
+ u32 wvr[16]; /* 96-111 Save */
+ u32 wcr[16]; /* 112-127 Save */
+ u32 const dummy6[16]; /* 128-143 ignore */
+ u32 bxvr[16]; /* 144-159 Save if have Virtualization extensions */
+ u32 const dummy7[32]; /* 160-191 ignore */
+ u32 oslar; /* 192 If oslsr[0] is 1, unlock before save/restore */
+ u32 const oslsr; /* 193 ignore */
+ u32 ossrr; /* 194 ignore */
+ u32 const dummy8; /* 195 ignore */
+ u32 prcr; /* 196 ignore */
+ u32 prsr; /* 197 clear SPD on restore */
+ u32 const dummy9[762]; /* 198-959 ignore */
+ u32 itctrl; /* 960 ignore */
+ u32 const dummy10[39]; /* 961-999 ignore */
+ u32 claimset; /* 1000 Restore claim bits to here */
+ u32 claimclr; /* 1001 Save claim bits from here */
+ u32 const dummy11[2]; /* 1002-1003 ignore */
+ u32 lar; /* 1004 Unlock before restore */
+ u32 const lsr; /* 1005 ignore */
+ u32 const authstatus; /* 1006 Read only */
+ u32 const dummy12; /* 1007 ignore */
+ u32 const devid2; /* 1008 Read only */
+ u32 const devid1; /* 1009 Read only */
+ u32 const devid; /* 1010 Read only */
+ u32 const devtype; /* 1011 Read only */
+ u32 const pid[8]; /* 1012-1019 Read only */
+ u32 const cid[4]; /* 1020-1023 Read only */
+} debug_registers_t;
+
+typedef struct
+{
+ u32 vcr;
+ u32 dscr_e;
+ u32 eacr;
+ u32 bvr[16];
+ u32 bcr[16];
+ u32 wvr[16];
+ u32 wcr[16];
+ u32 bxvr[16];
+ u32 claim;
+} debug_context_t; /* total size 86 * 4 = 344 bytes */
+
+debug_registers_t *read_debug_address(void)
+{
+ unsigned drar, dsar;
+
+ drar = read_drar();
+ dsar = read_dsar();
+
+ if (!(drar & DRAR_VALID_MASK)
+ || !(dsar & DSAR_VALID_MASK))
+ {
+ return 0; /* No memory-mapped debug on this processor */
+ }
+
+ return (debug_registers_t *)((drar & DRAR_ADDRESS_MASK)
+ + (dsar & DSAR_ADDRESS_MASK));
+}
+
+/*
+ * We assume that before save (and after restore):
+ * - OSLAR is NOT locked, or the debugger would not work properly
+ * - LAR is locked, because the ARM ARM says it must be
+ * - OSDLR is NOT locked, or the debugger would not work properly
+ */
+
+void save_v7_debug(u32 *context)
+{
+ debug_registers_t *dbg = (void*)read_debug_address();
+ debug_context_t *ctx = (void*)context;
+ unsigned v71, num_bps, num_wps, i;
+ u32 didr;
+
+ if (!dbg)
+ {
+ return;
+ }
+
+ didr = dbg->didr;
+ /*
+ * Work out what version of debug we have
+ */
+ v71 = (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) == DIDR_VERSION_7_1);
+
+ /*
+ * Save all context to memory
+ */
+ ctx->vcr = dbg->vcr;
+ ctx->dscr_e = dbg->dscr_e;
+ ctx->claim = dbg->claimclr;
+
+ if (v71)
+ {
+ ctx->eacr = dbg->eacr;
+ }
+
+ num_bps = 1 + ((didr >> DIDR_BP_SHIFT) & DIDR_BP_MASK);
+ for (i=0; i<num_bps; ++i)
+ {
+ ctx->bvr[i] = dbg->bvr[i];
+ ctx->bcr[i] = dbg->bcr[i];
+#ifdef VIRTUALIZATION
+ ctx->bxvr[i] = dbg->bxvr[i]; /* TODO: don't save the ones that don't exist */
+#endif
+ }
+
+ num_wps = 1 + ((didr >> DIDR_WP_SHIFT) & DIDR_WP_MASK);
+ for (i=0; i<num_wps; ++i)
+ {
+ ctx->wvr[i] = dbg->wvr[i];
+ ctx->wcr[i] = dbg->wcr[i];
+ }
+
+ /*
+ * If Debug V7.1, we must set osdlr (by cp14 interface) before power down.
+ * Once we have done this, debug becomes inaccessible.
+ */
+ if (v71)
+ {
+ write_osdlr(OSDLR_LOCKED);
+ }
+}
+
+void restore_v7_debug(u32 *context)
+{
+ debug_registers_t *dbg = (void*)read_debug_address();
+ debug_context_t *ctx = (void*)context;
+ unsigned v71, num_bps, num_wps, i;
+ u32 didr;
+
+ if (!dbg)
+ {
+ return;
+ }
+
+ didr = dbg->didr;
+ /*
+ * Work out what version of debug we have
+ */
+ v71 = (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) == DIDR_VERSION_7_1);
+
+ /* Enable write access to registers */
+ dbg->lar = LAR_UNLOCKED;
+ /*
+ * If Debug V7.1, we must unset osdlr (by cp14 interface) before restoring.
+ * (If the CPU has not actually power-cycled, osdlr may not be reset).
+ */
+ if (v71)
+ {
+ write_osdlr(OSDLR_UNLOCKED);
+ }
+
+ /*
+ * Restore all context from memory
+ */
+ dbg->vcr = ctx->vcr;
+ dbg->claimclr = CLAIMCLR_CLEAR_ALL;
+ dbg->claimset = ctx->claim;
+
+ if (v71)
+ {
+ dbg->eacr = ctx->eacr;
+ }
+
+ num_bps = 1 + ((didr >> DIDR_BP_SHIFT) & DIDR_BP_MASK);
+ for (i=0; i<num_bps; ++i)
+ {
+ dbg->bvr[i] = ctx->bvr[i];
+ dbg->bcr[i] = ctx->bcr[i];
+#ifdef VIRTUALIZATION
+ dbg->bxvr[i] = ctx->bxvr[i]; /* TODO: don't restore the ones that don't exist */
+#endif
+ }
+
+ num_wps = 1 + ((didr >> DIDR_WP_SHIFT) & DIDR_WP_MASK);
+ for (i=0; i<num_wps; ++i)
+ {
+ dbg->wvr[i] = ctx->wvr[i];
+ dbg->wcr[i] = ctx->wcr[i];
+ }
+
+ /* Clear PRSR.SPD by reading PRSR */
+ if (!v71)
+ {
+ (dbg->prsr);
+ }
+
+ /* Re-enable debug */
+ dbg->dscr_e = ctx->dscr_e;
+
+ /* Disable write access to registers */
+ dbg->lar = LAR_LOCKED;
+}
+#if 1
+//Added by 0045002184 wangzhen, for test only: read the pending state of the USB pwrdwn_up/pwrdwn_down IRQs
+unsigned int pm_get_usb_pdd_pending(void)
+{
+ interrupt_distributor *id = (interrupt_distributor *)(SCU_ADDRESS+IC_DISTRIBUTOR_OFFSET);
+
+ return id->pending.set[2];
+}
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-context.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-context.c
new file mode 100644
index 0000000..9ad479f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-context.c
@@ -0,0 +1,622 @@
+/*
+ * ZTE cpu context save&restore driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by zxp
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/suspend.h>
+
+#include <asm/memory.h>
+
+#include "zx-pm.h"
+
+
+struct zx_pm_main_table zx_pm_main_table;
+unsigned pm_device_memory [PM_MEMORY_SIZE/4];
+
+extern volatile int sleep_ret_flag[];
+
+/**
+ * This is where the reset vector jumps to.
+ *
+ */
+static unsigned get_device_memory(unsigned size);
+/*********************************************************************
+ * FUNCTION DEFINITIONS
+ ********************************************************************/
+/**
+ * zx_pm_context_init - initialize the context structures for CPU suspend and resume.
+ *
+ * Initializes the per-cluster and per-CPU context structures.
+ */
+int zx_pm_context_init(void)
+{
+ int i;
+ struct zx_cluster_context* temp_cluster_context;
+ struct zx_cpu_context* temp_cpu_context;
+
+ pr_info("[SLP] Power/PM_CONTEXT_INIT \n");
+
+ zx_set_wakeup_address(SYSTEM_WAKEUP_ADDR);
+
+ zx_pm_main_table.num_cpus = MAX_CPU_NUM;
+
+#ifdef CONFIG_ARCH_ZX297520V2
+ zx_pm_main_table.scu_address = (u32)SCU_ADDRESS;
+ zx_pm_main_table.ic_dist_address = (u32)SCU_ADDRESS + IC_DISTRIBUTOR_OFFSET;
+ zx_pm_main_table.ic_interface_address = (u32)SCU_ADDRESS + IC_INTERFACE_OFFSET;
+ zx_pm_main_table.l2_address = (u32)L2_CONTROLLER_ADDRESS;
+ zx_pm_main_table.crm_address = (u32)A9_CRM_ADDRESS;
+
+ zx_pm_main_table.scu_address_p = (u32)SCU_ADDRESS_P;
+ zx_pm_main_table.l2_address_p = (u32)L2_CONTROLLER_ADDRESS_P;
+ zx_pm_main_table.crm_address_p = (u32)A9_CRM_ADDRESS_P;
+#else
+ zx_pm_main_table.scu_address = (u32)0;
+ zx_pm_main_table.ic_dist_address = (u32)(GIC_DIST_BASE);
+ zx_pm_main_table.ic_interface_address = (u32)(ZX_GICC_BASE);
+ zx_pm_main_table.l2_address = (u32)0;
+ zx_pm_main_table.crm_address = (u32)AP_CRM_BASE;
+
+ zx_pm_main_table.scu_address_p = (u32)0;
+ zx_pm_main_table.l2_address_p = (u32)0;
+ zx_pm_main_table.crm_address_p = (u32)(ZX29_AP_PERIPHERAL_PHYS+0x20000);
+#endif
+ /*cluster info*/
+ temp_cluster_context = (void *)get_device_memory(sizeof(struct zx_cluster_context));
+ temp_cluster_context->flags = 0;
+ temp_cluster_context->saved_items = 0;
+ temp_cluster_context->gic_dist_shared_data = (void *)get_device_memory(GIC_DIST_SHARED_DATA_SIZE);
+ temp_cluster_context->l2_data = (void *)get_device_memory(L2_DATA_SIZE);
+ temp_cluster_context->scu_data = (void *)get_device_memory(SCU_DATA_SIZE);
+ temp_cluster_context->global_timer_data = (void *)get_device_memory(GLOBAL_TIMER_DATA_SIZE);
+ temp_cluster_context->crm_data = (void *)get_device_memory(CRM_DATA_SIZE);
+ temp_cluster_context->power_state = CPU_POWER_MODE_RUN;
+
+ zx_pm_main_table.cluster_context = temp_cluster_context;
+
+ /*cpu info*/
+ for(i=0;i<zx_pm_main_table.num_cpus;i++)
+ {
+ temp_cpu_context = (void *)get_device_memory(sizeof(struct zx_cpu_context));
+ temp_cpu_context->flags = 0;
+ temp_cpu_context->saved_items = 0;
+ temp_cpu_context->control_data = (void *)get_device_memory(CONTROL_DATA_SIZE);
+ temp_cpu_context->pmu_data = (void *)get_device_memory(PMU_DATA_SIZE);
+ temp_cpu_context->timer_data = (void *)get_device_memory(TIMER_DATA_SIZE);
+ temp_cpu_context->vfp_data = (void *)get_device_memory(VFP_DATA_SIZE);
+ temp_cpu_context->gic_interface_data = (void *)get_device_memory(GIC_INTERFACE_DATA_SIZE);
+ temp_cpu_context->gic_dist_private_data = (void *)get_device_memory(GIC_DIST_PRIVATE_DATA_SIZE);
+ temp_cpu_context->banked_registers = (void *)get_device_memory(BANKED_REGISTERS_SIZE);
+ temp_cpu_context->cp15_data = (void *)get_device_memory(CP15_DATA_SIZE);
+ temp_cpu_context->debug_data = (void *)get_device_memory(DEBUG_DATA_SIZE);
+ temp_cpu_context->mmu_data = (void *)get_device_memory(MMU_DATA_SIZE);
+ temp_cpu_context->other_data = (void *)get_device_memory(OTHER_DATA_SIZE);
+ temp_cpu_context->power_state = CPU_POWER_MODE_RUN;
+ temp_cpu_context->sleep_type = CPU_SLEEP_TYPE_NULL;
+
+ zx_pm_main_table.cpu_context[i] = temp_cpu_context;
+ }
+
+ pr_info("[SLP] Power/PM_CONTEXT_INIT END\n");
+
+ return 0;
+}
+
+/**
+ * zx_set_context_level - select the context to save for a given sleep type.
+ * @sleep_type: idle or suspend.
+ *
+ * Sets the context flags that will be saved according to the sleep type.
+ *
+ */
+int zx_set_context_level (cpu_sleep_type_t sleep_type)
+{
+ unsigned cpu_id;
+
+ cpu_id = read_cpuid();
+ zx_pm_main_table.cur_cpu = cpu_id;
+
+ if(CPU_SLEEP_TYPE_LP1 == sleep_type) //suspend
+ {
+ zx_pm_main_table.cluster_context->flags = LP1_MG_SAVE_FLAG;
+ zx_pm_main_table.cpu_context[cpu_id]->flags = LP1_CPU_SAVE_FLAG;
+
+ zx_pm_main_table.cpu_context[cpu_id]->power_state = CPU_POWER_MODE_SHUTDOWN;
+ zx_pm_main_table.cluster_context->power_state = CPU_POWER_MODE_SHUTDOWN;
+ }
+ else if(CPU_SLEEP_TYPE_IDLE_LP2 == sleep_type)//deep idle
+ {
+ /*if(0 == cpu_id)*/
+ {
+ zx_pm_main_table.cluster_context->flags = LP2_MG_SAVE_FLAG;
+ zx_pm_main_table.cpu_context[cpu_id]->flags = LP2_CPU0_SAVE_FLAG;
+
+ zx_pm_main_table.cpu_context[cpu_id]->power_state = CPU_POWER_MODE_DORMANT;
+ zx_pm_main_table.cluster_context->power_state = CPU_POWER_MODE_DORMANT;
+ }
+ /*else
+ {
+ zx_pm_main_table.cluster_context->flags = MG_SAVE_FLAG_NULL;
+ zx_pm_main_table.cpu_context[cpu_id]->flags = LP2_CPUX_SAVE_FLAG;
+
+ zx_pm_main_table.cpu_context[cpu_id]->power_state = CPU_POWER_MODE_DORMANT;
+ }*/
+ }
+ else if(CPU_SLEEP_TYPE_LP3 == sleep_type)//wfi only
+ {
+ zx_pm_main_table.cluster_context->flags = MG_SAVE_FLAG_NULL;
+ zx_pm_main_table.cpu_context[cpu_id]->flags = MG_SAVE_FLAG_NULL;
+
+ zx_pm_main_table.cpu_context[cpu_id]->power_state = CPU_POWER_MODE_STANDBY;
+ zx_pm_main_table.cluster_context->power_state = CPU_POWER_MODE_STANDBY;
+ }
+
+ zx_pm_main_table.cpu_context[cpu_id]->sleep_type = sleep_type;
+
+ return 0;
+}
+
+/**
+ * This function saves all the context that will be lost
+ * when a CPU and cluster enter a low power state.
+ *
+ */
+void zx_pm_save_context(void)
+{
+ struct zx_cpu_context *context;
+ struct zx_cluster_context *cluster_context;
+ int is_secure = true;
+ int ret=0;
+
+ context = zx_pm_main_table.cpu_context[zx_pm_main_table.cur_cpu];
+ cluster_context = zx_pm_main_table.cluster_context;
+#ifdef CONFIG_ARCH_ZX297520V2 /* v3 has no internal timer */
+ if(context->flags&CPU_SAVE_TIMERS)
+ {
+ save_a9_timers(context->timer_data,
+ zx_pm_main_table.scu_address+PRIVATE_TWD_OFFSET);
+ context->saved_items |= CPU_SAVE_TIMERS;
+ }
+#endif
+ if(context->flags&CPU_SAVE_PMU)
+ {
+ save_performance_monitors(context->pmu_data);
+ context->saved_items |= CPU_SAVE_PMU;
+ }
+
+ if(context->flags&CPU_SAVE_VFP)
+ {
+ save_vfp(context->vfp_data);
+ context->saved_items |= CPU_SAVE_VFP;
+ }
+
+	/* only for SMP */
+ if(zx_pm_main_table.ic_interface_address)
+ {
+#ifdef CONFIG_ARCH_ZX297520V2
+#else
+ gic_set_processorsleep(1);
+#endif
+ save_gic_interface(context->gic_interface_data,
+ zx_pm_main_table.ic_interface_address,
+ is_secure);
+#ifdef CONFIG_ARCH_ZX297520V2
+ ret = save_gic_distributor_private(context->gic_dist_private_data,
+ zx_pm_main_table.ic_dist_address,
+ is_secure);
+#endif
+ }
+
+ //if(ret == -1)
+ //{
+ // BUG();
+ //while(1);
+ //}
+
+ if(context->flags&CPU_SAVE_DEBUG)
+ {
+ save_v7_debug(context->debug_data);
+ context->saved_items |= CPU_SAVE_DEBUG;
+ }
+
+
+ save_banked_registers(context->banked_registers);
+ save_cp15(context->cp15_data);
+
+ if (context->flags&CPU_SAVE_OTHER)
+ {
+#ifdef CONFIG_ARCH_ZX297520V2
+ save_a9_other(context->other_data, is_secure);
+#else
+ #if 0
+ save_a53_other(context->other_data, is_secure);
+ #endif
+#endif
+ context->saved_items |= CPU_SAVE_OTHER;
+ }
+
+ if (cluster_context->flags&CPU_SAVE_GLOBAL_TIMER)
+ {
+
+
+#ifdef CONFIG_ARCH_ZX297520V2
+ save_a9_global_timer(cluster_context->global_timer_data,
+ zx_pm_main_table.scu_address+GLOBAL_TIMER_OFFSET);
+#else
+ #if 0
+ save_a53_sys_timer(cluster_context->global_timer_data,
+ (0x01401000)); /*AP Timer0*/
+ #endif
+#endif
+ cluster_context->saved_items |= CPU_SAVE_GLOBAL_TIMER;
+ }
+
+ if(cluster_context->flags&CPU_SAVE_GIC)
+ {
+ ret = save_gic_distributor_shared(cluster_context->gic_dist_shared_data,
+ zx_pm_main_table.ic_dist_address,
+ is_secure);
+ cluster_context->saved_items |= CPU_SAVE_GIC;
+ }
+/*
+ if(ret == -1)
+ {
+ while(1);
+ }
+*/
+
+ save_control_registers(context->control_data, is_secure);
+ save_mmu(context->mmu_data);
+
+ if (cluster_context->flags&CPU_SAVE_SCU)
+ {
+#ifdef CONFIG_ARCH_ZX297520V2
+ save_a9_scu(cluster_context->scu_data, zx_pm_main_table.scu_address);
+#endif
+ cluster_context->saved_items |= CPU_SAVE_SCU;
+ }
+
+ if (cluster_context->flags&CPU_SAVE_L2)
+ {
+#ifdef CONFIG_ARCH_ZX297520V2
+ save_pl310(cluster_context->l2_data, zx_pm_main_table.l2_address);
+#endif
+ cluster_context->saved_items |= CPU_SAVE_L2;
+ }
+
+ if (cluster_context->flags&CPU_SAVE_CRM)
+ {
+ cluster_context->saved_items |= CPU_SAVE_CRM;
+ save_crm(cluster_context->crm_data, zx_pm_main_table.crm_address);
+ }
+
+ /*saved completely*/
+ sleep_ret_flag[zx_pm_main_table.cur_cpu] = 0;
+}
+
+/**
+ * This function restores all the context that was lost
+ * when a CPU and cluster entered a low power state. It is called shortly after
+ * reset, with the MMU and data cache off.
+ *
+ * Note: before the MMU is enabled, all addresses must be converted to physical addresses (PA).
+ */
+void zx_pm_restore_context(void)
+{
+ unsigned cpu_id;
+ struct zx_cpu_context *context;
+ struct zx_cluster_context *cluster_context;
+ int is_secure = true;
+
+ cpu_id = read_cpuid();
+
+ context = zx_pm_main_table.cpu_context[cpu_id];
+ cluster_context = zx_pm_main_table.cluster_context;
+
+ if (cluster_context->saved_items & CPU_SAVE_CRM)
+ {
+ restore_crm(cluster_context->crm_data, zx_pm_main_table.crm_address);
+ cluster_context->saved_items &= ~CPU_SAVE_CRM;
+ }
+
+ if (cluster_context->saved_items & CPU_SAVE_SCU)
+ {
+ #ifdef CONFIG_ARCH_ZX297520V2
+ restore_a9_scu(cluster_context->scu_data, zx_pm_main_table.scu_address);
+ #endif
+ cluster_context->saved_items &= ~CPU_SAVE_SCU;
+ }
+
+ if (cluster_context->saved_items & CPU_SAVE_L2)
+ {
+ #ifdef CONFIG_ARCH_ZX297520V2
+ restore_pl310(cluster_context->l2_data,
+ zx_pm_main_table.l2_address,
+ cluster_context->power_state == CPU_POWER_MODE_DORMANT);
+ #endif
+ cluster_context->saved_items &= ~CPU_SAVE_L2;
+ }
+
+ /* Next get the MMU back on */
+ restore_mmu(context->mmu_data);
+ restore_control_registers(context->control_data, is_secure);
+ /*
+ * MMU and L1 and L2 caches are on, we may now read/write any data.
+ * Now we need to restore the rest of this CPU's context
+ */
+
+ /* Restore shared items if necessary */
+ if (cluster_context->saved_items & CPU_SAVE_GIC)
+ {
+ gic_distributor_set_enabled(false, zx_pm_main_table.ic_dist_address);
+ restore_gic_distributor_shared(cluster_context->gic_dist_shared_data, zx_pm_main_table.ic_dist_address, is_secure);
+ gic_distributor_set_enabled(true, zx_pm_main_table.ic_dist_address);
+#ifdef CONFIG_ARCH_ZX297520V2
+
+ restore_gic_distributor_private(context->gic_dist_private_data, zx_pm_main_table.ic_dist_address, is_secure);
+#endif
+ restore_gic_interface(context->gic_interface_data, zx_pm_main_table.ic_interface_address, is_secure);
+ cluster_context->saved_items &= ~CPU_SAVE_GIC;
+ }
+ if (cluster_context->saved_items & CPU_SAVE_GLOBAL_TIMER)
+ {
+#ifdef CONFIG_ARCH_ZX297520V2
+ restore_a9_global_timer(cluster_context->global_timer_data,
+ zx_pm_main_table.scu_address+GLOBAL_TIMER_OFFSET);
+#else
+ #if 0
+ restore_a53_sys_timer(cluster_context->global_timer_data,
+ (0x01401000)); /*AP timer 0*/
+ #endif
+#endif
+ cluster_context->saved_items &= ~CPU_SAVE_GLOBAL_TIMER;
+ }
+
+
+ /* Get the debug registers restored, so we can debug most of the APPF code sensibly! */
+ if (context->saved_items&CPU_SAVE_DEBUG)
+ {
+ restore_v7_debug(context->debug_data);
+ context->saved_items &= ~CPU_SAVE_DEBUG;
+ }
+
+ if (context->saved_items&CPU_SAVE_OTHER)
+ {
+#ifdef CONFIG_ARCH_ZX297520V2
+ restore_a9_other(context->other_data, is_secure);
+#else
+ #if 0
+ restore_a53_other(context->other_data, is_secure);
+ #endif
+#endif
+ context->saved_items &= ~CPU_SAVE_OTHER;
+ }
+
+ restore_cp15(context->cp15_data);
+ restore_banked_registers(context->banked_registers);
+
+ if (context->saved_items&CPU_SAVE_VFP)
+ {
+ restore_vfp(context->vfp_data);
+ context->saved_items &= ~CPU_SAVE_VFP;
+ }
+
+ if (context->saved_items&CPU_SAVE_TIMERS)
+ {
+#ifdef CONFIG_ARCH_ZX297520V2
+ restore_a9_timers(context->timer_data,
+ zx_pm_main_table.scu_address+PRIVATE_TWD_OFFSET);
+#endif
+ context->saved_items &= ~CPU_SAVE_TIMERS;
+ }
+
+ if (context->saved_items&CPU_SAVE_PMU)
+ {
+ restore_performance_monitors(context->pmu_data);
+ context->saved_items &= ~CPU_SAVE_PMU;
+ }
+
+ cluster_context->power_state = CPU_POWER_MODE_RUN;
+ context->power_state = CPU_POWER_MODE_RUN;
+#ifdef CONFIG_ARCH_ZX297520V2
+#else
+ gic_set_processorsleep(0);
+#endif
+ /*restore completely*/
+ sleep_ret_flag[cpu_id] = 1;
+}
+/**
+ * This function restores the context when the CPU did not power down correctly (aborted sleep).
+ *
+ */
+void zx_pm_restore_abort_context(void)
+{
+ unsigned cpu_id;
+ struct zx_cpu_context *context;
+ struct zx_cluster_context *cluster_context;
+
+ cpu_id = read_cpuid();
+ context = zx_pm_main_table.cpu_context[cpu_id];
+ cluster_context = zx_pm_main_table.cluster_context;
+
+ if (cluster_context->saved_items & CPU_SAVE_CRM)
+ {
+ restore_crm(cluster_context->crm_data, zx_pm_main_table.crm_address);
+ cluster_context->saved_items &= ~CPU_SAVE_CRM;
+ }
+
+ if (cluster_context->saved_items & CPU_SAVE_SCU)
+ {
+ cluster_context->saved_items &= ~CPU_SAVE_SCU;
+ }
+
+ if (cluster_context->saved_items & CPU_SAVE_L2)
+ {
+ cluster_context->saved_items &= ~CPU_SAVE_L2;
+ }
+
+ /*
+ * MMU and L1 and L2 caches are on, we may now read/write any data.
+ * Now we need to restore the rest of this CPU's context
+ */
+
+ /* Restore shared items if necessary */
+ if (cluster_context->saved_items & CPU_SAVE_GIC)
+ {
+ cluster_context->saved_items &= ~CPU_SAVE_GIC;
+ }
+
+ if (cluster_context->saved_items & CPU_SAVE_GLOBAL_TIMER)
+ {
+#ifdef CONFIG_ARCH_ZX297520V2
+ restore_a9_global_timer(cluster_context->global_timer_data,
+ zx_pm_main_table.scu_address+GLOBAL_TIMER_OFFSET);
+#else
+ #if 0
+ restore_a53_sys_timer(cluster_context->global_timer_data,
+ (0x01401000));
+ #endif
+#endif
+ cluster_context->saved_items &= ~CPU_SAVE_GLOBAL_TIMER;
+ }
+
+
+ /* Get the debug registers restored, so we can debug most of the APPF code sensibly! */
+ if (context->saved_items&CPU_SAVE_DEBUG)
+ {
+ context->saved_items &= ~CPU_SAVE_DEBUG;
+ }
+
+ if (context->saved_items&CPU_SAVE_OTHER)
+ {
+ context->saved_items &= ~CPU_SAVE_OTHER;
+ }
+
+ if (context->saved_items&CPU_SAVE_VFP)
+ {
+ restore_vfp(context->vfp_data);
+ context->saved_items &= ~CPU_SAVE_VFP;
+ }
+
+ if (context->saved_items&CPU_SAVE_TIMERS)
+ {
+#ifdef CONFIG_ARCH_ZX297520V2
+ restore_a9_timers(context->timer_data,
+ zx_pm_main_table.scu_address+PRIVATE_TWD_OFFSET);
+#endif
+ context->saved_items &= ~CPU_SAVE_TIMERS;
+ }
+
+ if (context->saved_items&CPU_SAVE_PMU)
+ {
+ restore_performance_monitors(context->pmu_data);
+ context->saved_items &= ~CPU_SAVE_PMU;
+ }
+
+ cluster_context->power_state = CPU_POWER_MODE_RUN;
+ context->power_state = CPU_POWER_MODE_RUN;
+#ifdef CONFIG_ARCH_ZX297520V2
+#else
+ gic_set_processorsleep(0);
+#endif
+ /*restore completely*/
+ sleep_ret_flag[cpu_id] = 1;
+}
+
+
+/**
+ * Simple Device memory allocator function.
+ * Returns the start address of the allocated region.
+ * Allocates a region of size bytes; size is rounded up to a multiple of sizeof(long long).
+ * Memory is zero-initialized.
+ *
+ * This function is from ARM.
+ */
+static long long *device_memory = (void *)pm_device_memory;
+static unsigned get_device_memory(unsigned size)
+{
+ static unsigned watermark = 0;
+ static unsigned total_size = 0;
+ unsigned ret, chunks_required;
+
+ ret = watermark;
+ chunks_required = (size + sizeof(long long) - 1) / sizeof(long long);
+ watermark += chunks_required;
+
+ if (watermark >= PM_MEMORY_SIZE / sizeof(long long))
+ {
+ pr_info("[SLP] error alloc size: %d Bytes \n", size);
+ BUG();
+ // while(1);
+		return 0; /* Allocation failed */
+ }
+
+ total_size += size;
+ pr_info("[SLP] alloc size: %d Bytes , total size %d Bytes\n", size, total_size);
+
+
+ return (unsigned) &device_memory[ret];
+}
+
+/**
+ * This function tells CPU_M0 the address of the wakeup code used when the CPU wakes from deep
+ * sleep (shutdown or dormant). The wakeup code lives in IRAM (address 0), but the IRAM powers
+ * down when the AP sub-system or the whole chip powers down, so the A9 passes this address to
+ * CPU_M0; before the A9 is reset, CPU_M0 copies the code from this address back into the IRAM.
+ *
+ * This function should be called after zx_pm_context_init.
+ */
+void zx_set_wakeup_address(u32 wakeup_addr)
+{
+ wakeup_ram_area *wakeup_ram;
+
+ pr_info("[SLP] Power/WAKEUP_ADDRESS \n");
+
+#ifdef CONFIG_ARCH_ZX297520V2
+ zx_pm_main_table.wakeup_vaddr = (u32)ioremap_mem(wakeup_addr, WAKEUP_RAM_SIZE);
+#else
+ zx_pm_main_table.wakeup_vaddr = (u32)__arm_ioremap_exec(wakeup_addr, WAKEUP_RAM_SIZE,0);
+#endif
+ BUG_ON((void *)zx_pm_main_table.wakeup_vaddr == NULL);
+ zx_pm_main_table.wakeup_ram_size = WAKEUP_RAM_SIZE;
+ memset((void *)zx_pm_main_table.wakeup_vaddr, 0, zx_pm_main_table.wakeup_ram_size);
+
+ wakeup_ram = (wakeup_ram_area *)zx_pm_main_table.wakeup_vaddr;
+
+ /*copy the ddr reset code to iram*/
+ memcpy(wakeup_ram->wakeup_code, (void *)cpu_wake_up, WAKEUP_CODE_LENGTH);
+ memcpy(wakeup_ram->sleep_code, (void *)do_sleep_cpu, SLEEP_CODE_LENGTH);
+ memcpy(wakeup_ram->ddr_dfs_code, (void *)waiting_ddr_dfs, DDR_DFS_CODE_LENGTH);
+
+ wakeup_ram->reset_handler_vaddr = __pa((u32)((void *)cpu_reset_handler));
+}
+
+/**
+ * get current sleep_type helper function.
+ *
+ * This code only used pm internel.
+ */
+cpu_sleep_type_t pm_get_sleep_type(void)
+{
+ return zx_pm_main_table.cpu_context[zx_pm_main_table.cur_cpu]->sleep_type;
+}
+
+/**
+ * Initialize low-power settings for the SCU and L2.
+ *
+ * For PM-internal use only.
+ */
+void pm_init_l2_and_scu(void)
+{
+#ifdef CONFIG_ARCH_ZX297520V2
+ init_lp_of_scu(zx_pm_main_table.scu_address);
+ init_lp_of_l2(zx_pm_main_table.l2_address);
+#endif
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-context.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-context.h
new file mode 100644
index 0000000..8df5056
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-context.h
@@ -0,0 +1,135 @@
+/*
+ * zx-pm-context.h - cpu sleep context save&restore interface for power management.
+ *
+ * Written by zxp.
+ *
+ */
+
+#ifndef _ZX_PM_CONTEXT_H
+#define _ZX_PM_CONTEXT_H
+
+#define CPU_SAVE_PMU (1U << 0)
+#define CPU_SAVE_TIMERS (1U << 1)
+#define CPU_SAVE_VFP (1U << 2)
+#define CPU_SAVE_DEBUG (1U << 3)
+#define CPU_SAVE_GIC (1U << 4)
+#define CPU_SAVE_OTHER (1U << 5)
+
+#define CPU_SAVE_GLOBAL_TIMER (1U << 16)
+#define CPU_SAVE_L2 (1U << 17)
+#define CPU_SAVE_L2_RAM (1U << 18)
+#define CPU_SAVE_SCU (1U << 19)
+#define CPU_SAVE_CRM (1U << 20)
+
+#define SCU_ADDRESS (ZX_A9_PERIPHERAL_BASE)
+#define SCU_ADDRESS_P (ZX_A9_PERIPHERAL_PHYS)
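+/* The offsets below are within the Cortex-A9 private peripheral region, relative to the SCU base */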
+#define IC_INTERFACE_OFFSET (0x100)
+#define GLOBAL_TIMER_OFFSET (0x200)
+#define PRIVATE_TWD_OFFSET (0x600)
+#define IC_DISTRIBUTOR_OFFSET (0x1000)
+#define L2_CONTROLLER_ADDRESS (ZX_L2CACHE_CONFIG_BASE)
+#define L2_CONTROLLER_ADDRESS_P (ZX_L2CACHE_CONFIG_PHYS)
+#define A9_CRM_ADDRESS (A9_CRM_BASE)
+#define A9_CRM_ADDRESS_P (A9_CRM_PHYS)
+
+#define GIC_DIST_SET_PENDING (SCU_ADDRESS+IC_DISTRIBUTOR_OFFSET+0x200)
+#define GIC_CPU_HIGHPRI_PENDING (SCU_ADDRESS+IC_INTERFACE_OFFSET+0x18)
+#ifdef CONFIG_ARCH_ZX297520V2
+#define LP1_MG_SAVE_FLAG (CPU_SAVE_GIC|/*CPU_SAVE_GLOBAL_TIMER|*/CPU_SAVE_L2|CPU_SAVE_L2_RAM|CPU_SAVE_SCU|CPU_SAVE_CRM)
+#define LP1_CPU_SAVE_FLAG (CPU_SAVE_PMU|CPU_SAVE_TIMERS|/*CPU_SAVE_VFP|*//*CPU_SAVE_DEBUG|*/CPU_SAVE_GIC|CPU_SAVE_OTHER)
+#else
+#define LP1_MG_SAVE_FLAG (CPU_SAVE_GIC|/*CPU_SAVE_GLOBAL_TIMER|*//*CPU_SAVE_L2|CPU_SAVE_L2_RAM|CPU_SAVE_SCU|*/CPU_SAVE_CRM)
+#define LP1_CPU_SAVE_FLAG (CPU_SAVE_PMU|/*CPU_SAVE_TIMERS|*//*CPU_SAVE_VFP|*//*CPU_SAVE_DEBUG|*/CPU_SAVE_GIC|CPU_SAVE_OTHER)
+#endif
+
+#define LP2_MG_SAVE_FLAG (CPU_SAVE_GIC|/*CPU_SAVE_GLOBAL_TIMER|*/CPU_SAVE_L2|CPU_SAVE_L2_RAM|CPU_SAVE_SCU|CPU_SAVE_CRM)
+#define LP2_CPU0_SAVE_FLAG (CPU_SAVE_PMU|/*CPU_SAVE_TIMERS|CPU_SAVE_VFP|*//*CPU_SAVE_DEBUG|*/CPU_SAVE_GIC|CPU_SAVE_OTHER)
+#define LP2_CPUX_SAVE_FLAG (CPU_SAVE_PMU|/*CPU_SAVE_TIMERS|CPU_SAVE_VFP|*//*CPU_SAVE_DEBUG|*/CPU_SAVE_GIC|CPU_SAVE_OTHER)
+#define MG_SAVE_FLAG_NULL (0)
+
+/* Maximum size of each item of context, in bytes */
+#define PMU_DATA_SIZE (128)
+#define TIMER_DATA_SIZE (128)
+#define VFP_DATA_SIZE (288)
+#define GIC_INTERFACE_DATA_SIZE (64)
+#define GIC_DIST_PRIVATE_DATA_SIZE (96)
+#define BANKED_REGISTERS_SIZE (128)
+#define CP15_DATA_SIZE (64)
+#define DEBUG_DATA_SIZE (352)
+#define MMU_DATA_SIZE (64)
+#define OTHER_DATA_SIZE (32)
+#define CONTROL_DATA_SIZE (64)
+
+#define GIC_DIST_SHARED_DATA_SIZE (2592)
+#define SCU_DATA_SIZE (32)
+#define L2_DATA_SIZE (96)
+#define GLOBAL_TIMER_DATA_SIZE (128)
+#ifdef CONFIG_ARCH_ZX297520V2
+#define CRM_DATA_SIZE (136)
+#else
+#define CRM_DATA_SIZE (64)//(136)
+#endif
+#define PM_MEMORY_SIZE (5120)
+#define MAX_CPU_NUM (1)
+#define WAKEUP_RAM_SIZE (SZ_1K)
+
+struct zx_cluster_context
+{
+ u32 flags; /*before save context, set this flag*/
+ u32 saved_items; /*after save a context, set a flag for restore*/
+ u32 *gic_dist_shared_data;
+ u32 *l2_data;
+ u32 *scu_data;
+ u32 *global_timer_data;
+ u32 *crm_data;
+ cpu_power_mode_t power_state; /*dormant or shutdown*/
+};
+
+struct zx_cpu_context
+{
+ u32 flags; /*before save context, set this flag*/
+ u32 saved_items; /*after save a context, set a flag for restore*/
+ u32 *control_data;
+ u32 *pmu_data;
+ u32 *timer_data;
+ u32 *vfp_data;
+ u32 *gic_interface_data;
+ u32 *gic_dist_private_data;
+ u32 *banked_registers;
+ u32 *cp15_data;
+ u32 *debug_data;
+ u32 *mmu_data;
+ u32 *other_data;
+ cpu_power_mode_t power_state; /*dormant or shutdown*/
+ cpu_sleep_type_t sleep_type; /*hotplug or idle or suspend*/
+};
+
+
+struct zx_pm_main_table
+{
+ u32 wakeup_vaddr; /*the iram:0 shadow ram*/
+ u32 wakeup_ram_size;
+ u32 num_cpus;
+ u32 cur_cpu;
+ u32 scu_address; /* 0 => no SCU */
+ u32 scu_address_p;
+ u32 ic_dist_address; /* 0 => no Interrupt Controller */
+ u32 ic_interface_address;
+ u32 l2_address; /* 0 => no L2CC */
+ u32 l2_address_p;
+ u32 crm_address;
+ u32 crm_address_p;
+ struct zx_cluster_context* cluster_context;
+ struct zx_cpu_context* cpu_context[MAX_CPU_NUM];
+};
+
+
+
+extern int zx_pm_context_init(void);
+extern int zx_set_context_level (cpu_sleep_type_t sleep_type);
+extern void zx_pm_save_context(void);
+extern void zx_pm_restore_context(void);
+extern void zx_pm_restore_abort_context(void);
+extern void pm_init_l2_and_scu(void);
+
+#endif /*_ZX_PM_CONTEXT_H*/
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-custom.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-custom.c
new file mode 100644
index 0000000..c2873a0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-custom.c
@@ -0,0 +1,105 @@
+/*
+ * zx-pm-custom.c - power management custom interface.
+ *
+ * Written by zxp.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
+
+#include <mach/irqs.h>
+#include "zx-pm.h"
+
+/*===================================================================
+ *== 7520v2 ap interrupt arrangement ===============================
+ *===================================================================
+ *=================================== evb dc mifi =======
+ *== timer1 -- wake y y y
+ *== m02ap_icp y y y
+ *== ps2ap_icp y y y
+ *== ext0 -- pmu y n y
+ *== ext1 -- pg(charger) y n y
+ *== ext2 -- wps_key n n y
+ *== ext3 -- rst_key y n y
+ *== ext4 -- pwr_key y n n
+ *== ext7 -- wps_key y n n
+ *== ext6 -- wifi_wake y n y
+ *== alarm y n n
+ *== rtc y n n
+ *===================================================================
+ */
+/* evb */
+#if defined(CONFIG_ARCH_ZX297520V3_MIFI) || defined(CONFIG_ARCH_ZX297520V3_EVB) || defined(CONFIG_ARCH_ZX297520V3_MDL) ||\
+ defined(CONFIG_ARCH_ZX297520V3_PHONE) || defined(CONFIG_ARCH_ZX297520V3_WATCH) || defined(CONFIG_ARCH_ZX297520V3_CPE) ||\
+ defined(CONFIG_ARCH_ZX297520V3_POC)|| defined(CONFIG_ARCH_ZX297520V3_FWP)|| defined(CONFIG_ARCH_ZX297520V3_CAP) ||\
+ defined(CONFIG_ARCH_ZX297520V3_UFI)|| defined(CONFIG_ARCH_ZX297520V3_CPE_SWITCH)
+
+
+static unsigned int wake_source_for_sleep[] =
+{
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ WAKE_SRC_ICP_M02PS | WAKE_SRC_ICP_AP2PS | WAKE_SRC_PS_TIMER1 | \
+ WAKE_SRC_ICP_PHY2PS |WAKE_SRC_SPCU_PW,
+#else
+ WAKE_SRC_ICP_M02AP | WAKE_SRC_ICP_PS2AP | WAKE_SRC_AP_TIMER1 | \
+ WAKE_SRC_ICP_PHY2AP ,
+#endif
+ /*WAKE_SRC_RTC_ALARM | WAKE_SRC_RTC_TIMER | \
+ WAKE_SRC_EXTERNAL0 | WAKE_SRC_EXTERNAL1 | WAKE_SRC_EXTERNAL2 | \
+ WAKE_SRC_EXTERNAL3 | WAKE_SRC_EXTERNAL4 | WAKE_SRC_EXTERNAL5 | \
+ WAKE_SRC_EXTERNAL6 | WAKE_SRC_EXTERNAL13 | WAKE_SRC_EXTERNAL14 |\
+ WAKE_SRC_EXTERNAL7 ,*/
+ /*|\
+ WAKE_SRC_USB_POWERDWN_UP | WAKE_SRC_USB_POWERDWN_DOWN,*/
+};
+
+#elif defined(CONFIG_ARCH_ZX297520V2EVB)
+static unsigned int wake_source_for_sleep[] =
+{
+ WAKE_SRC_ICP_M02AP | WAKE_SRC_ICP_PS2AP | WAKE_SRC_AP_TIMER1,/*| \
+ WAKE_SRC_RTC_ALARM | WAKE_SRC_RTC_TIMER |
+ WAKE_SRC_EXTERNAL0 | WAKE_SRC_EXTERNAL1 | WAKE_SRC_EXTERNAL2 | \
+ WAKE_SRC_EXTERNAL3 | WAKE_SRC_EXTERNAL4 | WAKE_SRC_EXTERNAL5 | \
+ WAKE_SRC_EXTERNAL6 | WAKE_SRC_EXTERNAL13 | WAKE_SRC_EXTERNAL14 |\
+ WAKE_SRC_EXTERNAL7 ,\*/
+ /*|\
+ WAKE_SRC_USB_POWERDWN_UP | WAKE_SRC_USB_POWERDWN_DOWN,*/
+};
+#else
+
+/* dc */
+static unsigned int wake_source_for_sleep[] =
+{
+ WAKE_SRC_ICP_M02AP | WAKE_SRC_ICP_PS2AP | WAKE_SRC_AP_TIMER1 | \
+ WAKE_SRC_EXTERNAL0,
+};
+
+#endif
+
+unsigned int pm_get_wakesource(void)
+{
+ return wake_source_for_sleep[0];
+}
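+/* Only entry 0 of wake_source_for_sleep[] is consumed; it is the OR of
+ * the WAKE_SRC_* bits selected by the board configuration above. */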
+
+/*===================================================================
+ *== 7520v2 ap uart debug interface ===============================
+ *===================================================================
+ */
+//#define DEBUG_UART0
+#define DEBUG_UART1
+void __iomem *debug_uart_base(void)
+{
+#if defined(DEBUG_UART0)
+ return ZX29_UART0_VA;
+#elif defined(DEBUG_UART1)
+ return ZX29_UART1_VA;
+#endif
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-helpers.S b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-helpers.S
new file mode 100644
index 0000000..8b09038
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-helpers.S
@@ -0,0 +1,151 @@
+/*
+ * ZTE CPU low power powerdown and powerup helper code.
+ *
+ * Copyright (C) 2013 ZTE, Inc.
+ * Written by ZXP
+ *
+ * This program is free software,you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+
+
+#define MIDR_CPU_MASK 0xff00fff0
+#define MMU_DISABLE_MASK 0x10001807 /* clear TRE, I Z C M */
+
+
+#define SCTLR_I (1<<12)
+#define SCTLR_Z (1<<11)
+#define SCTLR_C (1<<2)
+
+/**
+ * This function takes three arguments
+ * r0: Destination start address (must be word aligned)
+ * r1: Source start address (must be word aligned)
+ * r2: Number of words to copy
+ * Return value is updated destination pointer (first unwritten word)
+ */
+ENTRY(copy_words)
+ cmp r2, #0
+ beq copy_end
+loop_copy:
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+ subs r2, r2, #1
+ bne loop_copy
+copy_end:
+ bx lr
+ENDPROC(copy_words)
+
+ENTRY(copy_wordsandclear)
+ cmp r2, #0
+ beq copyclear_end
+loop_copyclear:
+ ldr r3, [r1]
+ str r3, [r0], #4
+ str r2,[r1] /* only modifies the register value, for temporary verification; the correct approach is to clear it to 0 */
+ add r1,r1,#4
+ subs r2, r2, #1
+ bne loop_copyclear
+copyclear_end:
+ bx lr
+ENDPROC(copy_wordsandclear)
+/* ; Note: assumes conversion will be successful! */
+ENTRY(va_to_pa)
+ mov r1, r0
+ mcr p15, 0, r0, c7, c8, 1 /* Priv Write Current World VA-PA */
+ mrc p15, 0, r0, c7, c4, 0 /* Get PA */
+ bfc r0, #0, #12 /* We want top bits of translated addr */
+ bfc r1, #12, #20 /* plus bottom bits of input addr */
+ orr r0, r0, r1
+ bx lr
+ENDPROC(va_to_pa)
+
+
+ENTRY(read_sctlr)
+ mrc p15, 0, r0, c1, c0, 0
+ bx lr
+ENDPROC(read_sctlr)
+
+
+ENTRY(write_sctlr)
+ mcr p15, 0, r0, c1, c0, 0
+ isb
+ bx lr
+ENDPROC(write_sctlr)
+
+ENTRY(read_drar)
+ mrc p14, 0, r0, c1, c0, 0 /* Read Debug ROM Address Register */
+ bx lr
+ENDPROC(read_drar)
+
+
+ENTRY(read_dsar)
+ mrc p14, 0, r0, c2, c0, 0 /* Read Debug Self Address Offset Register */
+ bx lr
+ENDPROC(read_dsar)
+
+
+
+ENTRY(write_osdlr)
+ mcr p14, 0, r0, c1, c3, 4 /* Write OS Double Lock Register */
+ bx lr
+ENDPROC(write_osdlr)
+
+
+
+ENTRY(disable_mmu)
+ mrc p15, 0, r3, c1, c0, 0
+ ldr r2, =MMU_DISABLE_MASK
+ bic r3, r3, r2
+ dsb
+ mcr p15, 0, r3, c1, c0, 0
+ isb
+ bx lr
+ENDPROC(disable_mmu)
+
+ENTRY(read_cpuid)
+ mrc p15, 0, r0, c0, c0, 5 /* Read MPIDR */
+ and r0, r0, #0xff /* extract CPU number */
+ bx lr
+ENDPROC(read_cpuid)
+
+
+ENTRY(enable_cache)
+ mrc p15, 0, r0, c1, c0, 0
+ movw r1, #SCTLR_I | SCTLR_Z | SCTLR_C
+ orr r0, r0, r1
+ mcr p15, 0, r0, c1, c0, 0
+ bx lr
+ENDPROC(enable_cache)
+
+
+ENTRY(exit_coherency)
+ isb
+ dsb
+ mrc p15,0,r0,c1,c0,1
+ bic r0,r0,#0x00000040
+ mcr p15,0,r0,c1,c0,1
+ isb
+ dsb
+ bx lr
+ENDPROC(exit_coherency)
+
+
+ENTRY(join_coherency)
+ isb
+ dsb
+ mrc p15,0,r0,c1,c0,1
+ orr r0,r0,#0x00000040
+ mcr p15,0,r0,c1,c0,1
+ isb
+ dsb
+ bx lr
+ENDPROC(join_coherency)
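+/* exit_coherency/join_coherency toggle bit 6 (the SMP bit) of the
+ * Auxiliary Control Register, taking the core out of and back into the
+ * coherency domain around power-down. */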
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-helpers.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-helpers.h
new file mode 100644
index 0000000..1d7be2f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-helpers.h
@@ -0,0 +1,170 @@
+/*
+ * zx-pm.h - power management helpers interface.
+ *
+ * Written by zxp.
+ *
+ */
+
+#ifndef _ZX_PM_HELPERS_H
+#define _ZX_PM_HELPERS_H
+
+/*
+ * common helper functions
+ */
+
+extern unsigned va_to_pa(unsigned virtual_address);
+extern unsigned read_drar(void);
+extern unsigned read_dsar(void);
+extern void write_osdlr(unsigned value);
+extern unsigned read_sctlr(void);
+extern void write_sctlr(unsigned value);
+extern void disable_mmu(void);
+extern unsigned read_cpuid(void);
+extern void enable_cache(void);
+extern void exit_coherency(void);
+extern void join_coherency(void);
+
+extern unsigned * copy_words(volatile unsigned *destination, volatile unsigned *source, unsigned num_words);
+extern unsigned * copy_wordsandclear(volatile unsigned *destination, volatile unsigned *source, unsigned num_words);
+
+/*
+ * V7 functions
+ */
+extern void save_control_registers(u32 *pointer, int is_secure);
+extern void save_mmu(u32 *pointer);
+extern void save_cpu_if(u32 *pointer);
+extern void gic_mask(void);
+extern void save_performance_monitors(u32 *pointer);
+extern void save_banked_registers(u32 *pointer);
+extern void save_cp15(u32 *pointer);
+extern void save_vfp(u32 *pointer);
+extern void save_generic_timer(u32 *pointer);
+extern void save_v7_debug(u32 *pointer);
+
+extern void restore_control_registers(u32 *pointer, int is_secure);
+extern void zx_restore_control_registers(u32 *pointer, int is_secure);
+extern void restore_mmu(u32 *pointer);
+extern void restore_cpu_if(u32 *pointer);
+extern void gic_unmask(void);
+extern void restore_performance_monitors(u32 *pointer);
+extern void restore_banked_registers(u32 *pointer);
+extern void restore_cp15(u32 *pointer);
+extern void restore_vfp(u32 *pointer);
+extern void restore_generic_timer(u32 *pointer);
+extern void restore_v7_debug(u32 *pointer);
+
+extern void disable_clean_inv_dcache_v7_l1(void);
+extern void disable_clean_inv_cache_pl310(unsigned pl310_address, unsigned stack_start, unsigned stack_size, int disable);
+extern void disable_clean_inv_dcache_v7_all(void);
+extern void clean_dcache_v7_l1(void);
+extern void clean_mva_dcache_v7_l1(void *mva);
+extern void invalidate_icache_v7_pou(void);
+extern void invalidate_icache_v7(void);
+extern void invalidate_dcache_v7_all(void);
+extern void enable_icache_v7(void);
+extern void appf_smc_handler(void);
+extern void enter_secure_monitor_mode(void);
+extern void enter_nonsecure_svc_mode(void);
+extern void set_security_state(int ns);
+
+/*
+ * PL310 functions
+ */
+extern void clean_inv_range_pl310(void *start, unsigned size, unsigned pl310_address); /* addresses are inclusive */
+extern void clean_range_pl310(void *start, unsigned size, unsigned pl310_address); /* addresses are inclusive */
+extern void inv_range_pl310(void *start, unsigned size, unsigned pl310_address);
+extern void clean_inv_pl310(unsigned pl310_address);
+extern void clean_pl310(unsigned pl310_address);
+extern void save_pl310(u32 *pointer, unsigned pl310_address);
+extern void restore_pl310(u32 *pointer, unsigned pl310_address, int dormant);
+extern void set_enabled_pl310(unsigned enabled, unsigned pl310_address);
+extern void set_status_pl310(unsigned status, unsigned pl310_address);
+extern int is_enabled_pl310(unsigned pl310_address);
+extern void init_lp_of_l2(unsigned pl310_address);
+extern void clean_disable_pl310(unsigned pl310_address);
+
+/*
+ * GIC functions
+ */
+extern int gic_distributor_set_enabled(int enabled, unsigned gic_distributor_address);
+extern void save_gic_interface(u32 *pointer, unsigned gic_interface_address, int is_secure);
+extern int save_gic_distributor_private(u32 *pointer, unsigned gic_distributor_address, int is_secure);
+extern int save_gic_distributor_shared(u32 *pointer, unsigned gic_distributor_address, int is_secure);
+extern void restore_gic_interface(u32 *pointer, unsigned gic_interface_address, int is_secure);
+extern void restore_gic_distributor_private(u32 *pointer, unsigned gic_distributor_address, int is_secure);
+extern void restore_gic_distributor_shared(u32 *pointer, unsigned gic_distributor_address, int is_secure);
+extern unsigned int gic_get_cur_pending(unsigned gic_interface_address);
+
+extern unsigned int gic_set_processorsleep(bool issleep);
+extern void pm_save_gic_wake_enable(void);
+extern void pm_restore_gic_wake_enable(void);
+/*
+ * A9 functions
+ */
+extern void save_a9_timers(u32 *pointer, unsigned twd_address);
+extern void save_a9_global_timer(u32 *pointer, unsigned timer_address);
+extern void save_a9_other(u32 *pointer, int is_secure);
+
+extern void restore_a9_timers(u32 *pointer, unsigned twd_address);
+extern void restore_a9_global_timer(u32 *pointer, unsigned timer_address);
+extern void restore_a9_other(u32 *pointer, int is_secure);
+
+
+/*
+ * A9 SCU functions
+ */
+extern void save_a9_scu(u32 *pointer, unsigned scu_address);
+extern void restore_a9_scu(u32 *pointer, unsigned scu_address);
+extern void set_status_a9_scu(unsigned cpu_index, unsigned status, unsigned scu_address);
+extern int num_cpus_from_a9_scu(unsigned scu_address);
+extern void init_lp_of_scu(unsigned scu_address);
+
+/*
+ * pm common functions
+ */
+extern void zx_set_wakeup_address(u32 wakeup_addr);
+extern void pm_printk(const char *fmt, ...);
+extern cpu_sleep_type_t pm_get_sleep_type(void);
+extern int zx_get_pll_used(void);
+extern int pm_get_pll_used(void);
+extern u32 pm_get_sleep_time(void);
+extern void pm_init_crm_temp(void);
+extern u64 read_persistent_us(void);
+
+extern void save_a53_sys_timer(u32 *pointer, unsigned timer_address);
+extern void restore_a53_sys_timer(u32 *pointer, unsigned timer_address);
+extern void disable_flush_dcache_L1_flush_cache_L2(void);
+
+/*
+ *pm watchdog functions
+ */
+#ifdef CONFIG_ZX29_WATCHDOG
+extern void zx_wdt_handle_before_psm(void);
+extern void zx_wdt_handle_after_psm(void);
+#else
+static inline void zx_wdt_handle_before_psm(void){}
+static inline void zx_wdt_handle_after_psm(void){}
+#endif
+
+/*
+ *pm suspend functions
+ */
+extern void zx_suspend_init(void);
+
+/*
+ *cpu sleep functions
+ */
+extern void cpu_reset_handler(void);
+extern void cpu_wake_up(void);
+extern void do_sleep_cpu(void);
+extern void zx_jump_addr(unsigned long addr);
+extern void waiting_ddr_dfs(unsigned long flag_addr);
+
+/*
+ * GCC Compatibility
+ */
+#ifndef __ARMCC_VERSION
+#define __nop() __asm__ __volatile__( "nop\n" )
+#endif
+
+#endif /*_ZX_PM_HELPERS_H*/
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-null.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-null.c
new file mode 100644
index 0000000..969dddd
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-null.c
@@ -0,0 +1,24 @@
+/*
+ * ZTE power management main driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by zxp
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+
+unsigned int pm_get_wakesource(void)
+{
+ return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-suspend.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-suspend.c
new file mode 100644
index 0000000..8540535
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-suspend.c
@@ -0,0 +1,140 @@
+/*
+ * ZTE suspend power management driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by zxp
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/suspend.h>
+#include <linux/math64.h>
+
+#include <asm/system_misc.h>
+
+#include "zx-pm.h"
+
+/**************************************
+ * SW code for suspend
+ **************************************/
+
+/*********************************************************************
+ * FUNCTION DEFINITIONS
+ ********************************************************************/
+static int zx_suspend_ops_valid(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM;
+}
+
+static int zx_suspend_ops_begin(suspend_state_t state)
+{
+ pm_ram_log("@@@@@@@@@@ Chip_pm_begin @@@@@@@@@@\n");
+
+
+ disable_hlt();
+ pm_set_wakeup_reason(WR_NONE);
+
+ return 0;
+}
+
+static int zx_suspend_ops_prepare(void)
+{
+ pm_ram_log("@@@@@@@@@@ Chip_pm_prepare @@@@@@@@@@\n");
+
+ return 0;
+}
+
+#define AP_SLEEP_IN_TIMERS IRAM_ADDR_FOR_WAKE_CNT
+#define AP_SLEEP_OUT_TIMERS IRAM_ADDR_FOR_SLEEP_CNT
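+/* These two IRAM words hold the suspend entry/exit timestamps (in us,
+ * from read_persistent_us()); their difference is written to
+ * AP_SLEEP_TIME_ADDR at the end of the enter path below. */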
+
+static int zx_suspend_ops_enter(suspend_state_t state)
+{
+ /* legacy log */
+ pm_ram_log("@@@@@@@@@@ Chip_pm_enter @@@@@@@@@@\n");
+
+ pm_write_reg(AP_SLEEP_IN_TIMERS, (unsigned int)read_persistent_us());
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_SUSPEND_STATUS_FLAG,0x1);
+#endif
+ /* deal with soc/clk/powerdomain/pll outside the A9 module:
+ * suspend debug uart, GPIO and other devices outside the A9 */
+ zx_board_suspend();
+ /* close clocks & power domains that the PCU does not control */
+ zx_dpm_suspend();
+
+ /* mask all interrupts and back them up, then unmask wakeup interrupts */
+ zx_unmask_wakeup_interrupt();
+
+ zx_pm_pre_suspend();
+// setup_timer_wakeup(__SLEEP_TIME_1s__*20); //20s
+// setup_timer_wakeup(60); //61us (each tick is 30.5us), used to test abnormal exit from sleep
+
+ if(pm_get_mask_info()&PM_SUSPEND_WFI)
+ do_wfi();
+ else
+ {
+ /* cpu enters low-power mode */
+#ifdef CONFIG_ZX_PM_DEBUG
+ // zx_enter_sleep(CPU_SLEEP_TYPE_IDLE_LP2);
+ pm_write_reg(AP_SUSPEND_STATUS_FLAG,0x2);
+#endif
+ zx_enter_sleep(CPU_SLEEP_TYPE_LP1);
+ }
+
+ /* get wakeup reason */
+ //pm_wake_reason = pm_get_wakeup_reason();
+ zx_pm_post_suspend();
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_SUSPEND_STATUS_FLAG,0x8);
+#endif
+ /* restore interrupt that masked */
+ zx_interrupt_mask_restore();
+
+ /* resume clocks & power domains */
+ zx_dpm_resume();
+ /* resume debug uart, GPIO and other devices outside the A9 */
+ zx_board_resume();
+
+ pm_write_reg(AP_SLEEP_OUT_TIMERS, (unsigned int)read_persistent_us());
+ pm_write_reg(AP_SLEEP_TIME_ADDR,(pm_read_reg(AP_SLEEP_OUT_TIMERS) - pm_read_reg(AP_SLEEP_IN_TIMERS)));
+
+ return 0;
+}
+
+static void zx_suspend_ops_finish(void)
+{
+ pm_ram_log("@@@@@@@@@@ Chip_pm_finish @@@@@@@@@@\n");
+}
+
+static void zx_suspend_ops_end(void)
+{
+ pm_ram_log("@@@@@@@@@@ Chip_pm_end @@@@@@@@@@\n");
+
+ enable_hlt();
+}
+
+static struct platform_suspend_ops zx_suspend_ops = {
+ .valid = zx_suspend_ops_valid,
+ .begin = zx_suspend_ops_begin,
+ .prepare = zx_suspend_ops_prepare,
+ .enter = zx_suspend_ops_enter,
+ .finish = zx_suspend_ops_finish,
+ .end = zx_suspend_ops_end,
+};
+
+void zx_suspend_init(void)
+{
+ pr_info("[SLP] Power/SPM_INIT \n");
+
+ suspend_set_ops(&zx_suspend_ops);
+
+ pm_write_reg(AP_SLEEP_IN_TIMERS, 0);
+ pm_write_reg(AP_SLEEP_OUT_TIMERS, 0);
+}
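+/* suspend_set_ops() registers the callbacks above with the kernel PM
+ * core; a system suspend then runs begin -> prepare -> enter -> finish
+ * -> end in that order. */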
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-v7.S b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-v7.S
new file mode 100644
index 0000000..bcf7844
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-v7.S
@@ -0,0 +1,883 @@
+/*
+ * ZTE CPU low power powerdown and powerup helper code.
+ *
+ * Copyright (C) 2013 ZTE, Inc.
+ * Written by ZXP
+ *
+ * This program is free software,you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+#include <asm/vfp.h>
+
+
+/* Aliases for mode encodings - do not change */
+#define MODE_USR 0x10
+#define MODE_FIQ 0x11
+#define MODE_IRQ 0x12
+#define MODE_SVC 0x13
+#define MODE_ABT 0x17
+#define MODE_UND 0x1B
+#define MODE_SYS 0x1F
+
+#define MODE_MON 0x16 /* A-profile (Security Extensions) only */
+#define SCR_NS 0x01 /* A-profile (Security Extensions) only */
+
+#define TTBCR_EAE (1<<31) /* Are we using LPAE? */
+
+#define CACHE_LINE_SIZE 32 /* TODO: remove this */
+#define SCTLR_I (1<<12)
+
+
+ENTRY(save_performance_monitors)
+
+ stmfd sp!, {r4, r8, r9, r10}
+
+ /* Ignore:
+ * Count Enable Clear Register
+ * Software Increment Register
+ * Interrupt Enable Clear Register
+ */
+
+ mrc p15,0,r8,c9,c12,0 /* PMon: Control Register */
+ bic r1,r8,#1
+ mcr p15,0,r1,c9,c12,0 /* disable counter updates from here */
+ isb /* 0b0 => PMCR<0> */
+ mrc p15,0,r9,c9,c12,3 /* PMon: Overflow Flag Status Reg */
+ mrc p15,0,r10,c9,c12,5 /* PMon: Event Counter Selection Reg */
+ stm r0!, {r8-r10}
+ ubfx r9,r8,#11,#5 /* extract # of event counters, N */
+ tst r9, r9
+ beq next_backup
+
+loop_backup:
+ subs r9,r9,#1 /* decrement N */
+ mcr p15,0,r9,c9,c12,5 /* PMon: select CounterN */
+ isb
+ mrc p15,0,r3,c9,c13,1 /* PMon: save Event Type register */
+ mrc p15,0,r4,c9,c13,2 /* PMon: save Event Counter register */
+ stm r0!, {r3,r4}
+ bne loop_backup
+
+next_backup:
+ mrc p15,0,r1,c9,c13,0 /* PMon: Cycle Count Register */
+ mrc p15,0,r2,c9,c14,0 /* PMon: User Enable Register */
+ mrc p15,0,r3,c9,c14,1 /* PMon: Interrupt Enable Set Reg */
+ mrc p15,0,r4,c9,c12,1 /* PMon: Count Enable Set Register */
+ stm r0!, {r1-r4}
+
+ ldmfd sp!, {r4, r8, r9, r10}
+ bx lr
+ENDPROC(save_performance_monitors)
+
+ENTRY(restore_performance_monitors)
+
+ stmfd sp!, {r4-r5, r8-r10, lr}
+ /* NOTE: all counters disabled by PMCR<0> == 0 on reset */
+
+ /* Restore performance counters */
+ ldm r0!,{r8-r10} /* recover first block of PMon context */
+ /* (PMCR, PMOVSR, PMSELR) */
+ mov r1, #0 /* generate register of all 0's */
+ mvn r2, #0 /* generate register of all 1's */
+ mcr p15,0,r2,c9,c14,2 /* disable all counter related interrupts */
+ mcr p15,0,r2,c9,c12,3 /* clear all overflow flags */
+ isb
+
+ ubfx r12,r8,#11,#5 /* extract # of event counters, N (0-31) */
+ tst r12, r12
+ beq 20f
+ mov r3, r12 /* for N >0, generate a 2nd copy of N */
+ mov r4, #1
+ lsl r4, r4, r3
+ sub r4, r4, #1 /* set bits<N-1:0> to all 1's */
+
+0:
+ subs r3,r3,#1 /* decrement N */
+ mcr p15,0,r3,c9,c12,5 /* select Event CounterN */
+ isb
+ mrc p15,0,r5,c9,c13,1 /* read Event Type register */
+ bfc r5,#0,#8
+ mcr p15,0,r5,c9,c13,1 /* set Event Type to 0x0 */
+ mcr p15,0,r2,c9,c13,2 /* set Event Counter to all 1's */
+ isb
+ bne 0b
+
+ mov r3, #1
+ bic r5, r9, #1<<31
+ mcr p15,0,r5,c9,c12,1 /* enable Event Counters */
+ /* (PMOVSR bits set) */
+ mcr p15,0,r3,c9,c12,0 /* set the PMCR global enable bit */
+ isb
+ mcr p15,0,r9,c9,c12,4 /* set event count overflow bits */
+ isb
+ mcr p15,0,r4,c9,c12,2 /* disable Event Counters */
+
+ /* restore the event counters */
+10:
+ subs r12,r12,#1 /* decrement N */
+ mcr p15,0,r12,c9,c12,5 /* select Event CounterN */
+ isb
+ ldm r0!,{r3-r4}
+ mcr p15,0,r3,c9,c13,1 /* restore Event Type */
+ mcr p15,0,r4,c9,c13,2 /* restore Event Counter */
+ isb
+ bne 10b
+
+20:
+ tst r9, #0x80000000 /* check for cycle count overflow flag */
+ beq 40f
+ mcr p15,0,r2,c9,c13,0 /* set Cycle Counter to all 1's */
+ isb
+ mov r3, #0x80000000
+ mcr p15,0,r3,c9,c12,1 /* enable the Cycle Counter */
+ isb
+
+30:
+ mrc p15,0,r4,c9,c12,3 /* check cycle count overflow now set */
+ movs r4,r4 /* test bit<31> */
+ bpl 30b
+ mcr p15,0,r3,c9,c12,2 /* disable the Cycle Counter */
+
+40:
+ mcr p15,0,r1,c9,c12,0 /* clear the PMCR global enable bit */
+ isb
+
+ /* restore the remaining PMon registers */
+ ldm r0!,{r1-r4}
+ mcr p15,0,r1,c9,c13,0 /* restore Cycle Count Register */
+ mcr p15,0,r2,c9,c14,0 /* restore User Enable Register */
+ mcr p15,0,r3,c9,c14,1 /* restore Interrupt Enable Set Reg */
+ mcr p15,0,r4,c9,c12,1 /* restore Count Enable Set Register */
+ mcr p15,0,r10,c9,c12,5 /* restore Event Counter Selection */
+ isb
+ mcr p15,0,r8,c9,c12,0 /* restore the PM Control Register */
+ isb
+
+ ldmfd sp!, {r4-r5, r8-r10, pc}
+ENDPROC(restore_performance_monitors)
+
+
+
+ENTRY(save_banked_registers)
+ mrs r2, CPSR /* save current mode */
+ cps #MODE_SYS /* switch to System mode */
+ str sp,[r0], #4 /* save the User SP */
+ str lr,[r0], #4 /* save the User LR */
+ cps #MODE_ABT /* switch to Abort mode */
+ str sp,[r0], #4 /* save the current SP */
+ mrs r3,SPSR
+ stm r0!,{r3,lr} /* save the current SPSR, LR */
+ cps #MODE_UND /* switch to Undefined mode */
+ str sp,[r0], #4 /* save the current SP */
+ mrs r3,SPSR
+ stm r0!,{r3,lr} /* save the current SPSR, LR */
+ cps #MODE_IRQ /* switch to IRQ mode */
+ str sp,[r0], #4 /* save the current SP */
+ mrs r3,SPSR
+ stm r0!,{r3,lr} /* save the current SPSR, LR */
+ cps #MODE_FIQ /* switch to FIQ mode */
+ str SP,[r0], #4 /* save the current SP */
+ mrs r3,SPSR
+ stm r0!,{r3,r8-r12,lr} /* save the current SPSR,r8-r12,LR */
+ msr CPSR_cxsf, r2 /* switch back to original mode */
+
+ bx lr
+ENDPROC(save_banked_registers)
+
+ENTRY(restore_banked_registers)
+ mrs r2, CPSR /* save current mode */
+ cps #MODE_SYS /* switch to System mode */
+ ldr sp,[r0],#4 /* restore the User SP */
+ ldr lr,[r0],#4 /* restore the User LR */
+ cps #MODE_ABT /* switch to Abort mode */
+ ldr sp,[r0],#4 /* restore the current SP */
+ ldm r0!,{r3,lr} /* restore the current LR */
+ msr SPSR_fsxc,r3 /* restore the current SPSR */
+ cps #MODE_UND /* switch to Undefined mode */
+ ldr sp,[r0],#4 /* restore the current SP */
+ ldm r0!,{r3,lr} /* restore the current LR */
+ msr SPSR_fsxc,r3 /* restore the current SPSR */
+ cps #MODE_IRQ /* switch to IRQ mode */
+ ldr sp,[r0],#4 /* restore the current SP */
+ ldm r0!,{r3,lr} /* restore the current LR */
+ msr SPSR_fsxc,r3 /* restore the current SPSR */
+ cps #MODE_FIQ /* switch to FIQ mode */
+ ldr sp,[r0],#4 /* restore the current SP */
+ ldm r0!,{r3,r8-r12,lr} /* restore the current SPSR,r8-r12,LR */
+ msr SPSR_fsxc,r3 /* restore the current SPSR */
+ msr CPSR_cxsf, r2 /* switch back to original mode */
+
+//0
+ bx lr
+ENDPROC(restore_banked_registers)
+
+
+
+ENTRY(save_cp15)
+ /* CSSELR Cache Size Selection Register */
+ mrc p15,2,r3,c0,c0,0
+ str r3,[r0], #4
+
+ /* IMPLEMENTATION DEFINED - proprietary features:
+ * (CP15 register 15, TCM support, lockdown support, etc.)
+ */
+
+ /* NOTE: IMP DEF registers might have save and restore order that relate
+ * to other CP15 registers or logical grouping requirements and can
+ * therefore occur at any point in this sequence.
+ */
+ bx lr
+ENDPROC(save_cp15)
+
+ENTRY(restore_cp15)
+ /* CSSELR Cache Size Selection Register */
+ ldr r3,[r0], #4
+ mcr p15,2,r3,c0,c0,0
+
+ bx lr
+ENDPROC(restore_cp15)
+
+
+ /* Function called with two arguments:
+ * r0 contains address to store control registers
+ * r1 is non-zero if we are Secure
+ */
+ENTRY(save_control_registers)
+ cmp r1, #0 /* Are we Secure? */
+ mrc p15,0,r2,c1,c0,1 /* ACTLR - Auxiliary Control Register */
+ mrc p15,0,r3,c1,c0,0 /* SCTLR - System Control Register */
+ mrc p15,0,r12,c1,c0,2 /* CPACR - Coprocessor Access Control Register */
+ stm r0!, {r2-r3, r12}
+ mrcne p15,0,r1,c12,c0,1 /* MVBAR - Monitor Vector Base Address Register */
+ mrcne p15,0,r2,c1,c1,0 /* Secure Configuration Register */
+ mrcne p15,0,r3,c1,c1,1 /* Secure Debug Enable Register */
+ mrcne p15,0,r12,c1,c1,2 /* Non-Secure Access Control Register */
+ stmne r0!, {r1-r3,r12}
+ mrc p14,6,r1,c0,c0,0 /* TEECR */
+ mrc p14,6,r2,c1,c0,0 /* TEEHBR */
+ mrc p14,7,r3,c1,c0,0 /* JOSCR */
+ mrc p14,7,r12,c2,c0,0 /* JMCR */
+ stm r0!, {r1-r3,r12}
+ bx lr
+ENDPROC(save_control_registers)
+
+
+ /* Function called with two arguments:
+ * r0 contains address to read control registers
+ * r1 is non-zero if we are Secure
+ */
+ENTRY(restore_control_registers)
+ cmp r1, #0 /* Are we Secure? */
+ ldm r0!, {r2-r3, r12}
+ mcr p15,0,r2,c1,c0,1 /* ACTLR - Auxiliary Control Register */
+ mcr p15,0,r3,c1,c0,0 /* SCTLR - System Control Register */
+ mcr p15,0,r12,c1,c0,2 /* CPACR - Coprocessor Access Control Register */
+ ldmne r0!, {r1-r3,r12}
+ mcrne p15,0,r1,c12,c0,1 /* MVBAR - Monitor Vector Base Address Register */
+ mcrne p15,0,r2,c1,c1,0 /* Secure Configuration Register */
+ mcrne p15,0,r3,c1,c1,1 /* Secure Debug Enable Register */
+ mcrne p15,0,r12,c1,c1,2 /* Non-Secure Access Control Register */
+ ldm r0!, {r1-r3,r12}
+ mcr p14,6,r1,c0,c0,0 /* TEECR */
+ mcr p14,6,r2,c1,c0,0 /* TEEHBR */
+ mcr p14,7,r3,c1,c0,0 /* JOSCR */
+ mcr p14,7,r12,c2,c0,0 /* JMCR */
+ isb
+ bx lr
+ENDPROC(restore_control_registers)
+
+ENTRY(save_mmu)
+ stmfd sp!, {r4, r5, r6, r7}
+ /* ASSUMPTION: no useful fault address / fault status information
+
+ mrc p15,0,r4,c12,c0,0 /* VBAR */
+ mrc p15,0,r5,c2,c0,2 /* TTBCR */
+
+ tst r5, #TTBCR_EAE /* Are we using LPAE? */
+
+ /* save 32 or 64 bit TTBRs */
+ mrceq p15,0,r6,c2,c0,0 /* 32 bit TTBR0 */
+ mrceq p15,0,r7,c2,c0,1 /* 32 bit TTBR1 */
+ mrrcne p15,0,r6,r7,c2 /* 64 bit TTBR0 */
+ stm r0!, {r4-r7}
+ mrrcne p15,1,r6,r7,c2 /* 64 bit TTBR1 */
+ stmne r0!, {r6-r7}
+
+ mrc p15,0,r4,c3,c0,0 /* DACR */
+ mrc p15,0,r5,c7,c4,0 /* PAR */
+ mrc p15,0,r6,c10,c2,0 /* PRRR */
+ mrc p15,0,r7,c10,c2,1 /* NMRR */
+ stm r0!, {r4-r7}
+
+ /* TODO: IMPLEMENTATION DEFINED - TCM, lockdown and performance monitor support
+ * CP15 registers 9 and 11
+ */
+
+ mrc p15,0,r4,c13,c0,1 /* CONTEXTIDR */
+ mrc p15,0,r5,c13,c0,2 /* TPIDRURW */
+ mrc p15,0,r6,c13,c0,3 /* TPIDRURO */
+ mrc p15,0,r7,c13,c0,4 /* TPIDRPRW */
+ stm r0!, {r4-r7}
+
+ ldmfd sp!, {r4, r5, r6, r7}
+ bx lr
+ENDPROC(save_mmu)
+
+
+
+ENTRY(restore_mmu)
+
+ stmfd sp!, {r4, r5, r6, r7}
+ ldm r0!, {r4-r7}
+ mcr p15,0,r4,c12,c0,0 /* VBAR */
+ mcr p15,0,r5,c2,c0,2 /* TTBCR */
+
+ tst r5, #TTBCR_EAE /* Are we using LPAE? */
+
+ /* restore 32 or 64 bit TTBRs */
+ mcreq p15,0,r6,c2,c0,0 /* 32 bit TTBR0 */
+ mcreq p15,0,r7,c2,c0,1 /* 32 bit TTBR1 */
+ mcrrne p15,0,r6,r7,c2 /* 64-bit TTBR0 */
+ ldmne r0!, {r6-r7}
+ mcrrne p15,1,r6,r7,c2 /* 64-bit TTBR1 */
+
+ ldm r0!, {r4-r7}
+ mcr p15,0,r4,c3,c0,0 /* DACR */
+ mcr p15,0,r5,c7,c4,0 /* PAR */
+ mcr p15,0,r6,c10,c2,0 /* PRRR */
+ mcr p15,0,r7,c10,c2,1 /* NMRR */
+
+ /* TODO: IMPLEMENTATION DEFINED - TCM, lockdown and performance monitor support
+ * CP15 registers 9 and 11
+ */
+
+ ldm r0!, {r4-r7}
+ mcr p15,0,r4,c13,c0,1 /* CONTEXTIDR */
+ mcr p15,0,r5,c13,c0,2 /* TPIDRURW */
+ mcr p15,0,r6,c13,c0,3 /* TPIDRURO */
+ mcr p15,0,r7,c13,c0,4 /* TPIDRPRW */
+
+ ldmfd sp!, {r4, r5, r6, r7}
+ bx lr
+ENDPROC(restore_mmu)
+
+#if 0
+ENTRY(save_vfp)
+ /* FPU state save/restore.
+ * FPSID,MVFR0 and MVFR1 don't get serialized/saved (Read Only).
+ */
+ mrc p15,0,r3,c1,c0,2 /* CPACR allows CP10 and CP11 access */
+ ORR r2,r3,#0xF00000
+ mcr p15,0,r2,c1,c0,2
+ isb
+ mrc p15,0,r2,c1,c0,2
+ and r2,r2,#0xF00000
+ cmp r2,#0xF00000
+ beq f0
+ movs r2, #0
+ b f2
+
+0:
+ /* Save configuration registers and enable. */
+ vmrs r12,FPEXC
+ str r12,[r0],#4 /* Save the FPEXC */
+ /* Enable FPU access to save/restore the other registers. */
+ ldr r2,=0x40000000
+ vmsr FPEXC,r2
+ vmrs r2,FPSCR
+ str r2,[r0],#4 /* Save the FPSCR */
+ /* Store the VFP-D16 registers. */
+ vstm r0!, {D0-D15}
+ /* Check for Advanced SIMD/VFP-D32 support */
+ vmrs r2,MVFR0
+ and r2,r2,#0xF /* extract the A_SIMD bitfield */
+ cmp r2, #0x2
+ blt f1
+ /* Store the Advanced SIMD/VFP-D32 additional registers. */
+ vstm r0!, {D16-D31}
+
+ /* IMPLEMENTATION DEFINED: save any subarchitecture defined state
+ * NOTE: Don't change the order of the FPEXC and CPACR restores
+ */
+1:
+ vmsr FPEXC,r12 /* Restore the original En bit of FPU. */
+2:
+ mcr p15,0,r3,c1,c0,2 /* Restore the original CPACR value. */
+ bx lr
+ENDPROC(save_vfp)
+
+
+restore_vfp FUNCTION
+ /* FPU state save/restore. Obviously FPSID,MVFR0 and MVFR1 don't get
+ * serialized (RO).
+ * Modify CPACR to allow CP10 and CP11 access
+ */
+ mrc p15,0,r1,c1,c0,2
+ ORR r2,r1,#0x00F00000
+ mcr p15,0,r2,c1,c0,2
+ /* Enable FPU access to save/restore the rest of registers. */
+ ldr r2,=0x40000000
+ vmsr FPEXC, r2
+ /* Recover FPEXC and FPSCR. These will be restored later. */
+ ldm r0!,{r3,r12}
+ /* Restore the VFP-D16 registers. */
+ vldm r0!, {D0-D15}
+ /* Check for Advanced SIMD/VFP-D32 support */
+ vmrs r2, MVFR0
+ and r2,r2,#0xF /* extract the A_SIMD bitfield */
+ cmp r2, #0x2
+ blt f0
+
+ /* Store the Advanced SIMD/VFP-D32 additional registers. */
+ vldm r0!, {D16-D31}
+
+ /* IMPLEMENTATION DEFINED: restore any subarchitecture defined state */
+
+0 /* Restore configuration registers and enable.
+ * Restore FPSCR _before_ FPEXC since FPEXC could disable FPU
+ * and make setting FPSCR unpredictable.
+ */
+ vmsr FPSCR,r12
+ vmsr FPEXC,r3 /* Restore FPEXC after FPSCR */
+ /* Restore CPACR */
+ mcr p15,0,r1,c1,c0,2
+ bx lr
+ ENDFUNC
+#endif
+
+ENTRY(save_vfp)
+#if 0 //zxp
+
+ /* FPU state save/restore. */
+ /* FPSID,MVFR0 and MVFR1 don't get serialized/saved (Read Only). */
+ mrc p15,0,r3,c1,c0,2 /* CPACR allows CP10 and CP11 access */
+ ORR r2,r3,#0xF00000
+ mcr p15,0,r2,c1,c0,2
+ isb
+ mrc p15,0,r2,c1,c0,2
+ and r2,r2,#0xF00000
+ cmp r2,#0xF00000
+ beq 0f
+ movs r2, #0
+ b 2f
+
+ /* Save configuration registers and enable. */
+0:
+ FMRX r12,FPEXC /* vmrs r12,FPEXC */
+ str r12,[r0],#4 /* Save the FPEXC */
+ /* Enable FPU access to save/restore the other registers. */
+ ldr r2,=0x40000000
+ FMXR FPEXC,r2 /* vmsr FPEXC,r2 */
+ FMRX r2,FPSCR /* vmrs r2,FPSCR */
+ str r2,[r0],#4 /* Save the FPSCR */
+ /* Store the VFP-D16 registers. */
+ vstm r0!, {D0-D15}
+ /* Check for Advanced SIMD/VFP-D32 support */
+ FMRX r2,MVFR0 /* vmrs r2,MVFR0 */
+ and r2,r2,#0xF /* extract the A_SIMD bitfield */
+ cmp r2, #0x2
+ blt 1f
+ /* Store the Advanced SIMD/VFP-D32 additional registers. */
+ vstm r0!, {D16-D31}
+
+ /* IMPLEMENTATION DEFINED: save any subarchitecture defined state */
+ /* NOTE: Don't change the order of the FPEXC and CPACR restores */
+
+ /* Restore the original En bit of FPU. */
+1:
+ FMXR FPEXC,r12 /* vmsr FPEXC,r12 */
+
+ /* Restore the original CPACR value. */
+2:
+ mcr p15,0,r3,c1,c0,2
+#endif
+ bx lr
+ENDPROC(save_vfp)
+
+
+ENTRY(restore_vfp)
+ /* FPU state save/restore. Obviously FPSID,MVFR0 and MVFR1 don't get
+ * serialized (RO).
+ * Modify CPACR to allow CP10 and CP11 access
+ */
+#if 0 //zxp
+ mrc p15,0,r1,c1,c0,2
+ ORR r2,r1,#0x00F00000
+ mcr p15,0,r2,c1,c0,2
+ /* Enable FPU access to save/restore the rest of registers. */
+ ldr r2,=0x40000000
+ FMXR FPEXC, r2 /* vmsr FPEXC, r2 */
+ /* Recover FPEXC and FPSCR. These will be restored later. */
+ ldm r0!,{r3,r12}
+ /* Restore the VFP-D16 registers. */
+ vldm r0!, {D0-D15}
+ /* Check for Advanced SIMD/VFP-D32 support */
+ FMRX r2, MVFR0 /* vmrs r2, MVFR0 */
+ and r2,r2,#0xF /* extract the A_SIMD bitfield */
+ cmp r2, #0x2
+ blt 0f
+
+ /* Store the Advanced SIMD/VFP-D32 additional registers. */
+ vldm r0!, {D16-D31}
+
+ /* IMPLEMENTATION DEFINED: restore any subarchitecture defined state */
+0:
+ /* Restore configuration registers and enable.
+ * Restore FPSCR _before_ FPEXC since FPEXC could disable FPU
+ * and make setting FPSCR unpredictable.
+ */
+ FMXR FPSCR,r12 /* vmsr FPSCR,r12 */
+ /* Restore FPEXC after FPSCR */
+ FMXR FPEXC,r3 /* vmsr FPEXC,r3 */
+ /* Restore CPACR */
+ /* will restore in mt_restore_control_registers */
+ /* mcr p15,0,r1,c1,c0,2 */
+#endif
+ bx lr
+ENDPROC(restore_vfp)
+
+
+ /* We assume that the OS is not using the Virtualization extensions,
+ * and that the warm boot code will set up CNTHCTL correctly.
+ * CNTP_CVAL will be preserved as it is in the always-on domain.
+ */
+
+ENTRY(save_generic_timer)
+ mrc p15,0,r2,c14,c2,1 /* read CNTP_CTL */
+ mrc p15,0,r3,c14,c2,0 /* read CNTP_TVAL */
+ mrc p15,0,r12,c14,c1,0 /* read CNTKCTL */
+ stm r0!, {r2, r3, r12}
+ bx lr
+ENDPROC(save_generic_timer)
+
+
+ENTRY(restore_generic_timer)
+ ldm r0!, {r2, r3, r12}
+ mcr p15,0,r3,c14,c2,0 /* write CNTP_TVAL */
+ mcr p15,0,r12,c14,c1,0 /* write CNTKCTL */
+ mcr p15,0,r2,c14,c2,1 /* write CNTP_CTL */
+ bx lr
+ENDPROC(restore_generic_timer)
+
+
+
+ /* This function disables L1 data caching, then cleans and invalidates
+ the whole L1 data cache.
+ */
+
+ENTRY(disable_clean_inv_dcache_v7_l1)
+ stmfd sp!, {r4, lr}
+
+ /* Disable L1 cache */
+ dsb
+ mrc p15,0,r3,c1,c0,0
+ bic r3, #4 /* Clear C bit */
+ mcr p15,0,r3,c1,c0,0
+ dsb
+
+ /* No more Data cache allocations can happen at L1.
+ Until we finish cleaning the Inner cache, any accesses to dirty data
+ (e.g. by translation table walks) may get the wrong (Outer) data, so
+ we have to be sure everything that might be accessed is clean.
+ We already know that the translation tables are clean (see late_init).
+ */
+
+ mov r0, #0 /* Select L1 Data/Unified cache */
+ mcr p15,2,r0,c0,c0,0
+ mrc p15,1,r0,c0,c0,0 /* Read size */
+ ubfx r3, r0, #13, #15 /* sets - 1 */
+ add r3, r3, #1 /* sets */
+ ubfx r4, r0, #0, #3 /* log2(words per line) - 2 */
+ add r4, r4, #4 /* set shift = log2(bytes per line) */
+ ubfx r2, r0, #3, #10 /* ways - 1 */
+ clz r12, r2 /* way shift */
+ add r2, r2, #1 /* ways */
+
+ /* r2,r3 inner, outer loop targets, r1 inner loop counter, r0 zero */
+5:
+ cmp r3, #0
+ beq 20f
+ sub r3, r3, #1
+ mov r1, r2
+
+10:
+ cmp r1, #0
+ beq 5b
+ sub r1, r1, #1
+ mov r0, r1, lsl r12 /* Fill in Way field */
+ orr r0, r0, r3, lsl r4 /* Fill in Set field */
+ mcr p15,0,r0,c7,c14,2 /* DCCISW */
+ b 10b
+
+20:
+ dsb
+ ldmfd sp!, {r4, lr}
+ bx lr
+ENDPROC(disable_clean_inv_dcache_v7_l1)
+
+
+ENTRY(invalidate_icache_v7_pou)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
+ bx lr
+ENDPROC(invalidate_icache_v7_pou)
+
+
+ENTRY(invalidate_icache_v7)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
+ bx lr
+ENDPROC(invalidate_icache_v7)
+ENTRY(enable_icache_v7)
+ mrc p15, 0, r0, c1, c0, 0 //enable Icache
+ movw r1, #SCTLR_I
+ orr r0, r0, r1
+ mcr p15, 0, r0, c1, c0, 0
+ bx lr
+ENDPROC(enable_icache_v7)
+
+
+ENTRY(invalidate_dcache_v7_all)
+ /* Must iterate over the caches in order to synthesise a complete invalidation
+ of data/unified cache
+ */
+//zxp stmfd sp!, {r4-r11} /* zxp delete for no sp to use */
+ mrc p15, 1, r0, c0, c0, 1 /* read clidr */
+ ands r3, r0, #0x7000000 /* extract loc from clidr */
+ mov r3, r3, lsr #23 /* left align loc bit field */
+ beq finished /* if loc is 0, then no need to clean */
+ mov r10, #0 /* start clean at cache level 0 (in r10) */
+loop1:
+ add r2, r10, r10, lsr #1 /* work out 3x current cache level */
+ mov r12, r0, lsr r2 /* extract cache type bits from clidr */
+ and r12, r12, #7 /* mask of bits for current cache only */
+ cmp r12, #2 /* see what cache we have at this level */
+ blt skip /* skip if no cache, or just i-cache */
+ mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
+ mov r12, #0
+ mcr p15, 0, r12, c7, c5, 4 /* prefetchflush to sync new cssr&csidr */
+ mrc p15, 1, r12, c0, c0, 0 /* read the new csidr */
+ and r2, r12, #7 /* extract the length of the cache lines */
+ add r2, r2, #4 /* add 4 (line length offset) */
+ ldr r6, =0x3ff
+ ands r6, r6, r12, lsr #3 /* find maximum number on the way size */
+ clz r5, r6 /* find bit pos of way size increment */
+ ldr r7, =0x7fff
+ ands r7, r7, r12, lsr #13 /* extract max number of the index size */
+loop2:
+ mov r8, r6 /* create working copy of max way size */
+loop3:
+ orr r11, r10, r8, lsl r5 /* factor way and cache number into r11 */
+ orr r11, r11, r7, lsl r2 /* factor index number into r11 */
+ mcr p15, 0, r11, c7, c6, 2 /* invalidate by set/way */
+ subs r8, r8, #1 /* decrement the way */
+ bge loop3
+ subs r7, r7, #1 /* decrement the index */
+ bge loop2
+skip:
+ add r10, r10, #2 /* increment cache number */
+ cmp r3, r10
+ bgt loop1
+finished:
+ mov r10, #0
+
+ mcr p15, 0, r10, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r10, c8, c7, 0 /* invalidate I + D TLBs */
+ mcr p15, 0, r10, c2, c0, 2 /* TTB control register */
+//zxp ldmfd sp!, {r4-r11}
+ bx lr
+ENDPROC(invalidate_dcache_v7_all)
+
+
+ENTRY(disable_clean_inv_dcache_v7_all)
+ /* Must iterate over the caches in order to synthesise a complete clean
+ of data/unified cache */
+ stmfd sp!, {r4-r11}
+
+ /* Disable integrated data/unified cache */
+ dsb
+ mrc p15, 0, r3, c1, c0, 0
+ bic r3, #4 /* Clear C bit */
+ mcr p15, 0, r3, c1, c0, 0
+ isb
+
+ /* No more Data cache allocations can happen.
+ Until we finish cleaning the cache, any accesses to dirty data
+ (e.g. by translation table walks) may get the wrong (Outer) data, so
+ we have to be sure everything that might be accessed is clean.
+ We already know that the translation tables are clean (see late_init).
+ */
+
+
+ mrc p15, 1, r0, c0, c0, 1 /* read clidr */
+ ands r3, r0, #0x7000000 /* extract loc from clidr */
+ mov r3, r3, lsr #23 /* left align loc bit field */
+ beq 50f /* if loc is 0, then no need to clean */
+ mov r10, #0 /* start clean at cache level 0 (in r10) */
+10:
+ add r2, r10, r10, lsr #1 /* work out 3x current cache level */
+ mov r12, r0, lsr r2 /* extract cache type bits from clidr */
+ and r12, r12, #7 /* mask of bits for current cache only */
+ cmp r12, #2 /* see what cache we have at this level */
+ blt 40f /* skip if no cache, or just i-cache */
+ mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
+ mov r12, #0
+ mcr p15, 0, r12, c7, c5, 4 /* prefetchflush to sync new cssr&csidr */
+ mrc p15, 1, r12, c0, c0, 0 /* read the new csidr */
+ and r2, r12, #7 /* extract the length of the cache lines */
+ add r2, r2, #4 /* add 4 (line length offset) */
+ ldr r6, =0x3ff
+ ands r6, r6, r12, lsr #3 /* find maximum number on the way size */
+ clz r5, r6 /* find bit pos of way size increment */
+ ldr r7, =0x7fff
+ ands r7, r7, r12, lsr #13 /* extract max number of the index size */
+20:
+ mov r8, r6 /* create working copy of max way size */
+30:
+ orr r11, r10, r8, lsl r5 /* factor way and cache number into r11 */
+ orr r11, r11, r7, lsl r2 /* factor index number into r11 */
+ mcr p15, 0, r11, c7, c14, 2 /* clean & invalidate by set/way */
+ subs r8, r8, #1 /* decrement the way */
+ bge 30b
+ subs r7, r7, #1 /* decrement the index */
+ bge 20b
+40:
+ add r10, r10, #2 /* increment cache number */
+ cmp r3, r10
+ bgt 10b
+50:
+ mov r10, #0
+ mcr p15, 0, r10, c7, c10, 4 /* drain write buffer */
+ ldmfd sp!, {r4-r11}
+ bx lr
+ENDPROC(disable_clean_inv_dcache_v7_all)
+
+
+ /* This function cleans the whole L1 data cache */
+ENTRY(clean_dcache_v7_l1)
+ stmfd sp!, {r4, lr}
+
+ mov r0, #0 /* Select L1 Data/Unified cache */
+ mcr p15,2,r0,c0,c0,0
+ mrc p15,1,r0,c0,c0,0 /* Read size (CCSIDR) */
+ ubfx r3, r0, #13, #15 /* sets - 1 */
+ add r3, r3, #1 /* sets */
+ ubfx r4, r0, #0, #3 /* log2(words per line) - 2 */
+ add r4, r4, #4 /* set shift = log2(bytes per line) */
+ ubfx r2, r0, #3, #10 /* ways - 1 */
+ clz r12, r2 /* way shift */
+ add r2, r2, #1 /* ways */
+
+ /* r2,r3 inner, outer loop targets, r1 inner loop counter, r0 zero */
+0:
+ cmp r3, #0
+ beq 20f
+ sub r3, r3, #1
+ mov r1, r2
+
+10:
+ cmp r1, #0
+ beq 0b
+ sub r1, r1, #1
+ mov r0, r1, lsl r12 /* Fill in Way field */
+ orr r0, r0, r3, lsl r4 /* Fill in Set field */
+ mcr p15,0,r0,c7,c10,2 /* DCCSW */
+ b 10b
+
+20:
+ dsb
+ pop {r4, lr}
+ bx lr
+ENDPROC(clean_dcache_v7_l1)
+
+#if 0 //zxp
+ /* This function cleans a single line from the L1 dcache */
+clean_mva_dcache_v7_l1
+ mcr p15,0,r0,c7,c10,1 /* DCCMVAC */
+ bx lr
+
+enter_secure_monitor_mode FUNCTION
+ mov r0, lr
+ mov r1, sp
+ smc #0
+appf_smc_handler
+ /* We are now in Monitor mode, make sure we're Secure */
+ mrc p15, 0, r12, c1, c1, 0
+ bic r12, #SCR_NS
+ mcr p15, 0, r12, c1, c1, 0
+ /* Restore sp and return - stack must be uncached or in NS memory! */
+ mov sp, r1
+ bx r0
+ ENDFUNC
+
+enter_nonsecure_svc_mode FUNCTION
+ /* Copy the Monitor mode sp and lr values */
+ mov r2, lr
+ mov r3, sp
+ mrc p15, 0, r1, c1, c1, 0
+ orr r1, #SCR_NS
+ mcr p15, 0, r1, c1, c1, 0
+ adr lr, non_secure
+ movs pc, lr
+non_secure
+ /* We are now in non-secure state */
+ /* Restore sp and return */
+ mov sp, r3
+ bx r2
+ ENDFUNC
+#endif
+
+ENTRY(save_a9_other)
+ mrc p15,0,r12,c15,c0,0 /* Read Power Control Register */
+ str r12, [r0], #4
+ mrc p15, 4, r12, c15, c0, 0 /* Read Configuration Base Address Register */
+ str r12, [r0], #4
+
+ mrc p15,0,r3,c0,c0,0 /* Read Main ID Register */
+ ubfx r3, r3, #20, #4 /* Extract major version number */
+ cmp r3, #2
+ blt 1f /* PLE only possible in r2p0 onwards */
+ mrc p15,0,r3,c11,c0,0 /* Read PLE IDR */
+ cmp r3, #0
+ beq 1f /* No PLE present */
+
+ mrc p15,0,r3,c11,c1,0 /* Read PLE UAR */
+ mrc p15,0,r12,c11,c1,1 /* Read PLE PCR */
+ stm r0!, {r3, r12}
+
+1:
+ bx lr
+ENDPROC(save_a9_other)
+
+
+ENTRY(restore_a9_other)
+ cmp r1, #0 /* Check we are secure */
+ ldr r12, [r0], #4
+ andne r12, r12, #0x01 /* We only restore the Dynamic Clock gating bit */
+ mcrne p15,0,r12,c15,c0,0 /* Write Power Control Register (if secure) */
+ ldr r12, [r0], #4
+ mcrne p15, 4, r12, c15, c0, 0 /* Write Configuration Base Address Register (if Secure) */
+
+ mrc p15,0,r3,c0,c0,0 /* Read Main ID Register */
+ ubfx r3, r3, #20, #4 /* Extract major version number */
+ cmp r3, #2
+ blt 1f /* PLE only possible in r2p0 onwards */
+ mrc p15,0,r3,c11,c0,0 /* Read PLE IDR */
+ cmp r3, #0
+ beq 1f /* No PLE present */
+
+ ldm r0!, {r3, r12}
+ mcr p15,0,r3,c11,c1,0 /* Write PLE UAR */
+ mcr p15,0,r12,c11,c1,1 /* Write PLE PCR */
+
+1:
+ bx lr
+ENDPROC(restore_a9_other)
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-v8.S b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-v8.S
new file mode 100644
index 0000000..8eeaa10
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm-v8.S
@@ -0,0 +1,1131 @@
+/*
+ * ZTE CPU low power powerdown and powerup helper code.
+ *
+ * Copyright (C) 2013 ZTE, Inc.
+ * Written by ZXP
+ *
+ * This program is free software,you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+#include <asm/vfp.h>
+
+
+/* Aliases for mode encodings - do not change */
+#define MODE_USR 0x10
+#define MODE_FIQ 0x11
+#define MODE_IRQ 0x12
+#define MODE_SVC 0x13
+#define MODE_ABT 0x17
+#define MODE_UND 0x1B
+#define MODE_SYS 0x1F
+
+#define MODE_MON 0x16 /* A-profile (Security Extensions) only */
+#define SCR_NS 0x01 /* A-profile (Security Extensions) only */
+
+#define TTBCR_EAE (1<<31) /* Are we using LPAE? */
+
+#define CACHE_LINE_SIZE 32 /* TODO: remove this */
+#define SCTLR_I (1<<12)
+
+.arm
+
+/*cpu interface */
+ENTRY(save_cpu_if)
+ /* mrc p15, 0, r3, c4, c6, 0 ICC PMR priority mask register */
+ str r3,[r0], #4
+ /* mrc p15, 0, r3, c12, c12, 3 BPR1*/
+ str r3,[r0], #4
+ /* mrc p15, 6, r3, c12, c12, 7 ICC MGRPEN1 */
+ str r3,[r0], #4
+ /* mrc p15, 0, r3, c12, c12, 7 int group enable reg 1 */
+ str r3,[r0], #4
+ /* mrc p15, 0, r3, c12, c12, 6 int group enable reg 0 */
+ str r3,[r0], #4
+ mrc p15, 0, r3, c12, c12, 5 /*ICC_SRE system reg enable */
+ str r3,[r0], #4
+ mrc p15, 6, r3, c12, c12, 5 /*ICC_MSRE system reg for EL3 */
+ str r3,[r0], #4
+ mrc p15, 0, r3, c12, c12, 4 /*CTRL*/
+ str r3,[r0], #4
+ bx lr
+ENDPROC(save_cpu_if)
+
+ENTRY(restore_cpu_if)
+ ldr r3,[r0], #4
+ mcr p15, 0, r3, c4, c6, 0 /* ICC PMR priority mask register */
+ ldr r3,[r0], #4
+ mcr p15, 0, r3, c12, c12, 3 /*BPR1*/
+ ldr r3,[r0], #4
+ mcr p15, 6, r3, c12, c12, 7 /* ICC MGRPEN1 */
+ ldr r3,[r0], #4
+ mcr p15, 0, r3, c12, c12, 7 /* int group enable reg 1 */
+ ldr r3,[r0], #4
+ mcr p15, 0, r3, c12, c12, 6 /* int group enable reg 0 */
+ ldr r3,[r0], #4
+ mcr p15, 0, r3, c12, c12, 5 /* ICC_SRE system reg enable */
+ ldr r3,[r0], #4
+ mcr p15, 6, r3, c12, c12, 5 /* ICC_MSRE system reg for EL3 */
+ ldr r3,[r0], #4
+ mcr p15, 0, r3, c12, c12, 4 /*CTRL*/
+ bx lr
+ENDPROC(restore_cpu_if)
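+/* The CPU interface save/restore and gic_mask/gic_unmask routines in
+ * this file use the GICv3 system-register (ICC_*) cp15 accessors rather
+ * than a memory-mapped CPU interface. */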
+
+ENTRY(gic_mask) /*cpu int disable*/
+ mov r0,#0
+ mcr p15, 0, r0, c12, c12, 6 /* ICC_IGRPEN0*/
+ mrc p15, 0, r0, c12, c12, 7 /* ICC_IGRPEN1*/
+ bic r0, r0, #1
+ mcr p15, 0, r0, c12, c12, 7 /* ICC_IGRPEN1*/
+ mrc p15, 6, r0, c12, c12, 7 /* ICC MGRPEN1 */
+ bic r0, r0, #1
+ mcr p15, 6, r0, c12, c12, 7 /* ICC MGRPEN1 */
+ bx lr
+ENDPROC(gic_mask)
+
+ENTRY(gic_unmask)
+ bx lr /* empty stub: return explicitly rather than fall through */
+ENDPROC(gic_unmask)
+
+ENTRY(save_performance_monitors)
+
+ stmfd sp!, {r4, r8, r9, r10}
+
+ /* Ignore:
+ * Count Enable Clear Register
+ * Software Increment Register
+ * Interrupt Enable Clear Register
+ */
+
+ mrc p15,0,r8,c9,c12,0 /* PMon: Control Register */
+ bic r1,r8,#1
+ mcr p15,0,r1,c9,c12,0 /* disable counter updates from here */
+ isb /* 0b0 => PMCR<0> */
+ mrc p15,0,r9,c9,c12,3 /* PMon: Overflow Flag Status Reg */
+ mrc p15,0,r10,c9,c12,5 /* PMon: Event Counter Selection Reg */
+ stm r0!, {r8-r10}
+ ubfx r9,r8,#11,#5 /* extract # of event counters, N */
+ tst r9, r9
+ beq next_backup
+
+loop_backup:
+ subs r9,r9,#1 /* decrement N */
+ mcr p15,0,r9,c9,c12,5 /* PMon: select CounterN */
+ isb
+ mrc p15,0,r3,c9,c13,1 /* PMon: save Event Type register */
+ mrc p15,0,r4,c9,c13,2 /* PMon: save Event Counter register */
+ stm r0!, {r3,r4}
+ bne loop_backup
+
+next_backup:
+ mrc p15,0,r1,c9,c13,0 /* PMon: Cycle Count Register */
+ mrc p15,0,r2,c9,c14,0 /* PMon: User Enable Register */
+ mrc p15,0,r3,c9,c14,1 /* PMon: Interrupt Enable Set Reg */
+ mrc p15,0,r4,c9,c12,1 /* PMon: Count Enable Set Register */
+ stm r0!, {r1-r4}
+
+ ldmfd sp!, {r4, r8, r9, r10}
+ bx lr
+ENDPROC(save_performance_monitors)
+
+ENTRY(restore_performance_monitors)
+
+ stmfd sp!, {r4-r5, r8-r10, lr}
+ /* NOTE: all counters disabled by PMCR<0> == 0 on reset */
+
+ /* Restore performance counters */
+ ldm r0!,{r8-r10} /* recover first block of PMon context */
+ /* (PMCR, PMOVSR, PMSELR) */
+ mov r1, #0 /* generate register of all 0's */
+ mvn r2, #0 /* generate register of all 1's */
+ mcr p15,0,r2,c9,c14,2 /* disable all counter related interrupts */
+ mcr p15,0,r2,c9,c12,3 /* clear all overflow flags */
+ isb
+
+ ubfx r12,r8,#11,#5 /* extract # of event counters, N (0-31) */
+ tst r12, r12
+ beq 20f
+ mov r3, r12 /* for N >0, generate a 2nd copy of N */
+ mov r4, #1
+ lsl r4, r4, r3
+ sub r4, r4, #1 /* set bits<N-1:0> to all 1's */
+
+0:
+ subs r3,r3,#1 /* decrement N */
+ mcr p15,0,r3,c9,c12,5 /* select Event CounterN */
+ isb
+ mrc p15,0,r5,c9,c13,1 /* read Event Type register */
+ bfc r5,#0,#8
+ mcr p15,0,r5,c9,c13,1 /* set Event Type to 0x0 */
+ mcr p15,0,r2,c9,c13,2 /* set Event Counter to all 1's */
+ isb
+ bne 0b
+
+ mov r3, #1
+ bic r5, r9, #1<<31
+ mcr p15,0,r5,c9,c12,1 /* enable Event Counters */
+ /* (PMOVSR bits set) */
+ mcr p15,0,r3,c9,c12,0 /* set the PMCR global enable bit */
+ isb
+ mcr p15,0,r9,c9,c12,4 /* set event count overflow bits */
+ isb
+ mcr p15,0,r4,c9,c12,2 /* disable Event Counters */
+
+ /* restore the event counters */
+10:
+ subs r12,r12,#1 /* decrement N */
+ mcr p15,0,r12,c9,c12,5 /* select Event CounterN */
+ isb
+ ldm r0!,{r3-r4}
+ mcr p15,0,r3,c9,c13,1 /* restore Event Type */
+ mcr p15,0,r4,c9,c13,2 /* restore Event Counter */
+ isb
+ bne 10b
+
+20:
+ tst r9, #0x80000000 /* check for cycle count overflow flag */
+ beq 40f
+ mcr p15,0,r2,c9,c13,0 /* set Cycle Counter to all 1's */
+ isb
+ mov r3, #0x80000000
+ mcr p15,0,r3,c9,c12,1 /* enable the Cycle Counter */
+ isb
+
+30:
+ mrc p15,0,r4,c9,c12,3 /* check cycle count overflow now set */
+ movs r4,r4 /* test bit<31> */
+ bpl 30b
+ mcr p15,0,r3,c9,c12,2 /* disable the Cycle Counter */
+
+40:
+ mcr p15,0,r1,c9,c12,0 /* clear the PMCR global enable bit */
+ isb
+
+ /* restore the remaining PMon registers */
+ ldm r0!,{r1-r4}
+ mcr p15,0,r1,c9,c13,0 /* restore Cycle Count Register */
+ mcr p15,0,r2,c9,c14,0 /* restore User Enable Register */
+ mcr p15,0,r3,c9,c14,1 /* restore Interrupt Enable Set Reg */
+ mcr p15,0,r4,c9,c12,1 /* restore Count Enable Set Register */
+ mcr p15,0,r10,c9,c12,5 /* restore Event Counter Selection */
+ isb
+ mcr p15,0,r8,c9,c12,0 /* restore the PM Control Register */
+ isb
+
+ ldmfd sp!, {r4-r5, r8-r10, pc}
+ENDPROC(restore_performance_monitors)
+
+
+
+ENTRY(save_banked_registers)
+ mrs r2, CPSR /* save current mode */
+ cps #MODE_SYS /* switch to System mode */
+ str sp,[r0], #4 /* save the User SP */
+ str lr,[r0], #4 /* save the User LR */
+ cps #MODE_ABT /* switch to Abort mode */
+ str sp,[r0], #4 /* save the current SP */
+ mrs r3,SPSR
+ stm r0!,{r3,lr} /* save the current SPSR, LR */
+ cps #MODE_UND /* switch to Undefined mode */
+ str sp,[r0], #4 /* save the current SP */
+ mrs r3,SPSR
+ stm r0!,{r3,lr} /* save the current SPSR, LR */
+ cps #MODE_IRQ /* switch to IRQ mode */
+ str sp,[r0], #4 /* save the current SP */
+ mrs r3,SPSR
+ stm r0!,{r3,lr} /* save the current SPSR, LR */
+ cps #MODE_FIQ /* switch to FIQ mode */
+ str SP,[r0], #4 /* save the current SP */
+ mrs r3,SPSR
+ stm r0!,{r3,r8-r12,lr} /* save the current SPSR,r8-r12,LR */
+ msr CPSR_cxsf, r2 /* switch back to original mode */
+
+ bx lr
+ENDPROC(save_banked_registers)
+
+ENTRY(restore_banked_registers)
+ mrs r2, CPSR /* save current mode */
+ cps #MODE_SYS /* switch to System mode */
+ ldr sp,[r0],#4 /* restore the User SP */
+ ldr lr,[r0],#4 /* restore the User LR */
+ cps #MODE_ABT /* switch to Abort mode */
+ ldr sp,[r0],#4 /* restore the current SP */
+ ldm r0!,{r3,lr} /* restore the current LR */
+ msr SPSR_fsxc,r3 /* restore the current SPSR */
+ cps #MODE_UND /* switch to Undefined mode */
+ ldr sp,[r0],#4 /* restore the current SP */
+ ldm r0!,{r3,lr} /* restore the current LR */
+ msr SPSR_fsxc,r3 /* restore the current SPSR */
+ cps #MODE_IRQ /* switch to IRQ mode */
+ ldr sp,[r0],#4 /* restore the current SP */
+ ldm r0!,{r3,lr} /* restore the current LR */
+ msr SPSR_fsxc,r3 /* restore the current SPSR */
+ cps #MODE_FIQ /* switch to FIQ mode */
+ ldr sp,[r0],#4 /* restore the current SP */
+ ldm r0!,{r3,r8-r12,lr} /* restore the current r8-r12,LR */
+ msr SPSR_fsxc,r3 /* restore the current SPSR */
+ msr CPSR_cxsf, r2 /* switch back to original mode */
+
+//0
+ bx lr
+ENDPROC(restore_banked_registers)
+
+
+
+ENTRY(save_cp15)
+ /* CSSELR Cache Size Selection Register */
+ mrc p15,2,r3,c0,c0,0
+ str r3,[r0], #4
+
+ /* IMPLEMENTATION DEFINED - proprietary features:
+ * (CP15 register 15, TCM support, lockdown support, etc.)
+ */
+
+ /* NOTE: IMP DEF registers might have save and restore order that relate
+ * to other CP15 registers or logical grouping requirements and can
+ * therefore occur at any point in this sequence.
+ */
+ bx lr
+ENDPROC(save_cp15)
+
+ENTRY(restore_cp15)
+ /* CSSELR Cache Size Selection Register */
+ ldr r3,[r0], #4
+ mcr p15,2,r3,c0,c0,0
+ bx lr
+ENDPROC(restore_cp15)
+
+
+ /* Function called with two arguments:
+ * r0 contains address to store control registers
+ * r1 is non-zero if we are Secure
+ */
+ENTRY(save_control_registers)
+ cmp r1, #0 /* Are we Secure? */
+ mrc p15,0,r2,c1,c0,1 /* ACTLR - Auxiliary Control Register */
+ mrc p15,0,r3,c1,c0,0 /* SCTLR - System Control Register */
+ mrc p15,0,r12,c1,c0,2 /* CPACR - Coprocessor Access Control Register */
+ stm r0!, {r2-r3, r12}
+ mrcne p15,0,r1,c12,c0,1 /* MVBAR - Monitor Vector Base Address Register */
+ mrcne p15,0,r2,c1,c1,0 /* Secure Configuration Register */
+ mrcne p15,0,r3,c1,c1,1 /* Secure Debug Enable Register */
+ mrcne p15,0,r12,c1,c1,2 /* Non-Secure Access Control Register */
+ stmne r0!, {r1-r3,r12}
+#if 0
+ mrc p14,6,r1,c0,c0,0 /* TEECR */
+ mrc p14,6,r2,c1,c0,0 /* TEEHBR */
+ mrc p14,7,r3,c1,c0,0 /* JOSCR */
+ mrc p14,7,r12,c2,c0,0 /* JMCR */
+ stm r0!, {r1-r3,r12}
+#else
+ mrc p15,0,r1,c5,c0,0 /* ifsr */
+ mrc p15,0,r2,c6,c0,0 /* dfar */
+ mrc p15,0,r3,c6,c0,2 /* ifar */
+ mrc p15,0,r12,c5,c1,0 /* adfsr */
+ stm r0!, {r1-r3, r12}
+
+ mrc p15,0,r1,c5,c1,1 /* aifsr */
+ mrc p15,1,r12,c15,c0,0 /* l2actrl */
+ stm r0!, {r1,r12}
+#endif
+
+ bx lr
+ENDPROC(save_control_registers)
+
+
+ /* Function called with two arguments:
+ * r0 contains address to read control registers
+ * r1 is non-zero if we are Secure
+ */
+ENTRY(restore_control_registers)
+ cmp r1, #0 /* Are we Secure? */
+ ldm r0!, {r2-r3, r12}
+ mcr p15,0,r2,c1,c0,1 /* ACTLR - Auxiliary Control Register */
+ mcr p15,0,r3,c1,c0,0 /* SCTLR - System Control Register */
+ mcr p15,0,r12,c1,c0,2 /* CPACR - Coprocessor Access Control Register */
+ ldmne r0!, {r1-r3,r12}
+ mcrne p15,0,r1,c12,c0,1 /* MVBAR - Monitor Vector Base Address Register */
+ mcrne p15,0,r2,c1,c1,0 /* Secure Configuration Register */
+ mcrne p15,0,r3,c1,c1,1 /* Secure Debug Enable Register */
+ mcrne p15,0,r12,c1,c1,2 /* Non-Secure Access Control Register */
+ #if 0
+ ldm r0!, {r1-r3,r12}
+ mcr p14,6,r1,c0,c0,0 /* TEECR */
+ mcr p14,6,r2,c1,c0,0 /* TEEHBR */
+ mcr p14,7,r3,c1,c0,0 /* JOSCR */
+ mcr p14,7,r12,c2,c0,0 /* JMCR */
+ #else
+ ldm r0!, {r1-r3, r12}
+ mcr p15,0,r1,c5,c0,0 /* ifsr */
+ mcr p15,0,r2,c6,c0,0 /* dfar */
+ mcr p15,0,r3,c6,c0,2 /* ifar */
+ mcr p15,0,r12,c5,c1,0 /* adfsr */
+
+ ldm r0!, {r1, r12}
+ mcr p15,0,r1,c5,c1,1 /* aifsr */
+ mcr p15,1,r12,c15,c0,0 /* l2actrl */
+ #endif
+ isb
+ bx lr
+ENDPROC(restore_control_registers)
+
+ENTRY(save_mmu)
+ stmfd sp!, {r4, r5, r6, r7}
+ /* ASSUMPTION: no useful fault address / fault status information*/
+
+ mrc p15,0,r4,c12,c0,0 /* VBAR */
+ mrc p15,0,r5,c2,c0,2 /* TTBCR */
+
+ tst r5, #TTBCR_EAE /* Are we using LPAE? */
+
+ /* save 32 or 64 bit TTBRs */
+ mrceq p15,0,r6,c2,c0,0 /* 32 bit TTBR0 */
+ mrceq p15,0,r7,c2,c0,1 /* 32 bit TTBR1 */
+ #if 0
+ mrrcne p15,0,r6,r7,c2 /* 64 bit TTBR0 */
+ #endif
+ stm r0!, {r4-r7}
+ #if 0
+ mrrcne p15,1,r6,r7,c2 /* 64 bit TTBR1 */
+ stmne r0!, {r6-r7}
+ #endif
+ mrc p15,0,r4,c3,c0,0 /* DACR */
+ mrc p15,0,r5,c7,c4,0 /* PAR */
+ mrc p15,0,r6,c10,c2,0 /* PRRR/mair0*/
+ mrc p15,0,r7,c10,c2,1 /* NMRR/mair1 */
+ stm r0!, {r4-r7}
+
+ /* TODO: IMPLEMENTATION DEFINED - TCM, lockdown and performance monitor support
+ * CP15 registers 9 and 11
+ */
+
+ mrc p15,0,r4,c13,c0,1 /* CONTEXTIDR */
+ mrc p15,0,r5,c13,c0,2 /* TPIDRURW */
+ mrc p15,0,r6,c13,c0,3 /* TPIDRURO */
+ mrc p15,0,r7,c13,c0,4 /* TPIDRPRW */
+ stm r0!, {r4-r7}
+
+ mrc p15,0,r4,c10,c3,0 /* amair */
+ mrc p15,0,r5,c10,c3,1 /* amair */
+ mrc p15,0,r6,c2,c0,2 /* ttbcr */
+ stm r0!, {r4-r6}
+
+ ldmfd sp!, {r4, r5, r6, r7}
+ bx lr
+ENDPROC(save_mmu)
+
+ENTRY(restore_mmu)
+
+ stmfd sp!, {r4, r5, r6, r7}
+ ldm r0!, {r4-r7}
+ mcr p15,0,r4,c12,c0,0 /* VBAR */
+ mcr p15,0,r5,c2,c0,2 /* TTBCR */
+
+ tst r5, #TTBCR_EAE /* Are we using LPAE? */
+
+ /* restore 32 or 64 bit TTBRs */
+ mcreq p15,0,r6,c2,c0,0 /* 32 bit TTBR0 */
+ mcreq p15,0,r7,c2,c0,1 /* 32 bit TTBR1 */
+ #if 0
+ mcrrne p15,0,r6,r7,c2 /* 64-bit TTBR0 */
+ ldmne r0!, {r6-r7}
+ mcrrne p15,1,r6,r7,c2 /* 64-bit TTBR1 */
+ #endif
+ ldm r0!, {r4-r7}
+ mcr p15,0,r4,c3,c0,0 /* DACR */
+ mcr p15,0,r5,c7,c4,0 /* PAR */
+ mcr p15,0,r6,c10,c2,0 /* PRRR */
+ mcr p15,0,r7,c10,c2,1 /* NMRR */
+
+ /* TODO: IMPLEMENTATION DEFINED - TCM, lockdown and performance monitor support
+ * CP15 registers 9 and 11
+ */
+
+ ldm r0!, {r4-r7}
+ mcr p15,0,r4,c13,c0,1 /* CONTEXTIDR */
+ mcr p15,0,r5,c13,c0,2 /* TPIDRURW */
+ mcr p15,0,r6,c13,c0,3 /* TPIDRURO */
+ mcr p15,0,r7,c13,c0,4 /* TPIDRPRW */
+
+ ldm r0!, {r4-r6}
+ mcr p15,0,r4,c10,c3,0 /* amair */
+ mcr p15,0,r5,c10,c3,1 /* amair */
+ mcr p15,0,r6,c2,c0,2 /* ttbcr */
+
+ ldmfd sp!, {r4, r5, r6, r7}
+
+ bx lr
+ENDPROC(restore_mmu)
+
+#if 0
+ENTRY(save_vfp)
+ /* FPU state save/restore.
+ * FPSID,MVFR0 and MVFR1 don't get serialized/saved (Read Only).
+ */
+ mrc p15,0,r3,c1,c0,2 /* CPACR allows CP10 and CP11 access */
+ ORR r2,r3,#0xF00000
+ mcr p15,0,r2,c1,c0,2
+ isb
+ mrc p15,0,r2,c1,c0,2
+ and r2,r2,#0xF00000
+ cmp r2,#0xF00000
+ beq f0
+ movs r2, #0
+ b f2
+
+0:
+ /* Save configuration registers and enable. */
+ vmrs r12,FPEXC
+ str r12,[r0],#4 /* Save the FPEXC */
+ /* Enable FPU access to save/restore the other registers. */
+ ldr r2,=0x40000000
+ vmsr FPEXC,r2
+ vmrs r2,FPSCR
+ str r2,[r0],#4 /* Save the FPSCR */
+ /* Store the VFP-D16 registers. */
+ vstm r0!, {D0-D15}
+ /* Check for Advanced SIMD/VFP-D32 support */
+ vmrs r2,MVFR0
+ and r2,r2,#0xF /* extract the A_SIMD bitfield */
+ cmp r2, #0x2
+ blt f1
+ /* Store the Advanced SIMD/VFP-D32 additional registers. */
+ vstm r0!, {D16-D31}
+
+ /* IMPLEMENTATION DEFINED: save any subarchitecture defined state
+ * NOTE: Don't change the order of the FPEXC and CPACR restores
+ */
+1:
+ vmsr FPEXC,r12 /* Restore the original En bit of FPU. */
+2:
+ mcr p15,0,r3,c1,c0,2 /* Restore the original CPACR value. */
+ bx lr
+ENDPROC(save_vfp)
+
+
+restore_vfp FUNCTION
+ /* FPU state save/restore. Obviously FPSID,MVFR0 and MVFR1 don't get
+ * serialized (RO).
+ * Modify CPACR to allow CP10 and CP11 access
+ */
+ mrc p15,0,r1,c1,c0,2
+ ORR r2,r1,#0x00F00000
+ mcr p15,0,r2,c1,c0,2
+ /* Enable FPU access to save/restore the rest of registers. */
+ ldr r2,=0x40000000
+ vmsr FPEXC, r2
+ /* Recover FPEXC and FPSCR. These will be restored later. */
+ ldm r0!,{r3,r12}
+ /* Restore the VFP-D16 registers. */
+ vldm r0!, {D0-D15}
+ /* Check for Advanced SIMD/VFP-D32 support */
+ vmrs r2, MVFR0
+ and r2,r2,#0xF /* extract the A_SIMD bitfield */
+ cmp r2, #0x2
+ blt f0
+
+ /* Store the Advanced SIMD/VFP-D32 additional registers. */
+ vldm r0!, {D16-D31}
+
+ /* IMPLEMENTATION DEFINED: restore any subarchitecture defined state */
+
+0 /* Restore configuration registers and enable.
+ * Restore FPSCR _before_ FPEXC since FPEXC could disable FPU
+ * and make setting FPSCR unpredictable.
+ */
+ vmsr FPSCR,r12
+ vmsr FPEXC,r3 /* Restore FPEXC after FPSCR */
+ /* Restore CPACR */
+ mcr p15,0,r1,c1,c0,2
+ bx lr
+ ENDFUNC
+#endif
+
+ENTRY(save_vfp)
+#if 0 //zxp
+
+ /* FPU state save/restore. */
+ /* FPSID,MVFR0 and MVFR1 don't get serialized/saved (Read Only). */
+ mrc p15,0,r3,c1,c0,2 /* CPACR allows CP10 and CP11 access */
+ ORR r2,r3,#0xF00000
+ mcr p15,0,r2,c1,c0,2
+ isb
+ mrc p15,0,r2,c1,c0,2
+ and r2,r2,#0xF00000
+ cmp r2,#0xF00000
+ beq 0f
+ movs r2, #0
+ b 2f
+
+ /* Save configuration registers and enable. */
+0:
+ FMRX r12,FPEXC /* vmrs r12,FPEXC */
+ str r12,[r0],#4 /* Save the FPEXC */
+ /* Enable FPU access to save/restore the other registers. */
+ ldr r2,=0x40000000
+ FMXR FPEXC,r2 /* vmsr FPEXC,r2 */
+ FMRX r2,FPSCR /* vmrs r2,FPSCR */
+ str r2,[r0],#4 /* Save the FPSCR */
+ /* Store the VFP-D16 registers. */
+ vstm r0!, {D0-D15}
+ /* Check for Advanced SIMD/VFP-D32 support */
+ FMRX r2,MVFR0 /* vmrs r2,MVFR0 */
+ and r2,r2,#0xF /* extract the A_SIMD bitfield */
+ cmp r2, #0x2
+ blt 1f
+ /* Store the Advanced SIMD/VFP-D32 additional registers. */
+ vstm r0!, {D16-D31}
+
+ /* IMPLEMENTATION DEFINED: save any subarchitecture defined state */
+ /* NOTE: Don't change the order of the FPEXC and CPACR restores */
+
+ /* Restore the original En bit of FPU. */
+1:
+ FMXR FPEXC,r12 /* vmsr FPEXC,r12 */
+
+ /* Restore the original CPACR value. */
+2:
+ mcr p15,0,r3,c1,c0,2
+#endif
+ bx lr
+ENDPROC(save_vfp)
+
+
+ENTRY(restore_vfp)
+ /* FPU state save/restore. Obviously FPSID,MVFR0 and MVFR1 don't get
+ * serialized (RO).
+ * Modify CPACR to allow CP10 and CP11 access
+ */
+#if 0 //zxp
+ mrc p15,0,r1,c1,c0,2
+ ORR r2,r1,#0x00F00000
+ mcr p15,0,r2,c1,c0,2
+ /* Enable FPU access to save/restore the rest of registers. */
+ ldr r2,=0x40000000
+ FMXR FPEXC, r2 /* vmsr FPEXC, r2 */
+ /* Recover FPEXC and FPSCR. These will be restored later. */
+ ldm r0!,{r3,r12}
+ /* Restore the VFP-D16 registers. */
+ vldm r0!, {D0-D15}
+ /* Check for Advanced SIMD/VFP-D32 support */
+ FMRX r2, MVFR0 /* vmrs r2, MVFR0 */
+ and r2,r2,#0xF /* extract the A_SIMD bitfield */
+ cmp r2, #0x2
+ blt 0f
+
+ /* Store the Advanced SIMD/VFP-D32 additional registers. */
+ vldm r0!, {D16-D31}
+
+ /* IMPLEMENTATION DEFINED: restore any subarchitecture defined state */
+0:
+ /* Restore configuration registers and enable.
+ * Restore FPSCR _before_ FPEXC since FPEXC could disable FPU
+ * and make setting FPSCR unpredictable.
+ */
+ FMXR FPSCR,r12 /* vmsr FPSCR,r12 */
+ /* Restore FPEXC after FPSCR */
+ FMXR FPEXC,r3 /* vmsr FPEXC,r3 */
+ /* Restore CPACR */
+ /* will restore in mt_restore_control_registers */
+ /* mcr p15,0,r1,c1,c0,2 */
+#endif
+ bx lr
+ENDPROC(restore_vfp)
+
+
+ /* We assume that the OS is not using the Virtualization extensions,
+ * and that the warm boot code will set up CNTHCTL correctly.
+ * CNTP_CVAL will be preserved as it is in the always-on domain.
+ */
+#if 0
+ENTRY(save_generic_timer)
+ mrc p15,0,r2,c14,c2,1 /* read CNTP_CTL */
+ mrc p15,0,r3,c14,c2,0 /* read CNTP_TVAL */
+ mrc p15,0,r12,c14,c1,0 /* read CNTKCTL */
+ stm r0!, {r2, r3, r12}
+ bx lr
+ENDPROC(save_generic_timer)
+
+
+ENTRY(restore_generic_timer)
+ ldm r0!, {r2, r3, r12}
+ mcr p15,0,r3,c14,c2,0 /* write CNTP_TVAL */
+ mcr p15,0,r12,c14,c1,0 /* write CNTKCTL */
+ mcr p15,0,r2,c14,c2,1 /* write CNTP_CTL */
+ bx lr
+ENDPROC(restore_generic_timer)
+#endif
+
+
+ /* This function disables L1 data caching, then cleans and invalidates
+ the whole L1 data cache.
+ */
+
+ENTRY(disable_clean_inv_dcache_v7_l1)
+ stmfd sp!, {r4, lr}
+
+ /* Disable L1 cache */
+ dsb
+ mrc p15,0,r3,c1,c0,0
+ bic r3, #4 /* Clear C bit */
+ mcr p15,0,r3,c1,c0,0
+ dsb
+
+ /* No more Data cache allocations can happen at L1.
+ Until we finish cleaning the Inner cache, any accesses to dirty data
+ (e.g. by translation table walks) may get the wrong (Outer) data, so
+ we have to be sure everything that might be accessed is clean.
+ We already know that the translation tables are clean (see late_init).
+ */
+
+ mov r0, #0 /* Select L1 Data/Unified cache */
+ mcr p15,2,r0,c0,c0,0
+ mrc p15,1,r0,c0,c0,0 /* Read size */
+ ubfx r3, r0, #13, #15 /* sets - 1 */
+ add r3, r3, #1 /* sets */
+ ubfx r4, r0, #0, #3 /* log2(words per line) - 2 */
+ add r4, r4, #4 /* set shift = log2(bytes per line) */
+ ubfx r2, r0, #3, #10 /* ways - 1 */
+ clz r12, r2 /* way shift */
+ add r2, r2, #1 /* ways */
+
+ /* r2,r3 inner, outer loop targets, r1 inner loop counter, r0 zero */
+5:
+ cmp r3, #0
+ beq 20f
+ sub r3, r3, #1
+ mov r1, r2
+
+10:
+ cmp r1, #0
+ beq 5b
+ sub r1, r1, #1
+ mov r0, r1, lsl r12 /* Fill in Way field */
+ orr r0, r0, r3, lsl r4 /* Fill in Set field */
+ mcr p15,0,r0,c7,c14,2 /* DCCISW */
+ b 10b
+
+20:
+ dsb
+ ldmfd sp!, {r4, lr}
+ bx lr
+ENDPROC(disable_clean_inv_dcache_v7_l1)
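+
+	/* Illustrative sketch, not part of the original patch: the set/way
+	 * operand written to DCCISW/DCCSW in the loops above is composed as
+	 *   operand = (way << way_shift) | (set << set_shift) | (level << 1)
+	 * where way_shift = CLZ(ways - 1) and set_shift = log2(bytes per line),
+	 * both derived from CCSIDR exactly as in the code above. The helper
+	 * below is a hypothetical stand-alone version of that composition,
+	 * kept under #if 0 so it is never assembled.
+	 */
+#if 0
+ENTRY(compose_set_way_operand)
+	/* r0 = way index, r1 = set index, r2 = way shift, r3 = set shift */
+	lsl	r0, r0, r2		/* place the way field */
+	orr	r0, r0, r1, lsl r3	/* place the set field */
+	/* the cache level field (bits [3:1]) is left at zero, i.e. L1 */
+	bx	lr
+ENDPROC(compose_set_way_operand)
+#endif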
+
+
+ENTRY(invalidate_icache_v7_pou)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
+ bx lr
+ENDPROC(invalidate_icache_v7_pou)
+
+
+ENTRY(invalidate_icache_v7)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
+ bx lr
+ENDPROC(invalidate_icache_v7)
+
+
+ENTRY(enable_icache_v7)
+	mrc	p15, 0, r0, c1, c0, 0	/* read SCTLR */
+	movw	r1, #SCTLR_I
+	orr	r0, r0, r1		/* set the I bit to enable the I-cache */
+	mcr	p15, 0, r0, c1, c0, 0
+	bx	lr
+ENDPROC(enable_icache_v7)
+
+
+ENTRY(invalidate_dcache_v7_all)
+ /* Must iterate over the caches in order to synthesise a complete invalidation
+ of data/unified cache
+ */
+//zxp stmfd sp!, {r4-r11} /* zxp delete for no sp to use */
+ mrc p15, 1, r0, c0, c0, 1 /* read clidr */
+ ands r3, r0, #0x7000000 /* extract loc from clidr */
+ mov r3, r3, lsr #23 /* left align loc bit field */
+ beq finished /* if loc is 0, then no need to clean */
+ mov r10, #0 /* start clean at cache level 0 (in r10) */
+loop1:
+ add r2, r10, r10, lsr #1 /* work out 3x current cache level */
+ mov r12, r0, lsr r2 /* extract cache type bits from clidr */
+ and r12, r12, #7 /* mask of bits for current cache only */
+ cmp r12, #2 /* see what cache we have at this level */
+ blt skip /* skip if no cache, or just i-cache */
+ mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
+ mov r12, #0
+ mcr p15, 0, r12, c7, c5, 4 /* prefetchflush to sync new cssr&csidr */
+ mrc p15, 1, r12, c0, c0, 0 /* read the new csidr */
+ and r2, r12, #7 /* extract the length of the cache lines */
+ add r2, r2, #4 /* add 4 (line length offset) */
+ ldr r6, =0x3ff
+ ands r6, r6, r12, lsr #3 /* find maximum number on the way size */
+ clz r5, r6 /* find bit pos of way size increment */
+ ldr r7, =0x7fff
+ ands r7, r7, r12, lsr #13 /* extract max number of the index size */
+loop2:
+ mov r8, r6 /* create working copy of max way size */
+loop3:
+ orr r11, r10, r8, lsl r5 /* factor way and cache number into r11 */
+ orr r11, r11, r7, lsl r2 /* factor index number into r11 */
+ mcr p15, 0, r11, c7, c6, 2 /* invalidate by set/way */
+ subs r8, r8, #1 /* decrement the way */
+ bge loop3
+ subs r7, r7, #1 /* decrement the index */
+ bge loop2
+skip:
+ add r10, r10, #2 /* increment cache number */
+ cmp r3, r10
+ bgt loop1
+finished:
+ mov r10, #0
+
+ mcr p15, 0, r10, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r10, c8, c7, 0 /* invalidate I + D TLBs */
+ mcr p15, 0, r10, c2, c0, 2 /* TTB control register */
+//zxp ldmfd sp!, {r4-r11}
+ bx lr
+ENDPROC(invalidate_dcache_v7_all)
+
+
+ENTRY(disable_clean_inv_dcache_v7_all)
+ /* Must iterate over the caches in order to synthesise a complete clean
+ of data/unified cache */
+ stmfd sp!, {r4-r11}
+
+ /* Disable integrated data/unified cache */
+ dsb
+ mrc p15, 0, r3, c1, c0, 0
+ bic r3, #4 /* Clear C bit */
+ mcr p15, 0, r3, c1, c0, 0
+ isb
+
+ /* No more Data cache allocations can happen.
+ Until we finish cleaning the cache, any accesses to dirty data
+ (e.g. by translation table walks) may get the wrong (Outer) data, so
+ we have to be sure everything that might be accessed is clean.
+ We already know that the translation tables are clean (see late_init).
+ */
+
+
+ mrc p15, 1, r0, c0, c0, 1 /* read clidr */
+ ands r3, r0, #0x7000000 /* extract loc from clidr */
+ mov r3, r3, lsr #23 /* left align loc bit field */
+ beq 50f /* if loc is 0, then no need to clean */
+ mov r10, #0 /* start clean at cache level 0 (in r10) */
+10:
+ add r2, r10, r10, lsr #1 /* work out 3x current cache level */
+ mov r12, r0, lsr r2 /* extract cache type bits from clidr */
+ and r12, r12, #7 /* mask of bits for current cache only */
+ cmp r12, #2 /* see what cache we have at this level */
+ blt 40f /* skip if no cache, or just i-cache */
+ mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
+ mov r12, #0
+ mcr p15, 0, r12, c7, c5, 4 /* prefetchflush to sync new cssr&csidr */
+ mrc p15, 1, r12, c0, c0, 0 /* read the new csidr */
+ and r2, r12, #7 /* extract the length of the cache lines */
+ add r2, r2, #4 /* add 4 (line length offset) */
+ ldr r6, =0x3ff
+ ands r6, r6, r12, lsr #3 /* find maximum number on the way size */
+ clz r5, r6 /* find bit pos of way size increment */
+ ldr r7, =0x7fff
+ ands r7, r7, r12, lsr #13 /* extract max number of the index size */
+20:
+ mov r8, r6 /* create working copy of max way size */
+30:
+ orr r11, r10, r8, lsl r5 /* factor way and cache number into r11 */
+ orr r11, r11, r7, lsl r2 /* factor index number into r11 */
+ mcr p15, 0, r11, c7, c14, 2 /* clean & invalidate by set/way */
+ subs r8, r8, #1 /* decrement the way */
+ bge 30b
+ subs r7, r7, #1 /* decrement the index */
+ bge 20b
+40:
+ add r10, r10, #2 /* increment cache number */
+ cmp r3, r10
+ bgt 10b
+50:
+ mov r10, #0
+ mcr p15, 0, r10, c7, c10, 4 /* drain write buffer */
+ ldmfd sp!, {r4-r11}
+ bx lr
+ENDPROC(disable_clean_inv_dcache_v7_all)
+
+
+ /* This function cleans the whole L1 data cache */
+ENTRY(clean_dcache_v7_l1)
+ stmfd sp!, {r4, lr}
+
+ mov r0, #0 /* Select L1 Data/Unified cache */
+ mcr p15,2,r0,c0,c0,0
+ mrc p15,1,r0,c0,c0,0 /* Read size (CCSIDR) */
+ ubfx r3, r0, #13, #15 /* sets - 1 */
+ add r3, r3, #1 /* sets */
+ ubfx r4, r0, #0, #3 /* log2(words per line) - 2 */
+ add r4, r4, #4 /* set shift = log2(bytes per line) */
+ ubfx r2, r0, #3, #10 /* ways - 1 */
+ clz r12, r2 /* way shift */
+ add r2, r2, #1 /* ways */
+
+ /* r2,r3 inner, outer loop targets, r1 inner loop counter, r0 zero */
+0:
+ cmp r3, #0
+ beq 20f
+ sub r3, r3, #1
+ mov r1, r2
+
+10:
+ cmp r1, #0
+ beq 0b
+ sub r1, r1, #1
+ mov r0, r1, lsl r12 /* Fill in Way field */
+ orr r0, r0, r3, lsl r4 /* Fill in Set field */
+ mcr p15,0,r0,c7,c10,2 /* DCCSW */
+ b 10b
+
+20:
+ dsb
+ pop {r4, lr}
+ bx lr
+ENDPROC(clean_dcache_v7_l1)
+
+#if 0 //zxp
+ /* This function cleans a single line from the L1 dcache */
+clean_mva_dcache_v7_l1
+ mcr p15,0,r0,c7,c10,1 /* DCCMVAC */
+ bx lr
+
+enter_secure_monitor_mode FUNCTION
+ mov r0, lr
+ mov r1, sp
+ smc #0
+appf_smc_handler
+ /* We are now in Monitor mode, make sure we're Secure */
+ mrc p15, 0, r12, c1, c1, 0
+ bic r12, #SCR_NS
+ mcr p15, 0, r12, c1, c1, 0
+ /* Restore sp and return - stack must be uncached or in NS memory! */
+ mov sp, r1
+ bx r0
+ ENDFUNC
+
+enter_nonsecure_svc_mode FUNCTION
+ /* Copy the Monitor mode sp and lr values */
+ mov r2, lr
+ mov r3, sp
+ mrc p15, 0, r1, c1, c1, 0
+ orr r1, #SCR_NS
+ mcr p15, 0, r1, c1, c1, 0
+ adr lr, non_secure
+ movs pc, lr
+non_secure
+ /* We are now in non-secure state */
+ /* Restore sp and return */
+ mov sp, r3
+ bx r2
+ ENDFUNC
+#endif
+
+ENTRY(save_a53_other)
+#if 0	/* the A53 does not have these A9-specific registers */
+ mrc p15,0,r12,c15,c0,0 /* Read Power Control Register */
+ str r12, [r0], #4
+ mrc p15, 4, r12, c15, c0, 0 /* Read Configuration Base Address Register */
+ str r12, [r0], #4
+
+ mrc p15,0,r3,c0,c0,0 /* Read Main ID Register */
+ ubfx r3, r3, #20, #4 /* Extract major version number */
+ cmp r3, #2
+ blt 1f /* PLE only possible in r2p0 onwards */
+ mrc p15,0,r3,c11,c0,0 /* Read PLE IDR */
+ cmp r3, #0
+ beq 1f /* No PLE present */
+
+ mrc p15,0,r3,c11,c1,0 /* Read PLE UAR */
+ mrc p15,0,r12,c11,c1,1 /* Read PLE PCR */
+ stm r0!, {r3, r12}
+
+1:
+ bx lr
+#else
+
+#endif
+ENDPROC(save_a53_other)
+
+
+ENTRY(restore_a53_other)
+#if 0
+ cmp r1, #0 /* Check we are secure */
+ ldr r12, [r0], #4
+ andne r12, r12, #0x01 /* We only restore the Dynamic Clock gating bit */
+ mcrne p15,0,r12,c15,c0,0 /* Write Power Control Register (if secure) */
+ ldr r12, [r0], #4
+ mcrne p15, 4, r12, c15, c0, 0 /* Write Configuration Base Address Register (if Secure) */
+
+ mrc p15,0,r3,c0,c0,0 /* Read Main ID Register */
+ ubfx r3, r3, #20, #4 /* Extract major version number */
+ cmp r3, #2
+ blt 1f /* PLE only possible in r2p0 onwards */
+ mrc p15,0,r3,c11,c0,0 /* Read PLE IDR */
+ cmp r3, #0
+ beq 1f /* No PLE present */
+
+ ldm r0!, {r3, r12}
+ mcr p15,0,r3,c11,c1,0 /* Write PLE UAR */
+ mcr p15,0,r12,c11,c1,1 /* Write PLE PCR */
+
+1:
+ bx lr
+#else
+#endif
+ENDPROC(restore_a53_other)
+
+
+.equ C1_IBIT , 0x00001000
+.equ C1_CBIT , 0x00000004
+ENTRY(disable_flush_dcache_L1_flush_cache_L2)
+/*******************************************************************************
+ * push the callee-saved registers: {r4-r11, lr}
+ ******************************************************************************/
+
+ stmfd sp!, {r4,r5,r6,r7,r8, r9, r10,r11,lr}
+/*******************************************************************************
+*__disable_dcache
+* ******************************************************************************/
+ MRC p15,0,r0,c1,c0,0
+ BIC r0,r0,#C1_CBIT
+ dsb
+ MCR p15,0,r0,c1,c0,0
+ dsb
+ isb
+/* Erratum 794322: an instruction fetch can be allocated into the L2 cache
+ * after the cache is disabled. The erratum is avoided by inserting both of
+ * the following after the SCTLR.C bit is cleared to 0, and before the caches
+ * are cleaned or invalidated:
+ *   1) a TLBIMVA operation to any address;
+ *   2) a DSB instruction.
+ */
+ MCR p15,0,r0,c8,c7,1
+ dsb
+ isb
+/*******************************************************************************
+* __inner_flush_dcache_L1
+* ******************************************************************************/
+ dmb /*@ ensure ordering with previous memory accesses */
+ mrc p15, 1, r0, c0, c0, 1 /* @ read clidr */
+ ands r3, r0, #0x7000000 /* @ extract loc from clidr */
+ mov r3, r3, lsr #23 /* @ left align loc bit field */
+ beq DF1F2_L1_finished /* @ if loc is 0, then no need to clean */
+ mov r10, #0 /* @ start clean at cache level 1 */
+DF1F2_L1_loop1:
+ add r2, r10, r10, lsr #1 /* @ work out 3x current cache level */
+ mov r1, r0, lsr r2 /* @ extract cache type bits from clidr */
+ and r1, r1, #7 /* @ mask of the bits for current cache only */
+ cmp r1, #2 /* @ see what cache we have at this level */
+ blt DF1F2_L1_skip /* @ skip if no cache, or just i-cache */
+ mcr p15, 2, r10, c0, c0, 0 /* @ select current cache level in cssr */
+	isb				/* @ isb to sync the new cssr&csidr */
+ mrc p15, 1, r1, c0, c0, 0 /* @ read the new csidr */
+ and r2, r1, #7 /* @ extract the length of the cache lines */
+ add r2, r2, #4 /* @ add 4 (line length offset) */
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 /* @ find maximum number on the way size */
+ clz r5, r4 /* @ find bit position of way size increment */
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 /* @ extract max number of the index size */
+DF1F2_L1_loop2:
+ mov r9, r4 /* @ create working copy of max way size */
+DF1F2_L1_loop3:
+ orr r11, r10, r9, lsl r5 /* @ factor way and cache number into r11 */
+ orr r11, r11, r7, lsl r2 /* @ factor index number into r11*/
+#if 1
+ mcr p15, 0, r11, c7, c10, 2 /* @ clean by set/way */
+ mcr p15, 0, r11, c7, c6, 2 /* @ invalidate by set/way*/
+#endif
+#if 0
+ mcr p15, 0, r11, c7, c14, 2 /* @ clean & invalidate by set/way*/
+#endif
+ subs r9, r9, #1 /* @ decrement the way */
+ bge DF1F2_L1_loop3
+ subs r7, r7, #1 /* @ decrement the index */
+ bge DF1F2_L1_loop2
+DF1F2_L1_skip:
+ /* @add r10, r10, #2 */ /* @ increment cache number */
+ /*@cmp r3, r10
+ @bgt DF1F2_L1_loop1*/
+DF1F2_L1_finished:
+	mov	r10, #0				/* @ switch back to cache level 0 */
+ mcr p15, 2, r10, c0, c0, 0 /* @ select current cache level in cssr */
+ dsb
+ isb
+/*******************************************************************************
+ * clrex: clear any exclusive monitor state
+ ******************************************************************************/
+ clrex
+/*******************************************************************************
+* __inner_flush_dcache_L2
+* ******************************************************************************/
+ dmb /* @ ensure ordering with previous memory accesses */
+ mrc p15, 1, r0, c0, c0, 1 /* @ read clidr */
+ ands r3, r0, #0x7000000 /* @ extract loc from clidr */
+ mov r3, r3, lsr #23 /* @ left align loc bit field */
+ beq DF1F2_L2_finished /* @ if loc is 0, then no need to clean */
+ mov r10, #2 /* @ start clean at cache level 2*/
+DF1F2_L2_loop1:
+ add r2, r10, r10, lsr #1 /* @ work out 3x current cache level */
+ mov r1, r0, lsr r2 /* @ extract cache type bits from clidr */
+ and r1, r1, #7 /* @ mask of the bits for current cache only */
+ cmp r1, #2 /* @ see what cache we have at this level */
+ blt DF1F2_L2_skip /* @ skip if no cache, or just i-cache */
+ mcr p15, 2, r10, c0, c0, 0 /* @ select current cache level in cssr */
+	isb				/* @ isb to sync the new cssr&csidr */
+ mrc p15, 1, r1, c0, c0, 0 /* @ read the new csidr */
+ and r2, r1, #7 /* @ extract the length of the cache lines */
+ add r2, r2, #4 /* @ add 4 (line length offset) */
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 /* @ find maximum number on the way size */
+ clz r5, r4 /* @ find bit position of way size increment */
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 /* @ extract max number of the index size*/
+DF1F2_L2_loop2:
+ mov r9, r4 /* @ create working copy of max way size*/
+DF1F2_L2_loop3:
+ orr r11, r10, r9, lsl r5 /* @ factor way and cache number into r11 */
+ orr r11, r11, r7, lsl r2 /* @ factor index number into r11 */
+ mcr p15, 0, r11, c7, c14, 2 /* @ clean & invalidate by set/way */
+ subs r9, r9, #1 /* @ decrement the way */
+ bge DF1F2_L2_loop3
+ subs r7, r7, #1 /* @ decrement the index */
+ bge DF1F2_L2_loop2
+DF1F2_L2_skip:
+ /*@add r10, r10, #2 @ increment cache number */
+ /*@cmp r3, r10 */
+ /*@bgt DF1F2_L2_loop1 */
+DF1F2_L2_finished:
+	mov	r10, #0				/* @ switch back to cache level 0 */
+ mcr p15, 2, r10, c0, c0, 0 /* @ select current cache level in cssr */
+ dsb
+ isb
+/*******************************************************************************
+ * pop the callee-saved registers: {r4-r11, lr}
+ ******************************************************************************/
+ ldmfd sp!, {r4, r5,r6,r7,r8, r9, r10,r11,lr}
+ bx lr
+ENDPROC(disable_flush_dcache_L1_flush_cache_L2)
+
+#if 0
+.equ HAL_CPSR_SVC_MODE, 0x13
+.equ HAL_CPSR_MON_MODE, 0x16
+ENTRY(tse_monitor_init)
+	ldr	r0, =monitor_exception_vectors	// Get address of Monitor's vector table
+ mcr p15, 0, r0, c12, c0, 1 // Write Monitor Vector Base Address Register
+
+ cps #HAL_CPSR_MON_MODE // disabled irq and fiq when enter monitor mode
+ ldr sp, =mon_stack // init sp for monitor mode
+ cps #HAL_CPSR_SVC_MODE
+
+ bx lr
+ENDPROC(tse_monitor_init)
+#endif
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm.c
new file mode 100755
index 0000000..a1b90ab
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm.c
@@ -0,0 +1,646 @@
+/*
+ * ZTE power management main driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by zxp
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include "zx-pm.h"
+#include <linux/timer.h>
+#include <linux/kthread.h> /*For kthread_run()*/
+#include <mach/spinlock.h>
+
+#ifdef CONFIG_ZX_PM_DEBUG
+
+static struct delayed_work pm_debug_work;
+struct kobject *pm_debug_kobj;
+static struct timer_list pm_debug_timer;
+
+#define PM_DEBUG_DELAY msecs_to_jiffies(10000) //10s
+
+#endif
+
+/*********************************************************************
+ * some common pm functions
+ ********************************************************************/
+#ifdef CONFIG_ZX_RAM_CONSOLE
+#define PM_LOG_SRAM_SIZE (4*1024)
+static char pm_sram_printk_buf[PM_LOG_SRAM_SIZE]; // loop buffer
+static u32 pm_sram_point = 0;
+static u32 pm_sram_inited = 0;
+//static char pm_sram_temp_buf[512] = {0};
+
+static void pm_sram_cpy(char *s, unsigned len)
+{
+ if(pm_sram_point + len >= PM_LOG_SRAM_SIZE)
+ pm_sram_point = 0;
+
+ memcpy(pm_sram_printk_buf+pm_sram_point, s, len);
+ pm_sram_point += len;
+}
+
+/* ------------------for idle print msg-------------------------*/
+
+#define PM_IDLE_SRAM_ITEM_SIZE (200)
+#define PM_IDLE_SRAM_ITEM_CNT (8)
+#define PM_IDLE_TOTAL_SRAM_SIZE (PM_IDLE_SRAM_ITEM_SIZE*PM_IDLE_SRAM_ITEM_CNT)
+
+typedef struct
+{
+ char buf[PM_IDLE_SRAM_ITEM_SIZE];
+}pm_idle_sram;
+
+static pm_idle_sram pm_idle_sram_buf[PM_IDLE_SRAM_ITEM_CNT];
+static unsigned int pm_idle_sram_state = 0;
+static unsigned int pm_idle_sram_cur_item = 0;
+
+static u32 last_uart_jiffies =0;
+struct timer_list timer_uart_print;
+
+
+#ifdef CONFIG_CPU_IDLE
+extern int zx_idle_get_debug_flag(void);
+extern int zx_idle_get_idle_flag(void); //ap idle flag
+#endif
+
+#ifndef CONFIG_CPU_IDLE
+typedef int (*pm_callback_fn)(void);
+int zx_pm_register_callback(pm_callback_fn enter_cb, pm_callback_fn exit_cb)
+{
+ return 0;
+}
+EXPORT_SYMBOL(zx_pm_register_callback);
+#endif
+
+/* for idle print msg */
+
+void pm_idle_sram_start(void)
+{
+ pm_idle_sram_cur_item = 0;
+ pm_idle_sram_state = 1;
+}
+
+void pm_idle_sram_end(void)
+{
+ pm_idle_sram_state = 0;
+}
+
+
+bool pm_idle_sram_is_permit(void)
+{
+#ifdef CONFIG_CPU_IDLE
+ if(pm_idle_sram_state && (pm_get_mask_info()&PM_SLEEP_FLAG_PRINT))//if(pm_idle_sram_state && (zx_idle_get_debug_flag()&1))
+ return true;
+ else
+ return false;
+#endif
+}
+
+static void pm_idle_sram_cpy(char *s, unsigned len)
+{
+ if(!pm_idle_sram_is_permit())
+ return;
+
+ if((pm_idle_sram_cur_item >= PM_IDLE_SRAM_ITEM_CNT) || (len >= PM_IDLE_SRAM_ITEM_SIZE))
+ {
+ BUG();
+ //return ;
+ }
+
+ memcpy(pm_idle_sram_buf[pm_idle_sram_cur_item].buf, s, len);
+ pm_idle_sram_buf[pm_idle_sram_cur_item].buf[len] = 0;
+ pm_idle_sram_cur_item ++;
+}
+
+/**
+ * helpers for the periodic UART sleep-flag print
+ */
+
+ static void pm_update_uart_jiffies(void)
+ {
+ last_uart_jiffies=jiffies;
+ }
+
+static void pm_sleep_flag_print(void)
+{
+ u32 cur_jiffies = jiffies;
+ u32 work_jiffies = cur_jiffies - last_uart_jiffies;
+
+ if( (work_jiffies > 200) ) {
+
+ last_uart_jiffies = cur_jiffies;
+ printk(" jiffies:%u; ", cur_jiffies);
+ #ifndef CONFIG_SYSTEM_RECOVERY
+ #ifndef CONFIG_ARCH_ZX297520V3_CAP
+
+ #ifdef USE_CPPS_KO
+ if(cpps_callbacks.psm_GetModemSleepFlagStatus)
+ CPPS_FUNC(cpps_callbacks,psm_GetModemSleepFlagStatus)();
+ #else
+ psm_GetModemSleepFlagStatus();
+ #endif
+ #endif
+ #endif
+ }
+}
+
+static void pm_sleepflag_print_func(unsigned long een)
+{
+
+ if(pm_get_mask_info()&PM_SLEEP_FLAG_PRINT) {
+
+ soft_spin_lock_psm(UART_SFLOCK);
+
+ pm_sleep_flag_print();
+ mod_timer(&timer_uart_print ,jiffies + msecs_to_jiffies(1 * 1000));
+ soft_spin_unlock_psm(UART_SFLOCK);
+ }
+
+}
+
+static void pm_uart_createtimer(void)
+{
+
+ init_timer(&timer_uart_print);
+ timer_uart_print.function = pm_sleepflag_print_func;
+ timer_uart_print.expires = jiffies + msecs_to_jiffies(59 * 1000);
+ add_timer(&timer_uart_print);
+
+}
+
+ void pm_uart_mod_timer(void)
+ {
+ if(pm_get_mask_info()&PM_SLEEP_FLAG_PRINT) {
+ pm_update_uart_jiffies();
+ mod_timer(&timer_uart_print ,jiffies + msecs_to_jiffies(1 * 1000));
+ }
+ }
+
+ void pm_uart_del_timer(void)
+ {
+ if(pm_get_mask_info()&PM_SLEEP_FLAG_PRINT) {
+ del_timer(&timer_uart_print);
+ }
+ }
+/* ------------------for idle print msg-------------------------*/
+
+#endif
+
+void pm_psm_flag_print(u32 *sleepflag)
+{
+ #ifdef CONFIG_CPU_IDLE
+ #ifndef CONFIG_ARCH_ZX297520V3_CAP
+ printk("L1e w t: 0x%x 0x%x 0x%x; drv:0x%x; app:0x%x; plat:0x%x; gsm:0x%x ; idle_flag:0x%x;dma_used: 0x%x \n ",
+ sleepflag[0], sleepflag[1], sleepflag[2], sleepflag[3], sleepflag[4] , sleepflag[5], sleepflag[6], zx_idle_get_idle_flag(),pm_dma_used());
+ #endif
+ #endif
+}
+EXPORT_SYMBOL(pm_psm_flag_print);
+
+u32 print_cnt=0;
+
+void pm_idle_sram_print(void)
+{
+#ifdef CONFIG_ZX_RAM_CONSOLE
+ int i;
+
+ if((print_cnt++) % 100 == 0) {
+ for(i=0; i<pm_idle_sram_cur_item; i++)
+ {
+ printk("%s", pm_idle_sram_buf[i].buf);
+ }
+
+ pm_idle_sram_cur_item = 0;
+ }
+#endif
+}
+
+/**
+ * usage: like printk(...)
+ */
+void pm_printk(const char *fmt, ...)
+{
+#ifdef CONFIG_ZX_RAM_CONSOLE
+ va_list args;
+ unsigned long long t;
+ unsigned long nanosec_rem;
+ int tlen, len;
+ char pm_sram_temp_buf[512] = {0};
+
+ if(!pm_sram_inited)
+ return;
+
+ va_start(args, fmt);
+ preempt_disable();
+
+ /* add time stamp */
+ t = cpu_clock(read_cpuid());
+ nanosec_rem = do_div(t, 1000000000);
+ tlen = sprintf(pm_sram_temp_buf, ">%5lu.%06lu< ",
+ (unsigned long) t, nanosec_rem / 1000);
+
+ len = vsprintf(pm_sram_temp_buf+tlen, fmt, args);
+ len += tlen;
+
+ pm_sram_cpy(pm_sram_temp_buf, len);
+ pm_idle_sram_cpy(pm_sram_temp_buf, len);
+
+ preempt_enable();
+ va_end(args);
+#endif
+}
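+
+#if 0	/* usage sketch only, not part of the original patch */
+/*
+ * pm_printk() takes printk-style arguments, prepends a timestamp and copies
+ * the resulting line into the SRAM ring buffer (and, when permitted, into the
+ * per-item idle buffer) instead of sending it to the UART. A hypothetical
+ * caller in the sleep path would use it like this:
+ */
+static void pm_printk_example(void)
+{
+	pm_printk("enter sleep, wake reason=%d\n", 0);
+}
+#endif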
+
+void pm_sram_init(void)
+{
+#ifdef CONFIG_ZX_RAM_CONSOLE
+ pr_info("[SLP] Power/SRAM_INIT \n");
+
+ pm_sram_printk_buf[0] = 0;
+ pm_sram_point = 0;
+
+ pm_sram_inited = 1;
+#endif
+}
+
+/*********************************************************************
+ * some pm debug functions
+ *
+ * we use sysfs interface
+ ********************************************************************/
+#ifdef CONFIG_ZX_PM_DEBUG
+
+static void pm_debug_timer_expired(unsigned long data)
+{
+ mod_timer(&pm_debug_timer, jiffies + msecs_to_jiffies(6*1000));
+
+ pr_info("[SLP] pm timer !!!");
+}
+
+
+/*=============================================================================
+ *======== /sys/zte_pm/state =================================================
+ *=============================================================================
+ */
+
+/**
+ * write the state string into buf and return its length
+ */
+static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ s += sprintf(s, "%s\n", "[SLP] no debug info now!");
+
+ return (s - buf);
+}
+
+/**
+ * buf holds the input string, n is its length;
+ * returns the status
+ */
+static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+
+// zDrvRpMsg_CreateChannel(M0_ID, channel_1, 0x20);
+
+ return error;
+}
+
+zte_pm_attr(state);
+
+/*=============================================================================
+ *======== /sys/zte_pm/cpufreq =================================================
+ *=============================================================================
+ */
+
+/**
+ * write the string into buf and return its length
+ */
+ extern void debug_cpu_freq_info(void);
+/* usage: "cat cpufreq" */
+static ssize_t cpufreq_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+// s += sprintf(s, "%s\n", "[SLP] cpufreq debug !");
+#ifdef CONFIG_CPU_FREQ
+ debug_cpu_freq_info();
+#endif
+
+ return (s - buf);
+}
+
+/**
+ * buf holds the input string, n is its length;
+ * returns the status
+ */
+static ssize_t cpufreq_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+
+ return error ;
+}
+
+zte_pm_attr(cpufreq);
+
+
+/*=============================================================================
+ *======== /sys/zte_pm/debug_work ===========================================
+ *=============================================================================
+ */
+#if 0
+static unsigned test_item = 0;
+static void pm_test_switch_clock(void)
+{
+ printk("[SLP] pm_test_switch_clock: %d \n\r", test_item);
+
+ switch(test_item)
+ {
+ case 0: /* ufi 400M */
+ cpufreq_test(1, 0);
+
+ break;
+ case 1: /* ufi 800M */
+ cpufreq_test(0, 1);
+ break;
+ case 2: /* 624 624M */
+ cpufreq_test(1, 2);
+ break;
+ case 3: /* 624 156M */
+ cpufreq_test(2, 3);
+ break;
+ case 4: /* main 26M */
+ cpufreq_test(3, 4);
+ test_item = 0;
+ return;
+ break;
+ }
+
+ test_item ++;
+}
+#endif
+
+static void pm_debug_func(struct work_struct *work)
+{
+ printk("[SLP] runs in %s. \n\r", __func__);
+
+ pm_suspend(PM_SUSPEND_MEM);
+// pm_test_switch_clock();
+
+ schedule_delayed_work(&pm_debug_work, PM_DEBUG_DELAY);
+}
+
+static ssize_t pm_debug_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ s += sprintf(s, "%s\n", "[SLP] pm debug !");
+
+ return (s - buf);
+}
+
+/* usage: "echo 1 > pm_debug" */
+static ssize_t pm_debug_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+ long temp;
+
+ if(strict_strtol(buf, 0, &temp))
+ error = -EINVAL;
+
+ if(temp == 1)
+ schedule_delayed_work(&pm_debug_work, PM_DEBUG_DELAY);
+ else
+ cancel_delayed_work(&pm_debug_work);
+
+ return error ? error : n;
+}
+
+zte_pm_attr(pm_debug);
+
+/*=============================================================================
+ *======== /sys/zte_pm/wakelock =============================================
+ *=============================================================================
+ */
+extern void test_wakelock(void);
+extern void pm_debug_wakelocks(void);
+static ssize_t wakelock_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ pm_debug_wakelocks();
+
+ return (s - buf);
+}
+
+static ssize_t wakelock_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+
+#ifdef CONFIG_ZX_PM_SUSPEND
+ test_wakelock();
+#endif
+
+ return error ;
+}
+
+zte_pm_attr(wakelock);
+
+#ifdef CONFIG_ZX_AUTOSLEEP
+/*=============================================================================
+ *======== /sys/zte_pm/app_done =============================================
+ *=============================================================================
+ */
+extern void app_start_done(void);
+static ssize_t app_done_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+
+ return (s - buf);
+}
+
+static ssize_t app_done_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+
+ app_start_done();
+
+ return error;
+}
+
+zte_pm_attr(app_done);
+#endif
+
+static struct attribute * g[] =
+{
+ &state_attr.attr,
+ &pm_debug_attr.attr,
+ &wakelock_attr.attr,
+#ifdef CONFIG_ZX_AUTOSLEEP
+ &app_done_attr.attr,
+#endif
+ &cpufreq_attr.attr,
+ NULL,
+};
+
+
+static struct attribute_group zte_pm_attr_group =
+{
+ .attrs = g,
+};
+
+
+/**
+ * 1. create the sysfs directory "/sys/zte_pm"
+ * 2. add the attribute group
+ */
+static int __init pm_debug_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "[SLP] create sysfs interface\n");
+ pm_debug_kobj = kobject_create_and_add("zte_pm", NULL);
+ if (!pm_debug_kobj)
+ return -ENOMEM;
+ ret = sysfs_create_group(pm_debug_kobj, &zte_pm_attr_group);
+ if (ret)
+ {
+ printk(KERN_WARNING "[SLP] sysfs_create_group ret %d\n", ret);
+ return ret;
+ }
+
+ /* init delayed work */
+ INIT_DELAYED_WORK_DEFERRABLE(&pm_debug_work, pm_debug_func);
+ /* we will start this in sysfs */
+// schedule_delayed_work(&pm_debug_work, PM_DEBUG_DELAY);
+
+ setup_timer(&pm_debug_timer, pm_debug_timer_expired, 0);
+// mod_timer(&pm_debug_timer, jiffies + msecs_to_jiffies(6*1000));
+
+#ifdef CONFIG_CPU_IDLE
+ /* cpuidle debug init */
+ idle_debug_init();
+#endif
+
+ pm_debug_mask_info_init();
+
+ return 0;
+}
+
+#endif
+
+
+static int zx_pm_pre_idle_notifier(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ switch (val) {
+ case IDLE_START:
+ #ifndef CONFIG_SYSTEM_RECOVERY
+ #ifndef CONFIG_ARCH_ZX297520V3_CAP
+ #ifdef USE_CPPS_KO
+ if(cpps_callbacks.psm_ModemDevSleep)
+ CPPS_FUNC(cpps_callbacks,psm_ModemDevSleep)();
+ #else
+ psm_ModemDevSleep();
+ #endif
+ #endif
+ #endif
+ break;
+ case IDLE_END:
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block zx_pm_pre__idle_nb = {
+ .notifier_call = zx_pm_pre_idle_notifier,
+};
+
+
+
+
+/*********************************************************************
+ * FUNCTION DEFINITIONS
+ ********************************************************************/
+#if 0//defined(CONFIG_ARCH_ZX297520V2EVB)
+static int __init zx_pm_init(void)
+{
+ return 0;
+}
+#else
+static int __init zx_pm_init(void)
+{
+ pr_info("[SLP] Power/PM_INIT \n");
+
+#ifdef CONFIG_PM_SLEEP
+	/* 1. suspend driver */
+ zx_suspend_init();
+#endif
+
+#if 0 //added when debug
+	/* 2. power domain initialisation */
+ zx_pwr_init();
+#endif
+
+	/* 3. context memory initialisation */
+ zx_pm_context_init();
+
+	/* 4. SCU/L2 low-power setting */
+	pm_init_l2_and_scu();	/* the 20V5 A53 does not need L2 setup because the L2 is internal to the ARM core */
+
+
+	/* 5. ACS */
+ //pm_init_acs();
+
+	/* 6. init the RAM log for PM */
+ pm_sram_init();
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ pm_debug_init();
+#endif
+
+#ifdef CONFIG_CPU_IDLE
+	/* 7. idle driver initialisation */
+ zx_cpuidle_init();
+#ifdef CONFIG_ZX_RAM_CONSOLE
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ pm_uart_createtimer();
+#endif
+#endif
+#endif
+
+ idle_notifier_register(&zx_pm_pre__idle_nb);
+
+ return 0;
+}
+#endif
+
+late_initcall(zx_pm_init);
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm.h
new file mode 100644
index 0000000..9ff996d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-pm.h
@@ -0,0 +1,103 @@
+/*
+ * zx-pm.h - power management interface.
+ *
+ * Written by zxp.
+ *
+ */
+
+#ifndef _ZX_PM_H
+#define _ZX_PM_H
+
+#include <asm/io.h>
+#include <asm/mach/map.h>
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/cpuidle.h>
+
+#include <mach/board.h>
+#include <mach/iomap.h>
+#include <mach/pcu.h>
+#include <mach/gpio.h>
+
+#if defined(CONFIG_ARCH_ZX297520V2)
+#include "zx29-pm.h"
+#elif defined(CONFIG_ARCH_ZX297510)
+#include "zx297510-pm.h"
+#elif defined(CONFIG_ARCH_ZX297520V3)
+#include "zx29-pm.h"
+#else
+#error wrong architecture for the spm device
+#endif
+#include "zx-sleep.h"
+#include "zx-pm-context.h"
+#include "zx-pm-helpers.h"
+#include "zx-cpuidle.h"
+#include "zx-cpufreq.h"
+//extern int request_ddr_freq(zx29_ddr_freq ddr_freq);
+
+#ifdef CONFIG_ZX_PM_DEBUG
+
+#define zte_pm_attr(_name) \
+static struct kobj_attribute _name##_attr = \
+{ \
+ .attr = \
+ { \
+ .name = __stringify(_name), \
+ .mode = 0644, \
+ }, \
+ .show = _name##_show, \
+ .store = _name##_store, \
+}
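+
+/*
+ * Usage sketch (illustrative only): zte_pm_attr(name) expects name##_show and
+ * name##_store to be defined and produces a struct kobj_attribute called
+ * name##_attr whose .attr member can be placed in an attribute group, e.g.
+ *
+ *	static ssize_t foo_show(struct kobject *k, struct kobj_attribute *a,
+ *				char *buf)
+ *	{ return sprintf(buf, "0\n"); }
+ *	static ssize_t foo_store(struct kobject *k, struct kobj_attribute *a,
+ *				 const char *buf, size_t n)
+ *	{ return n; }
+ *	zte_pm_attr(foo);	// then add &foo_attr.attr to the group
+ *
+ * "foo" here is hypothetical; the real attributes are defined in zx-pm.c.
+ */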
+
+/* /sys/zte_pm */
+extern struct kobject *pm_debug_kobj;
+#endif
+#if 1
+
+#define pm_ram_log(fmt, args...) \
+{ \
+ pm_printk("[SLP] " fmt, ##args); \
+}
+#else
+#define pm_ram_log(fmt, args...) \
+{ \
+ printk(KERN_INFO "[SLP] " fmt, ##args); \
+ pm_printk("[SLP] " fmt, ##args); \
+}
+
+#endif
+
+/* Weak implementations for optional arch specific functions */
+#ifdef CONFIG_SYSTEM_RECOVERY
+#ifndef USE_CPPS_KO
+void __weak psm_ModemDevSleep(void)
+{
+}
+bool __weak psm_ModemSleepCheck(void)
+{
+ return 0;
+}
+
+u32 __weak psm_ModemSleepTimeGet(void)
+{
+ return 0;
+}
+
+void __weak psm_TimeCompensate(u32 sleepTime)
+{
+}
+void __weak psm_GetModemSleepFlagStatus(void)
+{
+}
+#endif
+#else
+extern void psm_ModemDevSleep(void);
+extern void psm_GetModemSleepFlagStatus(void);
+extern bool psm_ModemSleepCheck(void);
+extern unsigned int psm_ModemSleepTimeGet(void);
+extern void psm_TimeCompensate(unsigned int);
+#endif
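+
+/*
+ * When the recovery image is built (CONFIG_SYSTEM_RECOVERY without
+ * USE_CPPS_KO), the modem PSM hooks above are provided as weak no-op stubs so
+ * that code including this header still links even though the modem side is
+ * absent; in the normal build the real implementations are used through the
+ * extern declarations instead.
+ */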
+
+
+#endif /*_ZX_PM_H*/
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-sleep.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-sleep.c
new file mode 100644
index 0000000..fb85f77
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-sleep.c
@@ -0,0 +1,350 @@
+/*
+ * ZTE cpu sleep driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by zxp
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/suspend.h>
+#include <linux/cpufreq.h>
+
+#include <asm/suspend.h>
+
+#include "zx-pm.h"
+
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+#include <linux/hw_breakpoint_manage.h>
+#endif
+/* Used to return the sleep result: 0 means the sleep procedure completed
+ * normally, 1 means the core was reset and restored back.
+ */
+volatile int sleep_ret_flag[MAX_CPU_NUM] = {0};
+
+static pm_wake_reason_t pm_wake_reason;
+
+extern struct zx_pm_main_table zx_pm_main_table;
+
+//extern int request_ddr_freq(zx29_ddr_freq ddr_freq);
+
+/**
+ * suspend platform devices, for example GPIO, UART and so on.
+ *
+ */
+int zx_board_suspend(void)
+{
+ //gpio
+
+// debug_uart_suspend();
+
+
+ return 0;
+}
+
+/**
+ * resume the debug UART, GPIO and other devices outside the A9.
+ *
+ */
+int zx_board_resume(void)
+{
+ //gpio
+// debug_uart_resume();
+
+ //uart
+
+ return 0;
+}
+
+/**
+ * close clocks and power domains that the PCU does not control.
+ *
+ */
+int zx_dpm_suspend(void)
+{
+
+ return 0;
+}
+
+/**
+ * resume the debug UART, GPIO and other devices outside the A9.
+ *
+ */
+int zx_dpm_resume(void)
+{
+
+ return 0;
+}
+
+/**
+ * set cpu power state before do wfi.
+ *
+ * cpu_context.power_state should filled in before call this function.
+ */
+static void set_power_state(void)
+{
+#ifdef CONFIG_ARCH_ZX297520V2
+ set_status_a9_scu(zx_pm_main_table.cur_cpu,
+ zx_pm_main_table.cpu_context[zx_pm_main_table.cur_cpu]->power_state,
+ zx_pm_main_table.scu_address);
+#else
+#if 0
+ set_status_a53_scu(zx_pm_main_table.cur_cpu,
+ zx_pm_main_table.cpu_context[zx_pm_main_table.cur_cpu]->power_state,
+ zx_pm_main_table.scu_address);
+#endif
+#endif
+}
+
+#ifdef CONFIG_ZX_PM_DEBUG_TIME
+unsigned int suspend_start_time;
+unsigned int suspend_enter_time;
+unsigned int suspend_exit_time;
+unsigned int suspend_finish_time;
+unsigned int suspend_cur_time;
+unsigned int suspend_save_start_time = 0;
+unsigned int suspend_save_end_time = 0;
+unsigned int suspend_restore_start_time = 0;
+unsigned int suspend_restore_end_time = 0;
+unsigned int suspendabort_restore_start_time = 0;
+unsigned int suspendabort_restore_end_time = 0;
+unsigned int suspendabort_cnt = 0;
+unsigned int suspendsuscess_cnt = 0;
+unsigned int zx_get_cur_time(void)
+{
+ return (unsigned int)read_persistent_us();
+}
+#endif
+static void zx_sleep_before_wfi(void)
+{
+// pm_switch_clk_to_26m();
+}
+
+static void zx_sleep_after_wfi(void)
+{
+// pm_switch_clk_from_26m();
+}
+
+
+/**
+ * once the return pointer has been stored by cpu_suspend(),
+ * the cpu powers off here.
+ */
+static int zx_finish_suspend(unsigned long param)
+{
+ /* deal l1&l2 cache */
+ #ifdef CONFIG_ARCH_ZX297520V2 /* 2975V2 A9*/
+ disable_clean_inv_dcache_v7_l1();
+ clean_disable_pl310(zx_pm_main_table.l2_address);
+ #else/* 2975V5 A53/*
+ /*disable&clean&inv interface*/
+ disable_flush_dcache_L1_flush_cache_L2();
+ #endif
+
+#ifdef CONFIG_ZX_PM_DEBUG_TIME
+ pm_write_reg(SLEEP_TIME_ADDR, zx_get_cur_time());
+#endif
+
+ exit_coherency();
+
+ set_power_state();
+
+ zx_sleep_before_wfi();
+
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ /**/
+ pm_write_reg(AP_SUSPEND_STATUS_FLAG,0x5);
+#endif
+
+ zx_jump_addr(zx_pm_main_table.wakeup_vaddr+WAKEUP_CODE_LENGTH);
+
+ zx_sleep_after_wfi();
+
+	/* if the sleep exits abnormally, execution falls through to here */
+ return 1;
+}
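+
+/*
+ * Note on the cpu_suspend() convention as used here: cpu_suspend(arg, fn)
+ * saves the cpu context and then calls fn(arg), which is expected to power
+ * the core down and never return. If fn does return (the abnormal exit
+ * above), cpu_suspend() returns non-zero; if the core really powers off and
+ * later resumes through the saved context, cpu_suspend() returns zero. That
+ * is why zx_enter_sleep() treats a zero return as "restored after power-off"
+ * and only then runs zx_pm_restore_context(). For example:
+ *
+ *	if (!cpu_suspend(0, zx_finish_suspend))
+ *		zx_pm_restore_context();
+ */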
+
+/**
+ * returns true when the cpu sleep procedure was aborted.
+ */
+static inline bool is_cpu_sleep_abort(void)
+{
+ unsigned int cpu_id;
+ struct zx_cpu_context *context;
+
+ cpu_id = read_cpuid();
+ context = zx_pm_main_table.cpu_context[cpu_id];
+
+ if (context->power_state != CPU_POWER_MODE_RUN)
+ return true;
+
+ return false;
+}
+
+/**
+ * The cpu sleep stage may be aborted by a pending interrupt or event.
+ * This function handles that situation.
+ *
+ * If the state is still CPU_POWER_MODE_RUN, we did not come back through the
+ * restore path, so the sleep stage was aborted.
+ */
+static void cpu_check_sleep_abort(void)
+{
+ if (is_cpu_sleep_abort())
+ {
+		/* re-enable the L1 cache (and the L2 on the A9) so that
+		 * the following code can run normally */
+#ifdef CONFIG_ARCH_ZX297520V2 /* 2975V2 A9*/
+ set_enabled_pl310(true, zx_pm_main_table.l2_address);
+#endif
+ enable_cache();
+#ifdef CONFIG_ZX_PM_DEBUG_TIME
+ suspendabort_cnt++;
+ suspendabort_restore_start_time = zx_get_cur_time();
+#endif
+ zx_pm_restore_abort_context();
+
+#ifdef CONFIG_ZX_PM_DEBUG_TIME
+ suspendabort_restore_end_time = zx_get_cur_time();
+#endif
+ set_power_state();
+ pm_set_wakeup_reason(WR_WAKE_SRC_ABNORMAL);
+ }
+ else
+ {
+ }
+}
+
+
+/**
+ * cpu enter&resume interface code.
+ *
+ * sleep_type -- CPU_SLEEP_TYPE_LP1/CPU_SLEEP_TYPE_IDLE_LP2
+ */
+u32 ap_susnpend_for_sleep_cnt =0;
+u32 ap_suspeend_for_poweroff_cnt =0;
+#ifdef CONFIG_PM_SLEEP
+void zx_enter_sleep(cpu_sleep_type_t sleep_type)
+{
+#ifdef CONFIG_ZX_PM_DEBUG_TIME
+ pm_write_reg(SUSPEND_START_TIME_ADDR, zx_get_cur_time());
+#endif
+
+ zx_set_context_level(sleep_type);
+
+ /* set&enable PCU for interrupt/clock/powerdomain/pll/iram */
+ zx_set_pcu();
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff04);
+#endif
+ if(CPU_SLEEP_TYPE_LP3 == sleep_type)
+ {
+#ifdef CONFIG_ZX_PM_DEBUG
+ ap_susnpend_for_sleep_cnt++;
+#endif
+ zx_wdt_handle_before_psm();
+ do_wfi();
+
+ zx_wdt_handle_after_psm();
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff05);
+#endif
+
+ pm_get_wake_cause();
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_SUSPEND_FOR_SLEEP_CNT,ap_susnpend_for_sleep_cnt);
+#endif
+ }
+ else
+ {
+/*=================================================================
+ *=== the following code is for dormant or shutdown
+ *=================================================================
+ */
+ zx_wdt_handle_before_psm();
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ hw_breakpoint_context_save();
+#endif
+ zx_pm_save_context();
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff05);
+#endif
+ if(!cpu_suspend(0, zx_finish_suspend))
+ {
+ zx_sleep_after_wfi();
+ zx_pm_restore_context();
+#ifdef CONFIG_ZX_PM_DEBUG
+ ap_suspeend_for_poweroff_cnt++;
+ pm_write_reg(AP_SUSPEND_FOR_POWEROFF_CNT,ap_suspeend_for_poweroff_cnt);
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff06);
+#endif
+ }
+
+ zx_wdt_handle_after_psm();
+
+ /*exit from sleep*/
+ join_coherency();
+
+ cpu_check_sleep_abort();
+/*=================================================================
+ *=== ending with dormant or shutdown
+ *=================================================================
+ */
+
+ /* get cause of exiting sleep */
+ pm_get_wake_cause();
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ hw_breakpoint_restore_context();
+#endif //CONFIG_HAVE_HW_BREAKPOINT
+ }
+#ifdef CONFIG_ZX_PM_DEBUG
+ pm_write_reg(AP_IDLE_SLEEP_STATUS_FLAG,0xff07);
+#endif
+
+ zx_clear_pcu();
+
+#ifdef CONFIG_ZX_PM_DEBUG_TIME
+ suspend_finish_time = zx_get_cur_time();
+ suspend_start_time =pm_read_reg(SUSPEND_START_TIME_ADDR);
+// pr_info("[SLP] suspend time: start:%d enter:%d total:%d\n",suspend_start_time, suspend_enter_time, suspend_start_time-suspend_enter_time);
+// pr_info("[SLP] suspend time: exit:%d finish:%d total:%d \n",suspend_exit_time, suspend_finish_time, suspend_exit_time-suspend_finish_time);
+ pm_ram_log("####suspend start = [%u],end = [%u],time=[%u]\n",suspend_start_time,suspend_finish_time,(suspend_finish_time-suspend_start_time));
+ pm_ram_log("####sleep time = [%u],sucess_cn=[%u],sucess_abort=[%u]\n",(suspend_exit_time-suspend_enter_time),suspendsuscess_cnt,suspendabort_cnt);
+ pm_ram_log("####save time = [%u],restore time = [%u]\n",(pm_read_reg(SUSPEND_SAVE_TIME_ADDR)),(pm_read_reg(SUSPEND_RESTORE_TIME_ADDR)));
+#endif
+
+
+}
+#else
+void zx_enter_sleep(cpu_sleep_type_t sleep_type){}
+#endif
+
+/**
+ * get the wakeup reason.
+ */
+pm_wake_reason_t pm_get_wakeup_reason(void)
+{
+ return pm_wake_reason;
+}
+
+/**
+ * set the wakeup reason.
+ */
+void pm_set_wakeup_reason(pm_wake_reason_t reason)
+{
+ pm_wake_reason = reason;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-sleep.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-sleep.h
new file mode 100644
index 0000000..4a1b76f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx-sleep.h
@@ -0,0 +1,53 @@
+/*
+ * zx-sleep.h - cpu sleep and wakeup interface.
+ *
+ * Written by zxp.
+ *
+ */
+
+#ifndef _ZX_SLEEP_H
+#define _ZX_SLEEP_H
+
+typedef enum
+{
+ CPU_SLEEP_TYPE_NULL = 0,
+ CPU_SLEEP_TYPE_LP1,
+ CPU_SLEEP_TYPE_IDLE_LP2,
+ CPU_SLEEP_TYPE_HOTPLUG_LP2,
+ CPU_SLEEP_TYPE_LP3, /* maybe only for debug */
+} cpu_sleep_type_t;
+
+typedef enum
+{
+ CPU_POWER_MODE_RUN = 0,
+ CPU_POWER_MODE_STANDBY = 1,
+ CPU_POWER_MODE_DORMANT = 2,
+ CPU_POWER_MODE_SHUTDOWN = 3,
+} cpu_power_mode_t;
+
+typedef enum {
+ WR_NONE = 0,
+ WR_WAKE_SRC_NORMAL,
+ WR_WAKE_SRC_UNKNOWN,
+ WR_WAKE_SRC_ABNORMAL,
+ WR_SW_ABORT,
+} pm_wake_reason_t;
+
+
+#define do_wfi() \
+do { \
+ __asm__ __volatile__("isb" : : : "memory"); \
+ __asm__ __volatile__("dsb" : : : "memory"); \
+ __asm__ __volatile__("wfi" : : : "memory"); \
+} while (0)
+
+extern int zx_board_suspend(void);
+extern int zx_board_resume(void);
+extern int zx_dpm_suspend(void);
+extern int zx_dpm_resume(void);
+extern void zx_enter_sleep(cpu_sleep_type_t sleep_type);
+extern void cpu_start_restore(void);
+extern pm_wake_reason_t pm_get_wakeup_reason(void);
+extern void pm_set_wakeup_reason(pm_wake_reason_t reason);
+
+#endif /*_ZX_SLEEP_H*/
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpufreq.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpufreq.c
new file mode 100644
index 0000000..27f947f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpufreq.c
@@ -0,0 +1,890 @@
+/*
+ * ZTE zx297510 dvfs driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by zxp
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/suspend.h>
+
+#include <linux/soc/zte/rpm/rpmsg.h>
+//#include "mach/clock.h"
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include "zx-pm.h"
+#define ZX_CPUFREQ_IOC_MAGIC 'W'
+
+/* ioctl cmds used by the device */
+#define ZX_CPUFREQ_SET_FREQ _IOW(ZX_CPUFREQ_IOC_MAGIC, 1, char *)
+#define ZX_CPUFREQ_GET_FREQ _IOW(ZX_CPUFREQ_IOC_MAGIC, 2, char *)
+
+#define ZX_CPUFREQ_DEV "/dev/zx_cpufreq"
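+
+/*
+ * Userspace usage sketch (illustrative only; the payload format expected by
+ * the driver's ioctl handler is not shown in this file, so the buffer
+ * contents below are an assumption):
+ *
+ *	int fd = open(ZX_CPUFREQ_DEV, O_RDWR);
+ *	if (fd >= 0) {
+ *		char buf[16] = "624000";		// hypothetical argument
+ *		ioctl(fd, ZX_CPUFREQ_SET_FREQ, buf);
+ *		close(fd);
+ *	}
+ */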
+
+#define PM_FREQ_TRACE 1
+#if PM_FREQ_TRACE
+
+#define FREQ_CHANGE_COUNT 20
+
+typedef struct
+{
+ volatile unsigned int old_index;
+ volatile unsigned int new_idex;
+ volatile unsigned int time;
+}freq_change_view_trace_t;
+
+static freq_change_view_trace_t freq_change_view[FREQ_CHANGE_COUNT] ;
+static unsigned int freq_change_index = 0;
+
+void trace_freq_change(unsigned int old_index,unsigned int new_index)
+{
+ freq_change_view[freq_change_index].old_index = old_index;
+ freq_change_view[freq_change_index].new_idex = new_index;
+ freq_change_view[freq_change_index].time = ktime_to_us(ktime_get());
+ freq_change_index++;
+ if(freq_change_index == FREQ_CHANGE_COUNT)
+ {
+ freq_change_index = 0;
+ }
+}
+#else
+void trace_freq_change(unsigned int old_index,unsigned int new_index){}
+#endif
+
+unsigned int freq_change_enabled_by_startup = 0;
+static struct delayed_work pm_freq_work;
+#define PM_FREQ_DELAY msecs_to_jiffies(25000)
+
+/* for count change time by M0 */
+#define DEBUG_CPUFREQ_TIME 1
+
+#ifdef CONFIG_DDR_FREQ
+#ifdef CONFIG_ARCH_ZX297520V2
+#define get_cur_ddr() pm_read_reg_16(AXI_CURRENT_FREQ)
+#define set_target_ddr(f) pm_write_reg_16(AXI_AP2M0_TARGET, f)
+#define set_ddr_req() pm_write_reg_16(AXI_AP2M0_FLAG, 1)
+#define clr_ddr_ack() pm_write_reg_16(AXI_M02AP_ACK, 0)
+
+#define wait_ddr_ack() while(!pm_read_reg_16(AXI_M02AP_ACK))
+#else
+static ddr_freq_regs *ddr_regs = (ddr_freq_regs *)IRAM_CHANGE_DDR_BASE;
+#define get_cur_ddr() (ddr_regs->cur_freq)
+#define set_target_ddr(f) (ddr_regs->ap_exp_freq = f)
+#define set_ddr_req() (ddr_regs->ap_req_flag = 1)
+
+#endif
+#endif
+
+//#undef CONFIG_AXI_FREQ
+#ifdef CONFIG_AXI_FREQ
+static DEFINE_MUTEX(axifreq_lock);
+
+static axi_freq_regs *axi_regs = (axi_freq_regs *)IRAM_CHANGE_AXI_BASE;
+static vol_dvs_regs *vol_regs = (vol_dvs_regs *)IRAM_CHANGE_DVS_BASE;
+
+#define get_cur_axi() (axi_regs->cur_freq)
+#define set_target_axi_sw(f) (axi_regs->ap_exp_freq = f)
+#define set_axi_req() (axi_regs->ap_req_flag = 1)
+
+#define get_target_axi_hw(addr) (pm_read_reg(addr)&(0x7))
+
+#if 1
+#define DDR_FREQ_156M_HW (0x4e)
+#define DDR_FREQ_208M_HW (0x68)
+#define DDR_FREQ_312M_HW (0x9c)
+#define DDR_FREQ_400M_HW (0xc8)
+
+#define set_ddr_freq_hw(addr,f) (pm_read_reg(addr)&(~0xff)|f)
+#define set_ddr_freq_sync(addr,f) (pm_read_reg(addr)&(~0x1)|f)
+#endif
+
+#define get_cur_vol() (vol_regs->cur_vol)
+#define set_target_vol(f) (vol_regs->ap_exp_vol = f)
+#define set_vol_req() (vol_regs->ap_req_flag = 1)
+
+#if 0
+#define WAIT_AXI_ACK_TIMEOUT (jiffies + msecs_to_jiffies(2)) /* wait 2 ms, we count max 200us also */
+#define wait_axi_ack(timeout) while(!pm_read_reg_16(AXI_M02AP_ACK) && time_before(jiffies, timeout))
+#else
+#define WAIT_AXI_ACK_TIMEOUT	(200)	/* timeout in us; changes have been observed to take about 120 us */
+static void wait_axi_ack(unsigned timeout)
+{
+ ktime_t begin_time = ktime_get();
+
+ while(((vol_regs->ap_req_flag) ||(axi_regs->ap_req_flag) )&& (unsigned)ktime_to_us(ktime_sub(ktime_get(), begin_time))<timeout);
+}
+#endif
+
+#ifdef CONFIG_ZX_PM_DEBUG
+static unsigned axi_freq_table[]=
+{
+#ifdef CONFIG_ARCH_ZX297520V2
+ 26000,
+ 39000,
+ 52000,
+ 78000,
+ 104000,
+ 122880,
+ 156000,
+#else
+ 6500,
+ 26000,
+ 39000,
+ 52000,
+ 78000,
+ 104000,
+ 124800,
+ 156000,
+#endif
+};
+
+static unsigned get_axi_freq(void)
+{
+#if 1
+ return 0;
+#else
+ void __iomem *regaddr = NULL;
+ unsigned int data = 0;
+ unsigned int axi_rate = 0;
+
+ regaddr = (ZX29_TOP_VA+0x54);
+ data = ioread32(regaddr);
+ data &= 0x3;
+ switch (data) {
+ case 0: axi_rate = 104000000;break;
+ case 1: axi_rate = 26000000;break;
+ case 2: axi_rate = 122880000;break;
+ case 3: axi_rate = 156000000;break;
+ default:
+ break;
+ }
+
+ regaddr = (ZX29_TOP_VA+0x7c);
+ data = ioread32(regaddr);
+ data &= 0x3;
+ return axi_rate/(0x1<<data);
+#endif
+}
+static void debug_axi_clk_info(void)
+{
+ pr_info("current_axi_freq:%d[KHz] request_axi_freq:%d[KHz]\n", axi_freq_table[get_cur_axi()] ,axi_freq_table[axi_regs->ap_exp_freq]);
+}
+#endif
+
+static int send_msg_to_m0(void)
+{
+	unsigned int ap_m0_buf = AXI_VOL_CHANGE_ICP_BUF;	/* the icp interface needs a buffer */
+ T_ZDrvRpMsg_Msg Icp_Msg;
+ int ret;
+
+ Icp_Msg.actorID = M0_ID;
+ Icp_Msg.chID = ICP_CHANNEL_PSM;
+	Icp_Msg.flag = RPMSG_WRITE_INT;	/* means: send an icp interrupt */
+ Icp_Msg.buf = &ap_m0_buf;
+ Icp_Msg.len = 0x4;
+
+ ret = zDrvRpMsg_Write(&Icp_Msg);
+ if(Icp_Msg.len == ret)
+ return 0;
+ else
+ return ret;
+}
+
+static int axi_freq_change_allowed(void)
+{
+ if(pm_get_mask_info()&PM_NO_AXI_FREQ)
+ return false;
+
+ return true;
+}
+
+/**
+ * request to change vol.
+ *
+ * vol_dvs: input vol enum
+ */
+int request_vol(zx29_vol vol_dvs)
+{
+ unsigned int current_vol = get_cur_vol();
+
+ set_target_vol(vol_dvs);
+#if DEBUG_CPUFREQ_TIME
+ pm_printk("[CPUFREQ] current_vol(%d) request_vol(%d) \n",(u32)current_vol,(u32)vol_dvs);
+#endif
+
+ if(vol_dvs != current_vol)
+ {
+		/* request the voltage change */
+ set_vol_req();
+ }
+
+ return 0;
+}
+
+/**
+ * input axi freq.
+ */
+static zx29_vol request_vol_by_axi(zx29_axi_freq axi_freq)
+{
+ if(axi_freq == AXI_FREQ_156M)
+ return VOL_VO_900;
+ else
+ return VOL_VO_850;
+}
+
+/**
+ * set the voltage.
+ *
+ * the change itself is carried out by the M0.
+ */
+static int set_vol_by_axi(zx29_axi_freq axi_freq)
+{
+ zx29_vol vol_dvs= request_vol_by_axi(axi_freq);
+
+ /* set new vol*/
+ return request_vol(vol_dvs);
+}
+
+
+/**
+ * request to change axi freq.
+ *
+ * axi_freq: input freq enum
+ */
+int request_axi_freq(zx29_axi_freq axi_freq)
+{
+ unsigned int current_axi_freq = get_cur_axi();
+ unsigned int tmp;
+ int ret = 0;
+
+#if DEBUG_CPUFREQ_TIME
+ ktime_t begin_time, end_time;
+ s64 total_time;
+#endif
+
+ if(!axi_freq_change_allowed())
+ return 0;
+
+#ifdef SET_AXI_BY_HW
+ tmp = (pm_read_reg(PS_MATRIX_AXI_SEL)&(~0x7))|axi_freq;
+ pm_write_reg(PS_MATRIX_AXI_SEL,tmp);
+ pm_printk("[CPUFREQ] current_axi_freq(%d) request_axi_freq(%d) after_request_axi_freq(%d) after_request_vol(%d)\n",(u32)current_axi_freq,(u32)axi_freq,get_cur_axi(),get_cur_vol());
+#else
+ set_target_axi_sw(axi_freq);
+
+ if(axi_freq != current_axi_freq)
+ {
+ /* request freq */
+ set_axi_req();
+
+// set_vol_by_axi(axi_freq);//set vol
+
+ ret = send_msg_to_m0();
+#if DEBUG_CPUFREQ_TIME
+ begin_time = ktime_get();
+#endif
+ if(!ret)
+ {
+			/* wait for the axi frequency change to complete; bounded by a timeout for safety */
+ wait_axi_ack(WAIT_AXI_ACK_TIMEOUT);
+ }
+ else
+ {
+ pm_printk("[CPUFREQ] request_axi_freq(%d) failed: (%d) \n",(u32)axi_freq, ret);
+ }
+
+#if DEBUG_CPUFREQ_TIME
+ end_time = ktime_get();
+ total_time = ktime_to_us(ktime_sub(end_time, begin_time));
+ pm_printk("[CPUFREQ] total axi time: %d us current_axi_freq(%d) request_axi_freq(%d) after_request_axi_freq(%d) after_request_vol(%d)\n",(u32)total_time,(u32)current_axi_freq,(u32)axi_freq,get_cur_axi(),get_cur_vol());
+ }
+ else
+ {
+ pm_printk("[CPUFREQ] current_axi_freq(%d) request_axi_freq(%d) \n",(u32)current_axi_freq,(u32)axi_freq);
+#endif
+ }
+#endif
+
+ return 0;
+}
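+
+/*
+ * Handshake summary for the AXI frequency request above (restating the code,
+ * no new behaviour):
+ *
+ *	axi_regs->ap_exp_freq = target;	// write the target into shared IRAM
+ *	axi_regs->ap_req_flag = 1;	// raise the request flag
+ *	send_msg_to_m0();		// ICP interrupt so the M0 services it
+ *	wait_axi_ack(200);		// poll until the M0 clears the flag,
+ *					// bounded by a ~200 us timeout
+ */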
+
+
+/**
+ * input cpu freq [KHz].
+ */
+static zx29_axi_freq request_axi_freq_by_cpu(unsigned int freq)
+{
+ if(freq >= 600*1000)
+ return AXI_FREQ_156M;
+ else
+ return AXI_FREQ_78M;
+}
+
+/**
+ * set the axi frequency.
+ *
+ * the change itself is carried out by the M0.
+ */
+static int set_axi_frequency_by_cpu(unsigned int freq)
+{
+ zx29_axi_freq axi_freq = request_axi_freq_by_cpu(freq);
+
+ /* set new freq */
+ return request_axi_freq(axi_freq);
+}
+#endif
+
+static int zx29_get_frequency(void);
+
+int zx29_set_frequency(unsigned int old_index,
+ unsigned int new_index);
+static int zx29_cpufreq_init(struct zx_dvfs_info *info);
+
+#define CPUFREQ_LEVEL_END L3
+
+static int max_support_idx = L0;
+#ifdef CONFIG_ARCH_ZX297520V2
+static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
+#else
+static int min_support_idx = (CPUFREQ_LEVEL_END - 2);
+#endif
+static int cpufreq_driver_inited = 0;
+static struct clk *cpu_clk;
+
+/* in fact, zx297510 cannot adjust the core voltage; the table is reserved for future use */
+static unsigned int zx29_volt_table[CPUFREQ_LEVEL_END] = {
+ 1250000, 1150000, 1050000, /*975000, 950000,*/
+};
+
+static struct cpufreq_frequency_table zx29_freq_table[] = {
+#ifdef CONFIG_ARCH_ZX297520V2
+ {L0, 624*1000},
+ {L1, 312*1000},
+ {L2, 208*1000},
+ {0, CPUFREQ_TABLE_END},
+#else
+ {L0, 624*1000},
+ {L1, 312*1000},
+ //{L2, 156*1000},
+ {0, CPUFREQ_TABLE_END},
+#endif
+};
+
+#ifdef CONFIG_ZX_PM_DEBUG
+
+/* for debug freq */
+void debug_cpu_freq_info(void)
+{
+ printk("[CPUFREQ] current_cpufreq(%d) ",zx29_get_frequency());
+ #ifdef CONFIG_AXI_FREQ
+ printk("request_axi_freq(%d) current_axi_freq(%d)\n",axi_regs->ap_exp_freq,get_cur_axi());
+ #endif
+}
+
+/* for debug clock state */
+static void debug_cpu_clk_info(void)
+{
+#if 0 //zxp
+ unsigned cpu_clock;
+ char * pll_used;
+ unsigned ufi_clk_div;
+
+ ufi_clk_div = 1 << (pm_read_reg(TOP_UFI_DIV_REG)&0x3);
+ pll_used = ufi_pll_str[pm_read_reg(TOP_UFI_SEL_REG)&3];
+ cpu_clock = clk_get_rate(cpu_clk) / 1000;
+
+ pr_info("cpu_clock[KHz] pll_used ufi_clk_div\n");
+ pr_info("%d %s %d\n", cpu_clock, pll_used, ufi_clk_div);
+#endif
+}
+
+static struct zx_dvfs_info test_dvfs_info;
+void cpufreq_test(unsigned int old_index, unsigned int new_index)
+{
+ zx29_cpufreq_init(&test_dvfs_info);
+
+ zx29_set_frequency(old_index, new_index);
+}
+
+#else
+static void debug_cpu_clk_info(void){}
+static void debug_axi_clk_info(void){}
+#endif
+
+
+static void pm_freq_func(struct work_struct *work)
+{
+ freq_change_enabled_by_startup = 1;
+}
+
+static int zx29_get_frequency(void)
+{
+#ifdef CONFIG_ARCH_ZX297520V2
+ int ret = L2;
+ unsigned int cpufreq;
+ cpufreq = pm_read_reg(AP_CORE_SEL_ADDR)&0x7;
+ switch(cpufreq)
+ {
+ case 0: //624M
+ ret = L0;
+ break;
+ case 3: //312M
+ ret = L1;
+ break;
+ case 4: //208M
+ ret = L2;
+ break;
+ default:
+ printk("[CPUFREQ] get freq fail cpufreq :0x%x\n",cpufreq);
+ break;
+ }
+ return ret;
+#else
+ int ret = L1;
+ unsigned int cpufreq;
+ cpufreq = pm_read_reg(AP_CORE_SEL_ADDR)&0x3;
+ switch(cpufreq)
+ {
+ case 1: //624M
+ ret = L0;
+ break;
+ case 2: //312M
+ ret = L1;
+ break;
+ case 3: //156M
+ ret = L2;
+ break;
+ default:
+ printk("[CPUFREQ] get freq fail cpufreq :0x%x\n",cpufreq);
+ break;
+ }
+ return ret;
+#endif
+}
+
+/**
+ * set freq according to index of freq_table.
+ *
+ */
+int zx29_set_frequency(unsigned int old_index,
+ unsigned int new_index)
+{
+ int ret = 0;
+
+ if(!freq_change_enabled_by_startup)
+ return -1;
+
+ if(old_index == new_index)
+ return ret;
+
+ ret = clk_set_rate(cpu_clk, zx29_freq_table[new_index].frequency * 1000);
+ if (ret)
+ pm_printk("[CPUFREQ] Failed to set rate %dkHz: ret = %d\n", zx29_freq_table[new_index].frequency, ret);
+
+ pm_printk("[CPUFREQ] set cpufreq:old index:%d new index:%d \n", old_index, new_index);
+// printk("[CPUFREQ] set cpufreq:old index:%d new index:%d current_axi_freq(%d)\n", old_index, new_index,get_cur_axi());
+ debug_cpu_clk_info();
+
+ trace_freq_change(old_index,new_index);
+
+#ifdef CONFIG_AXI_FREQ
+ mutex_lock(&axifreq_lock);
+ set_axi_frequency_by_cpu(zx29_freq_table[new_index].frequency);
+ mutex_unlock(&axifreq_lock);
+#endif
+
+ return ret;
+}
+
+
+/**
+ * Set the CPU frequency directly (freq is given in Hz).
+ *
+ */
+ unsigned int cpu_dfs_is_not_allowed =0; /* 1: while 3G UPA/DPA is active, the A53 core freq should always stay at 624M */
+ unsigned int cpufreq_level =0;
+ extern struct mutex cpufreq_lock;
+
+int zx_set_frequency(unsigned int freq)
+{
+ int ret = 0;
+ unsigned int new_index;
+
+ if(pm_get_mask_info()&PM_NO_CPU_FREQ)
+ return 0;
+
+
+ mutex_lock(&cpufreq_lock);
+
+ if(freq==624000000) {
+ new_index =L0;
+ cpu_dfs_is_not_allowed=1;
+ //cpufreq_level = zx29_get_frequency();
+ } else{
+ new_index =L1;
+ cpu_dfs_is_not_allowed=0;
+ }
+
+ ret = clk_set_rate(cpu_clk, zx29_freq_table[new_index].frequency * 1000);
+ if (ret)
+ pm_printk("[CPUFREQ] Failed to set rate %dkHz: ret = %d\n", zx29_freq_table[new_index].frequency, ret);
+
+ pm_printk("[CPUFREQ] zx_set_frequency:new index:%d \n", new_index);
+ debug_cpu_clk_info();
+
+ mutex_unlock(&cpufreq_lock);
+
+#ifdef CONFIG_AXI_FREQ
+ mutex_lock(&axifreq_lock);
+ set_axi_frequency_by_cpu(zx29_freq_table[new_index].frequency);
+ mutex_unlock(&axifreq_lock);
+#endif
+
+ return ret;
+}
+EXPORT_SYMBOL(zx_set_frequency);
+
+static int zx29_cpufreq_init(struct zx_dvfs_info *info)
+{
+ if(cpufreq_driver_inited)
+ return 0;
+
+ cpu_clk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(cpu_clk))
+ {
+ pr_info("[CPUFREQ] get cpu_clk error \n");
+ return PTR_ERR(cpu_clk);
+ }
+#ifdef CONFIG_ARCH_ZX297520V2
+ info->freq_cur_idx = L2;
+ info->pll_safe_idx = L2;
+#else
+ info->freq_cur_idx = L1;
+ info->pll_safe_idx = L1;
+#endif
+ info->max_support_idx = max_support_idx;
+ info->min_support_idx = min_support_idx;
+ info->cpu_clk = cpu_clk;
+ info->volt_table = zx29_volt_table;
+ info->freq_table = zx29_freq_table;
+ info->set_freq = zx29_set_frequency;
+
+ cpufreq_driver_inited = 1;
+
+ INIT_DELAYED_WORK_DEFERRABLE(&pm_freq_work, pm_freq_func);
+ schedule_delayed_work(&pm_freq_work, PM_FREQ_DELAY);
+ pr_info("[CPUFREQ] zx29_cpufreq_init ok \n");
+ return 0;
+}
+
+static int __init zx29_freq_register(void)
+{
+ zx29xx_cpufreq_init = zx29_cpufreq_init;
+
+ return 0;
+}
+device_initcall(zx29_freq_register);
+
+#ifdef CONFIG_AXI_FREQ
+/**
+ * zx_axifreq_pm_notifier - adjust the AXI frequency across suspend/resume
+ *
+ * @notifier
+ * @pm_event
+ * @v
+ *
+ */
+
+static int zx_axifreq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *v)
+{
+ mutex_lock(&axifreq_lock);
+
+ switch (pm_event)
+ {
+ case PM_SUSPEND_PREPARE:
+ request_axi_freq(AXI_FREQ_78M);
+ break;
+
+ case PM_POST_SUSPEND:
+ request_axi_freq(AXI_FREQ_156M);
+ break;
+ }
+
+ mutex_unlock(&axifreq_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block zx_axifreq_nb =
+{
+ .notifier_call = zx_axifreq_pm_notifier,
+};
+
+static int __init zx29_axifreq_init(void)
+{
+
+ /* pm notify */
+ register_pm_notifier(&zx_axifreq_nb);
+// request_vol(VOL_VO_900);
+ request_axi_freq(AXI_FREQ_156M);
+
+ return 0;
+}
+
+//late_initcall(zx29_axifreq_init);
+#endif
+
+/*=============================================================================
+ *======== zx29 DDR freq ======================================================
+ * flow: ap/phy request --> m0 notify --> jump to iram --> wait for completion
+ *       --> jump back to ddr
+ *=============================================================================
+ */
+#ifdef CONFIG_DDR_FREQ
+static DEFINE_MUTEX(ddrfreq_lock);
+static int ddr_freq_change_allowed(void)
+{
+ if(pm_get_mask_info()&PM_NO_DDR_FREQ)
+ return false;
+
+ return true;
+}
+
+static int send_msg_to_ps(void)
+{
+ unsigned int ap_m0_buf = AXI_VOL_CHANGE_ICP_BUF; /* the icp interface needs a buffer */
+ T_ZDrvRpMsg_Msg Icp_Msg;
+ int ret;
+ Icp_Msg.actorID = PS_ID;
+ Icp_Msg.chID = ICP_CHANNEL_PSM;
+ Icp_Msg.flag = RPMSG_WRITE_INT; /* 1 means: send an icp interrupt */
+ Icp_Msg.buf = &ap_m0_buf;
+ Icp_Msg.len = 0x4;
+ ret = zDrvRpMsg_Write(&Icp_Msg);
+ if(Icp_Msg.len == ret)
+ return 0;
+ else
+ return ret;
+}
+
+int request_ddr_freq_hw(unsigned int ddr_freq)
+{
+ if(!ddr_freq_change_allowed())
+ return 0;
+ pm_write_reg(AP_DDR_FFC_SEL_SYNC,0x0);
+ pm_write_reg(AP_DDR_FFC_SEL,ddr_freq);
+ pm_write_reg(AP_DDR_FFC_SEL_SYNC,0x1);
+ return 0;
+}
+
+int request_ddr_freq(zx29_ddr_freq ddr_freq)
+{
+ int ret = 0;
+ unsigned current_ddr_freq = get_cur_ddr();
+ if(!ddr_freq_change_allowed())
+ return 0;
+
+ if(ddr_freq == current_ddr_freq)
+ return 0;
+
+#ifdef SET_DDR_BY_HW
+ //set_ddr_freq_hw(AP_DDR_FFC_SEL, ddr_exp_freq);
+ set_ddr_freq_sync(AP_DDR_FFC_SEL_SYNC,0x1);
+#else
+ set_target_ddr(ddr_freq);
+ ret = send_msg_to_ps();
+ if(!ret)
+ {
+ printk("[DDRFREQ] ddr_freq [%d]\n",get_cur_ddr());
+ }
+ else
+ {
+ printk("[DDRFREQ] request_ddr_freq failed\n");
+ }
+#endif
+#if 0
+ unsigned current_ddr_freq = get_cur_ddr();
+ int ret = 0;
+
+#if DEBUG_CPUFREQ_TIME
+ ktime_t begin_time, end_time;
+ s64 total_time;
+#endif
+
+ if(!ddr_freq_change_allowed())
+ return 0;
+
+ set_target_ddr(ddr_freq);
+
+ if(ddr_freq != current_ddr_freq)
+ {
+ /* request freq */
+ clr_ddr_ack();
+ set_ddr_req();
+
+#if DEBUG_CPUFREQ_TIME
+ begin_time = ktime_get();
+#endif
+
+ ret = send_msg_to_m0();
+ if(!ret)
+ /* wait until the AXI freq change completes; use a timeout for safety */
+ wait_ddr_ack();
+ else
+ pr_info("[CPUFREQ] request_ddr_freq(%d) failed: (%d) \n",(u32)ddr_freq, ret);
+
+#if DEBUG_CPUFREQ_TIME
+ end_time = ktime_get();
+ total_time = ktime_to_us(ktime_sub(end_time, begin_time));
+ pr_info("[CPUFREQ] total ddr time: %d us\n",(u32)total_time);
+#endif
+ }
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_ARCH_ZX297520V2
+static void ddr_freq_handler(void)
+{
+ local_irq_disable();
+ waiting_ddr_dfs((unsigned long)DDR_DFS_CODE_ADDR);
+ local_irq_enable();
+}
+#else
+static int zx_ddrfreq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *v)
+{
+ mutex_lock(&ddrfreq_lock);
+ switch (pm_event)
+ {
+ case PM_SUSPEND_PREPARE:
+ request_ddr_freq_hw(0);
+ break;
+ case PM_POST_SUSPEND:
+ request_ddr_freq_hw(0x9c);
+ break;
+ }
+ mutex_unlock(&ddrfreq_lock);
+ return NOTIFY_OK;
+}
+static struct notifier_block zx_ddrfreq_nb =
+{
+ .notifier_call = zx_ddrfreq_pm_notifier,
+};
+#endif
+
+static int __init zx29_ddrfreq_init(void)
+{
+#ifdef CONFIG_ARCH_ZX297520V2
+#else
+ register_pm_notifier(&zx_ddrfreq_nb);
+#endif
+ return 0;
+}
+
+#endif
+
+static void pm_m0_handler(void *buf, unsigned int len)
+{
+ /* deal msg from m0 */
+}
+
+static long zx_cpufreq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+
+ int ret = 0;
+ unsigned int temp;
+
+ if (!arg)
+ return -EFAULT;
+
+ switch(cmd)
+ {
+ case ZX_CPUFREQ_SET_FREQ:
+ ret = copy_from_user(&temp, (unsigned int*)arg, sizeof(unsigned int));
+ if(ret)
+ printk("%s: copy_from_user failed\n",__func__);
+ zx_set_frequency(temp);
+ break;
+
+ case ZX_CPUFREQ_GET_FREQ:
+ //cpufreq_level = zx29_get_frequency();
+ //printk("%s: cpufreq_level:%d\n",__func__, cpufreq_level);
+ temp = zx_getspeed(0)*1000;
+ ret = copy_to_user((void *)arg, &temp, sizeof(unsigned int));
+ if(ret)
+ printk("%s: copy user failed\n",__func__);
+ break;
+
+ default:
+ return -EPERM;
+ }
+
+ return ret;
+}
+
+static const struct file_operations zx_cpufreq_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = zx_cpufreq_ioctl,
+};
+
+static struct miscdevice zx_cpufreq_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "zx_cpufreq",
+ .fops = &zx_cpufreq_fops,
+};
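+
+/*
+ * Illustrative userspace use of the /dev/zx_cpufreq misc device above: a
+ * minimal sketch, assuming the ZX_CPUFREQ_SET_FREQ / ZX_CPUFREQ_GET_FREQ
+ * ioctl numbers are exported to userspace by a shared header (not part of
+ * this patch).  Kept inside #if 0 so it is not compiled into the kernel.
+ */
+#if 0
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+int main(void)
+{
+	unsigned int freq = 624000000;	/* requested CPU freq in Hz */
+	unsigned int cur = 0;		/* current CPU freq in Hz */
+	int fd = open("/dev/zx_cpufreq", O_RDWR);
+
+	if (fd < 0)
+		return 1;
+
+	/* ZX_CPUFREQ_SET_FREQ copies an unsigned int (Hz) from userspace */
+	ioctl(fd, ZX_CPUFREQ_SET_FREQ, &freq);
+
+	/* ZX_CPUFREQ_GET_FREQ returns zx_getspeed(0)*1000, i.e. the freq in Hz */
+	ioctl(fd, ZX_CPUFREQ_GET_FREQ, &cur);
+	printf("cpu freq: %u Hz\n", cur);
+
+	close(fd);
+	return 0;
+}
+#endif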
+static int __init zx29_intercore_init(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_ARCH_ZX297520V2
+ ret = zDrvRpMsg_CreateChannel(M0_ID, ICP_CHANNEL_PSM, 0x20);
+ if(ret)
+ {
+ pr_info("[CPUFREQ] Failed create psm icp channel, err:%d ! \n", ret);
+ return -EPERM;
+ }
+ pr_info("[CPUFREQ] Success create psm icp channel!!! \n");
+
+ zDrvRpMsg_RegCallBack(M0_ID, ICP_CHANNEL_PSM, pm_m0_handler);
+
+#ifdef CONFIG_AXI_FREQ
+ zx29_axifreq_init();
+#endif
+
+#ifdef CONFIG_DDR_FREQ
+ #if 0
+ ret = zDrvRpMsg_CreateChannel(PS_ID, ICP_CHANNEL_PSM, 0x20);
+ if(ret)
+ {
+ printk("[DDRFREQ] Failed create psm icp channel, err:%d ! \n", ret);
+ return -EPERM;
+ }
+ printk("[DDRFREQ] Success create psm icp channel!!! \n");
+ #endif
+ zx29_ddrfreq_init();
+#endif
+#endif
+
+ ret = misc_register(&zx_cpufreq_miscdev);
+ if (ret) {
+ printk(KERN_ERR"%s: cpufreq failed to register miscdev (ret = %d)\n", __FILE__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(zx29_intercore_init);
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpuidle.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpuidle.c
new file mode 100644
index 0000000..3d8719f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-cpuidle.c
@@ -0,0 +1,344 @@
+/*
+ * zx297520v2 CPU idle Routines
+ *
+ * Copyright (C) 2013 ZTE, Ltd.
+ * Shine Yu <yu.xiang5@zte.com.cn>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+
+
+#include <mach/timex.h>
+#include "zx-pm.h"
+
+#define WHOLE_CHIP_EXIT_LATENCY (4000) /* us */
+
+#define LP2_DEFAULT_EXIT_LATENCY (500 + WHOLE_CHIP_EXIT_LATENCY) /* us */
+#define LP2_MIN_POWER_OFF_TIME (500) /* us */
+
+#define LP2_DELTA_EXIT_LATENCY (100) /* us -- refresh time for the timer setting, should be > 2us.
+ Do not modify this. */
+
+static s64 zx_idle_sleeptime = 0xffffffff;
+static struct cpuidle_driver *cur_idle_drv;
+
+#ifdef CONFIG_ZX_PM_DEBUG
+//extern struct zx_idle_stats idle_stats;
+#endif
+
+/*===================================================================
+ *========= idle states description ==============================
+ *===================================================================
+ *========= LP3 -- wfi(target_residency = 5) =============
+ *========= LP2 -- pwroff(target_residency = 5000) =============
+ *===================================================================
+ */
+static struct cpuidle_state zx29_cpuidle_set[] __initdata =
+{
+ /* LP3 -- wfi */
+ [ZX_IDLE_CSTATE_LP3] =
+ {
+ .enter = zx_enter_idle,
+ .exit_latency = 2,
+ .target_residency = 5,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "LP3",
+ .desc = "clock gating(WFI)",
+ },
+ /* LP2 -- POWEROFF */
+ [ZX_IDLE_CSTATE_LP2] =
+ {
+ .enter = zx_enter_idle,
+ .exit_latency = LP2_DEFAULT_EXIT_LATENCY,
+ .target_residency = LP2_DEFAULT_EXIT_LATENCY+LP2_MIN_POWER_OFF_TIME,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "LP2",
+ .desc = "POWEROFF",
+ },
+};
+
+int __init zx_fill_cpuidle_data(struct cpuidle_driver *drv)
+{
+ int i, max_cpuidle_state;
+
+ max_cpuidle_state = sizeof(zx29_cpuidle_set) / sizeof(struct cpuidle_state);
+
+ for (i = 0; i < max_cpuidle_state; i++)
+ memcpy(&drv->states[i], &zx29_cpuidle_set[i], sizeof(struct cpuidle_state));
+
+ drv->safe_state_index = ZX_IDLE_CSTATE_LP3;
+
+ cur_idle_drv = drv;
+
+ return max_cpuidle_state;
+}
+
+
+/**
+ * idle_update_sleep_param
+ *
+ * when exiting from an LP2-level sleep, update the exit_latency/target_residency.
+ */
+/* our idle exit latency may */
+#if 0
+static unsigned int lp2_exit_latencies[MAX_CPU_NUM];
+static void idle_update_sleep_param(void)
+{
+
+}
+#endif
+/**
+ * idle_set_sleeptime
+ *
+ * set the wakeup timer
+ *
+ * sleep_time (us)
+ */
+extern void setup_timer_wakeup(s64 us);
+void idle_set_sleeptime(s64 sleep_time)
+{
+ /* set timer */
+ setup_timer_wakeup(sleep_time);
+
+ zx_idle_sleeptime = sleep_time;
+}
+
+/**
+ * idle_get_sleeptime
+ *
+ * for PCU sleeptime
+ */
+s64 idle_get_sleeptime(void)
+{
+ return zx_idle_sleeptime;
+}
+
+/**
+ * idle_unmask_interrupt
+ *
+ *
+ */
+static void idle_unmask_interrupt(void)
+{
+}
+
+static void idle_unmask_interrupt_restore(void)
+{
+}
+
+static unsigned int idle_get_exit_latency(int index)
+{
+ struct cpuidle_state* state = &(cur_idle_drv->states[index]);
+
+ return state->exit_latency;
+}
+
+/**
+ * When entering deep sleep the tick timer may be stopped because the
+ * 26M osc will be closed. So we stop the tick before entering
+ * deep sleep, get the sleeping time, and then restart the
+ * tick (minus the sleeping time).
+ */
+static u64 idle_enter_time = 0;
+static void idle_pre_enter(void)
+{
+ pm_stop_tick();
+
+ idle_enter_time = read_persistent_us();
+}
+
+static void idle_post_enter(s64 rem_us)
+{
+ u64 cur_time = read_persistent_us();
+ s64 delta;
+ u64 max_persist_us;
+
+ if(cur_time >= idle_enter_time)
+ delta = cur_time - idle_enter_time;
+ else
+ {
+ max_persist_us = div64_u64((u64)(0x7fffffff)*USEC_PER_SEC, (u64)PERSISTENT_TIMER_CLOCK_RATE);
+ delta = max_persist_us - idle_enter_time + cur_time;
+ }
+
+ if(delta > rem_us + LP2_DELTA_EXIT_LATENCY)
+ delta -= rem_us;
+ else
+ delta = LP2_DELTA_EXIT_LATENCY;
+
+ pm_start_tick(delta);
+}
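+
+/*
+ * Worked example for the wrap-around branch above, assuming
+ * PERSISTENT_TIMER_CLOCK_RATE is 32768 (the rate itself is defined outside
+ * this patch): the persistent counter wraps after
+ *   max_persist_us = 0x7fffffff * 1000000 / 32768 ~= 65535999969 us (~18.2h).
+ * If idle was entered at idle_enter_time = 65535990000 us and the counter is
+ * read as cur_time = 5000 us after the wrap, the time actually slept is
+ *   delta = max_persist_us - idle_enter_time + cur_time ~= 14969 us,
+ * which is then reduced by rem_us (the tick remainder captured before the
+ * sleep) before the tick is restarted.
+ */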
+
+#define PM_IDLE_TRACE 0
+#if PM_IDLE_TRACE
+
+#define TRACE_IDLE_COUNT 1000
+
+typedef struct
+{
+ s64 request;
+ s64 enter_remainder;
+ s64 exit_remainder;
+}pm_idle_trace_t;
+
+volatile pm_idle_trace_t pm_idle_view[TRACE_IDLE_COUNT];
+volatile unsigned int pm_idle_index = 0;
+
+void trace_pm_idle_enter(s64 req_t, s64 remainder_t)
+{
+ pm_idle_view[pm_idle_index].request = req_t;
+ pm_idle_view[pm_idle_index].enter_remainder = remainder_t;
+}
+
+void trace_pm_idle_exit(s64 remainder_t)
+{
+ pm_idle_view[pm_idle_index].exit_remainder = remainder_t;
+
+ pm_idle_index++;
+ if(pm_idle_index==TRACE_IDLE_COUNT)
+ pm_idle_index=0;
+}
+#else
+void trace_pm_idle_enter(s64 req_t, s64 remainder_t){}
+void trace_pm_idle_exit(s64 remainder_t){}
+#endif
+
+/**
+ * zx_enter_deep_idle
+ *
+ * enter lp2 mode
+ */
+ s64 sleep_time = 0;
+ s64 request = 0;
+ s64 remainder_timer = 0;
+ s64 enter_deep_idle_enter_cnt =0;
+ s64 enter_deep_idle_exit_cnt =0;
+static int zx_enter_deep_idle(int index)
+{
+ bool sleep_completed = false;
+ pm_wake_reason_t wake_reason;
+
+
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ ktime_t entry_time, exit_time;
+ s64 idle_time;
+ unsigned int cpu = read_cpuid();
+
+ //idle_stats.cpu_ready_count[cpu]++;
+ // idle_stats.tear_down_count[cpu]++;
+
+ entry_time = ktime_get();
+#endif
+ enter_deep_idle_enter_cnt++;
+
+ //s64 sleep_time;
+ /*s64 */request = ktime_to_us(tick_nohz_get_sleep_length());
+ //s64 remainder_timer;
+
+ /* */
+ idle_unmask_interrupt();
+
+ /* set wakeup timer */
+ remainder_timer = pm_get_remainder_time();
+ sleep_time = request - idle_get_exit_latency(index);
+ if ((sleep_time > LP2_DELTA_EXIT_LATENCY) && (sleep_time < remainder_timer))
+ idle_set_sleeptime(sleep_time);
+ else
+ return zx_pm_idle_enter(ZX_IDLE_CSTATE_LP3);
+
+ trace_pm_idle_enter(request, remainder_timer);
+
+ idle_pre_enter();
+
+#ifdef CONFIG_ZX_PM_DEBUG
+ //idle_stats.lp2_count[cpu]++;
+#endif
+
+ /* sleep */
+ zx_enter_sleep(CPU_SLEEP_TYPE_LP3); /* switch to OFF/sleep mode */
+
+ enter_deep_idle_exit_cnt++;// tmp
+ idle_post_enter(remainder_timer);
+
+ remainder_timer = pm_get_remainder_time();
+ trace_pm_idle_exit(remainder_timer);
+
+ /* get wakeup cause */
+ wake_reason = pm_get_wakeup_reason();
+ if (wake_reason != WR_WAKE_SRC_ABNORMAL)
+ {
+ sleep_completed = true;
+ }
+ else
+ {
+#ifdef CONFIG_ZX_PM_DEBUG
+ int irq = 0;
+ irq = zx29_gic_pending_interrupt();
+ //idle_stats.lp2_int_count[irq]++;
+#endif
+ }
+
+ /* */
+ idle_unmask_interrupt_restore();
+/*=================================================================
+ *=======end enter deep sleep======================================
+ *=================================================================
+ */
+#ifdef CONFIG_ZX_PM_DEBUG
+ exit_time = ktime_get();
+ idle_time = ktime_to_us(ktime_sub(exit_time, entry_time));
+
+ //idle_stats.cpu_wants_lp2_time[cpu] += idle_time;
+ //idle_stats.in_lp2_time[cpu] += idle_time;
+
+ //if (sleep_completed)
+ //idle_stats.lp2_completed_count[cpu]++;
+#endif
+
+ return index;
+}
+
+/**
+ * zx_pm_idle_enter - Programs the cpu to enter the specified state
+ * @index: The target state to be programmed
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified low power state selected by the governor.
+ * Called with irqs off, returns with irqs on.
+ * Returns the index of the state actually entered.
+ */
+int zx_pm_idle_enter(int index)
+{
+#ifdef CONFIG_ZX_PM_DEBUG
+ //idle_stats.idle_count++;
+#endif
+ if(ZX_IDLE_CSTATE_LP2 == index)
+ {
+ return zx_enter_deep_idle(index);
+ }
+ else
+ {
+#ifdef CONFIG_ZX_PM_DEBUG
+ unsigned cpu = read_cpuid();
+ // idle_stats.lp3_count[cpu]++;
+#endif
+ //gpio_direction_output(ZX29_GPIO_35, GPIO_HIGH);
+ cpu_do_idle();
+ //gpio_direction_output(ZX29_GPIO_35, GPIO_LOW);
+ return ZX_IDLE_CSTATE_LP3;
+ }
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-pm.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-pm.c
new file mode 100644
index 0000000..997a9d0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-pm.c
@@ -0,0 +1,354 @@
+/*
+ * ZTE power management common driver
+ *
+ * Copyright (C) 2015 ZTE Ltd.
+ * by zxp
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
+
+#include <mach/dma.h>
+#include <mach/irqs.h>
+#include "zx-pm.h"
+
+
+
+
+static unsigned int pm_enter_flag = false;
+
+unsigned int pm_get_sleep_flag(void)
+{
+ return pm_enter_flag;
+}
+
+/**
+ * Configure the PCU before entering a low-power mode.
+ *
+ */
+void pm_set_pcu(void)
+{
+ cpu_sleep_type_t sleep_type;
+ u32 sleep_time;
+
+ sleep_type = pm_get_sleep_type();
+ sleep_time = pm_get_sleep_time();
+
+ if(CPU_SLEEP_TYPE_LP1 == sleep_type)
+ {
+ pm_set_pcu_poweroff(sleep_time);
+ }
+ else if(CPU_SLEEP_TYPE_LP3 == sleep_type)
+ {
+ pm_set_pcu_sleep(sleep_time);
+ }
+ else
+ WARN_ON(1);
+}
+
+/**
+ * Helper to get the sleep time; used for the idle sleep type.
+ *
+ * This code is only used internally by PM.
+ *
+ * return : unit is one 26M cycle (~38.4ns)
+ * note: the max value is 0x7FFFFFFF (about 82.5s)
+ */
+u32 pm_get_sleep_time(void)
+{
+#ifdef CONFIG_CPU_IDLE
+ if(pm_enter_flag == false) {
+ if(idle_get_sleeptime() >=(82500000) )
+ return 0xffffffff;
+ else
+ return (u32)(idle_get_sleeptime()*26);
+ }
+ else
+ return 0xffffffff;
+#else
+ return 0xffffffff;
+#endif
+}
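+
+/*
+ * Unit check for pm_get_sleep_time() above: idle_get_sleeptime() returns
+ * microseconds and the PCU counts 26MHz cycles, so us * 26 = cycles.  The cap
+ * of 82500000us keeps the product below 0x7FFFFFFF (82500000 * 26 =
+ * 2145000000 < 2147483647); anything longer falls back to the 0xffffffff
+ * sentinel.
+ */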
+
+/*=============================================================================
+ *======== zx297520v2 CRM driver ===============================================
+ *=============================================================================
+ */
+typedef struct
+{
+ /* 0x00 */ const volatile unsigned version;
+ /* 0x04 */ volatile unsigned clkdiv;
+ /* 0x08 */ volatile unsigned clken;
+ char padding1[0x4];
+ /* 0x10 */ volatile unsigned rsten;
+ char padding2[0xC];
+ /* 0x20 */ volatile unsigned gate_clk;
+ char padding3[0x2C];
+ /* 0x50 */ volatile unsigned int_mode[14];
+} crm_registers;
+
+typedef struct
+{
+ unsigned int clkdiv;
+ unsigned int clken;
+ unsigned int rsten;
+ unsigned int gate_clk;
+ unsigned int int_mode[14];
+} crm_context;
+
+/**
+ * save & restore CRM register interface for zx297520v2.
+ *
+ */
+void zx29_save_crm(u32 *pointer, u32 crm_base)
+{
+#ifdef CONFIG_ARCH_ZX297520V2 // 7520V2
+ crm_registers *crm = (crm_registers *)crm_base;
+ crm_context *context = (crm_context *)pointer;
+
+ context->clkdiv = crm->clkdiv;
+ context->clken = crm->clken;
+ context->rsten = crm->rsten;
+ context->gate_clk = crm->gate_clk;
+ copy_words(context->int_mode, crm->int_mode, 14);
+#else
+ pointer =copy_words(pointer,(crm_base+0x78), 10); // 0x78-0xa0;
+ pointer =copy_words(pointer,(crm_base+0xB0), 1); // probe
+
+#endif
+}
+
+void zx29_restore_crm(u32 *pointer, u32 crm_base)
+{
+#ifdef CONFIG_ARCH_ZX297520V2 //7520V2
+ crm_registers *crm = (crm_registers *)crm_base;
+ crm_context *context = (crm_context *)pointer;
+
+ crm->clkdiv = context->clkdiv;
+ crm->clken = context->clken;
+ crm->rsten = context->rsten;
+ crm->gate_clk = context->gate_clk;
+
+ copy_words(crm->int_mode, context->int_mode, 14);
+#else
+ copy_words((crm_base+0x78), (pointer), 10); // 0x78-0xa0;
+ pointer += 10;
+ copy_words((crm_base+0xB0), (pointer), 1); // probe
+ pointer += 1;
+#endif
+}
+
+/*=============================================================================
+ *======== zx297520v2 PM&IDLE ==================================================
+ *=============================================================================
+ */
+static inline int pm_start_wake_timer(s64 us)
+{
+ /* 1 cycle == 1/32768(s)*/
+ /* max setting value = 0xffffffff/32768 = 131072s = 36h */
+ unsigned long cycles = div64_long(us*32768, 1000000);
+
+ zx29_set_wake_timer(cycles);
+
+ return 0;
+}
+
+void setup_timer_wakeup(s64 us)
+{
+ pm_start_wake_timer(us);
+}
+
+unsigned int zx29_gic_pending_interrupt(void)
+{
+#ifdef CONFIG_ARCH_ZX297520V2
+ return gic_get_cur_pending((unsigned int)GIC_CPU_BASE);
+#else
+ return gic_get_cur_pending((unsigned int)ZX_GICC_BASE);//fiy
+#endif
+}
+
+s64 pm_get_remainder_time(void)
+{
+ return div64_long(((s64)read_timer_clk(CLOCKEVENT_BASE))*1000000, EVENT_CLOCK_RATE);
+}
+
+void pm_stop_tick(void)
+{
+ timer_stop(CLOCKEVENT_BASE);
+}
+u32 pm_read_tick(void)
+{
+ return read_timer_clk(CLOCKEVENT_BASE);
+}
+void pm_restart_tick(u32 cycles)
+{
+ timer_set_load(CLOCKEVENT_BASE,cycles);
+ timer_start(CLOCKEVENT_BASE);
+
+}
+
+void pm_start_tick(u64 us)
+{
+ unsigned long cycles = div64_long(us*EVENT_CLOCK_RATE, 1000000);
+ struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
+ ktime_t expires;
+
+#if 0
+ dev->set_next_event(cycles, dev);
+#else
+ expires = ktime_add_ns(ktime_get(), us*1000);
+ clockevents_program_event(dev, expires, 1);
+#endif
+ timer_start(CLOCKEVENT_BASE);
+}
+
+unsigned int pm_dma_used(void)
+{
+#ifdef CONFIG_ZX29_DMA
+ return zx29_dma_get_status();
+#else
+ return 0;
+#endif
+}
+
+/*=============================================================================
+ *======== zx297520v2 DEBUG UART ===============================================
+ *====== note: uart is in wakeup powerdomain =================================
+ *=============================================================================
+ */
+
+static struct zx_suspend_context suspend_context;
+
+void debug_uart_suspend(void)
+{
+ void __iomem *uart_base = debug_uart_base();
+
+#if 1
+ suspend_context.uart.ibrd = pm_read_reg(uart_base + ZX29_UART_IBRD);
+ suspend_context.uart.fbrd = pm_read_reg(uart_base + ZX29_UART_FBRD);
+ suspend_context.uart.lcrh = pm_read_reg(uart_base + ZX29_UART_LCRH);
+ suspend_context.uart.ifls = pm_read_reg(uart_base + ZX29_UART_IFLS);
+ suspend_context.uart.imsc = pm_read_reg(uart_base + ZX29_UART_IMSC);
+ suspend_context.uart.dmacr = pm_read_reg(uart_base + ZX29_UART_DMACR);
+#endif
+ suspend_context.uart.cr = pm_read_reg(uart_base + ZX29_UART_CR);
+
+ /* disable */
+ pm_clr_reg(uart_base + ZX29_UART_CR, UART_CR_UARTEN | UART_CR_TXE | UART_CR_LBE);
+
+ /* gate pclk/wclk */
+#if 0
+#ifdef DEBUG_UART0
+ pm_clr_reg(A1_CRM_PCLK_EN_REG, A1_CRM_UART0_BIT);
+ pm_clr_reg(A1_CRM_WCLK_EN_REG, A1_CRM_UART0_BIT);
+#else
+ pm_clr_reg(A1_CRM_PCLK_EN_REG, A1_CRM_UART1_BIT);
+ pm_clr_reg(A1_CRM_WCLK_EN_REG, A1_CRM_UART1_BIT);
+#endif
+#endif
+}
+
+void debug_uart_resume(void)
+{
+ void __iomem *uart_base = debug_uart_base();
+
+ /* open pclk/wclk */
+#if 0
+#ifdef DEBUG_UART0
+ pm_set_reg(A1_CRM_PCLK_EN_REG, A1_CRM_UART0_BIT);
+ pm_set_reg(A1_CRM_WCLK_EN_REG, A1_CRM_UART0_BIT);
+#else
+ pm_set_reg(A1_CRM_PCLK_EN_REG, A1_CRM_UART1_BIT);
+ pm_set_reg(A1_CRM_WCLK_EN_REG, A1_CRM_UART1_BIT);
+#endif
+#endif
+
+#if 1
+ pm_write_reg(uart_base + ZX29_UART_IBRD, suspend_context.uart.ibrd);
+ pm_write_reg(uart_base + ZX29_UART_FBRD, suspend_context.uart.fbrd);
+ pm_write_reg(uart_base + ZX29_UART_LCRH, suspend_context.uart.lcrh);
+ pm_write_reg(uart_base + ZX29_UART_IFLS, suspend_context.uart.ifls);
+ pm_write_reg(uart_base + ZX29_UART_IMSC, suspend_context.uart.imsc);
+ pm_write_reg(uart_base + ZX29_UART_DMACR, suspend_context.uart.dmacr);
+#endif
+ pm_write_reg(uart_base + ZX29_UART_CR, suspend_context.uart.cr);
+}
+
+void pm_mask_tick(void);
+void pm_unmask_tick(void);
+/* we use ap_timer1 as idle wakeup source when poweroff */
+void zx_pm_pre_suspend(void)
+{
+ setup_timer_wakeup(__SLEEP_TIME_1h__*18);//
+ pm_mask_tick();
+
+ pm_enter_flag = true;
+}
+
+void zx_pm_post_suspend(void)
+{
+ zx29_stop_wake_timer();
+
+ pm_unmask_tick();
+
+ pm_enter_flag = false;
+}
+
+static unsigned int at_command_read_flag = 0;
+static unsigned int pm_mask_info = 0;
+void pm_debug_mask_info_init(void)
+{
+// pm_mask_info = 0; /* should get value from iram */
+ pm_get_mask_info();
+ pm_ram_log("pm_mask_info=(%8lu)\n", pm_mask_info);
+}
+
+unsigned int pm_get_mask_info(void)
+{
+ if(at_command_read_flag != AT_COMMAND_READ_FLAG)
+ {
+ at_command_read_flag = zx_read_reg(IRAM_AT_COMMAND_ADDR + 0x10);
+ if(at_command_read_flag == AT_COMMAND_READ_FLAG)
+ {
+ pm_mask_info = zx_read_reg(IRAM_AT_COMMAND_ADDR + 0x4);
+ if((zx_read_reg(IRAM_AT_COMMAND_ADDR)&PM_ALL_NO_SLEEP)||(pm_mask_info&PM_NO_SLEEP))//the AP does not enter the sleep flow
+ {
+ #ifdef CONFIG_ARCH_ZX297520V3_CAP
+ pm_mask_info |= PM_IDLE_WFI;
+ #else
+ pm_mask_info |= (PM_IDLE_WFI|PM_NO_SUSPEND|PM_SUSPEND_WFI|PM_NO_CPU_FREQ|PM_NO_AXI_FREQ);
+ #endif
+ }
+ }
+ }
+
+ return pm_mask_info;
+}
+
+bool pm_disable_suspend(void)
+{
+ return (pm_get_mask_info()&PM_NO_SUSPEND);
+}
+
+void pm_init_acs(void)
+{
+#ifdef CONFIG_ARCH_ZX297520V2
+ zx_set_reg(AP_CORE_SEL_ADDR, L2_STOPPED_SEL_EN|CORE_ACS_CLK_SEL_EN);
+#else
+ //zx_clr_reg(AP_CORE_CLK_GATE_ADDR, AP_PROBE_GATE_EN|AP_PMC_GTAE_EN|AP_PROBE_BYPASS_EN);
+ // zx_set_reg(AP_CORE_SEL_ADDR, CORE_ACS_CLK_SEL_EN);
+
+ /* ACS clk sel supported, default 26M */
+ //zx_set_reg(AP_CORE_SEL_ADDR, /*L2_STOPPED_SEL_EN|*/CORE_ACS_CLK_SEL_EN);
+ /* automatic clock gating */
+ //zx_set_reg(AP_AXI_CLKEN_ADDR, AP_TODDR_CLKEN_AUTO|AP_TOMATRIX_CLKEN_AUTO);
+#endif
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-pm.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-pm.h
new file mode 100644
index 0000000..2778a83
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/power/zx29-pm.h
@@ -0,0 +1,360 @@
+/*
+ * zx29-pm.h - zx29 power management interface.
+ *
+ * Written by zxp.
+ *
+ */
+
+#ifndef _ZX29_PM_H
+#define _ZX29_PM_H
+#include <mach/iomap.h>
+
+#define pm_reg_sync_write zx_reg_sync_write
+#define pm_read_reg zx_read_reg
+#define pm_write_reg zx_write_reg
+#define pm_set_reg zx_set_reg
+#define pm_clr_reg zx_clr_reg
+#define pm_reg_sync_write_16 zx_reg_sync_write_16
+#define pm_read_reg_16 zx_read_reg_16
+#define pm_write_reg_16 zx_write_reg_16
+
+#define ZX_A9_PERIPHERAL_PHYS (ZX29_A9_PERIPHERAL_PHYS)
+#define A9_CRM_PHYS (ZX_A9_PERIPHERAL_PHYS + 0x3000)
+#define ZX_L2CACHE_CONFIG_PHYS (ZX29_L2CACHE_CONFIG_PHYS)
+
+#define INVALID_INT_NUM (0xFFFF)
+
+#define __SLEEP_TIME_1s__ ((s64)(1000000))
+#define __SLEEP_TIME_1m__ (__SLEEP_TIME_1s__*60)
+#define __SLEEP_TIME_1h__ (__SLEEP_TIME_1m__*60)
+#define __MAX_SLEEP_TIME__ (__SLEEP_TIME_1h__*18)
+
+/* iram1 is for axi_freq and debug; data will not be lost when the system dies */
+/*===================================================================
+ *== iram1 address allocation =====================================
+ *===================================================================
+ *========= 0x2000 ~~ 0x2FFF : AP debug ram ===================
+ *========= 0x3000 ~~ 0x37FF : common use ===================
+ *========= 0x30A0 ~~ 0x30AF : at command ===================
+ *========= 0x3140 ~~ 0x316F : ddr freq area ===================
+ *========= 0x3170 ~~ 0x318F : axi freq area ===================
+ *========= 0x3190 ~~ 0x31bF : dvs area ===================
+ *===================================================================
+ */
+#ifdef CONFIG_ARCH_ZX297520V2
+#define IRAM_PM_BASE (ZX_IRAM1_BASE)
+
+#define IRAM_AP_DEBUG_ADDR (IRAM_PM_BASE + 0x2000)
+#define IRAM_AP_DEBUG_LEN (0x1000)
+
+#define IRAM_COMMON_USE_ADDR (IRAM_PM_BASE + 0x3000)
+#define IRAM_COMMON_USE_LEN (0x800)
+
+#define IRAM_PS_SLEEP_FLAG_ADDR (IRAM_COMMON_USE_ADDR + 0x30)
+
+#define IRAM_AT_COMMAND_ADDR (IRAM_COMMON_USE_ADDR + 0xA0)
+
+#define IRAM_CHANGE_DDR_BASE (IRAM_COMMON_USE_ADDR + 0x140)
+#define IRAM_CHANGE_AXI_BASE (IRAM_COMMON_USE_ADDR + 0x180)
+#define IRAM_CHANGE_DVS_BASE (IRAM_COMMON_USE_ADDR + 0x1a0) /* TBD */
+
+#define AP_SUSPEND_FOR_POWEROFF_CNT (IRAM_COMMON_USE_ADDR + 0x114)
+#define AP_SLEEP_TIME_ADDR (IRAM_COMMON_USE_ADDR + 0x130)
+
+/*test flag*/
+#define IRAM_ADDR_FOR_SLEEP_CNT (IRAM_AP_DEBUG_ADDR + 0x0)
+#define IRAM_ADDR_FOR_WAKE_CNT (IRAM_AP_DEBUG_ADDR + 0x4)
+#define SLEEP_TIME_ADDR (SYSTEM_WAKEUP_ADDR + 0x8)
+#define AP_SUSPEND_STATUS_FLAG (IRAM_AP_DEBUG_ADDR + 0xC)
+#define AP_SUSPEND_FOR_SLEEP_CNT (IRAM_AP_DEBUG_ADDR + 0x10)
+#define AP_IDLE_SLEEP_STATUS_FLAG (IRAM_AP_DEBUG_ADDR + 0x14)
+#else
+
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+#define IRAM_PM_BASE (ZX_IRAM1_BASE + 0x2000) /*0x00102000*/
+#else
+#define IRAM_PM_BASE (ZX_IRAM1_BASE) /*0x00100000*/
+#endif
+
+#define IRAM_AP_DEBUG_ADDR (IRAM_PM_BASE + 0x400) /*0x00102400/0x00100400*/
+
+#define IRAM_AP_DEBUG_LEN (0x1000)/*4K*/
+
+
+#define IRAM_COMMON_USE_ADDR (ZX_IRAM1_BASE + 0x1000)/*0x00101000 for psm flag*/
+
+//#define IRAM_COMMON_USE_LEN (0x800)
+
+#define IRAM_PS_SLEEP_FLAG_ADDR (IRAM_COMMON_USE_ADDR + 0x30)
+#define IRAM_AT_COMMAND_ADDR (IRAM_COMMON_USE_ADDR + 0xA0)
+#define IRAM_AP_DRV_FLAG_BASE (IRAM_COMMON_USE_ADDR + 0x58) /* USB FLAG TO PROXY */
+
+#define IRAM_CHANGE_DDR_BASE (IRAM_COMMON_USE_ADDR + 0x140)
+#define IRAM_CHANGE_AXI_BASE (IRAM_COMMON_USE_ADDR + 0x180)
+#define IRAM_CHANGE_DVS_BASE (IRAM_COMMON_USE_ADDR + 0x1a0) /* TBD */
+
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+#define AP_SUSPEND_FOR_POWEROFF_CNT (IRAM_COMMON_USE_ADDR + 0x114)
+#define AP_SLEEP_TIME_ADDR (IRAM_COMMON_USE_ADDR + 0x134)
+#else
+#define AP_SUSPEND_FOR_POWEROFF_CNT (IRAM_COMMON_USE_ADDR + 0x110)
+#define AP_SLEEP_TIME_ADDR (IRAM_COMMON_USE_ADDR + 0x130)
+#endif
+
+/*test flag*/
+#define IRAM_ADDR_FOR_SLEEP_CNT (IRAM_AP_DEBUG_ADDR + 0x0)
+#define IRAM_ADDR_FOR_WAKE_CNT (IRAM_AP_DEBUG_ADDR + 0x4)
+#define SLEEP_TIME_ADDR (IRAM_AP_DEBUG_ADDR + 0x8)//(SYSTEM_WAKEUP_ADDR + 0x8)
+#define AP_SUSPEND_STATUS_FLAG (IRAM_AP_DEBUG_ADDR + 0xC)
+#define AP_SUSPEND_FOR_SLEEP_CNT (IRAM_AP_DEBUG_ADDR + 0x10)
+#define AP_IDLE_SLEEP_STATUS_FLAG (IRAM_AP_DEBUG_ADDR + 0x14)
+
+
+
+//#define SUSPEND_START_TIME_ADDR (IRAM_AP_DEBUG_ADDR + 0x10)
+//#define SUSPEND_SAVE_TIME_ADDR (IRAM_AP_DEBUG_ADDR + 0x14)
+//#define SUSPEND_RESTORE_TIME_ADDR (IRAM_AP_DEBUG_ADDR + 0x18)
+#endif
+
+/*
+ * flag   : M0 clears it to 0 when it takes the request, AP sets it to 1 to request M0
+ * target : AP AXI target, defined by zx297510_axi_freq
+ * cur    : current AXI freq, defined by zx297510_axi_freq
+ * ack    : M0 sets it to 1 when the request is done, AP clears it to 0 before a new request
+ */
+typedef struct
+{
+ /* 0x00 */ volatile unsigned int ps_req_flag;
+ /* 0x04 */ volatile unsigned int ps_exp_freq;
+
+ /* 0x08 */ volatile unsigned int phy_req_flag;
+ /* 0x0C */ volatile unsigned int phy_exp_freq;
+
+ /* 0x10 */ volatile unsigned int ap_req_flag;
+ /* 0x14 */ volatile unsigned int ap_exp_freq;
+
+ /* 0x18 */ volatile unsigned int cur_freq;
+
+}axi_freq_regs;
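+
+/*
+ * Illustrative AP-side use of the flag/cur handshake described above: a
+ * minimal polling sketch only.  The real driver additionally kicks M0 with an
+ * ICP message (send_msg_to_m0()) and waits via wait_axi_ack(), so the helper
+ * below is not the actual implementation.  Kept inside #if 0, not compiled.
+ */
+#if 0
+static int ap_request_axi_freq_sketch(axi_freq_regs *regs,
+					unsigned int target, unsigned int timeout)
+{
+	regs->ap_exp_freq = target;	/* publish the AP target frequency */
+	regs->ap_req_flag = 1;		/* AP sets 1 to request M0 */
+
+	/* M0 clears ap_req_flag when it takes the request and updates
+	 * cur_freq once the switch is done; poll with a timeout for safety */
+	while (timeout-- && regs->cur_freq != target)
+		;
+
+	return (regs->cur_freq == target) ? 0 : -1;
+}
+#endif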
+
+typedef struct
+{
+ /* 0x00 */ volatile unsigned int ps_req_flag;
+ /* 0x04 */ volatile unsigned int ps_exp_vol;
+
+ /* 0x08 */ volatile unsigned int phy_req_flag;
+ /* 0x0C */ volatile unsigned int phy_exp_vol;
+
+ /* 0x10 */ volatile unsigned int ap_req_flag;
+ /* 0x14 */ volatile unsigned int ap_exp_vol;
+
+ /* 0x18 */ volatile unsigned int cur_vol;
+
+}vol_dvs_regs;
+
+typedef struct
+{
+ /* 0x00 */ volatile unsigned int ps_req_flag;
+ /* 0x04 */ volatile unsigned int ps_ack_flag;
+ /* 0x08 */ volatile unsigned int ps_exp_freq;
+
+ /* 0x0C */ volatile unsigned int phy_req_flag;
+ /* 0x10 */ volatile unsigned int phy_ack_flag;
+ /* 0x14 */ volatile unsigned int phy_exp_freq;
+
+ /* 0x18 */ volatile unsigned int ap_req_flag;
+ /* 0x1C */ volatile unsigned int ap_ack_flag;
+ /* 0x20 */ volatile unsigned int ap_exp_freq;
+
+ /* 0x24 */ volatile unsigned int cur_freq;
+ /* 0x28 */ volatile unsigned int status;
+}ddr_freq_regs;
+
+/* pm mask flag for test */
+#define AT_COMMAND_READ_FLAG (0x49435001)
+#define AXI_VOL_CHANGE_ICP_BUF (0x49435002)
+
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+#define PM_ALL_NO_SLEEP (0x1)
+#define PM_NO_SLEEP (0x1)
+#define PM_IDLE_WFI (0x1)
+#define PM_SLEEP_FLAG_PRINT (0x200)
+#define PM_NO_SUSPEND (0x40000)
+#define PM_SUSPEND_WFI (0x80000)
+
+#else
+#define PM_ALL_NO_SLEEP (0x1)
+#define PM_SLEEP_FLAG_PRINT (0x200)
+#define PM_NO_SLEEP (0x10000)
+#define PM_IDLE_WFI (0x20000)
+#define PM_NO_SUSPEND (0x40000)
+#define PM_SUSPEND_WFI (0x80000)
+#define PM_NO_CPU_FREQ (0x100000)
+#define PM_NO_AXI_FREQ (0x200000)
+#define PM_NO_DDR_FREQ (0x1000000)
+#endif
+
+/*===================================================================
+ *== iram address allocation ======================================
+ *===================================================================
+ *========= 0 ~~ 0x1FF : wakeup code area ===================
+ *========= 0x200 ~~ 0x27F : code area_1 ===================
+ *========= 0x280 ~~ 0x2ff : code area_2 ===================
+ *========= 0x300 ~~ 0x33f : debug area ===================
+ *========= 0x340 ~~ 999 : reserved[0] ===================
+ *========= 1000 ~~ 1003 : code addr[0] ===================
+ *========= 1004 ~~ 1023 : reserved[1] ===================
+ *===================================================================
+ */
+#define ioremap_mem(cookie,size) __arm_ioremap((cookie), (size), MT_MEMORY)
+#ifdef CONFIG_ARCH_ZX297520V2
+#define SYSTEM_WAKEUP_ADDR (ZX29_IRAM0_PHYS)
+#else
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+#define SYSTEM_WAKEUP_ADDR (ZX29_IRAM1_PHYS + 0x2000)//cap
+#else
+#define SYSTEM_WAKEUP_ADDR (ZX29_IRAM1_PHYS ) //(ZX29_IRAM1_PHYS + 0x2000)//ap
+#endif
+#endif
+
+#define WAKEUP_CODE_LENGTH (0x200)
+#define SLEEP_CODE_LENGTH (0x80)
+#define DDR_DFS_CODE_LENGTH (0x80)
+
+typedef struct
+{
+ /* 0x000 */ unsigned char wakeup_code[WAKEUP_CODE_LENGTH];
+ /* 0x200 */ unsigned char sleep_code[SLEEP_CODE_LENGTH];
+ /* 0x280 */ unsigned char ddr_dfs_code[DDR_DFS_CODE_LENGTH];
+ /* 0x300 */ char padding1[1000-0x300];
+ /* 1000 */ unsigned int reset_handler_vaddr;
+ /* 1004 */ char padding2[20];
+} wakeup_ram_area;
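+
+/*
+ * Layout check for wakeup_ram_area above: the padding keeps
+ * reset_handler_vaddr at byte offset 1000, matching the iram map comments.
+ * A sketch only, kept inside #if 0 and not compiled.
+ */
+#if 0
+static inline void wakeup_ram_area_layout_check(void)
+{
+	BUILD_BUG_ON(offsetof(wakeup_ram_area, sleep_code) != 0x200);
+	BUILD_BUG_ON(offsetof(wakeup_ram_area, ddr_dfs_code) != 0x280);
+	BUILD_BUG_ON(offsetof(wakeup_ram_area, reset_handler_vaddr) != 1000);
+}
+#endif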
+
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+#define AP_CORE_SEL_ADDR (ZX_MATRIX_CRM_BASE + 0x40)
+#else
+#define AP_CORE_SEL_ADDR (ZX_MATRIX_CRM_BASE + 0x20)
+#endif
+#define CORE_ACS_CLK_SEL_EN (1<<8)
+#ifdef CONFIG_ARCH_ZX297520V2
+#define L2_STOPPED_SEL_EN (1<<9) /*a53 not has*/
+#else
+#define AP_AXI_CLKEN_ADDR (ZX_MATRIX_CRM_BASE + 0x44)
+#define AP_TODDR_CLKEN_AUTO (1<<5)
+#define AP_TOMATRIX_CLKEN_AUTO (1<<4)
+
+#define PS_MATRIX_AXI_SEL (ZX_MATRIX_CRM_BASE + 0x120)
+#define AP_MATRIX_AXI_SEL (ZX_MATRIX_CRM_BASE + 0x124)
+#define AP_DDR_FFC_SEL (ZX_DDR_FFC_BASE + 0x220)
+#define AP_DDR_FFC_SEL_SYNC (ZX_DDR_FFC_BASE + 0x210)
+
+
+
+
+#define AP_CORE_CLK_GATE_ADDR (AP_CRM_BASE+0xB0)
+#define AP_PROBE_GATE_EN (1<<8)
+#define AP_PMC_GTAE_EN (1<<3)
+#define AP_PROBE_BYPASS_EN (1<<15)
+#endif
+
+/* copied from zx29_uart.h */
+#define ZX29_UART0_VA (ZX_UART0_BASE)
+#define ZX29_UART1_VA (ZX_UART1_BASE)
+
+#define ZX29_UART_IBRD 0x24 /* Integer baud rate divisor register. */
+#define ZX29_UART_FBRD 0x28 /* Fractional baud rate divisor register. */
+#define ZX29_UART_LCRH 0x30 /* Line control register. */
+#define ZX29_UART_CR 0x34 /* Control register. */
+#define ZX29_UART_IFLS 0x38 /* Interrupt fifo level select. */
+#define ZX29_UART_IMSC 0x40 /* Interrupt mask. */
+#define ZX29_UART_ICR 0x4c /* Interrupt clear register. */
+#define ZX29_UART_DMACR 0x50 /* DMA control register. */
+
+/*------ uart control reg -----*/
+#define UART_CR_CTSEN (1<<15) /* CTS hardware flow control */
+#define UART_CR_RTSEN (1<<14) /* RTS hardware flow control */
+#define UART_CR_OUT2 (1<<13) /* OUT2 */
+#define UART_CR_OUT1 (1<<12) /* OUT1 */
+#define UART_CR_RTS (1<<11) /* RTS */
+#define UART_CR_DTR (1<<10) /* DTR */
+#define UART_CR_RXE (1<<9) /* receive enable */
+#define UART_CR_TXE (1<<8) /* transmit enable */
+#define UART_CR_LBE (1<<7) /* loopback enable */
+#define UART_CR_SIRLP (1<<2) /* SIR low power mode */
+#define UART_CR_SIREN (1<<1) /* SIR enable */
+#define UART_CR_UARTEN (1<<0) /* UART enable */
+
+struct zx_uart_context
+{
+ unsigned int ibrd; /*0x24 Integer baud rate divisor register. */
+ unsigned int fbrd; /*0x28 Fractional baud rate divisor register. */
+ unsigned int lcrh; /*0x30 Line control register. */
+ unsigned int cr; /*0x34 Control register. */
+ unsigned int ifls; /*0x38 Interrupt fifo level select. */
+ unsigned int imsc; /*0x40 Interrupt mask. */
+ unsigned int dmacr; /*0x50 DMA control register. */
+};
+
+struct zx_timer_context
+{
+ unsigned int cfg; /*0x04 config register. */
+ unsigned int load; /*0x08 load register. */
+ unsigned int start; /*0x0C timer start register. */
+ unsigned int count; /*0x18 current counter register. */
+};
+
+/* for device or sw to restore */
+struct zx_suspend_context
+{
+ struct zx_uart_context uart;
+ struct zx_timer_context timer;
+};
+
+/*used as pm common interface*/
+void pm_set_pcu(void);
+
+extern void zx29_save_crm(u32 *pointer, u32 crm_base);
+extern void zx29_restore_crm(u32 *pointer, u32 crm_base);
+
+extern void pm_stop_tick(void);
+extern u32 pm_read_tick(void);
+extern void pm_restart_tick(u32 cycles);
+extern void pm_start_tick(u64 us);
+extern s64 pm_get_remainder_time(void);
+extern void setup_timer_wakeup(s64 us);
+extern void zx29_set_wake_timer(unsigned long cycles);
+extern void zx29_stop_wake_timer(void);
+extern u64 read_persistent_us(void);
+extern unsigned int pm_dma_used(void);
+
+extern unsigned pm_get_wakeup_int_no(void);
+extern char * pm_get_wakeup_int_name(void);
+extern void pm_get_wake_cause(void);
+extern unsigned int pm_get_wakesource(void);
+
+extern void __iomem *debug_uart_base(void);
+extern void debug_uart_suspend(void);
+extern void debug_uart_resume(void);
+
+extern void pm_timer_suspend(void);
+extern void pm_timer_resume(void);
+
+extern void zx_pm_pre_suspend(void);
+extern void zx_pm_post_suspend(void);
+
+#define zx_set_pcu pm_set_pcu
+#define zx_clear_pcu pm_clear_pcu
+
+#define save_crm zx29_save_crm
+#define restore_crm zx29_restore_crm
+
+#define zx_unmask_wakeup_interrupt()
+#define zx_interrupt_mask_restore()
+
+extern void pm_debug_mask_info_init(void);
+extern unsigned int pm_get_mask_info(void);
+
+void pm_init_acs(void);
+
+#endif /*_ZX29_PM_H*/
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/Makefile b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/Makefile
new file mode 100644
index 0000000..2e9bd70
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for rpm driver.
+#
+#ifndef CONFIG_PREEMPT_RT_FULL
+#obj-$(CONFIG_RPM_ZX29) += icp.o icp_rpmsg.o rpmsg.o rpmsg_log.o
+#obj-y += icp.o icp_rpmsg.o rpmsg.o rpmsg_log.o
+#else
+
+ifdef CONFIG_ARCH_ZX297520V3_CAP
+obj-$(CONFIG_RPM_ZX29) += icp.o icp_rpmsg.o rpmsg.o rpmsg_log.o
+else
+obj-y += rpmsg_sim.o rpmsg_log.o at_io.o amt_io.o
+obj-$(CONFIG_RPM_ZX29) += icp.o icp_rpmsg.o rpmsg.o
+endif
+
+ccflags-y += -I$(TOPDIR)/cp/ps/modem/com/inc
+ccflags-y += -I$(TOPDIR)/cp/ps/modem/tools/inc/amt
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/amt_io.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/amt_io.c
new file mode 100644
index 0000000..3561e4d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/amt_io.c
@@ -0,0 +1,110 @@
+/**
+ *
+ * @file amt_io.c
+ * @brief
+ * This file is part of FTM.
+ * Communication module between the AMT tool application layer and the kernel layer
+ *
+ * @details
+ * @author Tools Team.
+ * @email
+ * @copyright Copyright (C) 2013 Sanechips Technology Co., Ltd.
+ * @warning
+ * @date 2019/02/02
+ * @version 1.1
+ * @pre
+ * @post
+ *
+ * @par
+ * Change History :
+ * ---------------------------------------------------------------------------
+ * date version author description
+ * ---------------------------------------------------------------------------
+ * 2018/03/08 1.0 liu.xin Create file
+ * 2019/02/02 1.1 jiang.fenglin standardize function prototypes
+ * ---------------------------------------------------------------------------
+ *
+ *
+ */
+
+#include "amt_io.h"
+#include <linux/module.h>
+#include <linux/cp_types.h>
+#include <linux/soc/zte/rpm/rpmsg_sim.h>
+#include "amt.h"
+
+
+extern zx29_rpmsg_ser rpmsg_sim_zx29;
+//extern BOOL zAmt_IsAmtMode(VOID);
+
+
+static UINT32 AmtAgent_SendDataToAmtApp(UINT8 *buf, UINT32 buf_len)
+{
+ //panic("AmtAgent_SendDataToAmtApp");
+ TransdataToAmtApp(9, buf, buf_len);
+ return buf_len;
+ /*
+ if (TransdataToAmtApp(9, buf, buf_len) == 0)
+ {
+ return buf_len;
+ }
+ else
+ {
+ return 0;
+ }
+ */
+}
+
+
+static void AMTchannelOpen(unsigned int chid)
+{
+#ifdef USE_CPPS_KO
+ cpps_callbacks.RegSendDataToAmtAppFunction(AmtAgent_SendDataToAmtApp);
+#else
+ RegSendDataToAmtAppFunction(AmtAgent_SendDataToAmtApp);
+#endif
+ return;
+}
+static void AMTchannelClose(unsigned int chid)
+{
+
+}
+
+static void TransdataToAmtagent(unsigned int chid, const void *buffer, unsigned int length)
+{
+#ifdef USE_CPPS_KO
+ cpps_callbacks.AmtAgent_ComposeAndProcess(buffer, length);
+#else
+ AmtAgent_ComposeAndProcess(buffer, length);
+#endif
+ return;
+}
+
+static int TransdataToAmtApp(unsigned int chid, const void *buffer, unsigned int length)
+{
+ //zx29_rpmsg_channel *rpmsg_channel = &rpmsg_sim_zx29.rpmsg_channel[chid-1];
+ rpmsg_recv_notify(chid, buffer, length);
+ return 0;
+}
+
+
+static int __init AMTchannelInit(void)
+{
+ registerOpsCallback(9, AMTchannelOpen, AMTchannelClose, TransdataToAmtagent);
+
+ return 0;
+}
+
+static void __exit AMTchannelExit(void)
+{
+
+}
+
+late_initcall(AMTchannelInit);
+module_exit(AMTchannelExit);
+
+MODULE_AUTHOR("ZTE");
+MODULE_DESCRIPTION("ZTE Amt IO Module");
+MODULE_LICENSE("GPL");
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/amt_io.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/amt_io.h
new file mode 100644
index 0000000..01b18d2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/amt_io.h
@@ -0,0 +1,56 @@
+/**
+ *
+ * @file amt_io.h
+ * @brief
+ * This file is part of FTM.
+ * Communication module between the AMT tool application layer and the kernel layer
+ *
+ * @details
+ * @author Tools Team.
+ * @email
+ * @copyright Copyright (C) 2013 Sanechips Technology Co., Ltd.
+ * @warning
+ * @date 2019/02/02
+ * @version 1.1
+ * @pre
+ * @post
+ *
+ * @par
+ * Change History :
+ * ---------------------------------------------------------------------------
+ * date version author description
+ * ---------------------------------------------------------------------------
+ * 2018/03/08 1.0 liu.xin Create file
+ * 2019/02/02 1.1 jiang.fenglin standardize function prototypes
+ * ---------------------------------------------------------------------------
+ *
+ *
+ */
+
+#ifndef AMTIO_H
+#define AMTIO_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <asm/irq.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/map.h>
+
+
+static void AMTchannelOpen(unsigned int chid);
+static void AMTchannelClose(unsigned int chid);
+static void TransdataToAmtagent(unsigned int chid, const void *buffer, unsigned int length);
+static int TransdataToAmtApp(unsigned int chid, const void *buffer, unsigned int length);
+
+#endif
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.c
new file mode 100755
index 0000000..5e8683d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.c
@@ -0,0 +1,601 @@
+#include "at_io.h"
+#include <linux/module.h>
+#include <linux/cp_types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/soc/zte/rpm/rpmsg_sim.h>
+#include "zpsi_api.h"
+
+extern zx29_rpmsg_ser rpmsg_sim_zx29;
+
+#define ATIO_SUCCESS 0
+#define ATIO_ERROR 1
+
+#define ZATI2_SUCCESS 0
+#define ZATI2_ERROR 1
+
+#define VSIM_CHID_BASE 90
+#define VSIM_MAX_MSG_LEN 1024
+#define VSIM_APDU_TIMEOUT 6
+
+enum{
+ MSG_CMD_VSIM_GET_SWITCH = 1,
+ MSG_CMD_VSIM_SET_SWITCH,
+ MSG_CMD_VSIM_GET_STANDBY,
+ MSG_CMD_VSIM_SET_STANDBY,
+ MSG_CMD_VSIM_GET_TAUING,
+ MSG_CMD_VSIM_SET_TAUING,
+ MSG_CMD_VSIM_GET_AUTHING,
+ MSG_CMD_VSIM_SET_AUTHING,
+ MSG_CMD_VSIM_GET_CARD_STAT,
+ MSG_CMD_VSIM_APDU,
+ MSG_CMD_VSIM_GET_FLOW_STATISTIC,//get the current traffic statistics
+ MSG_CMD_VSIM_RESET_FLOW_STATISTIC,//reset the current traffic statistics
+ MSG_CMD_VSIM_MAX,
+};
+typedef struct {
+ unsigned short usMsgCmd; /* message type */
+ unsigned short usDataLen; /* message length, including the header */
+ unsigned char aucDataBuf[VSIM_MAX_MSG_LEN]; /* message body */
+} VSIM_MSG_BUF;
+typedef int (*vsim_async_CB)(unsigned char *msg);
+typedef int (*vsim_sync_CB)(unsigned char *in_msg, unsigned char *out_msg);
+
+extern vsim_async_CB g_vsim_read;
+extern vsim_async_CB g_vsim_write;
+extern vsim_sync_CB g_vsim_proc;
+extern unsigned char cid_reserved;
+
+extern unsigned char zAti_GetDualCardStat(unsigned char * pbSim1Stat, unsigned char * pbSim2Stat, unsigned char * pbIsCardSwitching);
+extern unsigned char psnet_get_pschid_stat_all(void);
+extern void psnet_set_pschid_stat_by_sim(unsigned char stat, unsigned char sim);
+extern void psnet_get_flow_statistic_by_sim(unsigned long *tx_bytes, unsigned long *rx_bytes, unsigned char sim);
+extern void psnet_reset_stat_by_sim(unsigned char sim);
+
+VSIM_MSG_BUF g_vsim_msg = {0};
+int g_vsim_msg_flag = 0;
+struct mutex g_vsim_msg_mutex;
+struct semaphore g_vsim_read_sem;
+struct semaphore g_vsim_write_sem;
+atomic_t g_vsim_switch = ATOMIC_INIT(0);
+atomic_t g_vsim_standby = ATOMIC_INIT(0);
+atomic_t g_vsim_tauing = ATOMIC_INIT(0);
+atomic_t g_vsim_authing = ATOMIC_INIT(0);
+unsigned char g_vsim_psnet_stat = 0;
+unsigned char g_vsim_cid_reserved = 0;
+
+static int vsim_rw_sync(VSIM_MSG_BUF *in_msg, VSIM_MSG_BUF *out_msg, int timeout)
+{
+ mutex_lock(&g_vsim_msg_mutex);
+ if(g_vsim_msg_flag){
+ mutex_unlock(&g_vsim_msg_mutex);
+ printk("vsim_rw busy flag=%d !\n",g_vsim_msg_flag);
+ return 0;
+ }
+ memcpy(&g_vsim_msg, in_msg, sizeof(VSIM_MSG_BUF));
+ g_vsim_msg_flag = 1;
+ mutex_unlock(&g_vsim_msg_mutex);
+ up(&g_vsim_read_sem);
+ if(down_timeout(&g_vsim_write_sem, timeout*HZ) != 0){
+ mutex_lock(&g_vsim_msg_mutex);
+ g_vsim_msg_flag = 0;
+ mutex_unlock(&g_vsim_msg_mutex);
+ printk("vsim_rw timeout !\n");
+ down_trylock(&g_vsim_read_sem);
+ return 0;
+ }
+ mutex_lock(&g_vsim_msg_mutex);
+ if(g_vsim_msg_flag != 3){//the user process may have exited
+ g_vsim_msg_flag = 0;
+ mutex_unlock(&g_vsim_msg_mutex);
+ printk("vsim_rw fail!\n");
+ down_trylock(&g_vsim_read_sem);
+ return 0;
+ }
+ memcpy(out_msg, &g_vsim_msg, sizeof(VSIM_MSG_BUF));
+ g_vsim_msg_flag = 0;
+ mutex_unlock(&g_vsim_msg_mutex);
+ return 1;
+}
+
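+/*
+ * g_vsim_msg_flag state machine (as implemented by the three helpers around
+ * this comment):
+ *   0 - idle, no request pending
+ *   1 - vsim_rw_sync() posted a request and is waiting for the app to read it
+ *   2 - vsim_app_read() handed the request to the app, waiting for its reply
+ *   3 - vsim_app_write() stored the reply; vsim_rw_sync() copies it out and
+ *       resets the flag to 0
+ * A timeout or an unexpected flag value resets the flag to 0 and fails the
+ * request.
+ */
+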
+static int vsim_app_read(unsigned char *msg)
+{
+ if(down_interruptible(&g_vsim_read_sem) < 0){
+ printk("vsim_read interrupt!\n");
+ return 0;
+ }
+ mutex_lock(&g_vsim_msg_mutex);
+ if(g_vsim_msg_flag != 1){
+ mutex_unlock(&g_vsim_msg_mutex);
+ printk("vsim_read fail flag=%d !\n",g_vsim_msg_flag);
+ return 0;
+ }
+ memcpy(msg, &g_vsim_msg, sizeof(VSIM_MSG_BUF));
+ g_vsim_msg_flag = 2;
+ mutex_unlock(&g_vsim_msg_mutex);
+ return 1;
+}
+
+static int vsim_app_write(unsigned char *msg)
+{
+ mutex_lock(&g_vsim_msg_mutex);
+ if(g_vsim_msg_flag != 2){
+ mutex_unlock(&g_vsim_msg_mutex);
+ printk("vsim_write fail flag=%d !\n",g_vsim_msg_flag);
+ return 0;
+ }
+ memcpy(&g_vsim_msg, msg, sizeof(VSIM_MSG_BUF));
+ g_vsim_msg_flag = 3;
+ mutex_unlock(&g_vsim_msg_mutex);
+ up(&g_vsim_write_sem);
+ return 1;
+}
+
+static int vsim_app_proc(unsigned char *in_msg, unsigned char *out_msg)
+{
+ VSIM_MSG_BUF *msg_buf = (VSIM_MSG_BUF *)in_msg;
+ VSIM_MSG_BUF *out_buf = (VSIM_MSG_BUF *)out_msg;
+ unsigned char default_param = msg_buf->aucDataBuf[0];
+
+ switch (msg_buf->usMsgCmd)
+ {
+ case MSG_CMD_VSIM_GET_SWITCH:
+ out_buf->usMsgCmd = MSG_CMD_VSIM_GET_SWITCH;
+ out_buf->aucDataBuf[0] = atomic_read(&g_vsim_switch);
+ out_buf->usDataLen = 1;
+ break;
+ case MSG_CMD_VSIM_SET_SWITCH:
+ if(default_param == 0 || default_param == 1) {
+ atomic_set(&g_vsim_switch, default_param);
+ }else{
+ printk("vsim_app_proc SET_SWITCH=%d fail!\n",default_param);
+ return 0;
+ }
+ break;
+ case MSG_CMD_VSIM_GET_STANDBY:
+ out_buf->usMsgCmd = MSG_CMD_VSIM_GET_STANDBY;
+ out_buf->aucDataBuf[0] = atomic_read(&g_vsim_standby);
+ out_buf->usDataLen = 1;
+ break;
+ case MSG_CMD_VSIM_SET_STANDBY:
+ if(default_param == 0 || default_param == 1) {
+ if(atomic_read(&g_vsim_standby) != default_param){
+ if(!atomic_read(&g_vsim_tauing)){
+ unsigned char old_stat = g_vsim_psnet_stat;//fetch the cached link state
+ g_vsim_psnet_stat = psnet_get_pschid_stat_all();//record the current link state
+ psnet_set_pschid_stat_by_sim(0, !default_param);//close the current link
+ psnet_set_pschid_stat_by_sim(old_stat, default_param);//restore the cached link state
+ }
+ if(default_param){
+ g_vsim_cid_reserved = cid_reserved;
+ cid_reserved |= 15;
+ } else {
+ cid_reserved = g_vsim_cid_reserved;
+ }
+ atomic_set(&g_vsim_standby, default_param);
+ }
+ }else{
+ printk("vsim_app_proc SET_STANDBY=%d fail!\n",default_param);
+ return 0;
+ }
+ break;
+ case MSG_CMD_VSIM_GET_TAUING:
+ out_buf->usMsgCmd = MSG_CMD_VSIM_GET_TAUING;
+ out_buf->aucDataBuf[0] = atomic_read(&g_vsim_tauing);
+ out_buf->usDataLen = 1;
+ break;
+ case MSG_CMD_VSIM_SET_TAUING:
+ if(default_param == 0 || default_param == 1) {
+ static int old_standby = 0;
+ static unsigned char stat = 0;
+ if(atomic_read(&g_vsim_tauing) == default_param){
+ printk("vsim_app_proc SET_TAUING=%d dup!\n",default_param);
+ return 0;
+ }
+ if(default_param){
+ stat = psnet_get_pschid_stat_all();
+ old_standby = atomic_read(&g_vsim_standby);
+ psnet_set_pschid_stat_by_sim(0, old_standby);//close the current link
+ }else{
+ if(old_standby == atomic_read(&g_vsim_standby)){
+ psnet_set_pschid_stat_by_sim(stat, old_standby);//restore the link state cached at TAU start
+ }else{
+ psnet_set_pschid_stat_by_sim(g_vsim_psnet_stat, !old_standby);//restore the cached link state
+ g_vsim_psnet_stat = stat;//record the current link state
+ }
+ }
+ atomic_set(&g_vsim_tauing, default_param);
+ }else{
+ printk("vsim_app_proc SET_TAUING=%d fail!\n",default_param);
+ return 0;
+ }
+ break;
+ case MSG_CMD_VSIM_GET_AUTHING:
+ out_buf->usMsgCmd = MSG_CMD_VSIM_GET_AUTHING;
+ out_buf->aucDataBuf[0] = atomic_read(&g_vsim_authing);
+ out_buf->usDataLen = 1;
+ break;
+ case MSG_CMD_VSIM_SET_AUTHING:
+ if(default_param == 0 || default_param == 1) {
+ atomic_set(&g_vsim_authing, default_param);
+ }else{
+ printk("vsim_app_proc SET_AUTHING=%d fail!\n",default_param);
+ return 0;
+ }
+ break;
+ case MSG_CMD_VSIM_GET_CARD_STAT:
+ out_buf->usMsgCmd = MSG_CMD_VSIM_GET_CARD_STAT;
+#ifdef USE_CPPS_KO
+ cpps_callbacks.zAti_GetDualCardStat(&out_buf->aucDataBuf[0], &out_buf->aucDataBuf[1], &out_buf->aucDataBuf[2]);
+#else
+ zAti_GetDualCardStat(&out_buf->aucDataBuf[0], &out_buf->aucDataBuf[1], &out_buf->aucDataBuf[2]);
+#endif
+ printk("vsim_app_proc GET_CARD_STAT=%d %d %d !\n",out_buf->aucDataBuf[0], out_buf->aucDataBuf[1], out_buf->aucDataBuf[2]);
+ out_buf->usDataLen = 3;
+ break;
+ case MSG_CMD_VSIM_GET_FLOW_STATISTIC:
+ out_buf->usMsgCmd = MSG_CMD_VSIM_GET_FLOW_STATISTIC;
+ if(default_param == 0 || default_param == 1) {
+ psnet_get_flow_statistic_by_sim(out_buf->aucDataBuf, out_buf->aucDataBuf+sizeof(unsigned long), default_param);
+ out_buf->usDataLen = sizeof(unsigned long)+sizeof(unsigned long);
+ }else{
+ printk("vsim_app_proc GET_FLOW_STATISTIC=%d fail!\n",default_param);
+ return 0;
+ }
+ break;
+ case MSG_CMD_VSIM_RESET_FLOW_STATISTIC:
+ if(default_param == 0 || default_param == 1) {
+ psnet_reset_stat_by_sim(default_param);
+ }else{
+ printk("vsim_app_proc RESET_STAT=%d fail!\n",default_param);
+ return 0;
+ }
+ break;
+ default:
+ printk("vsim_app_proc fail msgid=%d!\n",msg_buf->usMsgCmd);
+ return 0;
+ }
+ return 1;
+}
+
+static void *vsim_switch_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU)
+{
+ if (*pos >= 1)
+ return NULL;
+ return SEQ_START_TOKEN;
+}
+
+static void *vsim_switch_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void vsim_switch_seq_stop(struct seq_file *s, void *v)
+ __releases(RCU)
+{
+ return;
+}
+
+static int vsim_switch_seq_show(struct seq_file *s, void *v)
+{
+ seq_printf(s, "vsim state:%d standby:%d tauing:%d authing:%d datastat:%d msgflg:%d cid:%d-%d\n",
+ atomic_read(&g_vsim_switch), atomic_read(&g_vsim_standby), atomic_read(&g_vsim_tauing),
+ atomic_read(&g_vsim_authing), g_vsim_psnet_stat, g_vsim_msg_flag, g_vsim_cid_reserved, cid_reserved);
+ return 0;
+}
+
+static const struct seq_operations vsim_switch_seq_ops = {
+ .start = vsim_switch_seq_start,
+ .next = vsim_switch_seq_next,
+ .stop = vsim_switch_seq_stop,
+ .show = vsim_switch_seq_show
+};
+
+static int vsim_switch_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &vsim_switch_seq_ops);
+}
+
+static ssize_t vsim_switch_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ size_t size;
+ char vsim_switch[2] = {0};
+
+ /* count includes one trailing terminator, so only a single '0' or '1' is accepted */
+ if (count != 2)
+ return -EINVAL;
+
+ if (copy_from_user(vsim_switch, buffer, 1))
+ return -EFAULT;
+
+ if (vsim_switch[0] < '0' || vsim_switch[0] > '1')
+ return -EINVAL;
+ atomic_set(&g_vsim_switch, vsim_switch[0]-'0');
+ return count;
+}
+
+static const struct file_operations vsim_switch_file_ops = {
+ .owner = THIS_MODULE,
+ .open = vsim_switch_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = vsim_switch_set,
+};
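+/*
+ * Usage sketch for the proc interface above (illustrative, not part of the
+ * driver): ATchannelInit() creates the entry as /proc/vsim_switch. Reading it
+ * prints the state line from vsim_switch_seq_show(); writing a single '0' or
+ * '1' followed by a terminator (count == 2) toggles g_vsim_switch, e.g. from
+ * a root shell:
+ *
+ *   cat /proc/vsim_switch
+ *   echo 1 > /proc/vsim_switch
+ */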
+
+int atio_vsim_apdu_cfg(unsigned char*apdu_req, unsigned short apdu_req_len, unsigned char *apdu_rsp, unsigned short *apdu_rsp_len, unsigned char slot,unsigned char is_auth)
+{
+ VSIM_MSG_BUF in_msg = {0};
+ VSIM_MSG_BUF out_msg = {0};
+
+ if(apdu_req == NULL || apdu_req_len == 0 || apdu_req_len+2 > VSIM_MAX_MSG_LEN || apdu_rsp == NULL || apdu_rsp_len == NULL){
+ printk("vsim_apdu param err in=0x%p %d out=0x%p 0x%p!\n", apdu_req, apdu_req_len, apdu_rsp, apdu_rsp_len);
+ return ATIO_ERROR;
+ }
+ in_msg.usMsgCmd = MSG_CMD_VSIM_APDU;
+ in_msg.usDataLen = apdu_req_len;
+ in_msg.aucDataBuf[0] = slot;
+ in_msg.aucDataBuf[1] = is_auth;
+ memcpy(&in_msg.aucDataBuf[2], apdu_req, apdu_req_len);
+ if(vsim_rw_sync(&in_msg, &out_msg, VSIM_APDU_TIMEOUT)){
+ if(out_msg.usDataLen == 0 || out_msg.usDataLen > 264){/* driver-side response buffer is 264 bytes */
+ printk("vsim_apdu rsp len=%d fail!\n", out_msg.usDataLen);
+ return ATIO_ERROR;
+ }
+ memcpy(apdu_rsp, out_msg.aucDataBuf, out_msg.usDataLen);
+ *apdu_rsp_len = out_msg.usDataLen;
+ printk("vsim_apdu succ len=%d msg=%d!\n", out_msg.usDataLen, out_msg.usMsgCmd);
+ return ATIO_SUCCESS;
+ }
+ return ATIO_ERROR;
+}
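+/*
+ * Caller sketch for atio_vsim_apdu_cfg() (illustrative only: the sample APDU,
+ * the slot value and the helper name are assumptions, not taken from this
+ * driver).
+ */
+#if 0
+static int vsim_select_mf_example(void)
+{
+ /* ISO 7816 SELECT MF: CLA INS P1 P2 Lc Data */
+ unsigned char req[] = { 0x00, 0xA4, 0x00, 0x04, 0x02, 0x3F, 0x00 };
+ unsigned char rsp[264]; /* matches the 264-byte driver response buffer */
+ unsigned short rsp_len = 0;
+
+ if (atio_vsim_apdu_cfg(req, sizeof(req), rsp, &rsp_len,
+ 0 /* slot */, 0 /* not an AUTH APDU */) != ATIO_SUCCESS)
+ return -1;
+ /* rsp[0..rsp_len-1] now holds the APDU response including SW1/SW2 */
+ return 0;
+}
+#endif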
+
+static int PS_CallBack_Init = 0;
+extern int transdataToTCPIP(unsigned int index, void *buffer, unsigned int length);
+extern void psnet_set_pschid_stat(unsigned int chid, unsigned int newstat);
+
+//PL add 4GPIO power save
+#ifdef BTRUNK_SUPPORT
+static int g_bFGpioSet = 0;
+extern int zDrvXp2xp_Cp2ApWakeupAp(void);
+#endif
+
+int PSCallBackEntry(unsigned char ch_ID, unsigned char *data, unsigned short dataLen, T_zAti2_CtrmChInd chInd)
+{
+ int ret = ATIO_SUCCESS;
+ int ret2 = 0;
+ zx29_rpmsg_channel *rpmsg_channel = NULL;
+ unsigned char chID = ch_ID;
+
+ if(chID == 72)
+ {
+#ifdef USE_CPPS_KO
+ cpps_callbacks.zUsat_SendAtCmd(72, data, dataLen, 0);
+#else
+ zUsat_SendAtCmd(72, data, dataLen, 0);
+#endif
+ return ATIO_SUCCESS;
+ }
+ /* data from an ETH channel: forward it to the TCP/IP stack via the callback */
+ if( (ZATI2_CHIND_PSD == chInd) )
+ {
+#ifdef BTRUNK_SUPPORT
+ if(1 == g_bFGpioSet)
+ {
+ //PL add 4GPIO power save
+ ret2 = zDrvXp2xp_Cp2ApWakeupAp();
+ if(ret2 < 0)
+ {
+ printk(KERN_ERR "ZTE-TSP zx29 USB wake up failed\n");
+ }
+ }
+#endif
+ ret = transdataToTCPIP(chID, data, dataLen);
+ return ret;
+ }
+ /* ETH channel: PS sent data while the data channel was not yet activated or already deactivated */
+ else if(ZATI2_CHIND_TURN_PSD == chInd)
+ {
+ //zOss_ASSERT(0);
+ //printk("TURN_PSD :PS chid [%d] is opened\n", chID);
+ psnet_set_pschid_stat(chID, 1);
+ return ATIO_SUCCESS;
+ }
+ else if(ZATI2_CHIND_TURN_AT == chInd)
+ {
+ psnet_set_pschid_stat(chID, 0);
+ return ATIO_SUCCESS;
+ }
+ if(chID > VSIM_CHID_BASE) {
+ if(atomic_read(&g_vsim_switch) == 0){
+ printk("err vsim0 chid=%d cmd=%s!\n",chID, data);
+ return ATIO_SUCCESS;
+ }
+ chID = chID - VSIM_CHID_BASE;
+ } else if(atomic_read(&g_vsim_switch)){
+ printk("err vsim1 chid=%d cmd=%s!\n",chID, data);
+ return ATIO_SUCCESS;
+ }
+ if (chID > CHANNEL_NUM || chID == 0)
+ {
+ printk("err chid=%d cmd=%s!\n",ch_ID, data);
+ return ATIO_SUCCESS;
+ }
+ /* data from an AT channel */
+ if( (ZATI2_CHIND_AT == chInd) && ((rpmsg_channel = &rpmsg_sim_zx29.rpmsg_channel[chID-1]) != NULL) )
+ {
+ //printk("channel %d ind cmd %s\n",chID, data);
+
+#ifdef BTRUNK_SUPPORT
+ //PL add 4GPIO power save
+ if(1 == g_bFGpioSet)
+ {
+ ret2 = zDrvXp2xp_Cp2ApWakeupAp();
+ if(ret2 < 0)
+ {
+ printk(KERN_ERR "ZTE-TSP zx29 USB wake up failed\n");
+ }
+ }
+#endif
+ ret = TransdataToRpmsg(chID, data, dataLen);
+ }
+ /* Unsolicited AT reports are sent on every physical channel; for channels that are not open, return ERR here so the PS can free the memory. */
+ else
+ {
+ //printk("%s : PS chid [%d] not open, chInd = [%d] \r\n", __func__, chID, chInd);
+ return ATIO_ERROR;
+ }
+
+ return ret;
+}
+
+int zUsatFuncCb(UINT8 *data, UINT16 dataLen)
+{
+ int ret =-1;
+
+ printk("zUsatFuncCb: %s.\n",data);
+#ifdef USE_CPPS_KO
+ ret = cpps_callbacks.zAti2_Send(72,data,dataLen,ZATI2_CHIND_AT);
+#else
+ ret = zAti2_Send(72,data,dataLen,ZATI2_CHIND_AT);
+#endif
+ return ret;
+}
+
+void ATchannelOpen(unsigned int chid)
+{
+#ifdef USE_DSDS_VSIM
+#ifdef USE_CPPS_KO
+ cpps_callbacks.zAti2_Open(chid + VSIM_CHID_BASE);
+#else
+ zAti2_Open(chid + VSIM_CHID_BASE);
+#endif
+#endif
+ //rpmsg_channel->chID= PS_AT_CH[index].at_chid;
+#ifdef USE_CPPS_KO
+ if(cpps_callbacks.zAti2_Open(chid) != ZATI2_SUCCESS)
+#else
+ if(zAti2_Open(chid) != ZATI2_SUCCESS)
+#endif
+ //panic("open ps channel failed!!!!");
+ //else
+ {
+ //printk("open channel fail %d !\n",chid);
+ }
+
+ if(PS_CallBack_Init == 0)
+ {
+#ifdef USE_CPPS_KO
+ if(cpps_callbacks.zAti2_Open(72) != ZATI2_SUCCESS)
+#else
+ if(zAti2_Open(72) != ZATI2_SUCCESS)
+#endif
+ panic("open ps channel 72 failed!!!!");
+#ifdef USE_CPPS_KO
+ cpps_callbacks.zUsat_RegisterSendMsgFun((ZUSAT_SENDATCMDTOACCESSCHANNEL_FUNC)zUsatFuncCb);
+ cpps_callbacks.zAti2_RegRecvCb((T_ZAti2_AtRecvFunc)PSCallBackEntry);
+#else
+ zUsat_RegisterSendMsgFun((ZUSAT_SENDATCMDTOACCESSCHANNEL_FUNC)zUsatFuncCb);
+ zAti2_RegRecvCb((T_ZAti2_AtRecvFunc)PSCallBackEntry);
+#endif
+ PS_CallBack_Init = 1;
+ }
+ return ;
+}
+void ATchannelClose(unsigned int chid)
+{
+ //ʵÏÖΪ¿Õ£¬ATͨµÀ²»Ö§³Ö¶¯Ì¬¹Ø±Õ
+}
+
+void TransdataToPS(unsigned int ch_id, const void *buffer, unsigned int length)
+{
+ unsigned int chid = ch_id;
+ int writeRst=0;
+
+ if(atomic_read(&g_vsim_switch) && chid < VSIM_CHID_BASE)
+ chid = chid + VSIM_CHID_BASE;
+
+ //printk("TransdataToPS chid:%d, buffer:%s !\n",chid, (char *)buffer);
+
+#ifdef BTRUNK_SUPPORT
+ char *str = strstr(buffer, "AT+PTTFGPIOSET=");
+ if(str != NULL)
+ {
+ if(str[15] == '1')
+ {
+ g_bFGpioSet = 1;
+ PSCallBackEntry(chid,"\r\nOK\r\n",1+strlen("\r\nOK\r\n"),ZATI2_CHIND_AT);
+ }
+ else if(str[15] == '0')
+ {
+ g_bFGpioSet = 0;
+ PSCallBackEntry(chid,"\r\nOK\r\n",1+strlen("\r\nOK\r\n"),ZATI2_CHIND_AT);
+ }
+ else
+ {
+ PSCallBackEntry(chid,"\r\n+CME:ERROR:6003\r\n",1+strlen("\r\n+CME:ERROR:6003\r\n"),ZATI2_CHIND_AT);
+ }
+ return;
+ }
+#endif
+#ifdef USE_CPPS_KO
+ writeRst = cpps_callbacks.zAti2_Send(chid, buffer, length, ZATI2_CHIND_AT);
+#else
+ writeRst = zAti2_Send(chid, buffer, length, ZATI2_CHIND_AT);
+#endif
+ if(writeRst != ZATI2_SUCCESS)
+ {
+ //panic("send at string to ps failed!!!!");
+ PSCallBackEntry(chid,"\r\n+CME ERROR: 8006\r\n",1+strlen("\r\n+CME ERROR: 8006\r\n"),ZATI2_CHIND_AT);
+ }
+ else
+ {
+ //printk("TransdataToPS channel %d !\n",chid);
+ }
+ return ;
+}
+
+int TransdataToRpmsg(unsigned int chid, const void *buffer, unsigned int length)
+{
+ //zx29_rpmsg_channel *rpmsg_channel = &rpmsg_sim_zx29.rpmsg_channel[chid-1];
+ rpmsg_recv_notify(chid,buffer,length);
+ return 0;
+}
+#if 1
+int ATchannelInit(void)
+{
+ int i = 0;
+ for (i = 0; i < rpmsg_sim_zx29.num; i++)
+ {
+ if((i != 9) && (i != 0)) /* channel_9 is reserved for AMT, channel_0 for vsimagt */
+ {
+ registerOpsCallback(i, ATchannelOpen, ATchannelClose, TransdataToPS);
+ }
+ }
+#ifdef USE_DSDS_VSIM
+ mutex_init(&g_vsim_msg_mutex);
+ sema_init(&g_vsim_read_sem, 0);
+ sema_init(&g_vsim_write_sem, 0);
+ proc_create("vsim_switch", 0440, NULL, &vsim_switch_file_ops);
+ g_vsim_read = vsim_app_read;
+ g_vsim_write = vsim_app_write;
+ g_vsim_proc = vsim_app_proc;
+#endif
+ return 0;
+}
+
+int ATchannelExit(void)
+{
+ return 0;
+}
+
+late_initcall(ATchannelInit);
+module_exit(ATchannelExit);
+
+MODULE_AUTHOR("ZTE");
+MODULE_DESCRIPTION("ZTE Lan Net Device");
+MODULE_LICENSE("GPL");
+#endif
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.h
new file mode 100644
index 0000000..4193992
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/at_io.h
@@ -0,0 +1,38 @@
+
+#ifndef RPMSGIO_H
+#define RPMSGIO_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+
+#include <asm/irq.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/map.h>
+
+struct atchan_node{
+ struct list_head node; /* list linkage */
+ int type; /* channel type */
+ int ch_id; /* virtual channel ID */
+ int ps_chid; /* protocol-stack channel ID */
+ unsigned char used;
+};
+
+void ATchannelOpen(unsigned int chid);
+void ATchannelClose(unsigned int chid);
+void TransdataToPS(unsigned int chid, const void *buffer, unsigned int length);
+int TransdataToRpmsg(unsigned int chid, const void *buffer, unsigned int length);
+
+#endif //RPMSGIO_H
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/icp.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/icp.c
new file mode 100644
index 0000000..7d060ca
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/icp.c
@@ -0,0 +1,269 @@
+/*******************************************************************************
+ * Copyright (C) 2013, ZTE Corporation.
+ *
+ * File Name: icp.c
+ * File Mark:
+ * Description:
+ * Others:
+ * Version: V0.1
+ * Author: ShiDeYou
+ * Date: 2013-3-13
+ * History 1:
+ * Date:
+ * Version:
+ * Author:
+ * Modification:
+ * History 2:
+ ******************************************************************************/
+
+/*******************************************************************************
+* Include files *
+*******************************************************************************/
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <mach/irqs.h>
+#include <mach/iomap.h>
+#include <asm/mach/map.h>
+#include <linux/soc/zte/rpm/rpmsg.h>
+#include <linux/kthread.h>
+
+/*******************************************************************************
+* Macro *
+*******************************************************************************/
+#define icp_get_reg(actor_id) icp_hwdata[actor_id].reg
+
+/*******************************************************************************
+* Global Variable *
+*******************************************************************************/
+static struct zx29_icp_hwdata icp_hwdata[ACTOR_MAXID];
+
+/*******************************************************************************
+* Function: icp_set_int
+* Description: This function is used for generating icp interrupt to inform remote cpu;
+* Parameters:
+* Input:
+ actorID: id of remote cpu
+ chID: id of channel
+* Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+static void icp_set_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+
+ if(chID<32)
+ icp_reg->control.low_word = (1<<chID);
+ else
+ icp_reg->control.high_word = (1<<(chID-32));
+}
+
+/*******************************************************************************
+* Function: icp_clear_int
+* Description: This function is used to clear the icp interrupt raised by the remote cpu;
+* Parameters:
+* Input:
+ actorID: id of remote cpu
+ chID: id of channel
+* Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+static void icp_clear_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+
+ if(chID<32)
+ icp_reg->clear.low_word = (1<<chID);
+ else
+ icp_reg->clear.high_word = (1<<(chID-32)) ;
+}
+
+/*******************************************************************************
+* Function: icp_get_int
+* Description: This function is used to read the pending icp interrupt bits raised by the remote cpu;
+* Parameters:
+* Input:
+* actorID: id of remote cpu
+* Output:None
+*
+* Returns: the pending channel bits (T_HalIcp_Dword)
+*
+*
+* Others:
+********************************************************************************/
+static T_HalIcp_Dword icp_get_int(T_ZDrvRpMsg_ActorID actorID)
+{
+ T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+ T_HalIcp_Dword IcpState = {0};
+
+ IcpState.high_word = icp_reg->state.high_word;
+ IcpState.low_word = icp_reg->state.low_word;
+
+ return IcpState;
+}
+
+/*******************************************************************************
+* Function: icp_get_int_state
+* Description: This function is used to check whether the icp interrupt for a channel is still pending at the remote cpu;
+* Parameters:
+* Input:
+ actorID: id of remote cpu
+ chID: id of channel
+* Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+static int icp_get_int_state(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+
+ if(chID<32)
+ {
+ if(icp_reg->in_state.low_word & (0x1<<chID))
+ return TRUE;
+ else
+ return FALSE;
+ }
+ else
+ {
+ if(icp_reg->in_state.high_word & (0x1<<(chID-32)))
+ return TRUE;
+ else
+ return FALSE;
+ }
+}
+
+/*******************************************************************************
+* Function: icp_mask_int
+* Description: This function is used to mask the interrupt of a channel;
+* Parameters:
+* Input:
+* Output:
+*
+* Returns: NONE
+*
+*
+* Others:
+********************************************************************************/
+static void icp_mask_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+
+ if(chID<32)
+ icp_reg->mask.low_word |= (0x1<<chID);
+ else
+ icp_reg->mask.high_word |= (0x1<<(chID-32));
+}
+
+/*******************************************************************************
+* Function: icp_unmask_int
+* Description: This function is used to unmask the interrupt of a channel;
+* Parameters:
+* Input:
+* Output:
+*
+* Returns:
+* NONE
+*
+*
+* Others:
+********************************************************************************/
+static void icp_unmask_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+
+ if(chID < 32)
+ icp_reg->mask.low_word &= ~(0x1<<chID);
+ else
+ icp_reg->mask.high_word &= ~(0x1<<(chID-32));
+}
+
+static icp_operations icp_general_ops = {
+ .Icp_SetInt = icp_set_int,
+ .Icp_ClearInt = icp_clear_int,
+ .Icp_GetInt = icp_get_int,
+ .Icp_GetIntState = icp_get_int_state,
+ .Icp_Mask = icp_mask_int,
+ .Icp_UnMask = icp_unmask_int,
+};
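+/*
+ * Bit mapping sketch for the register accesses above (illustrative values):
+ * channels 0..31 select a bit in the low word of the 64-bit ICP register
+ * pair, channels 32..63 a bit in the high word.
+ */
+#if 0
+static void icp_bit_for_channel(T_ZDrvRpMsg_ChID chID,
+ unsigned int *low, unsigned int *high)
+{
+ *low = 0;
+ *high = 0;
+ if (chID < 32)
+ *low = 1u << chID; /* e.g. chID 9 -> low = 0x00000200 */
+ else
+ *high = 1u << (chID - 32); /* e.g. chID 40 -> high = 0x00000100 */
+}
+#endif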
+
+static int __devinit icp_probe(struct platform_device *pdev)
+{
+ struct resource *icp_mem = NULL;
+ struct resource *irq = NULL;
+ int ret;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if( !irq ){
+ dev_err(&pdev->dev, "[ICP]Cannot get IORESOURCE_IRQ\n");
+ return -ENOENT;
+ }
+
+ icp_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if ( !icp_mem ){
+ dev_err(&pdev->dev, "[ICP]Cannot get IORESOURCE_MEM\n");
+ return -ENOENT;
+ }
+
+ icp_hwdata[pdev->id].int_line = irq->start;
+ icp_hwdata[pdev->id].reg = (T_HalIcp_Reg *)(icp_mem->start);
+
+ icp_hwdata[pdev->id].reg->mask.high_word = 0xffffffff;
+ icp_hwdata[pdev->id].reg->mask.low_word = 0xffffffff;
+
+ ret = icp_rpmsg_register(pdev, (void *)&icp_hwdata[pdev->id]);
+ if (ret)
+ printk("rpmsg: icp_probe actorID = %d, failed[%d]!\n", pdev->id, ret);
+ else
+ printk("rpmsg: icp_probe actorID = %d, success!\n", pdev->id);
+
+ rpmsg_set_ops(&icp_general_ops);
+
+ return ret;
+}
+
+static int __devexit icp_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ ret = icp_rpmsg_unregister(pdev);
+ return ret;
+}
+
+static struct platform_driver icp_driver = {
+ .probe = icp_probe,
+ .remove = __devexit_p(icp_remove),
+ .driver = {
+ .name = "icp",
+ },
+};
+
+static int __init icp_init(void)
+{
+ return platform_driver_register(&icp_driver);
+}
+
+subsys_initcall(icp_init);
+
+static void __exit icp_exit(void)
+{
+ platform_driver_unregister(&icp_driver);
+}
+
+module_exit(icp_exit);
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/icp_rpmsg.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/icp_rpmsg.c
new file mode 100644
index 0000000..b4645a8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/icp_rpmsg.c
@@ -0,0 +1,1844 @@
+/*******************************************************************************
+ * Copyright (C) 2013, ZTE Corporation.
+ *
+ * File Name: icp_rpmsg.c
+ * File Mark:
+ * Description:
+ * Others:
+ * Version: V0.1
+ * Author: ShiDeYou
+ * Date: 2013-3-13
+ * History 1:
+ * Date:
+ * Version:
+ * Author:
+ * Modification:
+ * History 2:
+ ******************************************************************************/
+
+/*******************************************************************************
+* Include files *
+*******************************************************************************/
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/err.h>
+#include <linux/notifier.h>
+#include <linux/semaphore.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/map.h>
+#include <linux/device.h>
+#include <linux/soc/zte/rpm/rpmsg.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <linux/reboot.h>
+#include <linux/platform_device.h>
+
+#include <linux/wakelock.h>
+#include <mach/board.h>
+
+//#define ZX29_ICP_TEST 1
+#define ICP_TEST_SELF_TXRX /* AP self-test (loopback) */
+#define ICP_TEST_PS2AP
+
+#define CIRCUL_BUFFER_USED 1
+/*******************************************************************************
+* Macro *
+*******************************************************************************/
+#define RPMSG_ALIGN 0x4
+#if CIRCUL_BUFFER_USED
+#else
+#define RPMSG_ADDTIONAL_BYTES 0x4
+#endif
+#define RPMSG_SPACEEND 0x4445 /*ED*/
+#define ALIGN_RPMSG(size,align) (((unsigned int)size + align - 1)&(~(align - 1)))
+#define RPMSG_MSGHEAD_FLAG 0x5453 /*ST*/
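+/*
+ * Worked example of the framing macros above: ALIGN_RPMSG(5, RPMSG_ALIGN)
+ * rounds 5 up to 8 and ALIGN_RPMSG(8, RPMSG_ALIGN) stays 8; every message in
+ * the shared buffer starts with the 'ST' (0x5453) header flag, and 'ED'
+ * (0x4445) marks the end of usable space in the non-circular layout.
+ */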
+/*******************************************************************************
+* Global Variable *
+*******************************************************************************/
+typedef struct _T_HalRpoc
+{
+ T_ZDrvRpMsg_ActorID actorID;
+ volatile u64 chID;
+}T_HalRpoc;
+
+typedef struct _T_RpMsg_struct
+{
+ T_HalRpoc event;
+ struct work_struct rpmsg_work;
+}T_RpMsg_struct;
+
+typedef struct _rpmsg_resource
+{
+ unsigned int *RpMsgRead_Exit;
+ struct mutex *rpmsgread_mutex;
+ struct mutex *rpmsgwrite_mutex;
+ wait_queue_head_t *rpmsgwaitq;
+ struct semaphore *RpmsgSema;
+ T_ZDrvRpMsg_CallbackFunction *s_RpMsgCallbackList;
+ unsigned int *sendPos;
+}rpmsg_resource_struct;
+
+struct workqueue_struct *rpmsg_workqueue[ACTOR_MAXID];
+T_RpMsg_struct rpmsg_struct[ACTOR_MAXID];
+struct mutex rpmsgch_mutex[ACTOR_MAXID];
+
+/* some resource for rpmsg */
+static unsigned int RpMsgRead_Exit_ps[CHANNEL_AP2PS_MAXID] = {0};
+static unsigned int RpMsgRead_Exit_m0[CHANNEL_AP2M0_MAXID] = {0};
+static struct mutex rpmsgread_mutex_ps[CHANNEL_AP2PS_MAXID];
+static struct mutex rpmsgread_mutex_m0[CHANNEL_AP2M0_MAXID];
+static struct mutex rpmsgwrite_mutex_ps[CHANNEL_AP2PS_MAXID];
+static struct mutex rpmsgwrite_mutex_m0[CHANNEL_AP2M0_MAXID];
+static wait_queue_head_t rpmsgwaitq_ps[CHANNEL_AP2PS_MAXID];
+static wait_queue_head_t rpmsgwaitq_m0[CHANNEL_AP2M0_MAXID];
+static struct semaphore RpmsgSema_ps[CHANNEL_AP2PS_MAXID];
+static struct semaphore RpmsgSema_m0[CHANNEL_AP2M0_MAXID];
+static T_ZDrvRpMsg_CallbackFunction s_RpMsgCallbackList_ps[CHANNEL_AP2PS_MAXID];
+static T_ZDrvRpMsg_CallbackFunction s_RpMsgCallbackList_m0[CHANNEL_AP2M0_MAXID];
+static unsigned int s_SendPos_ps[CHANNEL_AP2PS_MAXID];
+static unsigned int s_SendPos_m0[CHANNEL_AP2M0_MAXID];
+static DEFINE_SPINLOCK(rpmsg_lock);
+
+static rpmsg_resource_struct rpmsg_resource[] =
+{
+ [0] = {
+ .RpMsgRead_Exit = RpMsgRead_Exit_m0,
+ .rpmsgread_mutex = rpmsgread_mutex_m0,
+ .rpmsgwrite_mutex = rpmsgwrite_mutex_m0,
+ .rpmsgwaitq = rpmsgwaitq_m0,
+ .RpmsgSema = RpmsgSema_m0,
+ .s_RpMsgCallbackList= s_RpMsgCallbackList_m0,
+ .sendPos = s_SendPos_m0,
+ },
+ [1] = {
+ .RpMsgRead_Exit = RpMsgRead_Exit_ps,
+ .rpmsgread_mutex = rpmsgread_mutex_ps,
+ .rpmsgwrite_mutex = rpmsgwrite_mutex_ps,
+ .rpmsgwaitq = rpmsgwaitq_ps,
+ .RpmsgSema = RpmsgSema_ps,
+ .s_RpMsgCallbackList= s_RpMsgCallbackList_ps,
+ .sendPos = s_SendPos_ps,
+ },
+};
+
+static T_HalRpMsg_ChRam ps_channel_config[CHANNEL_AP2PS_MAXID] =
+{
+ CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR,CH_DDR,CH_DDR, /* 0~~7 */
+ CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR,
+ CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR,
+ CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR,
+
+ CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR,
+ CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR,
+ CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR,
+ CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR, CH_DDR
+};
+
+static T_HalRpMsg_ChRam m0_channel_config[CHANNEL_AP2M0_MAXID] =
+{
+ CH_IRAM, CH_IRAM, CH_IRAM, CH_IRAM, CH_IRAM,
+ CH_IRAM, CH_IRAM, CH_IRAM, CH_IRAM, CH_IRAM
+};
+
+static Icp_rpmsg_drv rpmsg_ps_info = {
+ .name = "icp_ps",
+ .Channel_config =
+ {
+ .ChConfig = ps_channel_config,
+ },
+};
+
+static Icp_rpmsg_drv rpmsg_m0_info = {
+ .name = "icp_m0",
+ .Channel_config =
+ {
+ .ChConfig = m0_channel_config,
+ },
+};
+
+static Icp_rpmsg_drv *Icp_rpmsg_drvs[] = { &rpmsg_m0_info, &rpmsg_ps_info, NULL };
+static const icp_operations *icp_ops;
+
+static struct wake_lock icp_wake_lock;
+extern void at_local_wakelock_timeout(long timeout);
+
+/**************************************************************************************
+ * macro
+ **************************************************************************************/
+#define icp_rpmsg_get_buffer_type(actor_id, ch_id) \
+ (Icp_rpmsg_drvs[actor_id]->Channel_config.ChConfig[ch_id])
+
+#define icp_rpmsg_get_channel_send(actor_id, ch_id) \
+ (Icp_rpmsg_drvs[actor_id]->Channel_config.ChInfo_Send_Base + ch_id)
+
+#define icp_rpmsg_get_channel_recv(actor_id, ch_id) \
+ (Icp_rpmsg_drvs[actor_id]->Channel_config.ChInfo_Recv_Base + ch_id)
+
+#define icp_rpmsg_get_max_channel(actor_id) \
+ (Icp_rpmsg_drvs[actor_id]->channel_cnt)
+
+wait_queue_head_t *icp_rpmsg_get_waitq(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ return &(rpmsg_resource[actorID].rpmsgwaitq[chID]);
+}
+/*******************************************************************************
+* Function: icp_rpmsg_pa2va
+* Description: helper function that maps a channel buffer physical address to its kernel virtual address
+* Parameters:
+* Input:
+* actor_id:
+* ch_id:
+* Output:
+* offset:
+* Others:
+********************************************************************************/
+unsigned int ddr_base_offset = 0;
+static unsigned int iram_base_offset = (ZX_IRAM0_BASE-ZX29_IRAM0_PHYS);
+
+
+static unsigned long icp_rpmsg_pa2va(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID, unsigned long pa)
+{
+ if (icp_rpmsg_get_buffer_type(actorID, chID) == CH_IRAM)
+ return (pa+iram_base_offset);
+ else
+#ifdef _USE_CAP_SYS
+ return (pa + ddr_base_offset);
+#else
+ return (pa-ZX_RPMSG_DDR_BASE+ddr_base_offset);
+#endif
+}
+
+#if CIRCUL_BUFFER_USED
+/*******************************************************************************
+* Function: icp_rpmsg_read
+* Description: helper function that copies data out of the shared message ram ring buffer
+* Parameters:
+* Input:
+* channel_info:channel used
+* __dest:
+* __src_base:the offset of src buffer, when used, should +recvPos
+* __n:
+* Output:None
+*
+* Others:
+********************************************************************************/
+static void icp_rpmsg_read(T_HalRpMsg_ChInfo *channel_info,
+ void *__dest,
+ __const void *__src_base,
+ size_t __n)
+{
+ u32 free_cnt = 0;
+
+ free_cnt = channel_info->size - channel_info->RecvPos;
+
+ /* |+++sp-----rp+++| */
+ if(free_cnt >= __n)
+ {
+ memcpy(__dest, __src_base+channel_info->RecvPos, __n);
+ channel_info->RecvPos += (__n+__n%2);
+
+ if(channel_info->RecvPos == channel_info->size)
+ channel_info->RecvPos = 0;
+ }
+ else
+ {
+ memcpy(__dest, __src_base+channel_info->RecvPos, free_cnt);
+ memcpy(__dest+free_cnt, __src_base, __n-free_cnt);
+
+ channel_info->RecvPos = __n-free_cnt+__n%2;
+ }
+}
+
+/*******************************************************************************
+* Function: icp_rpmsg_move_recv_pos
+* Description: helper function that advances the receive pointer after a partial read;
+* when the buffer holds n bytes but the caller only read m of them,
+* RecvPos would be left inside the message, so move it forward to the
+* next message header
+* Parameters:
+* Input:
+* channel_info:channel used
+* __dest:
+* __src_base:the offset of src buffer, when used, should +recvPos
+* __n:
+* Output:None
+*
+* Others:
+********************************************************************************/
+static void icp_rpmsg_move_recv_pos(T_HalRpMsg_ChInfo *channel_info,
+ size_t __n)
+{
+ u32 free_cnt = 0;
+
+ free_cnt = channel_info->size - channel_info->RecvPos;
+
+ /* |+++sp-----rp+++| */
+ if(free_cnt >= __n)
+ {
+ channel_info->RecvPos += (__n+__n%2);
+
+ if(channel_info->RecvPos == channel_info->size)
+ channel_info->RecvPos = 0;
+ }
+ else
+ {
+ channel_info->RecvPos = __n-free_cnt+__n%2;
+ }
+}
+
+
+/*******************************************************************************
+* Function: icp_rpmsg_write
+* Description: helper function that copies data into the shared message ram ring buffer
+* Parameters:
+* Input:
+* channel_info:channel used
+* __dest_base:the offset of dest buffer, when used, should +sendPos
+* __src:
+* __n:
+* Output:None
+*
+* Others:
+********************************************************************************/
+static void icp_rpmsg_write(T_HalRpMsg_ChInfo *channel_info,
+ void *__dest_base,
+ __const void *__src,
+ size_t __n,
+ T_ZDrvRpMsg_ActorID actorID,
+ T_ZDrvRpMsg_ChID chID)
+{
+ u32 free_cnt = 0;
+
+ free_cnt = channel_info->size - rpmsg_resource[actorID].sendPos[chID];//channel_info->SendPos;
+
+ /* |---sp+++++rp---| */
+ /* |+++rp-----sp+++| */
+ if(free_cnt >= __n)
+ {
+ memcpy(__dest_base+rpmsg_resource[actorID].sendPos[chID], __src, __n);
+ rpmsg_resource[actorID].sendPos[chID] += (__n+__n%2);
+
+ if(rpmsg_resource[actorID].sendPos[chID] == channel_info->size)
+ rpmsg_resource[actorID].sendPos[chID] = 0;
+ }
+ else
+ {
+ memcpy(__dest_base+rpmsg_resource[actorID].sendPos[chID], __src, free_cnt);
+ memcpy(__dest_base, __src+free_cnt, __n-free_cnt);
+ rpmsg_resource[actorID].sendPos[chID] = (__n-free_cnt+__n%2);
+ }
+}
+#endif
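+/*
+ * Wrap-around example for icp_rpmsg_write() (a sketch with illustrative
+ * sizes): with channel size 16, sendPos 12 and a 6-byte write, the first 4
+ * bytes land at offsets 12..15, the remaining 2 at offsets 0..1, and sendPos
+ * ends up at 2; the "+ __n % 2" term keeps the positions 2-byte aligned for
+ * odd-length payloads.
+ */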
+
+/*******************************************************************************
+* Function: halRpMsg_IsRecvChEmpty
+* Description: This function checks whether the channel used to receive messages is empty;
+* Parameters:
+* Input:
+* channel_send:channel message will be send to
+* channel_recv:channel message will be received from
+* Output:None
+*
+* Returns:
+* TRUE: the receive channel is empty.
+* FALSE: the receive channel is not empty.
+*
+* Others:
+********************************************************************************/
+//static
+bool halRpMsg_IsRecvChEmpty(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ unsigned int RecvBase_remap = 0;
+ T_HalRpMsg_ChInfo *channel_recv = icp_rpmsg_get_channel_recv(actorID, chID);
+
+ if(!(channel_recv->flag & CHANNEL_FLAG)){
+ return TRUE;
+ }
+
+// RecvBase_remap = channel_recv->Base_Addr + icp_rpmsg_get_recvbase_offset(actorID, chID);
+ RecvBase_remap = icp_rpmsg_pa2va(actorID, chID, channel_recv->Base_Addr);
+
+ if (channel_recv->SendPos == channel_recv->RecvPos){
+ return TRUE;
+ }
+ else{
+#if CIRCUL_BUFFER_USED
+#else
+ if (((T_HalRpMsg_RpMsg *)(channel_recv->RecvPos + RecvBase_remap))->MsgHeader.flag == RPMSG_SPACEEND){
+ channel_recv->RecvPos = 0x0;/* rewind the read pointer */
+ }
+ if (channel_recv->SendPos == channel_recv->RecvPos){
+ return TRUE;
+ }
+#endif
+ return FALSE;
+ }
+}
+
+/*******************************************************************************
+* Function: zDrvRpMsg_Read
+* Description: This function is used for reading a message;
+* Parameters:
+* Input:
+* pMsg:message which will be read
+* Output:
+* pMsg
+*
+* Returns:
+* size:the length of data actually read
+* DRV_ERROR
+*
+* Others:
+********************************************************************************/
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+int zDrvRpMsg_Read(const T_ZDrvRpMsg_Msg *pMsg)
+#else
+int zDrvRpMsg_Read_Cap(const T_ZDrvRpMsg_Msg *pMsg)
+#endif
+{
+ unsigned int RecvBase_remap = 0;
+ T_HalRpMsg_ChInfo *channel_send = NULL;
+ T_HalRpMsg_ChInfo *channel_recv = NULL;
+#if CIRCUL_BUFFER_USED
+ T_HalRpMsg_MsgHeader tmpMsgHeader;
+#else
+ unsigned int size=0;
+ T_HalRpMsg_RpMsg *RpMsg = NULL;
+#endif
+ unsigned int result_len = 0;
+
+ if ((pMsg == NULL)|| (pMsg->actorID >= ACTOR_MAXID) || (pMsg->chID >= icp_rpmsg_get_max_channel(pMsg->actorID))){
+ return RPMSG_INVALID_PARAMETER;
+ }
+
+ channel_send = icp_rpmsg_get_channel_send(pMsg->actorID, pMsg->chID);
+ channel_recv = icp_rpmsg_get_channel_recv(pMsg->actorID, pMsg->chID);
+
+ if (!(channel_send->flag & CHANNEL_FLAG)){
+ return RPMSG_CHANNEL_INEXISTANCE;
+ }
+
+ if ((pMsg->flag & RPMSG_READ_POLL) &&(halRpMsg_IsRecvChEmpty(pMsg->actorID, pMsg->chID) == TRUE)){
+ return RPMSG_NO_MSG;
+ }
+
+ rpmsg_resource[pMsg->actorID].RpMsgRead_Exit[pMsg->chID] = 0;
+
+ mutex_lock(&(rpmsg_resource[pMsg->actorID].rpmsgread_mutex[pMsg->chID]));
+ /*check the read buf is empty,if empty wait semaphore*/
+ while (halRpMsg_IsRecvChEmpty(pMsg->actorID, pMsg->chID) == TRUE)/* make sure no stale semaphore counts pile up */
+ {
+ if(down_interruptible(&(rpmsg_resource[pMsg->actorID].RpmsgSema[pMsg->chID]))){
+ mutex_unlock(&(rpmsg_resource[pMsg->actorID].rpmsgread_mutex[pMsg->chID]));
+ return -ERESTARTSYS;
+ }
+ if(rpmsg_resource[pMsg->actorID].RpMsgRead_Exit[pMsg->chID] == 1){
+ mutex_unlock(&(rpmsg_resource[pMsg->actorID].rpmsgread_mutex[pMsg->chID]));
+ return 0;
+ }
+ }
+
+// RecvBase_remap = channel_recv->Base_Addr + icp_rpmsg_get_recvbase_offset(pMsg->actorID, pMsg->chID);
+ RecvBase_remap = icp_rpmsg_pa2va(pMsg->actorID, pMsg->chID, channel_recv->Base_Addr);
+
+#if CIRCUL_BUFFER_USED
+ /* get msg header */
+ icp_rpmsg_read(channel_recv,
+ (unsigned char *)&tmpMsgHeader,
+ (unsigned char *)RecvBase_remap,
+ sizeof(T_HalRpMsg_MsgHeader));
+
+ if (tmpMsgHeader.flag != RPMSG_MSGHEAD_FLAG){
+ mutex_unlock(&(rpmsg_resource[pMsg->actorID].rpmsgread_mutex[pMsg->chID]));
+ return RPMSG_CHANNEL_MSG_ERR;
+ }
+
+ result_len = min(pMsg->len, (unsigned int)(tmpMsgHeader.len));
+ icp_rpmsg_read(channel_recv,
+ (unsigned char *)pMsg->buf,
+ (unsigned char *)RecvBase_remap,
+ result_len);
+ if((unsigned int)(tmpMsgHeader.len) > result_len)
+ icp_rpmsg_move_recv_pos(channel_recv, (unsigned int)(tmpMsgHeader.len) - result_len - result_len%2);
+#else
+ RpMsg = (T_HalRpMsg_RpMsg *)(channel_recv->RecvPos + RecvBase_remap);
+ if (RpMsg->MsgHeader.flag != RPMSG_MSGHEAD_FLAG){
+ mutex_unlock(&(rpmsg_resource[pMsg->actorID].rpmsgread_mutex[pMsg->chID]));
+ return RPMSG_CHANNEL_MSG_ERR;
+ }
+
+ result_len = min(pMsg->len, (unsigned int)(RpMsg->MsgHeader.len));
+ size = RpMsg->MsgHeader.len + sizeof(T_HalRpMsg_MsgHeader);
+ size = ALIGN_RPMSG(size, RPMSG_ALIGN);
+ memcpy((unsigned int *)pMsg->buf, &(RpMsg->data), result_len);
+ channel_recv->RecvPos += size;
+#endif
+ mutex_unlock(&(rpmsg_resource[pMsg->actorID].rpmsgread_mutex[pMsg->chID]));
+
+ return result_len;
+}
+//EXPORT_SYMBOL(zDrvRpMsg_Read);
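+/*
+ * Caller sketch for the read path (illustrative; the channel values are
+ * assumptions and the symbol name assumes the CONFIG_ARCH_ZX297520V3_CAP
+ * build).  Without RPMSG_READ_POLL the call blocks on the per-channel
+ * semaphore until the peer raises the ICP interrupt.
+ */
+#if 0
+static int rpmsg_read_example(void)
+{
+ unsigned char buf[64];
+ T_ZDrvRpMsg_Msg msg = {0};
+ int n;
+
+ msg.actorID = PS_ID;
+ msg.chID = ICP_CHANNEL_PSM;
+ msg.buf = buf;
+ msg.len = sizeof(buf);
+ msg.flag = 0; /* no RPMSG_READ_POLL: block until data arrives */
+
+ n = zDrvRpMsg_Read(&msg); /* bytes copied, or an RPMSG_* error code */
+ return (n > 0) ? 0 : -1;
+}
+#endif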
+
+/*******************************************************************************
+* Function: zDrvRpMsg_ReadLockIrq
+* Description: This function is used for reading a message with local interrupts disabled;
+* Parameters:
+* Input:
+* pMsg:message which will be read
+* Output:
+* pMsg
+*
+* Returns:
+* size:the length of data actually read
+* DRV_ERROR
+*
+* Others:
+********************************************************************************/
+int zDrvRpMsg_ReadLockIrq(const T_ZDrvRpMsg_Msg *pMsg)
+{
+ unsigned int RecvBase_remap = 0;
+ T_HalRpMsg_ChInfo *channel_send = NULL;
+ T_HalRpMsg_ChInfo *channel_recv = NULL;
+#if CIRCUL_BUFFER_USED
+ T_HalRpMsg_MsgHeader tmpMsgHeader;
+#else
+ unsigned int size=0;
+ T_HalRpMsg_RpMsg *RpMsg = NULL;
+#endif
+ unsigned int result_len = 0;
+ unsigned long flags;
+
+ if ((pMsg == NULL) || !(pMsg->flag & RPMSG_READ_POLL) ||(pMsg->actorID >= ACTOR_MAXID) || (pMsg->chID >= icp_rpmsg_get_max_channel(pMsg->actorID))){
+ return RPMSG_INVALID_PARAMETER;
+ }
+
+ channel_send = icp_rpmsg_get_channel_send(pMsg->actorID, pMsg->chID);
+ channel_recv = icp_rpmsg_get_channel_recv(pMsg->actorID, pMsg->chID);
+
+ if (!(channel_send->flag & CHANNEL_FLAG) || !(channel_recv->flag & CHANNEL_FLAG)){
+ return RPMSG_CHANNEL_INEXISTANCE;
+ }
+
+ local_irq_save(flags);
+ /*check the read buf is empty,if empty wait semaphore*/
+ if(halRpMsg_IsRecvChEmpty(pMsg->actorID, pMsg->chID) == TRUE){
+ local_irq_restore(flags);
+ return RPMSG_NO_MSG;
+ }
+
+// RecvBase_remap = channel_recv->Base_Addr + icp_rpmsg_get_recvbase_offset(pMsg->actorID, pMsg->chID);;
+ RecvBase_remap = icp_rpmsg_pa2va(pMsg->actorID, pMsg->chID, channel_recv->Base_Addr);
+
+#if CIRCUL_BUFFER_USED
+ /* get msg header */
+ icp_rpmsg_read(channel_recv,
+ (unsigned char *)&tmpMsgHeader,
+ (unsigned char *)RecvBase_remap,
+ sizeof(T_HalRpMsg_MsgHeader));
+
+ if (tmpMsgHeader.flag != RPMSG_MSGHEAD_FLAG){
+ local_irq_restore(flags);
+ return RPMSG_CHANNEL_MSG_ERR;
+ }
+
+ result_len = min(pMsg->len, (unsigned int)(tmpMsgHeader.len));
+ icp_rpmsg_read(channel_recv,
+ (unsigned char *)pMsg->buf,
+ (unsigned char *)RecvBase_remap,
+ result_len);
+ if((unsigned int)(tmpMsgHeader.len) > result_len)
+ icp_rpmsg_move_recv_pos(channel_recv, (unsigned int)(tmpMsgHeader.len) - result_len - result_len%2);
+#else
+ RpMsg = (T_HalRpMsg_RpMsg *)(channel_recv->RecvPos + RecvBase_remap);
+ if (RpMsg->MsgHeader.flag != RPMSG_MSGHEAD_FLAG){
+ local_irq_restore(flags);
+ return RPMSG_CHANNEL_MSG_ERR;
+ }
+
+ result_len = min(pMsg->len, (unsigned int)(RpMsg->MsgHeader.len));
+ size = RpMsg->MsgHeader.len + sizeof(T_HalRpMsg_MsgHeader);
+ size = ALIGN_RPMSG(size, RPMSG_ALIGN);
+ memcpy((unsigned int *)pMsg->buf, &(RpMsg->data), result_len);
+ channel_recv->RecvPos += size;
+#endif
+ local_irq_restore(flags);
+
+ return result_len;
+}
+//EXPORT_SYMBOL(zDrvRpMsg_ReadLockIrq);
+
+/*******************************************************************************
+* Function: halRpMsg_IsChFreeSpace
+* Description: This function checks whether the channel has enough free buffer space;
+* Parameters:
+* Input:
+* channel_send:channel used for sending message
+* channel_recv:channel used for receiving message
+* size:size of message
+* Output:None
+*
+* Returns:
+* TRUE: the channel has free buffer .
+* FALSE: the channel no free buffer .
+*
+* Others:
+********************************************************************************/
+static bool halRpMsg_IsChFreeSpace(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID, unsigned int size)
+{
+ unsigned int SendBase_remap = 0;
+ T_HalRpMsg_ChInfo *channel_send = icp_rpmsg_get_channel_send(actorID, chID);
+
+// SendBase_remap = channel_send->Base_Addr + icp_rpmsg_get_sendbase_offset(actorID, chID);
+ SendBase_remap = icp_rpmsg_pa2va(actorID, chID, channel_send->Base_Addr);
+
+ /* |---sp+++++rp---| */
+ if (channel_send->SendPos < channel_send->RecvPos){
+#if CIRCUL_BUFFER_USED
+ if ((channel_send->RecvPos - channel_send->SendPos) > size){
+#else
+ if ((channel_send->RecvPos - channel_send->SendPos - RPMSG_ADDTIONAL_BYTES) >= size){
+#endif
+ return TRUE;
+ }
+ else{
+ return FALSE;
+ }
+ }
+ else{
+#if CIRCUL_BUFFER_USED
+ /* |+++rp-----sp+++| */
+ if ((channel_send->size - channel_send->SendPos + channel_send->RecvPos ) > size){
+#else
+ /* |---rp-----sp+++| */
+ if ((channel_send->size - channel_send->SendPos - RPMSG_ADDTIONAL_BYTES) >= size){
+ return TRUE;
+ }
+ /* |+++rp-----sp---| */
+ else if ((size + RPMSG_ADDTIONAL_BYTES) <= channel_send->RecvPos){
+ *(unsigned short *)(SendBase_remap + channel_send->SendPos) = RPMSG_SPACEEND;
+ channel_send->EndAddr = channel_send->SendPos;
+ channel_send->SendPos = 0x0;
+#endif
+ return TRUE;
+ }
+ else{
+ return FALSE;
+ }
+ }
+}
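+/*
+ * Worked example for the free-space check above (illustrative sizes): with a
+ * 16-byte channel, SendPos 12 and RecvPos 4 the wrapped case applies, so the
+ * free space is 16 - 12 + 4 = 8 bytes; an 8-byte message is rejected while a
+ * 6-byte message fits.  The comparison is strictly greater-than so the ring
+ * is never filled completely, which would be indistinguishable from empty.
+ */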
+
+/*******************************************************************************
+* Function: zDrvRpMsg_Write
+* Description: This function is used for writing a message;
+* Parameters:
+* Input:
+* pMsg:message which will be written
+* Output:
+* pMsg
+*
+* Returns:
+* size:the length of data written
+* DRV_ERROR
+*
+* Others:
+********************************************************************************/
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+int zDrvRpMsg_Write(const T_ZDrvRpMsg_Msg *pMsg)
+#else
+int zDrvRpMsg_Write_Cap(const T_ZDrvRpMsg_Msg *pMsg)
+#endif
+{
+ unsigned int size=0;
+ unsigned int SendBase_remap = 0;
+ T_HalRpMsg_ChInfo *channel_send = NULL;
+#if CIRCUL_BUFFER_USED
+ T_HalRpMsg_MsgHeader tmpMsgHeader;
+#else
+ T_HalRpMsg_RpMsg *RpMsg = NULL;
+#endif
+
+ if ((pMsg == NULL)||(pMsg->actorID >= ACTOR_MAXID) || (pMsg->chID >= icp_rpmsg_get_max_channel(pMsg->actorID))){
+ return RPMSG_INVALID_PARAMETER;
+ }
+
+ channel_send = icp_rpmsg_get_channel_send(pMsg->actorID, pMsg->chID);
+
+ if (!(channel_send->flag & CHANNEL_FLAG)){
+ return RPMSG_CHANNEL_INEXISTANCE;
+ }
+ size = pMsg->len+ sizeof(T_HalRpMsg_MsgHeader);
+ size = ALIGN_RPMSG(size,RPMSG_ALIGN);
+
+// SendBase_remap = channel_send->Base_Addr + icp_rpmsg_get_sendbase_offset(pMsg->actorID, pMsg->chID);
+ SendBase_remap = icp_rpmsg_pa2va(pMsg->actorID, pMsg->chID, channel_send->Base_Addr);
+
+ mutex_lock(&(rpmsg_resource[pMsg->actorID].rpmsgwrite_mutex[pMsg->chID]));
+ if (halRpMsg_IsChFreeSpace(pMsg->actorID, pMsg->chID, size) == TRUE){
+
+#if CIRCUL_BUFFER_USED
+ tmpMsgHeader.flag = (unsigned short)RPMSG_MSGHEAD_FLAG;
+ tmpMsgHeader.len = (unsigned short)(pMsg->len);
+
+ rpmsg_resource[pMsg->actorID].sendPos[pMsg->chID] = channel_send->SendPos;
+
+ icp_rpmsg_write(channel_send,
+ (unsigned char *)SendBase_remap,
+ (unsigned char *)&tmpMsgHeader,
+ sizeof(T_HalRpMsg_MsgHeader),
+ pMsg->actorID,
+ pMsg->chID);
+
+ icp_rpmsg_write(channel_send,
+ (unsigned char *)SendBase_remap,
+ pMsg->buf,
+ pMsg->len,
+ pMsg->actorID,
+ pMsg->chID);
+
+ channel_send->SendPos = rpmsg_resource[pMsg->actorID].sendPos[pMsg->chID];
+#else
+ RpMsg = (T_HalRpMsg_RpMsg *)(channel_send->SendPos + SendBase_remap);
+ RpMsg->MsgHeader.flag = (unsigned short)RPMSG_MSGHEAD_FLAG;
+ RpMsg->MsgHeader.len = (unsigned short)(pMsg->len);
+
+ memcpy(&(RpMsg->data), pMsg->buf, pMsg->len);
+ channel_send->SendPos += size;
+#endif
+ }
+ else{
+ mutex_unlock(&(rpmsg_resource[pMsg->actorID].rpmsgwrite_mutex[pMsg->chID]));
+ return RPMSG_SPACE_NOT_ENOUGH;
+ }
+ if (icp_ops->Icp_GetIntState(pMsg->actorID, pMsg->chID) == FALSE){
+ if (pMsg->flag & RPMSG_WRITE_INT){
+ icp_ops->Icp_SetInt(pMsg->actorID, pMsg->chID);
+ }
+ }
+ mutex_unlock(&(rpmsg_resource[pMsg->actorID].rpmsgwrite_mutex[pMsg->chID]));
+
+ return (pMsg->len);
+}
+
+//EXPORT_SYMBOL(zDrvRpMsg_Write);
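+/*
+ * Matching write-path sketch (illustrative; assumes the channel was created
+ * with zDrvRpMsg_CreateChannel() beforehand).  RPMSG_WRITE_INT asks the
+ * driver to raise the ICP interrupt so the remote core is notified at once.
+ */
+#if 0
+static int rpmsg_write_example(const void *data, unsigned int len)
+{
+ T_ZDrvRpMsg_Msg msg = {0};
+
+ msg.actorID = PS_ID;
+ msg.chID = ICP_CHANNEL_PSM;
+ msg.buf = (void *)data;
+ msg.len = len;
+ msg.flag = RPMSG_WRITE_INT;
+
+ /* returns len on success or an RPMSG_* error code otherwise */
+ return zDrvRpMsg_Write(&msg);
+}
+#endif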
+
+/*******************************************************************************
+* Function: zDrvRpMsg_WriteLockIrq
+* Description: This function is used for writing a message with local interrupts disabled;
+* Parameters:
+* Input:
+* pMsg:message which will be written
+* Output:
+* pMsg
+*
+* Returns:
+* size:the length of data written
+* DRV_ERROR
+*
+* Others:
+********************************************************************************/
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+int zDrvRpMsg_WriteLockIrq(const T_ZDrvRpMsg_Msg *pMsg)
+#else
+int zDrvRpMsg_WriteLockIrq_Cap(const T_ZDrvRpMsg_Msg *pMsg)
+#endif
+{
+ unsigned int size=0;
+ unsigned int SendBase_remap = 0;
+ T_HalRpMsg_ChInfo *channel_send = NULL;
+#if CIRCUL_BUFFER_USED
+ T_HalRpMsg_MsgHeader tmpMsgHeader;
+#else
+ T_HalRpMsg_RpMsg *RpMsg = NULL;
+#endif
+ unsigned long flags;
+
+ if ((pMsg == NULL)||(pMsg->actorID >= ACTOR_MAXID) || (pMsg->chID >= icp_rpmsg_get_max_channel(pMsg->actorID))){
+ return RPMSG_INVALID_PARAMETER;
+ }
+
+ channel_send = icp_rpmsg_get_channel_send(pMsg->actorID, pMsg->chID);
+
+ if (!(channel_send->flag & CHANNEL_FLAG)){
+ return RPMSG_CHANNEL_INEXISTANCE;
+ }
+ size = pMsg->len+ sizeof(T_HalRpMsg_MsgHeader);
+ size = ALIGN_RPMSG(size,RPMSG_ALIGN);
+
+// SendBase_remap = channel_send->Base_Addr + icp_rpmsg_get_sendbase_offset(pMsg->actorID, pMsg->chID);
+ SendBase_remap = icp_rpmsg_pa2va(pMsg->actorID, pMsg->chID, channel_send->Base_Addr);
+
+ local_irq_save(flags);
+ if (halRpMsg_IsChFreeSpace(pMsg->actorID, pMsg->chID, size) == TRUE){
+#if CIRCUL_BUFFER_USED
+ tmpMsgHeader.flag = (unsigned short)RPMSG_MSGHEAD_FLAG;
+ tmpMsgHeader.len = (unsigned short)(pMsg->len);
+
+ rpmsg_resource[pMsg->actorID].sendPos[pMsg->chID] = channel_send->SendPos;
+ icp_rpmsg_write(channel_send,
+ (unsigned char *)SendBase_remap,
+ (unsigned char *)&tmpMsgHeader,
+ sizeof(T_HalRpMsg_MsgHeader),
+ pMsg->actorID,
+ pMsg->chID);
+
+ icp_rpmsg_write(channel_send,
+ (unsigned char *)SendBase_remap,
+ pMsg->buf,
+ pMsg->len,
+ pMsg->actorID,
+ pMsg->chID);
+ channel_send->SendPos = rpmsg_resource[pMsg->actorID].sendPos[pMsg->chID];
+#else
+ RpMsg = (T_HalRpMsg_RpMsg *)(channel_send->SendPos + SendBase_remap);
+ RpMsg->MsgHeader.flag = (unsigned short)RPMSG_MSGHEAD_FLAG;
+ RpMsg->MsgHeader.len = (unsigned short)(pMsg->len);
+ memcpy(&(RpMsg->data), pMsg->buf, pMsg->len);
+ channel_send->SendPos += size;
+#endif
+
+ }
+ else{
+ local_irq_restore(flags);
+ return RPMSG_SPACE_NOT_ENOUGH;
+ }
+ if (icp_ops->Icp_GetIntState(pMsg->actorID, pMsg->chID) == FALSE){
+ if (pMsg->flag & RPMSG_WRITE_INT){
+ icp_ops->Icp_SetInt(pMsg->actorID, pMsg->chID);
+ }
+ }
+ local_irq_restore(flags);
+
+ return (pMsg->len);
+}
+
+//EXPORT_SYMBOL(zDrvRpMsg_WriteLockIrq);
+
+/*******************************************************************************
+* Function: zDrvRpMsg_CreateChannel
+* Description: This function is used for creating channel to send message;
+* Parameters:
+* Input:
+* actorID:remote cpu
+* chID: ID of channel
+* size: size of channel
+* Output:None
+*
+* Returns:
+* DRV_SUCCESS: successfully .
+* DRV_ERROR: fail .
+*
+* Others:
+********************************************************************************/
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+int zDrvRpMsg_CreateChannel(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID, unsigned int size)
+#else
+int zDrvRpMsg_CreateChannel_Cap(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID, unsigned int size)
+#endif
+{
+ T_HalRpMsg_ChInfo *channel_send = NULL;
+
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ if (actorID != CAP_ID)
+ return RPMSG_INVALID_PARAMETER;
+#endif
+
+ if ((actorID>=ACTOR_MAXID) || (chID>=icp_rpmsg_get_max_channel(actorID))){
+ return RPMSG_INVALID_PARAMETER;
+ }
+
+ channel_send = icp_rpmsg_get_channel_send(actorID, chID);
+
+ if (channel_send->flag & CHANNEL_FLAG){
+ return RPMSG_CHANNEL_ALREADY_EXIST;
+ }
+
+ size = ALIGN_RPMSG(size,RPMSG_ALIGN);
+ mutex_lock(&rpmsgch_mutex[actorID]);
+ if (icp_rpmsg_get_buffer_type(actorID, chID) == CH_IRAM)
+ {
+ if (size > Icp_rpmsg_drvs[actorID]->Channel_config.CurIramSpace_Size){
+ mutex_unlock(&rpmsgch_mutex[actorID]);
+ return RPMSG_SPACE_NOT_ENOUGH;
+ }
+
+ channel_send->Base_Addr = Icp_rpmsg_drvs[actorID]->Channel_config.CurIramAddr;
+ Icp_rpmsg_drvs[actorID]->Channel_config.CurIramAddr += size;
+ Icp_rpmsg_drvs[actorID]->Channel_config.CurIramSpace_Size -=size;
+ }
+ else if (icp_rpmsg_get_buffer_type(actorID, chID) == CH_DDR)
+ {
+ if (size > Icp_rpmsg_drvs[actorID]->Channel_config.CurDdrSpace_Size){
+ mutex_unlock(&rpmsgch_mutex[actorID]);
+ return RPMSG_SPACE_NOT_ENOUGH;
+ }
+
+ channel_send->Base_Addr = Icp_rpmsg_drvs[actorID]->Channel_config.CurDdrAddr;
+ Icp_rpmsg_drvs[actorID]->Channel_config.CurDdrAddr += size;
+ Icp_rpmsg_drvs[actorID]->Channel_config.CurDdrSpace_Size -=size;
+ }
+ else{
+ mutex_unlock(&rpmsgch_mutex[actorID]);
+ return RPMSG_INVALID_PARAMETER;
+ }
+
+ mutex_unlock(&rpmsgch_mutex[actorID]);
+ channel_send->size = size;
+
+ init_waitqueue_head(&(rpmsg_resource[actorID].rpmsgwaitq[chID]));
+ sema_init(&(rpmsg_resource[actorID].RpmsgSema[chID]), 0);
+ mutex_init(&(rpmsg_resource[actorID].rpmsgread_mutex[chID]));
+ mutex_init(&(rpmsg_resource[actorID].rpmsgwrite_mutex[chID]));
+ channel_send->flag |= CHANNEL_FLAG;
+ icp_ops->Icp_UnMask(actorID, chID);
+
+ return RPMSG_SUCCESS;
+
+}
+
+//EXPORT_SYMBOL(zDrvRpMsg_CreateChannel);
+/*******************************************************************************
+* Function: zDrvRpMsg_RecvCh_GetDataSize
+* Description: This function returns the number of bytes pending in the receive channel;
+* Parameters:
+* Input:
+* Output:
+*
+* Returns:
+*
+* Others:
+********************************************************************************/
+int zDrvRpMsg_RecvCh_GetDataSize(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalRpMsg_ChInfo *channel_recv = icp_rpmsg_get_channel_recv(actorID, chID);
+
+ if(channel_recv->RecvPos <= channel_recv->SendPos){
+ return (channel_recv->SendPos - channel_recv->RecvPos);
+ }
+ else{
+#if CIRCUL_BUFFER_USED
+ return (channel_recv->size - channel_recv->RecvPos + channel_recv->SendPos);
+#else
+ return (channel_recv->EndAddr - channel_recv->RecvPos + channel_recv->SendPos);
+#endif
+ }
+}
+
+//EXPORT_SYMBOL(zDrvRpMsg_RecvCh_GetDataSize);
+/*******************************************************************************
+* Function: RpMsg_Dispatch
+* Description: This function is used for dispatching icp interrupt of m0 and arm0 ;
+* Parameters:
+* Input:
+* Output:
+*
+* Returns:
+*
+* Others:
+********************************************************************************/
+#if CIRCUL_BUFFER_USED
+#define RPMSG_BUFFER_LEN 512
+static unsigned char rpmsg_cache_buffer[RPMSG_BUFFER_LEN];
+#endif
+int RpMsg_Dispatch(struct work_struct *work)
+{
+ unsigned int RecvBase_remap = 0;
+ T_ZDrvRpMsg_CallbackFunction callback;
+ T_RpMsg_struct *rpmsg_struct;
+ T_ZDrvRpMsg_ActorID actorID;
+ T_ZDrvRpMsg_ChID chID;
+ volatile u64 *chIDs;
+ u32 max_channel = 0;
+ unsigned long flags;
+ T_HalRpMsg_ChInfo *channel_recv = NULL;
+#if CIRCUL_BUFFER_USED
+ T_HalRpMsg_MsgHeader tmpMsgHeader;
+#else
+ unsigned int size=0;
+ T_HalRpMsg_RpMsg *RpMsg = NULL;
+ void *buf = NULL;
+ unsigned short len = 0;
+#endif
+
+ rpmsg_struct = container_of(work, T_RpMsg_struct, rpmsg_work);
+ actorID = rpmsg_struct->event.actorID;
+ chIDs = &(rpmsg_struct->event.chID);
+
+ max_channel = icp_rpmsg_get_max_channel(actorID);
+
+ for(chID = 0; (chID < max_channel)&&(*chIDs != (u64)0); chID++ )
+ {
+ if (*chIDs&((u64)0x1<<chID))
+ {
+ raw_spin_lock_irqsave(&rpmsg_lock, flags);
+ *chIDs &= ~((u64)0x1<<chID);
+ raw_spin_unlock_irqrestore(&rpmsg_lock, flags);
+
+ channel_recv = icp_rpmsg_get_channel_recv(actorID, chID);
+
+ if (!(channel_recv->flag & CHANNEL_FLAG))
+ {
+ // return RPMSG_CHANNEL_INEXISTANCE;
+ printk("RpMsg_Dispatch: RPMSG_CHANNEL_INEXISTANCE actorID = %d chID = %d\n", actorID, chID);
+ continue;
+ }
+
+ /* look up the callback pointer in s_RpMsgCallbackList, indexed by actorID and chID */
+ callback = rpmsg_resource[actorID].s_RpMsgCallbackList[chID];
+ if (callback == NULL){
+ // return RPMSG_ERROR;
+ printk("RpMsg_Dispatch: callback = NULL actorID = %d chID = %d\n", actorID, chID);
+ continue;
+ }
+
+ // RecvBase_remap = channel_recv->Base_Addr + icp_rpmsg_get_recvbase_offset(actorID, chID);
+ RecvBase_remap = icp_rpmsg_pa2va(actorID, chID, channel_recv->Base_Addr);
+
+ while( halRpMsg_IsRecvChEmpty(actorID, chID) != TRUE)
+ {
+#if CIRCUL_BUFFER_USED
+ /* get msg header */
+ icp_rpmsg_read(channel_recv,
+ (unsigned char *)&tmpMsgHeader,
+ (unsigned char *)RecvBase_remap,
+ sizeof(T_HalRpMsg_MsgHeader));
+
+ if (tmpMsgHeader.flag != RPMSG_MSGHEAD_FLAG)
+ // return RPMSG_CHANNEL_MSG_ERR;
+ BUG();
+
+ if(RPMSG_BUFFER_LEN < tmpMsgHeader.len)
+ BUG();
+
+ icp_rpmsg_read(channel_recv,
+ rpmsg_cache_buffer,
+ (unsigned char *)RecvBase_remap,
+ tmpMsgHeader.len);
+
+ (*callback)(rpmsg_cache_buffer, tmpMsgHeader.len);
+#else
+ RpMsg = (T_HalRpMsg_RpMsg *)(channel_recv->RecvPos + RecvBase_remap);
+ if (RpMsg->MsgHeader.flag != RPMSG_MSGHEAD_FLAG){
+ return RPMSG_CHANNEL_MSG_ERR;
+ }
+ len = RpMsg->MsgHeader.len;
+ size = ALIGN_RPMSG(len, RPMSG_ALIGN) + sizeof(T_HalRpMsg_MsgHeader);
+ buf = &(RpMsg->data);
+
+
+ /* invoke the callback with the message buffer to process this message */
+ (*callback)(buf, len);
+ channel_recv->RecvPos += size;
+#endif
+ }
+ }
+ }
+ return RPMSG_SUCCESS;
+}
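+/*
+ * Callback sketch for the dispatch loop above (illustrative; the callback
+ * name is an assumption).  Callbacks run in the per-actor workqueue and are
+ * handed a pointer into rpmsg_cache_buffer, so anything that must outlive
+ * the call has to be copied out.
+ */
+#if 0
+static void my_rpmsg_cb(void *buf, unsigned int len)
+{
+ /* buf points at rpmsg_cache_buffer, at most RPMSG_BUFFER_LEN bytes */
+ print_hex_dump_bytes("rpmsg: ", DUMP_PREFIX_OFFSET, buf, len);
+}
+#endif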
+
+/*******************************************************************************
+* Function: zDrvIcp_interrupt
+* Description: This function is used for dispatching icp interrupt of m0 and arm0 ;
+* Parameters:
+* Input:
+* Output:
+*
+* Returns:
+*
+* Others:
+********************************************************************************/
+extern void zx_wdt_icp_wake(void);
+static irqreturn_t icp_rpmsg_interrupt(int intline, void *p)
+{
+ unsigned int i;
+ Icp_rpmsg_drv *Icp_rpmsg_driver= p;
+ T_HalIcp_Dword chIDs;
+ unsigned long flags;
+
+ chIDs = icp_ops->Icp_GetInt(Icp_rpmsg_driver->actorID);
+
+ for(i=0; i<icp_rpmsg_get_max_channel(Icp_rpmsg_driver->actorID); i++)
+ {
+ #ifdef CONFIG_ARCH_ZX297520V3_CAP
+ if((M0_ID==Icp_rpmsg_driver->actorID) &&( channel_2== i) ) //from m0 wdt
+ zx_wdt_icp_wake();
+ #endif
+ if((((i<32)&&((chIDs.low_word>>i) & 0x1))||((i>=32)&&((chIDs.high_word>>(i-32)) & 0x1))) && \
+ ((Icp_rpmsg_drvs[Icp_rpmsg_driver->actorID]->Channel_config.ChInfo_Recv_Base + i)->flag & CHANNEL_FLAG) && \
+ ((Icp_rpmsg_drvs[Icp_rpmsg_driver->actorID]->Channel_config.ChInfo_Send_Base + i)->flag & CHANNEL_FLAG))
+ {
+ if(rpmsg_resource[Icp_rpmsg_driver->actorID].s_RpMsgCallbackList[i] != NULL)
+ {
+ raw_spin_lock_irqsave(&rpmsg_lock, flags);
+ rpmsg_struct[Icp_rpmsg_driver->actorID].event.actorID = Icp_rpmsg_driver->actorID;
+ rpmsg_struct[Icp_rpmsg_driver->actorID].event.chID |= (u64)0x1<<i;
+ raw_spin_unlock_irqrestore(&rpmsg_lock, flags);
+ queue_work(rpmsg_workqueue[Icp_rpmsg_driver->actorID], &(rpmsg_struct[Icp_rpmsg_driver->actorID].rpmsg_work));
+ }
+ else
+ {
+ //if (RpmsgChnStatus[rpmsg_drve->actorID][i])
+ up(&(rpmsg_resource[Icp_rpmsg_driver->actorID].RpmsgSema[i]));
+ }
+ icp_ops->Icp_ClearInt(Icp_rpmsg_driver->actorID, i);
+ wake_up(&(rpmsg_resource[Icp_rpmsg_driver->actorID].rpmsgwaitq[i]));
+
+#ifdef CONFIG_ZX_SUSPEND_OPTIMIZE
+ /* at channel */
+ if(i == channel_9)
+ at_local_wakelock_timeout(msecs_to_jiffies(1*1000));
+#endif
+ }
+ }
+
+ wake_lock_timeout(&icp_wake_lock, msecs_to_jiffies(1*1000));
+
+ return IRQ_HANDLED;
+}
+/*******************************************************************************
+* Function: zDrvIcp_SetInt
+* Description: This function is used for generating icp interrupt to inform remote cpu;
+* Parameters:
+* Input:
+ actorID: id of remote cpu
+ chID: id of channel
+* Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+int zDrvIcp_SetInt(T_ZDrvRpMsg_ActorID actorID, unsigned int chID)
+{
+ T_HalRpMsg_ChInfo *channel_send = NULL;
+
+ /* validate the IDs before they are used to index the channel table */
+ if(actorID >= ACTOR_MAXID || chID >= icp_rpmsg_get_max_channel(actorID)){
+ return RPMSG_INVALID_PARAMETER;
+ }
+
+ channel_send = icp_rpmsg_get_channel_send(actorID, chID);
+ if (!(channel_send->flag & CHANNEL_FLAG))
+ return RPMSG_CHANNEL_INEXISTANCE;
+
+ icp_ops->Icp_SetInt(actorID, chID);
+
+ return 0;
+}
+
+/*******************************************************************************
+* Function: zDrvRpMsg_RegCallBack
+* Description: This function is used to register the callback function for an icp interrupt channel;
+* Parameters:
+* Input:
+ actorID: id of remote cpu
+ chID: id of channel
+* Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+int zDrvRpMsg_RegCallBack(T_ZDrvRpMsg_ActorID actorID, unsigned int chID, T_ZDrvRpMsg_CallbackFunction callback)
+#else
+int zDrvRpMsg_RegCallBack_Cap(T_ZDrvRpMsg_ActorID actorID, unsigned int chID, T_ZDrvRpMsg_CallbackFunction callback)
+#endif
+{
+ unsigned long flags;
+
+ if(actorID >= ACTOR_MAXID ||chID >= icp_rpmsg_get_max_channel(actorID))
+ return RPMSG_INVALID_PARAMETER;
+
+ rpmsg_resource[actorID].s_RpMsgCallbackList[chID] = callback;
+ if(callback!=NULL)
+ {
+ printk("rpmsg: actorID = %d chID = %d register callback success!\n", actorID, chID);
+
+ raw_spin_lock_irqsave(&rpmsg_lock, flags);
+ rpmsg_struct[actorID].event.actorID = actorID;
+ rpmsg_struct[actorID].event.chID |= (u64)0x1<<chID;
+ raw_spin_unlock_irqrestore(&rpmsg_lock, flags);
+
+ queue_work(rpmsg_workqueue[actorID], &(rpmsg_struct[actorID].rpmsg_work));
+ }
+ return 0;
+}
+
+//EXPORT_SYMBOL(zDrvRpMsg_RegCallBack);
+
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+void zDrvRpMsg_ReadExit(T_ZDrvRpMsg_ActorID actor, T_ZDrvRpMsg_ChID chID)
+#else
+void zDrvRpMsg_ReadExit_Cap(T_ZDrvRpMsg_ActorID actor, T_ZDrvRpMsg_ChID chID)
+#endif
+{
+ rpmsg_resource[actor].RpMsgRead_Exit[chID] = 1;
+
+ if(rpmsg_resource[actor].RpmsgSema[chID].count)
+ {
+ up(&(rpmsg_resource[actor].RpmsgSema[chID]));
+ }
+}
+//EXPORT_SYMBOL(zDrvRpMsg_ReadExit);
+
+/**********************************************************************************
+* used by devices
+**********************************************************************************/
+void rpmsg_set_ops(const icp_operations *ops)
+{
+ icp_ops = ops;
+}
+
+static int icp_rpmsg_init_resource(struct platform_device *pdev, Icp_rpmsg_drv *icp_rpmsg_drv)
+{
+ struct zx29_rpmsg_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "[RPMSG]no platform data\n");
+ return -EINVAL;
+ }
+
+ icp_rpmsg_drv->actorID = pdev->id;
+ icp_rpmsg_drv->channel_cnt = pdata->max_channel_cnt;
+
+#ifdef _USE_CAP_SYS
+ if(icp_rpmsg_drv->actorID == CAP_ID) {
+ ddr_base_offset = ZX_DDR_CAPBUF_BASE - ZX29_DDR_CAPBUF_PHYS;
+
+ icp_rpmsg_drv->Channel_config.ChInfo_Send_Base = (T_HalRpMsg_ChInfo *)(pdata->ddr_send_base + ddr_base_offset);
+ icp_rpmsg_drv->Channel_config.ChInfo_Recv_Base = (T_HalRpMsg_ChInfo *)(pdata->ddr_recv_base + ddr_base_offset);
+ icp_rpmsg_drv->Channel_config.CurDdrAddr = (unsigned int)(pdata->ddr_send_base + icp_rpmsg_drv->channel_cnt * sizeof(T_HalRpMsg_ChInfo));
+ icp_rpmsg_drv->Channel_config.CurDdrSpace_Size = pdata->ddr_send_size - icp_rpmsg_drv->channel_cnt * sizeof(T_HalRpMsg_ChInfo);
+ } else {
+#endif
+ icp_rpmsg_drv->Channel_config.ChInfo_Send_Base = (T_HalRpMsg_ChInfo *)(pdata->iram_send_base);
+ icp_rpmsg_drv->Channel_config.ChInfo_Recv_Base = (T_HalRpMsg_ChInfo *)(pdata->iram_recv_base);
+ icp_rpmsg_drv->Channel_config.CurIramAddr = (unsigned int)(pdata->iram_send_base + icp_rpmsg_drv->channel_cnt * sizeof(T_HalRpMsg_ChInfo) - iram_base_offset);
+ icp_rpmsg_drv->Channel_config.CurIramSpace_Size = pdata->iram_send_size - icp_rpmsg_drv->channel_cnt * sizeof(T_HalRpMsg_ChInfo);
+ icp_rpmsg_drv->Channel_config.CurDdrAddr = (unsigned int)(pdata->ddr_send_base);
+ icp_rpmsg_drv->Channel_config.CurDdrSpace_Size = pdata->ddr_send_size;
+#ifdef _USE_CAP_SYS
+ }
+#endif
+
+ memset((u8 *)icp_rpmsg_drv->Channel_config.ChInfo_Send_Base,
+ 0,
+ icp_rpmsg_drv->channel_cnt*sizeof(T_HalRpMsg_ChInfo));
+
+ return 0;
+}
+
+int icp_rpmsg_register(struct platform_device *pdev, void *hw_data)
+{
+ int ret;
+ char s[30];
+ Icp_rpmsg_drv *list;
+ struct zx29_icp_hwdata *icp_hwdata = (struct zx29_icp_hwdata *)hw_data;
+
+ list = Icp_rpmsg_drvs[pdev->id];
+ icp_rpmsg_init_resource(pdev, list);
+
+ sprintf(s, "rpmsg_work%d", (unsigned int)(list->actorID));
+ rpmsg_workqueue[list->actorID] = create_workqueue(s);
+ INIT_WORK(&(rpmsg_struct[list->actorID].rpmsg_work), (void *)RpMsg_Dispatch);/*process regcallback function*/
+ mutex_init(&rpmsgch_mutex[list->actorID]);
+
+ ret = request_irq(icp_hwdata->int_line, icp_rpmsg_interrupt, 0, list->name, list);
+ if (ret)
+ {
+ printk("rpmsg_zx29 request_irq failed! ret = %d \n",ret);
+ return ret;
+ }
+
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+ if(list->actorID == AP_ID)
+#else
+ if(list->actorID == CAP_ID)
+#endif
+ {
+ list->dev.id = list->actorID;
+ ret = icp_rpmsg_device_register(list);
+ if(ret < 0)
+ printk("rpmsg_zx29 register failed! \n");
+
+#ifndef _USE_CAP_SYS
+ ddr_base_offset = (unsigned int)ioremap(ZX_RPMSG_DDR_BASE, ICP_DDR_APPS_SIZE+ICP_DDR_PSAP_SIZE);
+#endif
+
+ wake_lock_init(&icp_wake_lock, WAKE_LOCK_SUSPEND, "icp_msg");
+ printk("[zxp] icp_wake_lock inited! \n");
+ }
+
+ return 0;
+
+}
+
+unsigned int icp_rpmsg_getchflag(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalRpMsg_ChInfo *channel_send = icp_rpmsg_get_channel_send(actorID, chID);
+
+ return channel_send->flag;
+}
+
+int icp_rpmsg_setchIntflag(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID, unsigned int flag)
+{
+ T_HalRpMsg_ChInfo *channel_send = icp_rpmsg_get_channel_send(actorID, chID);
+
+ if(flag == 1)
+ channel_send->flag |= CHANNEL_INT_FLAG;
+ else if(flag == 0)
+ channel_send->flag &= ~CHANNEL_INT_FLAG;
+ else
+ return RPMSG_ERROR;
+
+ return RPMSG_SUCCESS;
+}
+
+int icp_rpmsg_setchpollflag(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID, unsigned int flag)
+{
+ T_HalRpMsg_ChInfo *channel_send = icp_rpmsg_get_channel_send(actorID, chID);
+
+ if(flag == 1)
+ channel_send->flag |= CHANENL_POLL_FLAG;
+ else if(flag == 0)
+ channel_send->flag &= ~CHANENL_POLL_FLAG;
+ else
+ return RPMSG_ERROR;
+
+ return RPMSG_SUCCESS;
+}
+
+int icp_rpmsg_unregister(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#if 0
+/**********************************************************************************
+* ps can reset machine
+**********************************************************************************/
+static void modem_reset(void *buf, unsigned int len)
+{
+ /*zx297510_restart(NULL, "mmi_key reboot");*/
+ //zx297520v2_restart(NULL, NULL);
+ kernel_restart(NULL);
+}
+
+static int __init modem_reset_init(void)
+{
+ zDrvRpMsg_CreateChannel(PS_ID, ICP_CHANNEL_DRIVER, 0x20);
+ zDrvRpMsg_RegCallBack(PS_ID, ICP_CHANNEL_DRIVER, modem_reset);
+
+ pr_info("[ICP] modem_reset_init END \n");
+
+ return 0;
+}
+
+//device_initcall(modem_reset_init);
+#endif
+/**********************************************************************************
+* ICP test support (compiled only when ZX29_ICP_TEST is set)
+**********************************************************************************/
+#if ZX29_ICP_TEST
+#ifndef ICP_TEST_SELF_TXRX
+const unsigned char test_ps_data[29]={1,2,3,4,5,6,7,8,9,10,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19};
+unsigned char test_rcv_buf[50] = {0};
+unsigned int test_rcv_cnt = 0;
+unsigned int test_send_cnt = 0;
+const unsigned char test_ap_data[38]={1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,19,19};
+#endif
+
+#ifdef ICP_TEST_PS2AP
+#ifdef _USE_CAP_SYS
+static T_ZDrvRpMsg_ActorID test_actorID = CAP_ID; /* test ps-->ap*/
+#else
+static T_ZDrvRpMsg_ActorID test_actorID = PS_ID; /* test ps-->ap*/
+#endif
+static T_ZDrvRpMsg_ChID test_chID = ICP_CHANNEL_PSM;
+#else
+static T_ZDrvRpMsg_ActorID test_actorID = M0_ID; /* test M0-->ap*/
+static T_ZDrvRpMsg_ChID test_chID = ICP_CHANNEL_PSM;
+#endif
+#ifdef _USE_CAP_SYS
+#define ZX297520V2_ICP_PSAP_REG (ZX_ICP_BASE + 0xf8) /* ap-->ps */
+#else
+#define ZX297520V2_ICP_PSAP_REG (ZX_ICP_BASE + 0xc8)
+#endif
+static unsigned int icp_test_cnt = 0;
+
+static unsigned long rand = 0x97537636;
+
+static inline unsigned char random(void)
+{
+ /* See "Numerical Recipes in C", second edition, p. 284 */
+ rand = rand * 1664525L + 1013904223L;
+ return (unsigned char) (rand >> 24)&0xff;
+}
+
+
+static void icp_test_cb(void *buf, unsigned int len)
+{
+ u8 *tmp_buf = buf;
+
+#ifdef ICP_TEST_SELF_TXRX
+ icp_test_cnt ++;
+
+ {
+ T_ZDrvRpMsg_Msg Icp_Msg;
+ int ret;
+
+ Icp_Msg.actorID = test_actorID;
+ Icp_Msg.chID = test_chID;
+		Icp_Msg.flag = RPMSG_WRITE_INT;	/* 1 means send an ICP interrupt */
+ Icp_Msg.buf = tmp_buf;
+ Icp_Msg.len = len;
+#ifdef _USE_CAP_SYS
+ ret = zDrvRpMsg_Write_Cap(&Icp_Msg);
+#else
+ ret = zDrvRpMsg_Write(&Icp_Msg);
+#endif
+ if(Icp_Msg.len != ret)
+ pr_info("[ICP_TEST] msg send error:(%d)", ret);
+ }
+
+ pr_info("[ICP_TEST] recv buffer[%d] is:%x %x %x %x \n", len, *tmp_buf, *(tmp_buf+1), *(tmp_buf+2), *(tmp_buf+3));
+#else
+
+#ifdef ICP_TEST_PS2AP
+
+ memset(test_rcv_buf, '\0', 50);
+ memcpy(test_rcv_buf, tmp_buf, len);
+ pr_info("[ICP_TEST] recv buffer[%d] is:%x %x %x %x \n", len, *tmp_buf, *(tmp_buf+1), *(tmp_buf+2), *(tmp_buf+3));
+
+
+/* if(memcmp(test_ps_data, tmp_buf, len))
+ {
+ pr_info("[ICP_TEST] msg cmp error len:[%d]\n", len);
+ BUG();
+ }
+ else
+ {
+ test_rcv_cnt ++;
+ pr_info("[ICP_TEST] recv buffer[%d] is:%x %x %x %x \n", len, *tmp_buf, *(tmp_buf+1), *(tmp_buf+2), *(tmp_buf+3));
+ }*/
+
+{
+ T_ZDrvRpMsg_Msg Icp_Msg;
+ int ret;
+
+ Icp_Msg.actorID = test_actorID;
+ Icp_Msg.chID = test_chID;
+	Icp_Msg.flag = RPMSG_WRITE_INT;	/* 1 means send an ICP interrupt */
+ Icp_Msg.buf = test_rcv_buf;
+ Icp_Msg.len = len;
+
+#ifdef _USE_CAP_SYS
+ ret = zDrvRpMsg_Write_Cap(&Icp_Msg);
+#else
+ ret = zDrvRpMsg_Write(&Icp_Msg);
+#endif
+ if(Icp_Msg.len != ret)
+ {
+ pr_info("[ICP_TEST] msg send error:(%d)", ret);
+ BUG();
+ }
+
+ test_send_cnt ++;
+ pr_info("[ICP_TEST] msg send ok count:[%d]\n", test_send_cnt);
+}
+#else
+
+ memset(test_rcv_buf, '\0', 50);
+ memcpy(test_rcv_buf, tmp_buf, len);
+ pr_info("[ICP_TEST] recv buffer[%d] is:%x \n", len, *tmp_buf);
+
+ {
+ T_ZDrvRpMsg_Msg Icp_Msg;
+ int ret;
+
+ Icp_Msg.actorID = test_actorID;
+ Icp_Msg.chID = test_chID;
+		Icp_Msg.flag = RPMSG_WRITE_INT;	/* 1 means send an ICP interrupt */
+ Icp_Msg.buf = test_rcv_buf;
+ Icp_Msg.len = len;
+
+#ifdef _USE_CAP_SYS
+ ret = zDrvRpMsg_Write_Cap(&Icp_Msg);
+#else
+ ret = zDrvRpMsg_Write(&Icp_Msg);
+#endif
+ if(Icp_Msg.len != ret)
+ {
+ pr_info("[ICP_TEST] msg send error:(%d)", ret);
+ BUG();
+ }
+
+ test_send_cnt ++;
+ pr_info("[ICP_TEST] msg send ok count:[%d]\n", test_send_cnt);
+ }
+
+
+
+#endif
+#endif
+}
+
+static void ps_create_channel(void)
+{
+#ifdef ICP_TEST_SELF_TXRX
+ T_HalRpMsg_ChInfo *channel = icp_rpmsg_get_channel_recv(test_actorID, test_chID);
+
+#ifdef _USE_CAP_SYS
+ channel->Base_Addr = Icp_rpmsg_drvs[test_actorID]->Channel_config.CurDdrAddr - ICP_DDR_APPS_SIZE;//(u32)(Icp_rpmsg_drvs[test_actorID]->Channel_config.ChInfo_Recv_Base) + Icp_rpmsg_drvs[test_actorID]->channel_cnt*sizeof(T_HalRpMsg_ChInfo);
+#else
+ channel->Base_Addr = Icp_rpmsg_drvs[test_actorID]->Channel_config.CurDdrAddr + ICP_DDR_APPS_SIZE;//(u32)(Icp_rpmsg_drvs[test_actorID]->Channel_config.ChInfo_Recv_Base) + Icp_rpmsg_drvs[test_actorID]->channel_cnt*sizeof(T_HalRpMsg_ChInfo);
+#endif
+ channel->size = 0x20;
+ channel->flag |= CHANNEL_FLAG;
+
+ channel->SendPos = 0;
+ channel->RecvPos = 0;
+ channel->EndAddr = 0;
+#endif
+}
+
+static bool halRpMsg_IsPsChFreeSpace(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID, unsigned int size)
+{
+ unsigned int SendBase_remap = 0;
+ T_HalRpMsg_ChInfo *channel_send = icp_rpmsg_get_channel_recv(actorID, chID);
+
+// SendBase_remap = channel_send->Base_Addr + icp_rpmsg_get_sendbase_offset(actorID, chID);
+ SendBase_remap = icp_rpmsg_pa2va(actorID, chID, channel_send->Base_Addr);
+
+ /* |---sp+++++rp---| */
+ if (channel_send->SendPos < channel_send->RecvPos){
+#if CIRCUL_BUFFER_USED
+ if ((channel_send->RecvPos - channel_send->SendPos) > size){
+#else
+ if ((channel_send->RecvPos - channel_send->SendPos - RPMSG_ADDTIONAL_BYTES) >= size){
+#endif
+ return TRUE;
+ }
+ else{
+ return FALSE;
+ }
+ }
+ else{
+#if CIRCUL_BUFFER_USED
+ /* |+++rp-----sp+++| */
+ if ((channel_send->size - channel_send->SendPos + channel_send->RecvPos ) > size){
+#else
+ /* |---rp-----sp+++| */
+ if ((channel_send->size - channel_send->SendPos - RPMSG_ADDTIONAL_BYTES) >= size){
+ return TRUE;
+ }
+ /* |+++rp-----sp---| */
+ else if ((size + RPMSG_ADDTIONAL_BYTES) <= channel_send->RecvPos){
+ *(unsigned short *)(SendBase_remap + channel_send->SendPos) = RPMSG_SPACEEND;
+ channel_send->EndAddr = channel_send->SendPos;
+ channel_send->SendPos = 0x0;
+#endif
+ return TRUE;
+ }
+ else{
+ return FALSE;
+ }
+ }
+}
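+
+/*
+ * Worked example (illustrative, non-circular #else build, small
+ * RPMSG_ADDTIONAL_BYTES): with size = 0x100, SendPos = 0xE0 and
+ * RecvPos = 0x80, an aligned 0x40-byte message no longer fits behind
+ * SendPos, but it does fit below RecvPos, so the check above stamps
+ * RPMSG_SPACEEND at the old SendPos, records it in EndAddr, wraps
+ * SendPos back to 0 and returns TRUE.
+ */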
+
+static int zDrvRpMsg_PsWrite(const T_ZDrvRpMsg_Msg *pMsg)
+{
+	unsigned int size=0;
+	unsigned int SendBase_remap = 0;
+	T_HalRpMsg_ChInfo *channel_send = NULL;
+#if CIRCUL_BUFFER_USED
+	T_HalRpMsg_MsgHeader tmpMsgHeader;
+#else
+	T_HalRpMsg_RpMsg *RpMsg = NULL;
+#endif
+
+	/* validate the message before dereferencing it to look up the channel */
+	if ((pMsg == NULL)||(pMsg->actorID >= ACTOR_MAXID) || (pMsg->chID >= icp_rpmsg_get_max_channel(pMsg->actorID))){
+		return RPMSG_INVALID_PARAMETER;
+	}
+	channel_send = icp_rpmsg_get_channel_recv(pMsg->actorID, pMsg->chID);
+	if (!(channel_send->flag & CHANNEL_FLAG)){
+		return RPMSG_CHANNEL_INEXISTANCE;
+	}
+ size = pMsg->len+ sizeof(T_HalRpMsg_MsgHeader);
+ size = ALIGN_RPMSG(size,RPMSG_ALIGN);
+
+// SendBase_remap = channel_send->Base_Addr + icp_rpmsg_get_sendbase_offset(pMsg->actorID, pMsg->chID);
+ SendBase_remap = icp_rpmsg_pa2va(pMsg->actorID, pMsg->chID, channel_send->Base_Addr);
+
+// mutex_lock(&(rpmsg_resource[pMsg->actorID].rpmsgwrite_mutex[pMsg->chID]));
+ if (halRpMsg_IsPsChFreeSpace(pMsg->actorID, pMsg->chID, size) == TRUE){
+
+#if CIRCUL_BUFFER_USED
+ rpmsg_resource[pMsg->actorID].sendPos[pMsg->chID] = channel_send->SendPos;
+ tmpMsgHeader.flag = (unsigned short)RPMSG_MSGHEAD_FLAG;
+ tmpMsgHeader.len = (unsigned short)(pMsg->len);
+
+ icp_rpmsg_write(channel_send,
+ (unsigned char *)SendBase_remap,
+ (unsigned char *)&tmpMsgHeader,
+ sizeof(T_HalRpMsg_MsgHeader),
+ pMsg->actorID,
+ pMsg->chID);
+
+ icp_rpmsg_write(channel_send,
+ (unsigned char *)SendBase_remap,
+ pMsg->buf,
+ pMsg->len,
+ pMsg->actorID,
+ pMsg->chID);
+ channel_send->SendPos = rpmsg_resource[pMsg->actorID].sendPos[pMsg->chID];
+#else
+ RpMsg = (T_HalRpMsg_RpMsg *)(channel_send->SendPos + SendBase_remap);
+ RpMsg->MsgHeader.flag = (unsigned short)RPMSG_MSGHEAD_FLAG;
+ RpMsg->MsgHeader.len = (unsigned short)(pMsg->len);
+
+ memcpy(&(RpMsg->data), pMsg->buf, pMsg->len);
+ channel_send->SendPos += size;
+#endif
+ }
+ else{
+// mutex_unlock(&(rpmsg_resource[pMsg->actorID].rpmsgwrite_mutex[pMsg->chID]));
+ return RPMSG_SPACE_NOT_ENOUGH;
+ }
+/*
+ if (icp_ops->Icp_GetIntState(pMsg->actorID, pMsg->chID) == FALSE){
+ if (pMsg->flag & RPMSG_WRITE_INT){
+ icp_ops->Icp_SetInt(pMsg->actorID, pMsg->chID);
+ }
+ }
+*/
+// mutex_unlock(&(rpmsg_resource[pMsg->actorID].rpmsgwrite_mutex[pMsg->chID]));
+
+ return (pMsg->len);
+}
+
+
+static void ps_send_msg(void)
+{
+#ifdef ICP_TEST_SELF_TXRX
+ char ps_buf[5] = {'t','e','s','t','x'};
+ T_ZDrvRpMsg_Msg Icp_Msg;
+
+ ps_buf[4] = '0' + icp_test_cnt;
+
+ Icp_Msg.actorID = test_actorID;
+ Icp_Msg.chID = test_chID;
+	Icp_Msg.flag = RPMSG_WRITE_INT;	/* 1 means send an ICP interrupt */
+ Icp_Msg.buf = ps_buf;
+ Icp_Msg.len = 0x5;
+
+ zDrvRpMsg_PsWrite(&Icp_Msg);
+
+ /* trigger icp */
+ iowrite32(2, (void __iomem *)ZX297520V2_ICP_PSAP_REG+0); //channel_2
+#endif
+}
+
+#ifdef ICP_TEST_PS2AP
+static void icp_test(void) /* test ps-->ap*/
+{
+ if(icp_test_cnt == 0)
+ {
+ /* create channel */
+#ifdef _USE_CAP_SYS
+ if(zDrvRpMsg_CreateChannel_Cap(test_actorID, test_chID, 0x30))
+#else
+ if(zDrvRpMsg_CreateChannel(test_actorID, test_chID, 0x30))
+#endif
+ {
+ pr_info("[ICP_TEST] Failed create psm icp channel ! \n");
+ BUG();
+ }
+ pr_info("[ICP_TEST] Success create psm icp channel!!! \n");
+
+#ifdef _USE_CAP_SYS
+ zDrvRpMsg_RegCallBack_Cap(test_actorID, test_chID, icp_test_cb);
+#else
+ zDrvRpMsg_RegCallBack(test_actorID, test_chID, icp_test_cb);
+#endif
+
+ /* create ps channel*/
+ ps_create_channel();
+ }
+ /* ps_send_msg */
+ ps_send_msg();
+}
+#else
+static void icp_test(void) /* test M0-->ap*/
+{
+ if(icp_test_cnt == 0)
+ {
+ /* create channel */
+ if(zDrvRpMsg_CreateChannel(test_actorID, test_chID, 0x20))
+ {
+ pr_info("[ICP_TEST] Failed create psm icp channel ! \n");
+ BUG();
+ }
+ pr_info("[ICP_TEST] Success create psm icp channel!!! \n");
+
+ zDrvRpMsg_RegCallBack(test_actorID, test_chID, icp_test_cb);
+ }
+
+}
+#endif
+
+static ssize_t icp_test_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "[ICP_TEST]icp_test_cnt:%d\n", icp_test_cnt);
+}
+
+static ssize_t icp_test_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ icp_test();
+
+ return (count);
+}
+
+static DEVICE_ATTR(icp_test,0600,icp_test_show,icp_test_store);
+static struct attribute *zx29_icp_attributes[] = {
+ &dev_attr_icp_test.attr,
+ NULL,
+};
+
+static const struct attribute_group zx29_icp_attribute_group = {
+ .attrs = (struct attribute **) zx29_icp_attributes,
+};
+#endif
+
+/**
+ * "/sys/zte/test/icp_test"
+ */
+int __init zx_icp_test_init(void)
+{
+ int ret;
+
+#if ZX29_ICP_TEST
+ ret = sysfs_create_group(zx_test_kobj, &zx29_icp_attribute_group);
+
+ pr_info("[DEBUG] create test icp sysfs interface OK.\n");
+#endif
+
+ return 0;
+}
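+
+/*
+ * User-space sketch (illustrative only): writing any value to the sysfs
+ * node created above triggers icp_test_store()/icp_test(), and reading it
+ * returns icp_test_cnt. The path below is the one documented above and is
+ * assumed to be where zx_test_kobj ends up mounted.
+ */
+#if 0
+#include <fcntl.h>
+#include <unistd.h>
+
+static void icp_test_trigger_example(void)
+{
+	int fd = open("/sys/zte/test/icp_test", O_WRONLY);
+
+	if (fd >= 0) {
+		write(fd, "1", 1);	/* any write runs icp_test() once */
+		close(fd);
+	}
+}
+#endif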
+
+#if 0
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+static T_ZDrvRpMsg_ActorID test_actorID = AP_ID;
+#else
+static T_ZDrvRpMsg_ActorID test_actorID = CAP_ID;
+#endif
+
+//const
+unsigned char test_ps_data[29]={0};//{1,2,3,4,5,6,7,8,9,10,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19};
+unsigned char test_rcv_buf[50] = {0};
+unsigned int test_rcv_cnt = 0;
+unsigned int test_send_cnt = 0;
+
+static unsigned long rand = 0x97537636;
+
+static inline unsigned char random(void)
+{
+ /* See "Numerical Recipes in C", second edition, p. 284 */
+ rand = rand * 1664525L + 1013904223L;
+ return (unsigned char) (rand >> 24)&0xff;
+}
+
+static void icp_test_cb(void *buf, unsigned int len)
+{
+ u8 *tmp_buf = buf;
+ int ret = 0;
+
+ memset(test_rcv_buf, '\0', 50);
+ memcpy(test_rcv_buf, tmp_buf, len);
+
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ ret = strncmp(test_rcv_buf,test_ps_data,len);
+ if (ret)
+ {
+		pr_info("[ICP_TEST] msg receive error:(%d)", ret);
+ BUG();
+ }
+
+ int i;
+ for (i = 0; i < len; i++)
+ test_ps_data[i] = random();
+#endif
+
+ pr_info("[ICP_TEST] recv buffer[%d] is:%x %x %x %x \n", len, *tmp_buf, *(tmp_buf+1), *(tmp_buf+2), *(tmp_buf+3));
+
+ T_ZDrvRpMsg_Msg Icp_Msg = {0};
+ Icp_Msg.actorID = test_actorID;
+ Icp_Msg.chID = channel_60;
+	Icp_Msg.flag = RPMSG_WRITE_INT;	/* 1 means send an ICP interrupt */
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ Icp_Msg.buf = test_ps_data;
+#else
+ Icp_Msg.buf = test_rcv_buf;
+#endif
+ Icp_Msg.len = len;
+
+#ifdef CONFIG_ARCH_ZX297520V3_CAP
+ ret = zDrvRpMsg_Write(&Icp_Msg);
+#else
+ msleep(1000);
+ ret = zDrvRpMsg_Write_Cap(&Icp_Msg);
+#endif
+ if(Icp_Msg.len != ret)
+ {
+ pr_info("[ICP_TEST] msg send error:(%d)", ret);
+ BUG();
+ }
+
+ test_send_cnt ++;
+ pr_info("[ICP_TEST] msg send ok count:[%d]\n", test_send_cnt);
+}
+
+static int __init zx_rpmsg_init(void)
+{
+ int ret = 0;
+
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ zDrvRpMsg_CreateChannel_Cap(CAP_ID, channel_60, 0x20);
+ zDrvRpMsg_RegCallBack_Cap(CAP_ID, channel_60, icp_test_cb);
+#else
+ zDrvRpMsg_CreateChannel(AP_ID, channel_60, 0x20);
+ zDrvRpMsg_RegCallBack(AP_ID, channel_60, icp_test_cb);
+#endif
+
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ T_ZDrvRpMsg_Msg Icp_Msg = {0};
+ Icp_Msg.actorID = test_actorID;
+ Icp_Msg.chID = channel_60;
+	Icp_Msg.flag = RPMSG_WRITE_INT;	/* 1 means send an ICP interrupt */
+ Icp_Msg.buf = test_ps_data;
+ Icp_Msg.len = 21;
+
+ ret = zDrvRpMsg_Write_Cap(&Icp_Msg);
+ if(Icp_Msg.len != ret)
+ {
+ pr_info("[ICP_TEST] msg send error:(%d)", ret);
+ BUG();
+ }
+#endif
+ return 0;
+}
+
+late_initcall(zx_rpmsg_init);
+#endif
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/rpmsg.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/rpmsg.c
new file mode 100644
index 0000000..b05603c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/rpmsg.c
@@ -0,0 +1,419 @@
+/*******************************************************************************
+ * Copyright (C) 2013, ZTE Corporation.
+ *
+ * File Name: icp.c
+ * File Mark:
+ * Description:
+ * Others:
+ * Version: V0.1
+ * Author: ShiDeYou
+ * Date: 2013-3-13
+ * History 1:
+ * Date:
+ * Version:
+ * Author:
+ * Modification:
+ * History 2:
+ ******************************************************************************/
+#include <linux/init.h>
+#include <linux/module.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+
+#include <linux/cdev.h>
+#include <linux/workqueue.h>
+
+#include <linux/fs.h>
+#include <mach/debug.h>
+#include <linux/platform_device.h>
+
+#include <asm/irq.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/map.h>
+
+#include <linux/soc/zte/rpm/rpmsg.h>
+#include <linux/soc/zte/rpm/rpmsg_zx29.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+ //unsigned int channel_size[2][2] = {{0x100,0x100},{0x100,0x100}};
+#define CHANNEL_NUM CHANNEL_AP2PS_MAXID //CHANNEL_MAXID //37 from VoLTE
+
+typedef struct _zx29_rpmsg_channel
+{
+ T_ZDrvRpMsg_ActorID actorID;
+ T_ZDrvRpMsg_ChID chID;
+ void * buf;
+ unsigned int len;
+}zx29_rpmsg_channel;
+
+typedef struct _zx29_rpmsg_ser
+{
+ struct kref kref; /* Reference management */
+ struct cdev cdev;
+ struct module *owner;
+ const char *driver_name;
+ const char *name;
+ int name_base;
+ int major;
+ int minor_start;
+ int num;
+ int flags;
+ int index;
+ int count;
+ zx29_rpmsg_channel rpmsg_channel[CHANNEL_NUM];
+
+ const struct file_operations *ops;
+}zx29_rpmsg_ser;
+
+static int zx29_rpmsg_open(struct inode *inode, struct file *filp)
+{
+ dev_t device = inode->i_rdev;
+ T_ZDrvRpMsg_ChID chID;
+ zx29_rpmsg_ser *rpmsg_drv;
+
+ rpmsg_drv = container_of (inode->i_cdev, zx29_rpmsg_ser, cdev);
+
+ rpmsg_drv->count++;
+ chID = (T_ZDrvRpMsg_ChID)(MINOR(device) - rpmsg_drv->minor_start);
+
+ filp->private_data = &(rpmsg_drv->rpmsg_channel[chID]);
+
+ return 0;
+}
+
+static long zx29_rpmsg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ T_ZDrvRpMsg_Msg rpmsg;
+
+ zx29_rpmsg_channel *rpmsg_channel = filp->private_data;
+ rpmsg.actorID = rpmsg_channel->actorID;
+ rpmsg.chID = rpmsg_channel->chID;
+
+ switch(cmd)
+ {
+ case RPMSG_CREATE_CHANNEL:
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ ret = zDrvRpMsg_CreateChannel_Cap(rpmsg.actorID, rpmsg.chID, arg);
+#else
+ ret = zDrvRpMsg_CreateChannel(rpmsg.actorID, rpmsg.chID, arg);
+#endif
+ if(ret != RPMSG_SUCCESS)
+ {
+ rpmsg_log("CreateChannel(actID=%d)(chId=%d) failed(%d). \n", rpmsg_channel->actorID, rpmsg_channel->chID, ret);
+ return -ENXIO;
+ }
+ break;
+ case RPMSG_GET_DATASIZE:
+ ret = zDrvRpMsg_RecvCh_GetDataSize(rpmsg.actorID, rpmsg.chID);
+ *(unsigned int *)arg = ret;
+ break;
+ case RPMSG_SET_INT:
+ zDrvIcp_SetInt(rpmsg.actorID, rpmsg.chID);
+ break;
+ case RPMSG_SET_INT_FLAG:
+ icp_rpmsg_setchIntflag(rpmsg.actorID, rpmsg.chID, 1);
+ break;
+ case RPMSG_CLEAR_INT_FLAG:
+ icp_rpmsg_setchIntflag(rpmsg.actorID, rpmsg.chID, 0);
+ break;
+ case RPMSG_SET_POLL_FLAG:
+ icp_rpmsg_setchpollflag(rpmsg.actorID, rpmsg.chID, 1);
+ break;
+ case RPMSG_CLEAR_POLL_FLAG:
+ icp_rpmsg_setchpollflag(rpmsg.actorID, rpmsg.chID, 0);
+ break;
+
+ default:
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static ssize_t zx29_rpmsg_write(struct file *filp, const char __user *ubuf,
+ size_t len, loff_t *ppos)
+{
+ int ret;
+ unsigned int flag;
+ T_ZDrvRpMsg_Msg rpmsg;
+
+ zx29_rpmsg_channel *rpmsg_channel = filp->private_data;
+ rpmsg.actorID = rpmsg_channel->actorID;
+ rpmsg.chID = rpmsg_channel->chID;
+ rpmsg.len = len;
+ rpmsg.flag = 0;
+
+ rpmsg.buf = kmalloc(len, GFP_KERNEL);
+ if(rpmsg.buf == NULL)
+ {
+ rpmsg_log("zx29_rpmsg_write1 kmalloc failed\n");
+ return 0;
+ }
+
+	ret = copy_from_user(rpmsg.buf, ubuf, len);
+
+	/* copy_from_user() returns the number of bytes left uncopied, never < 0 */
+	if (ret)
+	{
+		rpmsg_log("zx29_rpmsg_write1(actID=%d)(chId=%d) len=%d failed(%d). \n", rpmsg_channel->actorID, rpmsg_channel->chID, len, ret);
+		kfree(rpmsg.buf);
+		return -EFAULT;
+	}
+ flag = icp_rpmsg_getchflag(rpmsg.actorID, rpmsg.chID);
+ if(flag&CHANNEL_INT_FLAG)
+ rpmsg.flag |= RPMSG_WRITE_INT;
+
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ ret = zDrvRpMsg_Write_Cap(&rpmsg);
+#else
+ ret = zDrvRpMsg_Write(&rpmsg);
+#endif
+ kfree(rpmsg.buf);
+
+ if(ret<0)
+ {
+ rpmsg_log("zx29_rpmsg_write2(actID=%d)(chId=%d) len=%d failed(%d). \n", rpmsg_channel->actorID, rpmsg_channel->chID, len, ret);
+ return 0;
+ }
+
+ return len;
+}
+
+static ssize_t zx29_rpmsg_read(struct file *filp, char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ unsigned int size;
+ int ret;
+ unsigned int flag;
+ T_ZDrvRpMsg_Msg rpmsg;
+
+ zx29_rpmsg_channel *rpmsg_channel = filp->private_data;
+ rpmsg.actorID = rpmsg_channel->actorID;
+ rpmsg.chID = rpmsg_channel->chID;
+ rpmsg.len = len;
+ rpmsg.flag = 0;
+
+ rpmsg.buf = kmalloc(len, GFP_KERNEL);
+ if(rpmsg.buf == NULL)
+ {
+ rpmsg_log("zx29_rpmsg_read1 kmalloc failed\n");
+ return 0;
+ }
+
+ flag = icp_rpmsg_getchflag(rpmsg.actorID, rpmsg.chID);
+ if(flag&CHANENL_POLL_FLAG)
+ rpmsg.flag |= RPMSG_READ_POLL;
+
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+ ret = zDrvRpMsg_Read_Cap(&rpmsg);
+#else
+ ret = zDrvRpMsg_Read(&rpmsg);
+#endif
+ if(ret<0)
+ {
+ rpmsg_log("zx29_rpmsg_read1(actID=%d)(chId=%d) len=%d failed(%d). \n", rpmsg_channel->actorID, rpmsg_channel->chID, len, ret);
+ kfree(rpmsg.buf);
+ return 0;
+ }
+ else
+ size = ret;
+
+	ret = copy_to_user(ubuf, rpmsg.buf, size);
+	kfree(rpmsg.buf);
+	/* copy_to_user() returns the number of bytes left uncopied, never < 0 */
+	if (ret)
+	{
+		rpmsg_log("zx29_rpmsg_read2(actID=%d)(chId=%d) len=%d failed(%d). \n", rpmsg_channel->actorID, rpmsg_channel->chID, len, ret);
+		return -EFAULT;
+	}
+
+ return size;
+}
+
+extern bool halRpMsg_IsRecvChEmpty(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID);
+extern wait_queue_head_t *icp_rpmsg_get_waitq(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID);
+static unsigned int zx29_rpmsg_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+
+ zx29_rpmsg_channel *rpmsg_channel = file->private_data;
+
+ poll_wait(file, icp_rpmsg_get_waitq(rpmsg_channel->actorID, rpmsg_channel->chID), wait);
+
+ if (halRpMsg_IsRecvChEmpty(rpmsg_channel->actorID, rpmsg_channel->chID) != TRUE)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+int zx29_rpmsg_release(struct inode *inode, struct file *filp)
+{
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations rpmsg_zx29_ops = {
+ .owner = THIS_MODULE,
+ .open = zx29_rpmsg_open,
+ .release = zx29_rpmsg_release,
+ .write = zx29_rpmsg_write,
+ .read = zx29_rpmsg_read,
+ .unlocked_ioctl = zx29_rpmsg_ioctl,
+ .poll=zx29_rpmsg_poll,
+};
+
+struct class *rpmsg_classes;
+
+zx29_rpmsg_ser rpmsg_zx29 = {
+ .owner = THIS_MODULE,
+ .driver_name = "armps_rpmsg",
+ .name = "rpmsg", //ps_rpmsgch
+ .major = 0,
+ .minor_start = 30,
+ .num = CHANNEL_NUM,
+};
+
+struct device *zx29_rpmsg_register_device(struct class *rpmsg_class, zx29_rpmsg_ser *driver, unsigned index,
+ struct device *device)
+{
+ char name[64];
+ dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
+
+ if (index >= driver->num) {
+ return ERR_PTR(-EINVAL);
+ }
+
+ sprintf(name, "%s%d", driver->name, index);
+
+ return device_create(rpmsg_class, device, dev, NULL, name);
+}
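+
+/*
+ * User-space sketch (illustrative, not part of the driver): each minor
+ * created above maps to one channel, RPMSG_CREATE_CHANNEL takes the
+ * channel size as its ioctl argument, and plain write()/read() then move
+ * raw payloads through zDrvRpMsg_Write()/zDrvRpMsg_Read(). The node name
+ * "/dev/rpmsg0", the 0x100 size and the visibility of the RPMSG_* ioctl
+ * codes in user space are assumptions made for this example.
+ */
+#if 0
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+static int rpmsg_channel_example(void)
+{
+	char buf[32];
+	int fd = open("/dev/rpmsg0", O_RDWR);
+
+	if (fd < 0)
+		return -1;
+	if (ioctl(fd, RPMSG_CREATE_CHANNEL, 0x100) < 0) {	/* create a 0x100-byte channel */
+		close(fd);
+		return -1;
+	}
+	write(fd, "ping", 4);		/* forwarded to the remote core */
+	read(fd, buf, sizeof(buf));	/* blocks until the channel has data */
+	close(fd);
+	return 0;
+}
+#endif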
+
+static int zx29_rpmsg_probe(struct device *rpdev)
+{
+ int error;
+ int i;
+ dev_t dev;
+ zx29_rpmsg_ser *rpmsg_ser ;
+
+ rpmsg_ser = &rpmsg_zx29;
+
+ rpmsg_ser->ops = &rpmsg_zx29_ops;
+ rpmsg_ser->count = 0;
+// rpmsg_ser->rpmsg_channel.actorID = rpdev->id;
+
+	error = alloc_chrdev_region(&dev, rpmsg_ser->minor_start,
+				rpmsg_ser->num, rpmsg_ser->name);
+	if (error)	/* do not continue with an uninitialised dev */
+		return error;
+	rpmsg_ser->major = MAJOR(dev);
+	rpmsg_ser->minor_start = MINOR(dev);
+
+ cdev_init(&rpmsg_ser->cdev, &rpmsg_zx29_ops);
+ rpmsg_ser->cdev.owner = rpmsg_ser->owner;
+
+ error = cdev_add(&rpmsg_ser->cdev, dev, rpmsg_ser->num);
+ if (error) {
+ unregister_chrdev_region(dev, rpmsg_ser->num);
+ return error;
+ }
+
+ for (i = 0; i < rpmsg_ser->num; i++) {
+ rpmsg_ser->rpmsg_channel[i].actorID = rpdev->id;
+ rpmsg_ser->rpmsg_channel[i].chID = channel_0 + i;
+ zx29_rpmsg_register_device(rpmsg_classes, rpmsg_ser, i, NULL);
+ }
+
+ return 0;
+}
+
+static struct device_attribute rpmsg_zx29_dev_attrs[] = {
+ __ATTR_NULL,
+};
+
+struct bus_type icp_rpmsg_bus = {
+ .name = "rpmsg_zx29",
+ .dev_attrs = rpmsg_zx29_dev_attrs,
+};
+
+static struct device_driver rpmsg_zx29_drv={
+ .name = "icp_rpmsg1",
+ .owner = THIS_MODULE,
+ .bus= &icp_rpmsg_bus,
+ .probe = zx29_rpmsg_probe,
+};
+
+void zx29_rpmsg_unregister_device(struct class *rpmsg_class, zx29_rpmsg_ser *driver, unsigned index)
+{
+ dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
+
+ device_destroy(rpmsg_class, dev);
+}
+
+int icp_rpmsg_device_register(Icp_rpmsg_drv *icp_rpmsg)
+{
+ int ret;
+
+ ret = bus_register(&icp_rpmsg_bus);
+ if (ret)
+ return ret;
+
+ icp_rpmsg->dev.bus = &icp_rpmsg_bus;
+ dev_set_name(&icp_rpmsg->dev, "icp_rpmsg%d", icp_rpmsg->actorID);
+ ret = device_register(&icp_rpmsg->dev);
+
+ return ret;
+}
+
+static int __init zx29_rpmsg_init(void)
+{
+ int ret = 0;
+ rpmsg_classes = class_create(THIS_MODULE, rpmsg_zx29.name);
+
+ if (IS_ERR(rpmsg_classes))
+ return PTR_ERR(rpmsg_classes);
+
+ ret = driver_register(&rpmsg_zx29_drv);
+ if(ret < 0)
+ printk("rpmsg_zx29 init failed!");
+
+ rpmsg_sram_init();
+
+ return ret;
+}
+
+static void __exit zx29_rpmsg_exit(void)
+{
+ int i;
+ zx29_rpmsg_ser *rpmsg_ser;
+ rpmsg_ser = &rpmsg_zx29;
+
+ for (i = 0; i < rpmsg_ser->num; i++) {
+ zx29_rpmsg_unregister_device(rpmsg_classes, rpmsg_ser, i);
+ }
+
+ class_destroy(rpmsg_classes);
+}
+
+module_init(zx29_rpmsg_init);
+module_exit(zx29_rpmsg_exit);
+
+MODULE_AUTHOR("zte");
+MODULE_DESCRIPTION("zte rpmsg driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform: rpmsg");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/rpmsg_log.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/rpmsg_log.c
new file mode 100644
index 0000000..02476cb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/rpmsg_log.c
@@ -0,0 +1,92 @@
+/*******************************************************************************
+ * Copyright (C) 2013, ZTE Corporation.
+ *
+ * File Name: icp.c
+ * File Mark:
+ * Description:
+ * Others:
+ * Version: V0.1
+ * Author: ShiDeYou
+ * Date: 2013-3-13
+ * History 1:
+ * Date:
+ * Version:
+ * Author:
+ * Modification:
+ * History 2:
+ ******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_ZX_RAM_CONSOLE
+#define RPMSG_LOG_SIZE (4*1024)
+static char rpmsg_printk_buf[RPMSG_LOG_SIZE]; // ring buffer
+static u32 rpmsg_log_point = 0;
+static u32 rpmsg_sram_inited = 0;
+//static char rpmsg_log_temp_buf[512] = {0};
+
+static void rpmsg_sram_cpy(char *s, unsigned len)
+{
+ if(rpmsg_log_point + len >= RPMSG_LOG_SIZE)
+ rpmsg_log_point = 0;
+
+ memcpy(rpmsg_printk_buf+rpmsg_log_point, s, len);
+ rpmsg_log_point += len;
+}
+#endif
+
+/**
+ * usage: like printk(...)
+ */
+void rpmsg_printk(const char *fmt, ...)
+{
+#ifdef CONFIG_ZX_RAM_CONSOLE
+ va_list args;
+ unsigned long long t;
+ unsigned long nanosec_rem;
+ int tlen, len;
+ char rpmsg_log_temp_buf[512] = {0};
+
+ if(!rpmsg_sram_inited)
+ return;
+
+ va_start(args, fmt);
+
+ /* add time stamp */
+ t = cpu_clock(smp_processor_id());
+ nanosec_rem = do_div(t, 1000000000);
+ tlen = sprintf(rpmsg_log_temp_buf, ">%5lu.%06lu< ",
+ (unsigned long) t, nanosec_rem / 1000);
+
+ len = vsprintf(rpmsg_log_temp_buf+tlen, fmt, args);
+ len += tlen;
+
+ rpmsg_sram_cpy(rpmsg_log_temp_buf, len);
+
+ va_end(args);
+#endif
+}
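+
+/*
+ * Minimal usage sketch (illustrative): rpmsg_printk() takes printk-style
+ * arguments; once rpmsg_sram_init() has run, the formatted, timestamped
+ * line is copied into the local ring buffer instead of going to the UART.
+ */
+#if 0
+static void rpmsg_log_example(int chid, int ret)
+{
+	rpmsg_printk("[RPMSG] write on channel %d returned %d\n", chid, ret);
+}
+#endif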
+
+void rpmsg_sram_init(void)
+{
+#ifdef CONFIG_ZX_RAM_CONSOLE
+ pr_info("[RPMSG] LOG_INIT \n");
+
+ rpmsg_printk_buf[0] = 0;
+ rpmsg_log_point = 0;
+
+ rpmsg_sram_inited = 1;
+#endif
+}
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/rpmsg_sim.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/rpmsg_sim.c
new file mode 100644
index 0000000..4b43c81
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/rpm/rpmsg_sim.c
@@ -0,0 +1,463 @@
+/*******************************************************************************
+ * Copyright (C) 2013, ZTE Corporation.
+ *
+ * File Name: icp.c
+ * File Mark:
+ * Description:
+ * Others:
+ * Version: V0.1
+ * Author: ShiDeYou
+ * Date: 2013-3-13
+ * History 1:
+ * Date:
+ * Version:
+ * Author:
+ * Modification:
+ * History 2:
+ ******************************************************************************/
+#include <linux/init.h>
+#include <linux/module.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+
+#include <asm/irq.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/map.h>
+
+//#include "rpmsg.h"
+#include <linux/soc/zte/rpm/rpmsg_sim.h>
+
+#include <linux/sched.h>
+#include <linux/poll.h>
+ //unsigned int channel_size[2][2] = {{0x100,0x100},{0x100,0x100}};
+unsigned int debug_print_flag = 0;
+module_param(debug_print_flag, uint, 0644);
+#define debug_print(fmt, args...) \
+	do \
+	{ \
+		if (debug_print_flag) \
+		{ \
+			printk(fmt, ## args); \
+		} \
+	} while (0)
+
+static int zx29_rpmsg_open(struct inode *inode, struct file *filp)
+{
+ dev_t device = inode->i_rdev;
+ T_ZDrvRpMsg_ChID chID;
+ zx29_rpmsg_ser *rpmsg_drv;
+ if(filp->private_data)
+ return -1;
+ rpmsg_drv = container_of (inode->i_cdev, zx29_rpmsg_ser, cdev);
+ rpmsg_drv->count++;
+ chID = (T_ZDrvRpMsg_ChID)(MINOR(device) - rpmsg_drv->minor_start);
+ if(chID>=CHANNEL_NUM)
+ return -1;
+ if(rpmsg_drv->rpmsg_channel[chID].initflag != 1)
+ return -1;
+ filp->private_data = &(rpmsg_drv->rpmsg_channel[chID]);
+ rpmsg_drv->rpmsg_channel[chID].channelOpen(chID);
+ rpmsg_drv->rpmsg_channel[chID].initflag = 2;
+
+ debug_print("zx29_rpmsg_open channel :%d \n",chID);
+ return 0;
+}
+#if 0
+static long zx29_rpmsg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ T_ZDrvRpMsg_Msg rpmsg;
+
+ zx29_rpmsg_channel *rpmsg_channel = filp->private_data;
+ rpmsg.actorID = rpmsg_channel->actorID;
+ rpmsg.chID = rpmsg_channel->chID;
+
+ switch(cmd)
+ {
+ case RPMSG_CREATE_CHANNEL:
+ ret = zDrvRpMsg_CreateChannel (rpmsg.actorID, rpmsg.chID, arg);
+ if(ret != RPMSG_SUCCESS)
+ {
+ rpmsg_log("CreateChannel(actID=%d)(chId=%d) failed(%d). \n", rpmsg_channel->actorID, rpmsg_channel->chID, ret);
+ return -ENXIO;
+ }
+ break;
+ case RPMSG_GET_DATASIZE:
+ ret = zDrvRpMsg_RecvCh_GetDataSize(rpmsg.actorID, rpmsg.chID);
+ *(unsigned int *)arg = ret;
+ break;
+ case RPMSG_SET_INT:
+ zDrvIcp_SetInt(rpmsg.actorID, rpmsg.chID);
+ break;
+ case RPMSG_SET_INT_FLAG:
+ icp_rpmsg_setchIntflag(rpmsg.actorID, rpmsg.chID, 1);
+ break;
+ case RPMSG_CLEAR_INT_FLAG:
+ icp_rpmsg_setchIntflag(rpmsg.actorID, rpmsg.chID, 0);
+ break;
+ case RPMSG_SET_POLL_FLAG:
+ icp_rpmsg_setchpollflag(rpmsg.actorID, rpmsg.chID, 1);
+ break;
+ case RPMSG_CLEAR_POLL_FLAG:
+ icp_rpmsg_setchpollflag(rpmsg.actorID, rpmsg.chID, 0);
+ break;
+
+ default:
+ return -EPERM;
+ }
+
+ return 0;
+}
+#endif
+static ssize_t zx29_rpmsg_write(struct file *filp, const char __user *ubuf,
+ size_t len, loff_t *ppos)
+{
+ int ret;
+ unsigned int flag;
+ //T_ZDrvRpMsg_Msg rpmsg;
+ struct RpMsg_packet packet;// = kmalloc(sizeof(struct RpMsg_packet), GFP_KERNEL);
+ zx29_rpmsg_channel *rpmsg_channel = filp->private_data;
+ if(rpmsg_channel->initflag != 2)
+ return -1;
+ packet.chID = rpmsg_channel->chID;
+ packet.len = len;
+ packet.buf = kmalloc(len, GFP_KERNEL);
+ if(packet.buf == NULL)
+ {
+ rpmsg_log("zx29_rpmsg_write1 kmalloc failed\n");
+ return 0;
+ }
+
+	ret = copy_from_user(packet.buf, ubuf, len);
+
+	/* copy_from_user() returns the number of bytes left uncopied, never < 0 */
+	if (ret)
+	{
+		//rpmsg_log("zx29_rpmsg_write1(actID=%d)(chId=%d) len=%d failed(%d). \n", rpmsg_channel->actorID, rpmsg_channel->chID, len, ret);
+		kfree(packet.buf);
+		return -EFAULT;
+	}
+ #if 0
+ flag = icp_rpmsg_getchflag(rpmsg.actorID, rpmsg.chID);
+ if(flag&CHANNEL_INT_FLAG)
+ rpmsg.flag |= RPMSG_WRITE_INT;
+
+ ret = zDrvRpMsg_Write(&rpmsg);
+ #endif
+ //ret = writeATdatatoPS();//TODO
+ //TransdataToPS(rpmsg_channel->chID, packet.buf, packet.len);
+ debug_print("zx29_rpmsg_write channel :%d \n",rpmsg_channel->chID);
+ rpmsg_channel->transdataOut(rpmsg_channel->chID, packet.buf, packet.len);
+ kfree(packet.buf);
+ #if 0
+ if(ret<0)
+ {
+// rpmsg_log("zx29_rpmsg_write2(actID=%d)(chId=%d) len=%d failed(%d). \n", rpmsg_channel->actorID, rpmsg_channel->chID, len, ret);
+ return 0;
+ }
+ #endif
+ return len;
+}
+
+static ssize_t zx29_rpmsg_read(struct file *filp, char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ unsigned int size = 0;
+ int ret = 0;
+ unsigned int flag;
+ //T_ZDrvRpMsg_Msg rpmsg;
+ struct RpMsg_packet* packet = NULL;
+ zx29_rpmsg_channel *rpmsg_channel = filp->private_data;
+ if(rpmsg_channel->initflag != 2)
+ return -1;
+ // rpmsg.actorID = rpmsg_channel->actorID;
+ //rpmsg.chID = rpmsg_channel->chID;
+ //rpmsg.len = len;
+ //rpmsg.flag = 0;
+
+ //rpmsg.buf = kmalloc(len, GFP_KERNEL);
+#if 0
+ flag = icp_rpmsg_getchflag(rpmsg.actorID, rpmsg.chID);
+ if(flag&CHANENL_POLL_FLAG)
+ rpmsg.flag |= RPMSG_READ_POLL;
+
+ ret = zDrvRpMsg_Read(&rpmsg);
+#endif
+ //down_interruptible(&rpmsg_channel->channelSema);
+ ret = wait_event_interruptible(rpmsg_channel->channelwaitq, !isRecvChannelEmpty(rpmsg_channel->chID));
+ if(ret < 0)
+ {
+ debug_print("zx29_rpmsg_read wait_event_interruptible:%d \n",ret);
+ return ret;
+ }
+ debug_print("zx29_rpmsg_read :%d \n",rpmsg_channel->chID);
+ if(!isRecvChannelEmpty(rpmsg_channel->chID))
+ {
+ packet = rpmsg_dequeue(&rpmsg_channel->RpMsg_packet_queue);
+
+ //if(packet && packet->msg)
+ //{
+ // size = len?(len<packet->msg->len):(packet->msg->len);
+ // ret = copy_to_user(ubuf, packet->msg->buf, size);
+ //}
+ if(packet)
+ {
+ size = (len < packet->len)?len:(packet->len);
+ ret = copy_to_user(ubuf, packet->buf, size);
+ kfree(packet->buf);
+ kfree(packet);
+ }
+ }
+ else
+ {
+ return 0;
+ }
+
+ if (ret<0)
+ {
+// rpmsg_log("zx29_rpmsg_read2(actID=%d)(chId=%d) len=%d failed(%d). \n", rpmsg_channel->actorID, rpmsg_channel->chID, len, ret);
+ return -EFAULT;
+ }
+
+ return size;
+}
+static unsigned int zx29_rpmsg_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+
+ zx29_rpmsg_channel *rpmsg_channel = file->private_data;
+ if( !isRecvChannelEmpty(rpmsg_channel->chID))
+ return POLLIN | POLLRDNORM;
+ else
+ poll_wait(file, &rpmsg_channel->channelwaitq, wait);
+ debug_print("zx29_rpmsg_poll done :%d \n",rpmsg_channel->chID);
+ if( !isRecvChannelEmpty(rpmsg_channel->chID))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int zx29_rpmsg_release(struct inode *inode, struct file *filp)
+{
+ zx29_rpmsg_ser *rpmsg_drv;
+ zx29_rpmsg_channel *rpmsg_channel = filp->private_data;
+ rpmsg_drv = container_of (inode->i_cdev, zx29_rpmsg_ser, cdev);
+ if(rpmsg_channel->initflag != 2)
+ return -1;
+ filp->private_data = NULL;
+ rpmsg_drv->count--;
+ rpmsg_channel->initflag = 1;
+ rpmsg_channel->channelClose(rpmsg_channel->chID);
+ debug_print("zx29_rpmsg_release channel :%d \n",rpmsg_channel->chID);
+ return 0;
+}
+
+static const struct file_operations rpmsg_zx29_ops = {
+ .owner = THIS_MODULE,
+ .open = zx29_rpmsg_open,
+ .release = zx29_rpmsg_release,
+ .write = zx29_rpmsg_write,
+ .read = zx29_rpmsg_read,
+ //.unlocked_ioctl = zx29_rpmsg_ioctl,
+ .poll=zx29_rpmsg_poll,
+};
+
+struct class *rpmsg_sim_classes;
+
+zx29_rpmsg_ser rpmsg_sim_zx29 = {
+ .owner = THIS_MODULE,
+ .driver_name = "armps_rpmsg",
+ .name = "rpm", //ps_rpmsgch
+ .major = 0,
+ .minor_start = 30,
+ .num = CHANNEL_NUM,
+};
+void rpmsg_enqueue(struct RpMsg_queue *packetqueue, struct list_head* plist)
+{
+	unsigned long spin_flag = 0;	/* spin_lock_irqsave() expects unsigned long flags */
+ spin_lock_irqsave(&packetqueue->lock, spin_flag);
+ list_add_tail(plist, &packetqueue->queue);
+ packetqueue->qlen++;
+ spin_unlock_irqrestore(&packetqueue->lock, spin_flag);
+}
+struct RpMsg_packet* rpmsg_dequeue(struct RpMsg_queue *packetqueue)
+{
+	unsigned long spin_flag = 0;	/* spin_lock_irqsave() expects unsigned long flags */
+	struct list_head* curr;
+	struct RpMsg_packet* packet;
+	spin_lock_irqsave(&packetqueue->lock, spin_flag);
+	if(packetqueue->qlen <= 0)
+	{
+		spin_unlock_irqrestore(&packetqueue->lock, spin_flag);
+		return NULL;
+	}
+	curr = packetqueue->queue.next;
+	packet = list_entry(curr, struct RpMsg_packet, list);	/* recover the packet from its list node */
+	list_del(curr);
+	packetqueue->qlen--;
+	spin_unlock_irqrestore(&packetqueue->lock, spin_flag);
+	return packet;
+}
+int isRecvChannelEmpty(T_ZDrvRpMsg_ChID chID)
+{
+ zx29_rpmsg_channel *rpmsg_channel = &rpmsg_sim_zx29.rpmsg_channel[chID];
+ return rpmsg_channel->RpMsg_packet_queue.qlen?0:1;
+}
+void rpmsg_recv_notify(unsigned int chID, const void *buffer, unsigned int length)//(T_ZDrvRpMsg_Msg rpmsg)
+{
+ //u32 spin_flag = 0;
+ //T_ZDrvRpMsg_Msg* rpmsg;
+
+	zx29_rpmsg_channel *rpmsg_channel = NULL;
+	struct RpMsg_packet *packet = NULL;
+	char *data = NULL;
+
+	if(chID >= CHANNEL_NUM)
+		return;
+	rpmsg_channel = &rpmsg_sim_zx29.rpmsg_channel[chID];
+	packet = kmalloc(sizeof(struct RpMsg_packet), GFP_KERNEL);
+	if(NULL == packet){
+		debug_print("rpmsg_recv_notify kmalloc failed!\n");
+		return;
+	}
+	memset(packet, 0x00, sizeof(struct RpMsg_packet));
+	data = kmalloc(length+1, GFP_KERNEL);
+ if(NULL == data){
+ kfree(packet);
+ debug_print("rpmsg_recv_notify kmalloc failed!!\n");
+ return;
+ }
+ memset(data, 0x00, length+1);
+ memcpy(data, buffer, length);
+ //rpmsg = kmalloc(sizeof(T_ZDrvRpMsg_Msg), GFP_KERNEL);
+ //rpmsg->buf = buffer;
+ //rpmsg->len = length;
+ //up(&rpmsg_channel->channelSema);
+ debug_print("rpmsg_recv_notify :%d \n",rpmsg_channel->chID);
+ packet->chID = chID;
+ packet->buf = data;
+ packet->len = length;
+ rpmsg_enqueue(&rpmsg_channel->RpMsg_packet_queue, &packet->list);
+
+ wake_up_interruptible(&rpmsg_channel->channelwaitq);
+
+}
+EXPORT_SYMBOL(rpmsg_recv_notify);
+//int registerOpsCallback(unsigned int chID, functpye1* funcOpen, functpye1* funcClose, /*functpye2* funcIn,*/ functpye2* funcOut)
+int registerOpsCallback(unsigned int chID, functpye1* funcOpen, functpye1* funcClose, functpye2* funcOut)
+{
+ int nRet = -1;
+ zx29_rpmsg_channel *rpmsg_channel = &rpmsg_sim_zx29.rpmsg_channel[chID];
+ if(funcOpen && funcClose && funcOut)
+ {
+ rpmsg_channel->channelOpen = funcOpen;
+ rpmsg_channel->channelClose = funcClose;
+ //rpmsg_channel->transdatatoRpmsg = funcIn;
+ rpmsg_channel->transdataOut = funcOut;
+ rpmsg_channel->initflag = 1;
+ nRet = 0;
+ }
+ debug_print("registerOpsCallback :%d \n",rpmsg_channel->chID);
+ return nRet;
+}
+EXPORT_SYMBOL(registerOpsCallback);
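+
+/*
+ * Client sketch (illustrative): a transport backend registers its
+ * open/close/output hooks for one channel, then feeds inbound payloads
+ * through rpmsg_recv_notify(), which queues them for readers/pollers of
+ * the matching device node. The exact functpye1/functpye2 typedefs live
+ * in rpmsg_sim.h; the signatures and names below only mirror how the
+ * hooks are invoked above and are assumptions for the example.
+ */
+#if 0
+static void example_open(unsigned int chid) { }
+static void example_close(unsigned int chid) { }
+static void example_out(unsigned int chid, void *buf, unsigned int len)
+{
+	/* hand the outgoing payload to the real transport here */
+}
+
+static void example_backend_attach(void)
+{
+	registerOpsCallback(channel_0, example_open, example_close, example_out);
+
+	/* later, when data arrives from the transport: */
+	rpmsg_recv_notify(channel_0, "hello", 5);
+}
+#endif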
+
+static struct device_attribute rpmsg_zx29_dev_attrs[] = {
+ __ATTR_NULL,
+};
+
+struct bus_type icp_rpmsg_sim_bus = {
+ .name = "rpmsg_zx29",
+ .dev_attrs = rpmsg_zx29_dev_attrs,
+};
+
+struct device *zx29_rpmsg_sim_register_device(struct class *rpmsg_class, zx29_rpmsg_ser *driver, unsigned index,
+ struct device *device)
+{
+ char name[64];
+ dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
+
+ if (index >= driver->num) {
+ return ERR_PTR(-EINVAL);
+ }
+
+ sprintf(name, "%s%d", driver->name, index);
+ return device_create(rpmsg_class, device, dev, NULL, name);
+}
+void zx29_rpmsg_sim_unregister_device(struct class *rpmsg_class, zx29_rpmsg_ser *driver, unsigned index)
+{
+ dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
+
+ device_destroy(rpmsg_class, dev);
+}
+
+static int zx29_rpmsg_sim_init(void)
+{
+ int error;
+ int i;
+ dev_t dev;
+ zx29_rpmsg_ser *rpmsg_ser ;
+
+ rpmsg_ser = &rpmsg_sim_zx29;
+ rpmsg_ser->num =CHANNEL_NUM;
+ rpmsg_ser->ops = &rpmsg_zx29_ops;
+ rpmsg_ser->count = 0;
+// rpmsg_ser->rpmsg_channel.actorID = rpdev->id;
+	error = alloc_chrdev_region(&dev, rpmsg_ser->minor_start,
+				rpmsg_ser->num, rpmsg_ser->name);
+	if (error)	/* do not continue with an uninitialised dev */
+		return error;
+	rpmsg_ser->major = MAJOR(dev);
+	rpmsg_ser->minor_start = MINOR(dev);
+
+ cdev_init(&rpmsg_ser->cdev, &rpmsg_zx29_ops);
+ rpmsg_ser->cdev.owner = rpmsg_ser->owner;
+
+ error = cdev_add(&rpmsg_ser->cdev, dev, rpmsg_ser->num);
+ if (error) {
+ unregister_chrdev_region(dev, rpmsg_ser->num);
+ return error;
+ }
+	rpmsg_sim_classes = class_create(THIS_MODULE, rpmsg_sim_zx29.name);
+	if (IS_ERR(rpmsg_sim_classes)) {
+		cdev_del(&rpmsg_ser->cdev);
+		unregister_chrdev_region(dev, rpmsg_ser->num);
+		return PTR_ERR(rpmsg_sim_classes);
+	}
+	int result = 0;
+ for (i = 0; i < rpmsg_ser->num; i++) {
+// rpmsg_ser->rpmsg_channel[i].actorID = rpdev->id;
+ rpmsg_ser->rpmsg_channel[i].chID = channel_0 + i;
+ //sema_init(&(rpmsg_ser->rpmsg_channel[i].channelSema), 0);
+ init_waitqueue_head(&rpmsg_ser->rpmsg_channel[i].channelwaitq);
+ INIT_LIST_HEAD(&(rpmsg_ser->rpmsg_channel[i].RpMsg_packet_queue.queue));
+ spin_lock_init(&(rpmsg_ser->rpmsg_channel[i].RpMsg_packet_queue.lock));
+ rpmsg_ser->rpmsg_channel[i].RpMsg_packet_queue.qlen = 0;
+ result = zx29_rpmsg_sim_register_device(rpmsg_sim_classes, rpmsg_ser, i, NULL);
+ }
+
+ return 0;
+}
+static void __exit zx29_rpmsg_sim_exit(void)
+{
+ int i;
+ zx29_rpmsg_ser *rpmsg_ser;
+ rpmsg_ser = &rpmsg_sim_zx29;
+
+ for (i = 0; i < rpmsg_ser->num; i++) {
+ zx29_rpmsg_sim_unregister_device(rpmsg_sim_classes, rpmsg_ser, i);
+ }
+
+ class_destroy(rpmsg_sim_classes);
+}
+
+module_init(zx29_rpmsg_sim_init);
+module_exit(zx29_rpmsg_sim_exit);
+
+MODULE_AUTHOR("zte");
+MODULE_DESCRIPTION("zte rpmsg driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform: rpmsg");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/Makefile b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/Makefile
new file mode 100644
index 0000000..692f888
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for tsc driver.
+#
+
+obj-$(CONFIG_TSC_ZX29) += zx-tsc.o
+obj-$(CONFIG_TSC_ZX29) += zx-tsc-strategy.o
+
+#obj-y += zx-tsc.o
+#obj-y += zx-tsc-strategy.o
+
+ccflags-y += -I/$(CP_ROOT_DIR)/ps/driver/inc/misc
+ccflags-y += -I/$(TOPDIR)/pub/project/zx297520v3/include/nv
+ccflags-y += -I/$(TOPDIR)/pub/project/zx297520v3/include/drv
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc-strategy.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc-strategy.c
new file mode 100644
index 0000000..138bcf8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc-strategy.c
@@ -0,0 +1,888 @@
+/*
+ * ZTE tsc driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by tsp
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/kthread.h> /*For kthread_run()*/
+#include <linux/timer.h> /*for mod timer*/
+#include <linux/semaphore.h>/*for semaphore*/
+#include <linux/delay.h>
+#include <mach/iomap.h>
+#include "zx-tsc.h"
+#include <linux/timer.h>
+#include <linux/cp_types.h>
+#include "NvParam_drv.h"
+
+
+/*******************************************************************************
+* Global Variables *
+*******************************************************************************/
+/*
+ * Checked before invoking the protocol-stack callback, to see whether the
+ * rate-limit strategy has already been applied.
+ * ps_rate_flag:
+ * 1: rate limiting has been triggered at least once;
+ * 0: rate limiting has never been triggered.
+ */
+static volatile u32 ps_rate_flag=0;
+/*
+ * Checked before invoking the protocol-stack callback, to see whether the
+ * strategy has already been applied. Before issuing the normal-resident
+ * measure, first check whether the previous state was "any resident"; if it
+ * was not, the normal-resident measure (i.e. lifting the any-resident
+ * restriction) must not be issued.
+ * any_resident_flag:
+ * 1: any-resident has been triggered at least once;
+ * 0: any-resident has never been triggered.
+ */
+static volatile u32 any_resident_flag=0;
+
+/*
+ * Records whether frequency scaling has been applied.
+ * ps_freq_flag:
+ * 1: TSC DFS has been triggered at least once;
+ * 0: TSC DFS has never been triggered.
+ */
+static volatile u32 ps_freq_flag=0;
+
+struct timer_list timer_tsctrl;
+static u32 temp_percent = 0;
+
+T_TsCtrl_CallbackFunction s_tsctrl_callback[MAX_TSCTRL_STRATEGY_ID] = {NULL};
+
+typedef void (*T_Probe_Strategy)(u32 probe_num,u32 temperature );
+T_Probe_Strategy g_probe_strategy[PROBE_MAX]={0,};
+
+/*******************************************************************************
+*                           External variables referenced                     *
+*******************************************************************************/
+extern struct semaphore s_tsc_adc_semaphore;
+extern T_SYS_NV_TSC_CONFIG TsNvData;
+extern volatile u32 g_adc1_flag;
+extern volatile u32 g_adc2_flag;
+extern volatile u32 g_adc3_flag;
+extern s32 g_tsc_print_log_debug;
+/*******************************************************************************
+*                           External functions referenced                     *
+*******************************************************************************/
+#ifdef CONFIG_CPU_FREQ
+extern int zx29_set_frequency(unsigned int old_index,unsigned int new_index);
+extern unsigned int zx_getspeed(unsigned int cpu);
+#endif
+/*******************************************************************************
+* functions *
+*******************************************************************************/
+void tsc_set_reg_bits(u32 regName, u32 bitsAddr, u32 bitsLen, u32 bitsValue )
+{
+ u32 temp;
+
+ temp= (tsc_read_reg(regName)&(~(((0x1<<bitsLen)-0x1)<<bitsAddr)))|(bitsValue<<bitsAddr);
+ tsc_write_reg(regName,temp);
+}
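+
+/*
+ * Example (illustrative): tsc_set_reg_bits(reg, 4, 2, 0x3) builds the mask
+ * ((0x1 << 2) - 1) << 4 = 0x30, clears bits [5:4] of the current register
+ * value and ORs in 0x3 << 4, i.e. the two-bit field at offset 4 becomes
+ * 0b11 while every other bit is preserved.
+ */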
+
+/*******************************************************************************
+ * Function: tsctrl_set_strategy2Iram
+ * Description:    record, in the matching bit field of the given IRAM address,
+ *                 whether a probe requests a given strategy
+ * Parameters:
+ *   Input:  Stra_iram_addr---IRAM address holding each probe's on/off state for one strategy
+ *              probe_bit--------start bit of this probe's state inside that IRAM word
+ *              Strategy---------strategy value (start/stop/hold) to store
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+void tsctrl_set_strategy2Iram(u32 Stra_iram_addr, u32 probe_bit, Ts_TsCtrlStrategy Strategy )
+{
+ tsc_set_reg_bits(Stra_iram_addr, probe_bit, BITS_FOR_PROBES, Strategy);
+}
+
+/*******************************************************************************
+ * Function:      tsctrl_probe_adc1_strategy
+ * Description:   thermal-control strategy for the ADC1 probe
+ * Parameters:
+ * Input:
+ *
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+void tsctrl_probe_adc1_strategy(u32 probe_num,u32 temperature )
+{
+#if 1
+ u32 temp1=(temperature-TsNvData.Threshods[probe_num].THROSHOLD_3);
+ u32 temp2=(TsNvData.Threshods[probe_num].THROSHOLD_6-TsNvData.Threshods[probe_num].THROSHOLD_3);
+
+ if(temperature<=TsNvData.Threshods[probe_num].THROSHOLD_3){
+ temp_percent=0;
+ } else if((temperature<=TsNvData.Threshods[probe_num].THROSHOLD_6)) {
+ temp_percent=((100*temp1/temp2)/TEMP_PERCENT_INTERVAL)*TEMP_PERCENT_INTERVAL;
+ } else {
+ temp_percent=100;
+ }
+
+ /**/
+ if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_9)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN, BIT_PROBE_ADC1 ,STRTEGY_START);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_7)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC1 ,STRTEGY_START);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_5)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADC1 ,STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC1 ,STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC1, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC1, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_3)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC1, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC1, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_1)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC1, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_START);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_0)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC1, STRTEGY_HOLD);
+ }
+ else
+ {
+
+ }
+ /**/
+ if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_0)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC1,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC1, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADC1, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC1, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC1, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC1, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC1, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_2)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC1,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC1, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC1, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC1, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC1, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_4)
+ {
+
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC1,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT,BIT_PROBE_ADC1,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2,BIT_PROBE_ADC1,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2,BIT_PROBE_ADC1,STRTEGY_STOP);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_6)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC1,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT,BIT_PROBE_ADC1,STRTEGY_STOP);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_8)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC1,STRTEGY_STOP);
+ }
+ else
+ {
+
+ }
+#endif
+}
+
+/*******************************************************************************
+ * Function:      tsctrl_probe_adc2_strategy
+ * Description:   thermal-control strategy for the ADC2 probe
+ * Parameters:
+ * Input:
+ *
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+void tsctrl_probe_adc2_strategy(u32 probe_num,u32 temperature )
+{
+#if 1
+ u32 temp1=(temperature-TsNvData.Threshods[probe_num].THROSHOLD_3);
+ u32 temp2=(TsNvData.Threshods[probe_num].THROSHOLD_6-TsNvData.Threshods[probe_num].THROSHOLD_3);
+
+ if(temperature<=TsNvData.Threshods[probe_num].THROSHOLD_3){
+ temp_percent=0;
+ }else if((temperature<=TsNvData.Threshods[probe_num].THROSHOLD_6)){
+ temp_percent=((100*temp1/temp2)/TEMP_PERCENT_INTERVAL)*TEMP_PERCENT_INTERVAL;
+ }else{
+ temp_percent=100;
+ }
+
+ /**/
+ if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_9)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN, BIT_PROBE_ADC2 ,STRTEGY_START);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_7)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC2 ,STRTEGY_START);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_5)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADC2 ,STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC2 ,STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC2, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC2, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_3)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC2, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC2, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_1)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC2, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_START);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_0)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC2, STRTEGY_HOLD);
+ }
+ else
+ {
+
+ }
+
+ /**/
+ if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_0)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC2,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC2, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADC2, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADC2, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADC2, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADC2, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC2, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_2)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC2,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADC2, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC2, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 ,BIT_PROBE_ADC2, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADC2, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_4)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC2,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT,BIT_PROBE_ADC2,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2,BIT_PROBE_ADC2,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2,BIT_PROBE_ADC2,STRTEGY_STOP);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_6)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC2,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT,BIT_PROBE_ADC2,STRTEGY_STOP);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_8)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADC2,STRTEGY_STOP);
+ }
+ else
+ {
+
+ }
+#endif
+}
+
+/*******************************************************************************
+ * Function:        tsctrl_probe_adcRf_strategy
+ * Description:    thermal-control strategy for the RF ADC probe
+ * Parameters:
+ * Input:
+ *
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+void tsctrl_probe_adcRf_strategy(u32 probe_num,u32 temperature )
+{
+#if 1
+ u32 temp1=(temperature-TsNvData.Threshods[probe_num].THROSHOLD_3);
+ u32 temp2=(TsNvData.Threshods[probe_num].THROSHOLD_6-TsNvData.Threshods[probe_num].THROSHOLD_3);
+
+ if(temperature<=TsNvData.Threshods[probe_num].THROSHOLD_3){
+ temp_percent=0;
+ }else if((temperature<=TsNvData.Threshods[probe_num].THROSHOLD_6)){
+ temp_percent=((100*temp1/temp2)/TEMP_PERCENT_INTERVAL)*TEMP_PERCENT_INTERVAL;
+ }else{
+ temp_percent=100;
+ }
+
+ /**/
+ if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_9)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN, BIT_PROBE_ADCRF ,STRTEGY_START);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_7)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADCRF ,STRTEGY_START);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_5)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF ,STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF ,STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADCRF, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADCRF, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_3)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADCRF, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADCRF, STRTEGY_START);//zDrvPow_SetArmPsCoreFreq(CLK312M);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_1)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADCRF, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_START);
+ }
+ else if(temperature>=TsNvData.Threshods[probe_num].THROSHOLD_0)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADCRF, STRTEGY_HOLD);
+ }
+ else
+ {
+
+ }
+
+ /**/
+ if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_0)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADCRF,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADCRF, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2, BIT_PROBE_ADCRF, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_RATE, BIT_PROBE_ADCRF, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_DOWNRATE2, BIT_PROBE_ADCRF, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADCRF, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_2)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADCRF,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT, BIT_PROBE_ADCRF, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2 ,BIT_PROBE_ADCRF, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 ,BIT_PROBE_ADCRF, STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_DFS, BIT_PROBE_ADCRF, STRTEGY_STOP);// zDrvPow_SetArmPsCoreFreq(CLK624M);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_4)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADCRF,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT,BIT_PROBE_ADCRF,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2,BIT_PROBE_ADCRF,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_LIMIT_W_UPTRANSIMITPOWER2,BIT_PROBE_ADCRF,STRTEGY_STOP);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_6)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADCRF,STRTEGY_STOP);
+ tsctrl_set_strategy2Iram(TSCTRL_PS_ANYRESIDENT,BIT_PROBE_ADCRF,STRTEGY_STOP);
+ }
+ else if(temperature<TsNvData.Threshods[probe_num].THROSHOLD_8)
+ {
+ tsctrl_set_strategy2Iram(TSCTRL_SHUTDOWN,BIT_PROBE_ADCRF,STRTEGY_STOP);
+ }
+ else
+ {
+
+ }
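+ /* The two ladders above implement hysteresis: each strategy is started once the
+ temperature rises past an odd threshold (1/3/5/7/9) and is only stopped again
+ after it falls below the even threshold one step lower (0/2/4/6/8), so a reading
+ hovering around a single threshold does not toggle the strategy every poll. */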
+#endif
+}
+
+/*******************************************************************************
+ * Function: tsctrl_set_probe_strategy
+ * Description: call the strategy function registered for the given probe
+ * Parameters:
+ * Input: probe_num: probe number
+ * temperature: temperature value reported by that probe
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+static void tsctrl_set_probe_strategy(u32 probe_num,u32 temperature )
+{
+ if(PROBE_MAX!=probe_num && 0xFF!=temperature) {
+ g_probe_strategy[probe_num](probe_num, temperature);
+ }
+}
+
+/*******************************************************************************
+* Function: tsctrl_strategy_init
+* Description:
+* Parameters:
+* Input: N/A
+*
+* Output: N/A
+* Returns: N/A
+
+* Others: //not use
+********************************************************************************/
+void tsctrl_strategy_init(void)
+{
+ g_probe_strategy[PROBE_ADC1]= tsctrl_probe_adc1_strategy;
+ g_probe_strategy[PROBE_ADC2]= tsctrl_probe_adc2_strategy;
+ g_probe_strategy[PROBE_ADCRF]= tsctrl_probe_adcRf_strategy;
+
+}
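+/* g_probe_strategy[] is a per-probe dispatch table: tsctrl_set_probe_strategy()
+ indexes it with the probe number, so a reading from PROBE_ADC1 ends up in
+ tsctrl_probe_adc1_strategy(PROBE_ADC1, temperature), and likewise for ADC2/ADCRF. */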
+
+/*******************************************************************************
+ * Function: tsctrl_callback_dispatch
+ * Description:
+ * Parameters:
+ * Input:
+ module: Strategy_ModuleId
+ en: 1, run the strategy; 0, stop the strategy
+ *
+ * Output: N/A
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+s32 tsctrl_callback_dispatch(T_TsCtrl_Strategy_ModuleId module, u8 en)
+{
+ T_TsCtrl_CallbackFunction callback = NULL;
+
+ if(module>=MAX_TSCTRL_STRATEGY_ID )
+ return -EINVAL;
+
+ callback = s_tsctrl_callback[module];
+ if (g_tsc_print_log_debug)
+ tsc_print_log("tsctrl_callback_dispatch:callback=0x%x,module=%d,en=%d.\n",(u32)callback,module,en);
+
+ if (callback == NULL) {
+ return -EINVAL;
+ }
+
+ /* invoke the registered callback, passing 'en' as the argument */
+ (*callback)(en);
+
+ return 0;
+
+}
+
+
+/*******************************************************************************
+ * Function: tsctrl_set_strategy
+ * Description:
+ * Parameters:
+ * Input:
+ *
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+static void tsctrl_set_strategy(void)
+{
+ u32 i=0;
+ if(zDrvTsc_GetTscEn() == 0xB2)
+ {
+ if(TsNvData.User_En == 0xB2){
+
+ /*USER*/
+ if(tsc_read_reg(TSCTRL_SHUTDOWN)!=0){
+ /*user strategy start*/
+ if(g_tsc_print_log_debug)
+ tsc_print_log("user strategy start reserve \n")
+ }
+ else{
+ //tsc_print_log("user strategy stop reserve \n")
+ }
+ }
+
+ /*PS_ANYRESIDENT*/
+ if(tsc_read_reg(TSCTRL_PS_ANYRESIDENT)==0){
+ if(any_resident_flag){
+ tsctrl_callback_dispatch(PS_STRATEGY_ANYRESIDENT,false); /* leave any-cell camping, i.e. back to normal camping */
+ tsc_set_reg_bits(TSCTRL_PS,BIT_PS_ANYRESIDENT,BITS_FOR_PSIRAM,false);
+ any_resident_flag=0;
+ }
+ }else{
+ if(!any_resident_flag){
+ tsctrl_callback_dispatch(PS_STRATEGY_ANYRESIDENT,true);/* camp on any cell */
+ tsc_set_reg_bits(TSCTRL_PS,BIT_PS_ANYRESIDENT,BITS_FOR_PSIRAM,true);
+ any_resident_flag=1;
+ }
+ }
+
+ //WIFI
+ if(TsNvData.Wifi_RateLmite_En == 0xB2){
+ if(tsc_read_reg(TSCTRL_WIFI)==0){
+ tsctrl_callback_dispatch(WIFI_STRATEGY,STRTEGY_STOP);//STOP
+ tsc_set_reg_bits(TSCTRL_PERIP,BIT_WIFI,BITS_FOR_PEPIPIRAM,STRTEGY_STOP);
+ }else if(((tsc_read_reg(TSCTRL_WIFI)&0x1000)==0x1000)||((tsc_read_reg(TSCTRL_WIFI)&0x0100)==0x0100)||((tsc_read_reg(TSCTRL_WIFI)&0x0010)==0x0010)||((tsc_read_reg(TSCTRL_WIFI)&0x0001)==0x0001))
+ {
+ tsctrl_callback_dispatch(WIFI_STRATEGY,STRTEGY_START);//START
+ tsc_set_reg_bits(TSCTRL_PERIP,BIT_WIFI,BITS_FOR_PEPIPIRAM,STRTEGY_START);
+ } else{
+ tsctrl_callback_dispatch(WIFI_STRATEGY,STRTEGY_HOLD);//HOLD
+ tsc_set_reg_bits(TSCTRL_PERIP,BIT_WIFI,BITS_FOR_PEPIPIRAM,STRTEGY_HOLD);
+ }
+ }
+
+ //upper-layer (AP) application rate limiting
+ if(TsNvData.Aprate_En == 0xB2) {
+ if(tsc_read_reg(TSCTRL_APRATE)==0) {
+ tsctrl_callback_dispatch(AP_RATE,STRTEGY_STOP);//
+ tsc_set_reg_bits(TSCTRL_PERIP,BIT_APRATE,BITS_FOR_PEPIPIRAM,STRTEGY_STOP);
+ } else {
+ tsctrl_callback_dispatch(AP_RATE,STRTEGY_START);//
+ tsc_set_reg_bits(TSCTRL_PERIP,BIT_APRATE,BITS_FOR_PEPIPIRAM,STRTEGY_START);
+
+ }
+ }
+
+ //modem rate-reduction strategies
+ if(TsNvData.Modemrate_En == 0xB2) {
+ //protocol-stack rate strategy
+ if(tsc_read_reg(TSCTRL_PS_RATE)==0){
+ if(ps_rate_flag!=0){
+ tsctrl_callback_dispatch(PS_STRATEGY_RATE,STRTEGY_STOP);//STOP
+ tsc_set_reg_bits(TSCTRL_PS,BIT_PS_RATE,BITS_FOR_PSIRAM,STRTEGY_STOP);
+ ps_rate_flag=0;
+ }
+ }
+ else if(((tsc_read_reg(TSCTRL_PS_RATE)&0x1000)==0x1000)||((tsc_read_reg(TSCTRL_PS_RATE)&0x0100)==0x0100)||((tsc_read_reg(TSCTRL_PS_RATE)&0x0010)==0x0010)||((tsc_read_reg(TSCTRL_PS_RATE)&0x0001)==0x0001))
+ {
+ if(ps_rate_flag!=1){
+ tsctrl_callback_dispatch(PS_STRATEGY_RATE,STRTEGY_START);//START
+ tsc_set_reg_bits(TSCTRL_PS,BIT_PS_RATE,BITS_FOR_PSIRAM,STRTEGY_START);
+ ps_rate_flag=1;
+ }
+ }else{
+ if(ps_rate_flag!=2){
+ tsctrl_callback_dispatch(PS_STRATEGY_RATE,STRTEGY_HOLD);//HOLD
+ tsc_set_reg_bits(TSCTRL_PS,BIT_PS_RATE,BITS_FOR_PSIRAM,STRTEGY_HOLD);
+ ps_rate_flag=2;
+ }
+ }
+
+ //physical-layer downlink rate-reduction strategies
+ for(i=0;i<4;i++){
+ if(tsc_read_reg(TSCTRL_LIMIT_LTE_DOWNRATE1+i*0x4)==0){
+ tsc_set_reg_bits(TSCTRL_PHY,(BIT_LIMIT_LTE_DOWNRATE1+i),BITS_FOR_PHYIRAM,STRTEGY_STOP);
+ }else{
+ tsc_set_reg_bits(TSCTRL_PHY,(BIT_LIMIT_LTE_DOWNRATE1+i),BITS_FOR_PHYIRAM,STRTEGY_START);
+ }
+ }
+ }
+
+ //physical-layer transmit-power reduction
+
+ if(TsNvData.TansmitPower_En == 0xB2){
+ for(i=4;i<STRATEGY_PHY_NUM;i++){
+ if(tsc_read_reg(TSCTRL_LIMIT_LTE_DOWNRATE1+i*0x4)==0){
+ tsc_set_reg_bits(TSCTRL_PHY,(BIT_LIMIT_LTE_DOWNRATE1+i),BITS_FOR_PHYIRAM,STRTEGY_STOP);
+ }else{
+ tsc_set_reg_bits(TSCTRL_PHY,(BIT_LIMIT_LTE_DOWNRATE1+i),BITS_FOR_PHYIRAM,STRTEGY_START);
+ }
+ }
+ }
+
+ /*DFS*/
+ if(TsNvData.Dfs_En == 0xB2){
+
+ #ifdef CONFIG_CPU_FREQ
+
+ //tsc_print_log("CPU_FRE:zx_getspeed(0)=%d\n",zx_getspeed(0));
+
+ if(tsc_read_reg(TSCTRL_DFS)==0){
+ if(ps_freq_flag){
+ //tsc_print_log("CPU_FREQ0:zx_getspeed(0)=%d\n",zx_getspeed(0));
+ if(zx_getspeed(0) != 624000 )
+ zx29_set_frequency(1,0); //zDrvPow_SetArmPsCoreFreq(CLK624M);
+
+ //tsc_print_log("CPU_FREQ1:zx_getspeed(0)=%d\n",zx_getspeed(0));
+
+ ps_freq_flag=0;
+ }
+ }else{
+ if(!ps_freq_flag){
+ //tsc_print_log("CPU_FREQ1:zx_getspeed(0)=%d\n",zx_getspeed(0));
+ zx29_set_frequency(0,1); //zDrvPow_SetArmPsCoreFreq(CLK312M);
+ //tsc_print_log("CPU_FREQ1:zx_getspeed(0)=%d\n",zx_getspeed(0));
+
+ ps_freq_flag=1;
+ }
+ }
+ #endif
+ }
+
+ if(g_tsc_print_log_debug) {
+ tsc_print_log("0:stop;1:start;2:hold; reg32(TSCTRL_PS)=0x%x,reg32(PS_CORE_SEL_REG)=0x%x,temp_percent=%d\n",tsc_read_reg(TSCTRL_PS),tsc_read_reg(PS_CORE_SEL_REG),temp_percent);
+ tsc_print_log("bit1:limitLtedownrate2;bit3:limitWdownrate2;bit5:limitLteUptransmitrate2;bit7:limitWUptransmitrate2;reg32(TSCTRL_PHY)=0x%x.\n",tsc_read_reg(TSCTRL_PHY));
+ }
+ }
+ else
+ {
+ if(TsNvData.Modemrate_En == 0xB2){
+ /*stop all PS strategies*/
+ if(ps_rate_flag!=0){
+ tsctrl_callback_dispatch(PS_STRATEGY_RATE,STRTEGY_STOP);//STOP
+ tsc_set_reg_bits(TSCTRL_PS,BIT_PS_RATE,BITS_FOR_PSIRAM,STRTEGY_STOP);
+ ps_rate_flag=0;
+ }
+ }
+
+ if(any_resident_flag){
+ tsctrl_callback_dispatch(PS_STRATEGY_ANYRESIDENT,false); /* leave any-cell camping, i.e. back to normal camping */
+ tsc_set_reg_bits(TSCTRL_PS,BIT_PS_ANYRESIDENT,BITS_FOR_PSIRAM,false);
+ any_resident_flag=0;
+ }
+ //stop WIFI rate limiting
+ if(TsNvData.Wifi_RateLmite_En == 0xB2){
+ tsctrl_callback_dispatch(WIFI_STRATEGY,STRTEGY_STOP);//STOP
+ tsc_set_reg_bits(TSCTRL_PERIP,BIT_WIFI,BITS_FOR_PEPIPIRAM,STRTEGY_STOP);
+ }
+ //stopÉϲãÓ¦ÓýµËÙ
+ if(TsNvData.Aprate_En == 0xB2){
+ tsctrl_callback_dispatch(AP_RATE,STRTEGY_STOP);//
+ tsc_set_reg_bits(TSCTRL_PERIP,BIT_APRATE,BITS_FOR_PEPIPIRAM,STRTEGY_STOP);
+ }
+
+ //stop all physical-layer strategies
+ for(i=0;i<STRATEGY_PHY_NUM;i++){
+ tsc_set_reg_bits(TSCTRL_PHY,(BIT_LIMIT_LTE_DOWNRATE1+i),BITS_FOR_PHYIRAM,STRTEGY_STOP);
+ }
+
+ if(g_tsc_print_log_debug)
+ tsc_print_log("g_tsc_ctrlen==0, TSC Closed, close all strategy .\n");
+ }
+}
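+/* tsctrl_set_strategy() polls the per-strategy IRAM words (TSCTRL_SHUTDOWN,
+ TSCTRL_PS_RATE, ...) that the probe strategy functions fill in through
+ tsctrl_set_strategy2Iram(), invokes the registered callbacks accordingly, and
+ mirrors the resulting state into the TSCTRL_PS / TSCTRL_PHY / TSCTRL_PERIP
+ status words. */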
+
+static int tsctrl_set_strategy_thread(void * arg)
+{
+ while(1)
+ {
+ tsctrl_set_strategy();
+ msleep(TS_POLLING_TIME);
+ }
+}
+
+static void tsctrl_set_strategy_thread_create(unsigned long een)
+{
+ int err;
+
+ err = (int)kthread_run(tsctrl_set_strategy_thread, NULL, "tsctrl_set_strategy");
+ if (IS_ERR((const void *)err)) {
+ printk(KERN_ERR"[zx tsctrl]: Fail to create and run tsctrl_set_strategy!\n");
+ // return PTR_ERR((const void *)err);
+ }
+}
+
+/*init tsctrl Iram*/
+static void tsctrl_createtimer(void)
+{
+ init_timer(&timer_tsctrl);
+ timer_tsctrl.function = tsctrl_set_strategy_thread_create;
+
+ timer_tsctrl.expires = jiffies + msecs_to_jiffies(TS_STRATEGY_SET_BEGIN_TIME);
+ add_timer(&timer_tsctrl);
+
+}
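+/* Bootstrap pattern: the one-shot timer above fires TS_STRATEGY_SET_BEGIN_TIME (61s)
+ after init and its handler only then spawns the strategy polling kthread, so the
+ strategy loop does not start running during driver initialisation itself. */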
+
+
+/*******************************************************************************
+ * Function: tsctrl_adc_thread
+ * Description:
+ * Parameters:
+ * Input:
+ *
+ * Output: N/A
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+static int tsctrl_adc_thread(void * arg)
+{
+
+ u32 probe_num ;
+ u32 temperature ;
+
+ u32 TsDataValue[PROBE_NUM][PROBE_INFO] = {0};
+
+ while(1)
+ {
+ down(&s_tsc_adc_semaphore);
+
+ if(zDrvTsc_GetTscEn() == 0xB2) {
+ /* get the current probe number and temperature from the shared buffer */
+ tsc_get_temp_data_info(TsDataValue);
+ if(g_adc1_flag==1){
+ /* get the PROBE_ADC1 information */
+ probe_num= TsDataValue[PROBE_ADC1][TS_MEMBER_PROBE];
+ temperature= TsDataValue[PROBE_ADC1][TS_MEMBER_TEMP];
+ //tsc_set_temp_data_info(probe_num);
+ if(g_tsc_print_log_debug)
+ tsc_print_log("set strategy for probe%d\n", probe_num);
+ tsctrl_set_probe_strategy(probe_num, temperature );
+ }
+
+ if(g_adc2_flag==1){
+ /* get the PROBE_ADC2 information */
+ probe_num= TsDataValue[PROBE_ADC2][TS_MEMBER_PROBE];
+ temperature= TsDataValue[PROBE_ADC2][TS_MEMBER_TEMP];
+ //tsc_set_temp_data_info(probe_num);
+ if(g_tsc_print_log_debug)
+ tsc_print_log("set strategy for probe%d\n", probe_num);
+
+ tsctrl_set_probe_strategy(probe_num, temperature );
+ }
+
+ if(g_adc3_flag==1){
+ /* get the PROBE_ADCRF information */
+ probe_num= TsDataValue[PROBE_ADCRF][TS_MEMBER_PROBE];
+ temperature= TsDataValue[PROBE_ADCRF][TS_MEMBER_TEMP];
+ //tsc_set_temp_data_info(probe_num);
+ if(g_tsc_print_log_debug)
+ tsc_print_log("set strategy for probe%d\n", probe_num);
+
+ tsctrl_set_probe_strategy(probe_num, temperature );
+ }
+
+ }else{
+ if(g_tsc_print_log_debug)
+ tsc_print_log("g_tsc_ctrlen==0, TSC Closed.\n");
+ }
+ }
+}
+
+
+
+void tsctrl_init(void)
+{
+ int err;
+
+ tsctrl_strategy_init();
+
+ err = (int)kthread_run(tsctrl_adc_thread, NULL, "tsctrl_adc_thread");
+ if (IS_ERR((const void *)err)) {
+ printk(KERN_ERR"[zx tsctrl]: Fail to create and run tsctrl_adc_thread!\n");
+ //return PTR_ERR((const void *)err);
+ }
+
+ tsctrl_createtimer();
+}
+
+/*******************************************************************************
+ * Function: zDrvTsCtrl_DfsEn
+ * Description:
+ * Parameters:
+ * Input:
+ *
+ * Output: 1: tsc open dfs
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+u32 zDrvTsCtrl_DfsEn(void)
+{
+ if(ps_freq_flag)
+ return 1;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(zDrvTsCtrl_DfsEn);
+
+/*******************************************************************************
+ * Function: zDrvTsCtrl_RegCallback
+ * Description:
+ * Parameters:
+ * Input:
+ *
+ * Output: N/A
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+s32 zDrvTsCtrl_RegCallback(T_TsCtrl_Strategy_ModuleId module,T_TsCtrl_CallbackFunction callback)
+{
+
+ if (module >= MAX_TSCTRL_STRATEGY_ID )
+ {
+ BUG();
+ return -EINVAL;
+ }
+
+ if(NULL==callback)
+ {
+ BUG();
+ return -EINVAL;
+ }
+
+ /* index with the strategy module id and store the callback pointer in s_tsctrl_callback[] */
+ s_tsctrl_callback[module] = callback;
+
+ return 0;
+
+}
+EXPORT_SYMBOL(zDrvTsCtrl_RegCallback);
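+/* Minimal registration sketch for a client module (the callback name and the
+ throttling it performs are hypothetical, not part of this driver):
+
+ static void my_wifi_thermal_cb(u8 en)
+ {
+ if (en == STRTEGY_START)
+ ; // reduce WiFi throughput here
+ else if (en == STRTEGY_STOP)
+ ; // restore full throughput here
+ }
+
+ ...
+ zDrvTsCtrl_RegCallback(WIFI_STRATEGY, my_wifi_thermal_cb);
+
+ tsctrl_set_strategy() will then invoke my_wifi_thermal_cb() through
+ tsctrl_callback_dispatch() each polling cycle with the current WIFI strategy
+ state (STRTEGY_STOP/START/HOLD). */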
+
+/*******************************************************************************
+ * Function: zDrvTsc_GetTscTempPercent
+ * Description:
+ * Parameters:
+ * Input:
+ *
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+VOID zDrvTsc_GetTscTempPercent(UINT32 *percent)
+{
+ *percent=temp_percent;
+ tsc_write_reg(TSCTRL_TEMP_PERCENT,temp_percent);
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc.c b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc.c
new file mode 100644
index 0000000..c9a5559
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc.c
@@ -0,0 +1,678 @@
+/*
+ * ZTE tsc driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by tsp
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/timer.h>
+#include <linux/kthread.h> /*For kthread_run()*/
+#include <linux/delay.h>
+#include <mach/iomap.h>
+#include "zx-tsc.h"
+#include <linux/soc/zte/tsc/tsc.h>
+#include <linux/cpps_init2.h>
+#include <linux/cp_types.h>
+#include "NvParam_drv.h"
+
+#ifdef RFC_DCXO_EN
+#define IRAM_TEMPCOMP_DAC (IRAM_BASE_ADDR_SLAVE_AFC+0x8) //high16bit: the ADC value of temp
+#else
+#define IRAM_TEMPCOMP_DAC (IRAM_BASE_ADDR_PUB_RF+64)//the ADC value of temp
+#endif
+
+
+#if 0
+//TsNvData.DetectEn is the master switch read from NV; when it is off the temperature-detection threads are not created at all.
+//g_tscCtrlEn is the switch that can be changed at run time; it only takes effect while TsNvData.DetectEn is on.
+//The strategy thread re-checks g_tscCtrlEn each cycle: with g_tscCtrlEn=0 no strategy is applied and any strategy already running is stopped.
+//The tsc init code sets g_tscCtrlEn=0xB2 (enabled) by default (instead of 0) so that thermal protection is active on MBB products unless it is explicitly turned off.
+volatile u8 g_tsc_ctrlen=0;
+#endif
+
+volatile u32 g_adc1_flag = 0;//1: the ADC1 temperature probe is in use
+volatile u32 g_adc2_flag = 0;//1: the ADC2 temperature probe is in use
+volatile u32 g_adc3_flag = 0; // 1:adcrf work
+
+struct timer_list timer_tsc;
+
+s32 g_tsc_print_log_debug=0;
+u32 g_test_addr0=0;
+u32 g_test_addr1=0;
+
+#if 0//v3phone
+u32 ts_temp_value_table[TS_ADC_TEMP_NUMBER][TS_ADC_TEMP_VOLTAGE_NUMBER]={
+{30,31,32,33,34,35,36,37,38,39,
+ 40,41,42,43,44,45,46,47,48,49,
+ 50,51,52,53,54,55,56,57,58,59,
+ 60,61,62,63,64,65,66,67,68,69,
+ 70,71,72,73,74,75,76,77,78,79,
+ 80,81,82,83,84,85,86,87,88,89,
+ 90,91,92,93,94,95,96,97,98,99,
+ 100,101,102,103,104,105,106,107,108,109,
+ 110,111,112,113,114,115,116,117,118,119,
+ 120,121,122,123,124,125},
+
+{
+815,799,782,766,750,734,719,703,688,673,
+659,644,630,615,602,588,574,561,548,535,
+523,511,498,487,476,465,454,443,432,422,
+411,401,391,383,374,366,355,347,339,331,
+322,315,306,299,292,285,278,272,265,259,
+252,246,240,235,229,224,218,213,207,203,
+199,193,189,184,180,175,171,168,164,159,
+156,153,149,146,143,139,136,133,130,127,
+124,121,118,116,113,110,108,105,103,102,
+99,97,95,93,91,89}
+};
+//#else//watch
+
+u32 ts_temp_value_table[TS_ADC_TEMP_NUMBER][TS_ADC_TEMP_VOLTAGE_NUMBER]={
+{30,31,32,33,34,35,36,37,38,39,
+ 40,41,42,43,44,45,46,47,48,49,
+ 50,51,52,53,54,55,56,57,58,59,
+ 60,61,62,63,64,65,66,67,68,69,
+ 70,71,72,73,74,75,76,77,78,79,
+ 80,81,82,83,84,85,86,87,88,89,
+ 90,91,92,93,94,95,96,97,98,99,
+ 100,101,102,103,104,105,106,107,108,109,
+ 110,111,112,113,114,115,116,117,118,119,
+ 120,121,122,123,124,125},
+
+{802,783,764,746,727,709,692,674,657,640,
+ 624,607,591,576,561,545,531,516,502,489,
+ 475,462,449,437,425,413,402,390,379,369,
+ 358,348,338,329,320,311,302,293,285,277,
+ 269,262,254,247,240,234,227,221,215,209,
+ 203,197,192,187,181,176,172,167,162,158,
+ 154,150,146,142,138,134,131,127,124,121,
+ 117,114,111,108,106,103,100,98, 95, 93,
+ 90, 88, 86, 84, 82, 80, 78, 76, 74, 72,
+ 70, 69, 67, 65, 64, 62}
+};
+
+#endif
+extern u32 ts_temp_value_table[TS_ADC_TEMP_NUMBER][TS_ADC_TEMP_VOLTAGE_NUMBER];
+extern volatile u32 ts_adc_flag;
+
+u32 g_ts_data_info[PROBE_NUM][PROBE_INFO] = {0};
+
+static struct mutex tsc_mutexId;
+struct semaphore s_tsc_adc_semaphore;
+
+T_SYS_NV_TSC_CONFIG TsNvData = {0};
+unsigned int tsc_tempValue[PROBE_MAX]={0};
+
+struct kobject *zx_tsc_kobj;
+
+#define zte_tsc_attr(_name) \
+static struct kobj_attribute _name##_attr = \
+{ \
+ .attr = \
+ { \
+ .name = __stringify(_name), \
+ .mode = 0644, \
+ }, \
+ .show = _name##_show, \
+ .store = _name##_store, \
+}
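+/* For example, zte_tsc_attr(tsc_value) below expands to a struct kobj_attribute
+ named tsc_value_attr with .name "tsc_value", .mode 0644, .show tsc_value_show
+ and .store tsc_value_store, which sysfs_create_group() later exposes as
+ /sys/zx_tsc/tsc_value. */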
+
+/*the power on info */
+typedef enum
+{
+ POWER_ON_NORMAL = 0,
+ POWER_ON_FOTA,
+ POWER_ON_CHARGING,
+ POWER_ON_RTC,
+ POWER_ON_RESET,
+ POWER_ON_HDT_TEST,
+ POWER_ON_EXCEPTRESET,
+ POWER_ON_LOCALUPDATE,
+ POWER_ON_BOOST_IN,
+ POWER_ON_AMT,
+ POWER_ON_PRODUCTION,
+ POWER_ON_INVALID,
+}T_ZDrvSys_PowerOn_Type;
+
+//extern int nand_NvRead(int dwStart, int dwLen, char* to);
+extern uint get_adc1_voltage(void);
+extern uint get_adc2_voltage(void);
+#ifndef USE_CPPS_KO
+extern UINT32 zOss_NvItemRead(UINT32 NvItemID, UINT8 * NvItemData, UINT32 NvItemLen);
+#endif
+static u32 tsc_adcRf_read(void);
+
+void tsc_get_tempPercent(unsigned int *percent)
+{
+ *percent = zx_read_reg(TSCTRL_TEMP_PERCENT);
+}
+
+
+ static ssize_t tsc_percent_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+ {
+
+ unsigned int tsc_per = 0;
+ char *s = buf;
+
+ tsc_get_tempPercent(&tsc_per);
+ sprintf(s, "%d\n", tsc_per);
+
+ return sizeof(unsigned int);
+ }
+
+ static ssize_t tsc_percent_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+ {
+ int error = 0;
+
+ return error ;
+ }
+ zte_tsc_attr(tsc_percent);
+
+
+ signed int tsc_set_detect_en(unsigned char val)
+{
+ zx_write_reg(TSCTRL_DETECT_EN,val);
+ return 0;
+}
+signed int tsc_get_detect_en(void)
+{
+ return zx_read_reg(TSCTRL_DETECT_EN);
+}
+ static ssize_t tsc_ptscCtrlEn_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+ {
+
+ signed int tsc_CtrlEn = 0;
+ char *s = buf;
+
+ tsc_CtrlEn = tsc_get_detect_en();
+
+ sprintf(s, "%d\n", tsc_CtrlEn);
+
+ return sizeof(unsigned int);
+ }
+
+ static ssize_t tsc_ptscCtrlEn_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+ {
+
+ int error = 0;
+ long tsc_CtrlEn=0;
+ if(strict_strtol(buf, 0, &tsc_CtrlEn))//string to number
+ {
+ error = -EINVAL;
+ }
+ tsc_set_detect_en(tsc_CtrlEn);
+
+ return error ? error : n;
+ }
+
+ zte_tsc_attr(tsc_ptscCtrlEn);
+
+ unsigned int *tsc_get_probe_temperature(void)
+ {
+ unsigned int i;
+
+ for(i=0;i<PROBE_MAX; i++)
+ tsc_tempValue[i]= zx_read_reg(TSCTRL_TEMPADC1 + 0x4 * i);
+
+ tsc_tempValue[PROBE_ADCRF]= tsc_adcRf_read();
+ return tsc_tempValue;
+ }
+
+ static ssize_t tsc_value_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+ {
+ char *s = buf;
+ unsigned int *tsc_val = 0;
+ unsigned int i;
+
+ tsc_val = tsc_get_probe_temperature();
+
+ for(i=0; i<PROBE_MAX;i++){
+ s += sprintf(s, "%d\n", tsc_val[i]);
+ }
+ return s - buf;
+ }
+
+ static ssize_t tsc_value_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+ {
+ int error = 0;
+
+ return error;
+ }
+
+ zte_tsc_attr(tsc_value);
+
+
+ static ssize_t tsc_print_log_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+ {
+
+ char *s = buf;
+
+ sprintf(s, "%d\n", g_tsc_print_log_debug);
+
+ return sizeof(unsigned int);
+ }
+
+
+ static ssize_t tsc_print_log_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+ {
+
+ int error = 0;
+ long val = 0;
+ if(strict_strtol(buf, 0, &val))//string to number
+ error = -EINVAL;
+ else
+ g_tsc_print_log_debug = (s32)val;
+
+ return error ? error : n;
+ }
+
+ zte_tsc_attr(tsc_print_log);
+
+
+ static struct attribute * g[] =
+ {
+ &tsc_value_attr.attr,
+ &tsc_percent_attr.attr,
+ &tsc_ptscCtrlEn_attr.attr,
+ &tsc_print_log_attr.attr,
+
+ NULL,
+ };
+
+ static struct attribute_group zx_tsc_attr_group =
+ {
+ .attrs = g,
+ };
+
+#if 1
+
+/********************************************************************************
+ * Function:
+ * Description: tsc_lock
+ * Parameters: void
+ * Returns: void
+ * Others:
+ ********************************************************************************/
+static void tsc_lock(void)
+{
+ mutex_lock(&tsc_mutexId);
+}
+/********************************************************************************
+ * Function:
+ * Description: tsc_unlock
+ * Parameters: void
+ * Returns: void
+ * Others:
+ ********************************************************************************/
+
+static void tsc_unlock(void)
+{
+ mutex_unlock(&tsc_mutexId);
+}
+/*******************************************************************************
+ * Function: tsc_nearest
+ * Description: binary search of a descending voltage table for the entry nearest to searchKey
+ * Parameters:
+ * Input:
+ *
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+u32 tsc_nearest(u32 b[],u32 searchKey,u32 low,u32 high)
+{
+ u32 middle = (u32)(low+high)/2;
+ u32 index;
+ while(high -low >1)
+ {
+ if(searchKey ==b[middle] ) return middle;
+ if(searchKey > b[middle] ) high = middle;
+ if(searchKey < b[middle] ) low = middle;
+ middle = (u32)(low+high)/2;
+ }
+ index = searchKey - b[high]> b[low]-searchKey ?low:high;
+ return index;
+}
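+/* Example with the (disabled) sample table above, whose voltage row is sorted in
+ descending order: tsc_nearest(ts_temp_value_table[TS_TEMP_VOLTAGE_TABLE_NUMBER], 475, 0, 95)
+ returns index 20, and ts_temp_value_table[TS_TEMP_VALUE_TABLE_NUMBER][20] is 50 degC. */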
+/*******************************************************************************
+ * Function: tsc_get_probe_adcvalue
+ * Description: convert an ADC voltage reading into a temperature value
+ * Parameters:
+ * Input: adcValue: ADC voltage value
+ * *temp: temperature corresponding to that voltage
+
+ * Output: N/A
+ * Returns: N/A
+
+ * Others: //not use
+********************************************************************************/
+static void tsc_get_probe_adcvalue(u32 adcValue, u32 *temp)
+{
+ u32 index = 0;
+
+ index= tsc_nearest(ts_temp_value_table[TS_TEMP_VOLTAGE_TABLE_NUMBER],adcValue,TS_TEMP_NUMBER_SMALLEST,TS_TEMP_NUMBER_BIGGEST-1);
+ *temp= ts_temp_value_table[TS_TEMP_VALUE_TABLE_NUMBER][index];
+}
+
+#define ADCRF_SEARCH_TEMP 1
+
+static u32 tsc_dac_read(u32 addr)
+{
+ u32 dac;
+
+ dac = zx_read_reg(addr);
+
+#ifdef RFC_DCXO_EN
+ dac = (dac&0xffff0000)>>16; //high16bit: the ADC value of temp
+#endif
+
+ return dac;
+}
+
+static u32 tsc_adcRf_read(void)
+{
+ u32 adcRfValue, tempRf;
+
+ adcRfValue = tsc_dac_read(IRAM_TEMPCOMP_DAC);
+ tsc_get_probe_adcvalue(adcRfValue,&tempRf);
+
+ return tempRf;
+}
+
+static int tsc_temp_detect(void * arg)
+{
+ u32 adc1Value = 0;
+ u32 adc2Value = 0;
+
+ u32 temp1 = 0;
+ u32 temp2 = 0;
+
+#if ADCRF_SEARCH_TEMP
+ u32 adcRfValue = 0;
+ u32 tempRf = 0;
+#endif
+
+ while(1) {
+ if(g_adc1_flag==1){
+ adc1Value = get_adc1_voltage();
+ tsc_get_probe_adcvalue(adc1Value,&temp1);
+ }
+ if(g_adc2_flag==1){
+ adc2Value = get_adc2_voltage();
+ tsc_get_probe_adcvalue(adc2Value,&temp2);
+ }
+#if ADCRF_SEARCH_TEMP
+ if(g_adc3_flag==1){
+ adcRfValue = tsc_dac_read(IRAM_TEMPCOMP_DAC);
+ tsc_get_probe_adcvalue(adcRfValue,&tempRf);
+ }
+#endif
+
+ tsc_lock();
+
+ /*set tempvalue to g_ts_data_info*/
+ g_ts_data_info[PROBE_ADC1][TS_MEMBER_PROBE] = PROBE_ADC1;
+ g_ts_data_info[PROBE_ADC1][TS_MEMBER_TEMP] = temp1;
+ g_ts_data_info[PROBE_ADC2][TS_MEMBER_PROBE] = PROBE_ADC2;
+ g_ts_data_info[PROBE_ADC2][TS_MEMBER_TEMP] = temp2;
+ g_ts_data_info[PROBE_ADCRF][TS_MEMBER_PROBE] = PROBE_ADCRF;
+#if ADCRF_SEARCH_TEMP
+ g_ts_data_info[PROBE_ADCRF][TS_MEMBER_TEMP] =tempRf;
+#else
+ /*zx_read_reg(IRAM_BASE_ADDR_SLAVE_AFC+0xc)>>10 is the temperature of rf*/
+ g_ts_data_info[PROBE_ADCRF][TS_MEMBER_TEMP] = zx_read_reg(IRAM_BASE_ADDR_SLAVE_AFC+0xc)>>10;
+#endif
+ /*set tempvalue to iram to ap*/
+ tsc_write_reg(TSCTRL_TEMPADC1,temp1);
+ tsc_write_reg(TSCTRL_TEMPADC2,temp2);
+ tsc_write_reg(TSCTRL_TEMPADCRF, g_ts_data_info[PROBE_ADCRF][TS_MEMBER_TEMP]);
+
+ tsc_unlock();
+
+ if(g_tsc_print_log_debug)
+ tsc_print_log("adc1/adc2 value adc1Value=%d,adc2Value=%d,temp1 = %d.temp2 = %d.temprf = %d.\n",adc1Value,adc2Value,temp1,temp2, g_ts_data_info[PROBE_ADCRF][TS_MEMBER_TEMP]);
+
+ up(&s_tsc_adc_semaphore);
+ msleep(TS_POLLING_TIME);
+
+ }
+
+}
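+/* tsc_temp_detect() is the producer half of a producer/consumer pair: every
+ TS_POLLING_TIME it refreshes g_ts_data_info[] and the TSCTRL_TEMPADCx IRAM words
+ under tsc_mutexId, then ups s_tsc_adc_semaphore so that tsctrl_adc_thread() (the
+ consumer in the companion control file above) wakes up and re-evaluates the
+ per-probe strategies. */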
+
+static void tsc_temp_detect_thread_create(unsigned long een)
+{
+
+ int err;
+
+ err = (int)kthread_run(tsc_temp_detect, NULL, "tsc_temp_detect");
+ if (IS_ERR((const void *)err)) {
+ printk(KERN_ERR"[zx tsctrl]: Fail to create and run tsc_temp_detect!\n");
+ //return PTR_ERR((const void *)err);
+ }
+}
+
+
+/*init tsctrl Iram*/
+static void tsc_createtimer(void)
+{
+ init_timer(&timer_tsc);
+ timer_tsc.function = tsc_temp_detect_thread_create;
+
+ timer_tsc.expires = jiffies + msecs_to_jiffies(TS_TEMP_DETECT_BEGIN_TIME);
+ add_timer(&timer_tsc);
+
+}
+/*******************************************************************************
+* Function: tsc_refInit
+* Description:
+* Parameters:
+* Input: N/A
+*
+* Output: N/A
+* Returns: N/A
+
+* Others: //not use
+********************************************************************************/
+s32 tsc_refInit(void)
+{
+ if(ts_adc_flag ==1)
+ {
+ g_adc1_flag=1; //the ADC1 temperature probe is in use
+ g_adc2_flag=0;
+ g_adc3_flag=0;
+ }
+ else if(ts_adc_flag ==2)
+ {
+ g_adc1_flag=0;
+ g_adc2_flag=1; //the ADC2 temperature probe is in use
+ g_adc3_flag=0;
+ }
+ else
+ {
+ g_adc1_flag=0;
+ g_adc2_flag=0;
+ g_adc3_flag=1;
+ }
+
+ tsc_print_log("g_adc1_flag:%d, g_adc2_flag:%d, g_adc3_flag:%d.\n",g_adc1_flag, g_adc2_flag, g_adc3_flag);
+
+ return 0;
+}
+
+
+/*init tsctrl Iram*/
+static void tsc_init_iram(void)
+{
+ /* make sure the whole TSC IRAM region starts out in a known, cleared state */
+ memset((void *)(IRAM_BASE_ADDR_TPC), 0x0, IRAM__LEN_TPC); /*clear*/
+}
+
+
+ /* /sys/zx_tsc/tsc_value */
+ /* /sys/zx_tsc/tsc_percent */
+ /* /sys/zx_tsc/tsc_ptscCtrlEn */
+ static int zx_tsc_init(void)
+ {
+ int ret;
+ unsigned int power_typ=0;
+
+ printk(KERN_INFO "[TSC] create sysfs interface\n");
+
+ zx_tsc_kobj = kobject_create_and_add("zx_tsc", NULL);
+ if (!zx_tsc_kobj){
+ return -ENOMEM;
+ }
+
+ ret = sysfs_create_group(zx_tsc_kobj, &zx_tsc_attr_group);
+ if (ret){
+ printk(KERN_WARNING "[TSC] sysfs_create_group ret %d\n", ret);
+ return ret;
+ }
+
+ /*init iram for tsc*/
+ tsc_init_iram();
+
+ g_test_addr0=DRV_PUB_NV_ADDR;
+ g_test_addr1=DRV_SYS_NV_ITEM_ADDR(tsc_config);
+ //ret = nand_NvRead(DRV_SYS_NV_ITEM_ADDR(tsc_config),DRV_SYS_NV_ITEM_SIZE(tsc_config),(u8*)&TsNvData);
+ ret = CPPS_FUNC(cpps_callbacks, zOss_NvItemRead)(DRV_SYS_NV_ITEM_ADDR(tsc_config), (u8*)&TsNvData, DRV_SYS_NV_ITEM_SIZE(tsc_config));
+
+ if(ret != 0){
+ printk(KERN_ERR"[zx tsc]: zOss_NvItemRead failed [err=%d]!\n", ret);
+ return -1;
+ }
+ zDrvTsc_SetTscEn(0xB2);//need
+ tsc_refInit();
+
+
+ if(TsNvData.DetectEn != 0xB2){
+ zDrvTsc_SetTscEn(0);//need
+ printk("TsNvData.DetectEn != 0xB2.\n");
+ return 0; /*if dont enable tsc,just return success.*/
+ }
+
+ mutex_init(&tsc_mutexId);
+ sema_init(&s_tsc_adc_semaphore, 0);
+
+
+ tsc_createtimer();
+
+ printk("adc1 throshold:[throd0,throd1]=[%d,%d],[throd2,throd3]=[%d,%d],[throd4,throd5]=[%d,%d],[throd6,throd7]=[%d,%d],[throd8,throd9]=[%d,%d].\n", \
+ TsNvData.Threshods[PROBE_ADC1].THROSHOLD_0,TsNvData.Threshods[PROBE_ADC1].THROSHOLD_1,TsNvData.Threshods[PROBE_ADC1].THROSHOLD_2,\
+ TsNvData.Threshods[PROBE_ADC1].THROSHOLD_3,TsNvData.Threshods[PROBE_ADC1].THROSHOLD_4,TsNvData.Threshods[PROBE_ADC1].THROSHOLD_5,\
+ TsNvData.Threshods[PROBE_ADC1].THROSHOLD_6,TsNvData.Threshods[PROBE_ADC1].THROSHOLD_7,\
+ TsNvData.Threshods[PROBE_ADC1].THROSHOLD_8,TsNvData.Threshods[PROBE_ADC1].THROSHOLD_9);
+ printk("adc2 throshold:[throd0,throd1]=[%d,%d],[throd2,throd3]=[%d,%d],[throd4,throd5]=[%d,%d],[throd6,throd7]=[%d,%d],[throd8,throd9]=[%d,%d].\n", \
+ TsNvData.Threshods[PROBE_ADC2].THROSHOLD_0,TsNvData.Threshods[PROBE_ADC2].THROSHOLD_1,TsNvData.Threshods[PROBE_ADC2].THROSHOLD_2,\
+ TsNvData.Threshods[PROBE_ADC2].THROSHOLD_3,TsNvData.Threshods[PROBE_ADC2].THROSHOLD_4,TsNvData.Threshods[PROBE_ADC2].THROSHOLD_5,\
+ TsNvData.Threshods[PROBE_ADC2].THROSHOLD_6,TsNvData.Threshods[PROBE_ADC2].THROSHOLD_7,\
+ TsNvData.Threshods[PROBE_ADC2].THROSHOLD_8,TsNvData.Threshods[PROBE_ADC2].THROSHOLD_9);
+ printk("adcrf throshold:[throd0,throd1]=[%d,%d],[throd2,throd3]=[%d,%d],[throd4,throd5]=[%d,%d],[throd6,throd7]=[%d,%d],[throd8,throd9]=[%d,%d].\n", \
+ TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_0,TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_1,TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_2,\
+ TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_3,TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_4,TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_5,\
+ TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_6,TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_7,\
+ TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_8,TsNvData.Threshods[PROBE_ADCRF].THROSHOLD_9);
+
+ //if temperature detection is enabled, start the thermal strategy control threads
+ if(TsNvData.DetectEn == 0xB2){
+ tsctrl_init();
+ power_typ = *(unsigned int *)POWERON_TYPE_BASE;
+ if(POWER_ON_AMT ==power_typ) {
+ zDrvTsc_SetTscEn(0);
+ printk("POWER_ON_AMT set tscen= 0.\n");
+ }
+ }
+
+ printk("zx_tsc_init end.\n");
+
+ return 0;
+ }
+
+ static int __init zx_softtsc_init(void)
+ {
+ cpps_init2_register(zx_tsc_init);
+ return 0;
+ }
+
+
+late_initcall_sync(zx_softtsc_init);
+
+
+
+void tsc_set_temp_data_info(Ts_TsCtrl_Probe ProbeNum)
+{
+ tsc_lock();
+ if(ProbeNum == PROBE_ADC1)
+ {
+ g_ts_data_info[PROBE_ADC1][TS_MEMBER_PROBE] = PROBE_MAX;
+ g_ts_data_info[PROBE_ADC1][TS_MEMBER_TEMP] = 0xFF;
+ }
+ else if(ProbeNum == PROBE_ADC2)
+ {
+ g_ts_data_info[PROBE_ADC2][TS_MEMBER_PROBE] = PROBE_MAX;
+ g_ts_data_info[PROBE_ADC2][TS_MEMBER_TEMP] = 0xFF;
+ }
+ else if(ProbeNum == PROBE_ADCRF)
+ {
+ g_ts_data_info[PROBE_ADCRF][TS_MEMBER_PROBE] = PROBE_MAX;
+ g_ts_data_info[PROBE_ADCRF][TS_MEMBER_TEMP] = 0xFF;
+ }
+ else
+ {
+ if(g_tsc_print_log_debug)
+ tsc_print_log("clear array empty.\n");
+ }
+ tsc_unlock();
+}
+
+/*get buffer first address*/
+void tsc_get_temp_data_info(u32 TsDataInfo[][PROBE_INFO])
+{
+ int i,j;
+ tsc_lock();
+ for(i=0;i<PROBE_NUM;i++)
+ {
+ for(j=0;j<PROBE_INFO;j++)
+ {
+ TsDataInfo[i][j] = g_ts_data_info[i][j];
+ }
+ }
+ tsc_unlock();
+}
+
+s32 zDrvTsc_SetTscEn(u8 val)
+{
+ tsc_set_detect_en(val);
+ return 0;
+
+}
+EXPORT_SYMBOL(zDrvTsc_SetTscEn);
+
+s32 zDrvTsc_GetTscEn(void)
+{
+ return tsc_get_detect_en();
+}
+EXPORT_SYMBOL(zDrvTsc_GetTscEn);
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc.h b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc.h
new file mode 100644
index 0000000..fd38957
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/soc/zte/tsc/zx-tsc.h
@@ -0,0 +1,211 @@
+/*
+ *
+ *
+ * Copyright (c) 2013, ZTE Corporation.
+ * written by SWQ
+ *
+ */
+
+#ifndef __ZX_TSC_H
+#define __ZX_TSC_H
+
+/****************************************************************************
+* Include files
+****************************************************************************/
+
+
+/****************************************************************************
+* Types
+****************************************************************************/
+ typedef enum _T_TsCtrl_Probe
+{
+ PROBE_ADC1 = 0,
+ PROBE_ADC2,
+ PROBE_ADCRF,
+ PROBE_RESEV2,
+ PROBE_RESEV3,
+ PROBE_RESEV4,
+
+ PROBE_MAX,
+} Ts_TsCtrl_Probe;
+
+ typedef enum
+ {
+ TS_TEMP_VALUE_TABLE_NUMBER = 0,
+ TS_TEMP_VOLTAGE_TABLE_NUMBER = 1,
+ TS_TEMP_TABLE_NUMBER_MAX
+ }TS_TEMP_TABLE_NUMBER;
+
+ typedef enum _T_Ts_Member
+ {
+ TS_MEMBER_PROBE = 0,
+ TS_MEMBER_TEMP,
+
+ TS_MEMBER_MAX,
+ } Ts_Member;
+
+typedef enum _T_TsCtrl_Strategy_Id
+{
+ PS_STRATEGY_RATE = 0,
+ PS_STRATEGY_ANYRESIDENT,
+ WIFI_STRATEGY,
+ CHARGER_STRATEGY,
+ AP_RATE,
+ MAX_TSCTRL_STRATEGY_ID
+} T_TsCtrl_Strategy_ModuleId;
+
+/**************************************************
+ 0--STRTEGY_STOP: stop the strategy
+ 1--STRTEGY_START: start the strategy
+ 2--STRTEGY_HOLD: hold the previously applied strategy
+**************************************************/
+ typedef enum _T_TsCtrl_Strategy
+ {
+ STRTEGY_STOP = 0,
+ STRTEGY_START=1,
+ STRTEGY_HOLD=2,
+
+ STRTEGY_MAX,
+ } Ts_TsCtrlStrategy;
+
+/****************************************************************************
+* macro define
+****************************************************************************/
+//#define tsc_read_reg(addr) (*(volatile unsigned long*)(addr))
+//#define tsc_write_reg(addr, val) ((*(volatile unsigned long*)(addr)) = val)
+
+#define tsc_read_reg zx_read_reg
+#define tsc_write_reg zx_write_reg
+/**/
+#define STRATEGY_PHY_NUM 8
+#define BITS_FOR_PHYIRAM 1
+/*TSCTRL_PHY iram: each bit indicates whether one PHY strategy is active*/
+#define BIT_LIMIT_LTE_DOWNRATE1 0
+#define BIT_LIMIT_LTE_DOWNRATE2 1
+#define BIT_LIMIT_W_DOWNRATE1 2
+#define BIT_LIMIT_W_DOWNRATE2 3
+#define BIT_LIMIT_LTE_UPTRANSIMITPOWER1 4
+#define BIT_LIMIT_LTE_UPTRANSIMITPOWER2 5
+#define BIT_LIMIT_W_UPTRANSIMITPOWER1 6
+#define BIT_LIMIT_W_UPTRANSIMITPOWER2 7
+/**/
+//#define STRATEGY_PS_NUM 2
+#define BITS_FOR_PSIRAM 4
+/*TSCTRL_PS iram: every 4 bits indicate whether one PS strategy is active*/
+#define BIT_PS_RATE 0
+#define BIT_PS_ANYRESIDENT 4
+#define BIT_SHUTDOWN 8
+/**/
+//#define STRATEGY_PERIP_NUM 2
+#define BITS_FOR_PEPIPIRAM 4
+/*TSCTRL_PERIP iram: every 4 bits indicate whether one peripheral strategy is active*/
+#define BIT_WIFI 0
+#define BIT_CHHRGER 4
+#define BIT_APRATE 8
+
+/**/
+//#define STRATEGY_AP_NUM 2
+#define BITS_FOR_APPIRAM 4
+/*TSCTRL_AP iram: every 4 bits indicate whether one AP strategy is active*/
+
+
+/*TSCTRL_PHY iramInfo: each bit indicates whether one PHY strategy is active
+bit0:limit_ltedownrate1 1: limit LTE downlink rate level 1; 0: stop limiting LTE downlink rate level 1
+bit1:limit_ltedownrate2 1: limit LTE downlink rate level 2; 0: stop limiting LTE downlink rate level 2
+bit2:limit_wdownrate1 1: limit W downlink rate level 1; 0: stop limiting W downlink rate level 1
+bit3:limit_wdownrate2 1: limit W downlink rate level 2; 0: stop limiting W downlink rate level 2
+bit4:limit_lteuptransmitrate1 1: limit LTE uplink transmit power level 1; 0: stop limiting LTE uplink transmit power level 1
+bit5:limit_lteuptransmitrate2 1: limit LTE uplink transmit power level 2; 0: stop limiting LTE uplink transmit power level 2
+bit6:limit_wuptransmitrate1 1: limit W uplink transmit power level 1; 0: stop limiting W uplink transmit power level 1
+bit7:limit_wuptransmitrate2 1: limit W uplink transmit power level 2; 0: stop limiting W uplink transmit power level 2
+*/
+
+#define TSCTRL_PHY (IRAM_TSC_BASE+0x00)/* 1K, IRAM_BASE_ADDR_TPC -- +0x400: thermal-control data storage */
+
+/*TSCTRL_PHY+0x04--TSCTRL_PHY+0x44
+every 4 bits indicate whether one probe requires the strategy to run: 1 run, 0 do not run, 2 HOLD*/
+#define TSCTRL_PS (IRAM_TSC_BASE+0x04)
+#define TSCTRL_AP (IRAM_TSC_BASE+0x08)
+#define TSCTRL_PERIP (IRAM_TSC_BASE+0x0C)
+
+/*every 4 bits indicate whether one probe requires the strategy to run: 1 run, 0 do not run, 2 HOLD*/
+#define TSCTRL_LIMIT_LTE_DOWNRATE1 (IRAM_TSC_BASE+0x10) /*strategy A*/
+#define TSCTRL_LIMIT_LTE_DOWNRATE2 (IRAM_TSC_BASE+0x14) /*strategy B*/
+#define TSCTRL_LIMIT_W_DOWNRATE1 (IRAM_TSC_BASE+0x18) /*strategy A*/
+#define TSCTRL_LIMIT_W_DOWNRATE2 (IRAM_TSC_BASE+0x1c) /*strategy B*/
+#define TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER1 (IRAM_TSC_BASE+0x20) /*strategy C*/
+#define TSCTRL_LIMIT_LTE_UPTRANSIMITPOWER2 (IRAM_TSC_BASE+0x24) /*strategy D*/
+#define TSCTRL_LIMIT_W_UPTRANSIMITPOWER1 (IRAM_TSC_BASE+0x28) /*strategy C*/
+#define TSCTRL_LIMIT_W_UPTRANSIMITPOWER2 (IRAM_TSC_BASE+0x2c) /*strategy D*/
+#define TSCTRL_PS_RATE (IRAM_TSC_BASE+0x30) /*strategy E*/
+#define TSCTRL_PS_ANYRESIDENT (IRAM_TSC_BASE+0x34) /*strategy F*/
+#define TSCTRL_SHUTDOWN (IRAM_TSC_BASE+0x38) /*strategy G*/
+#define TSCTRL_WIFI (IRAM_TSC_BASE+0x3c) /*strategy F*/
+#define TSCTRL_CHARGER (IRAM_TSC_BASE+0x40) /*strategy F*/
+#define TSCTRL_APRATE (IRAM_TSC_BASE+0x44) /*strategy F*/
+#define TSCTRL_DFS (IRAM_TSC_BASE+0x48) /*strategy DFS*/
+
+/*starting bit position, inside each strategy IRAM word, of each probe's on/off field*/
+#define BITS_FOR_PROBES 4
+#define BIT_PROBE_ADC1 (PROBE_ADC1*BITS_FOR_PROBES)
+#define BIT_PROBE_ADC2 (PROBE_ADC2*BITS_FOR_PROBES)
+#define BIT_PROBE_ADCRF (PROBE_ADCRF*BITS_FOR_PROBES)
+#define BIT_PROBE_RESEV2 (PROBE_RESEV2*BITS_FOR_PROBES)
+#define BIT_PROBE_RESEV3 (PROBE_RESEV3*BITS_FOR_PROBES)
+#define BIT_PROBE_RESEV4 (PROBE_RESEV4*BITS_FOR_PROBES)
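+/* With BITS_FOR_PROBES == 4 this gives BIT_PROBE_ADC1 = 0, BIT_PROBE_ADC2 = 4 and
+ BIT_PROBE_ADCRF = 8, i.e. each strategy word holds one 4-bit stop/start/hold field
+ per probe, which is why tsctrl_set_strategy() tests nibble patterns such as 0x0010
+ and 0x1000 when reading the words back. */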
+
+/*space reserved to pass information from the R7 core to the A9 core, TSCTRL_PHY+0x100--TSCTRL_PHY+0x400*/
+#define TSCTRL_TEMPADC1 (IRAM_TSC_BASE+0x100) /*temp adc1*/
+#define TSCTRL_TEMPADC2 (IRAM_TSC_BASE+0x104) /*temp adc2*/
+#define TSCTRL_TEMPADCRF (IRAM_TSC_BASE+0x108) /*temp adcrf*/
+#define TSCTRL_TEMPREV2 (IRAM_TSC_BASE+0x10c) /*temp rev2*/
+#define TSCTRL_TEMPREV3 (IRAM_TSC_BASE+0x110) /*temp rev3*/
+#define TSCTRL_TEMPREV4 (IRAM_TSC_BASE+0x114) /*temp rev4*/
+#define TSCTRL_DETECT_EN (IRAM_TSC_BASE+0x118) /*TsNvData.DetectEn*/
+#define TSCTRL_TEMP_PERCENT (IRAM_TSC_BASE+0x11c) /*APrate ,g_tempPercent*/
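+/* TSCTRL_TEMP_PERCENT is written by zDrvTsc_GetTscTempPercent() with the quantised
+ temp_percent computed by the probe strategies, and read back by tsc_get_tempPercent()
+ for the /sys/zx_tsc/tsc_percent node, so user space sees the same percentage. */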
+
+
+#define PS_CORE_SEL_REG (ZX_MATRIX_CRM_BASE+0x20)/*ps core clk sel*/
+
+/**/
+#define PROBE_NUM PROBE_MAX
+#define PROBE_INFO 2 //fixed value, probe num and temp
+//#define TS_ADC_TEMP_NUMBER 2
+//#define TS_ADC_TEMP_VOLTAGE_NUMBER 110
+#define TS_TEMP_NUMBER_SMALLEST 0
+#define TS_TEMP_NUMBER_BIGGEST 96
+
+#define TEMP_PERCENT_INTERVAL (10) //temperature-percentage quantisation step
+
+// timing parameters
+#define TS_TEMP_DETECT_BEGIN_TIME (60 * 1000) //60s , the begin time to detect temp
+#define TS_STRATEGY_SET_BEGIN_TIME (61 * 1000) //61s , the begin time to start temp strategy
+#define TS_POLLING_TIME (20 * 1000) //20s
+#define TS_PROBE_OFFSET 2 /*starts at bit 2*/
+#define NV_TEMP_VALUE_NUMBERS 7
+
+
+#define TSC_DEBUG_DEF 1
+
+#if TSC_DEBUG_DEF
+#define tsc_print_log(fmt, args...) \
+{ \
+ printk(KERN_INFO "[tsc] " fmt, ##args); \
+}
+#else
+#define tsc_print_log(fmt, args...) {}
+#endif
+
+ /****************************************************************************
+* fuction extern
+****************************************************************************/
+void tsctrl_init(void);
+void tsc_get_temp_data_info(u32 TsDataInfo[][PROBE_INFO]);
+typedef void (* T_TsCtrl_CallbackFunction)( u8 en ); /*en: 1 start strategy, 0 stop strategy, 2 hold strategy*/
+s32 zDrvTsCtrl_RegCallback(T_TsCtrl_Strategy_ModuleId module,T_TsCtrl_CallbackFunction callback);
+s32 zDrvTsc_SetTscEn(u8 val); //val: 0xB2 enables thermal control, any other value disables it
+s32 zDrvTsc_GetTscEn(void);
+u32 zDrvTsCtrl_DfsEn(void);
+
+#endif /* __ZX_TSC_H */
+