[Feature][ZXW-88] Merge P50 version
Only Configure: No
Affected branch: master
Affected module: unknown
Is it affected on both ZXIC and MTK: only ZXIC
Self-test: Yes
Doc Update: No
Change-Id: I34667719d9e0e7e29e8e4368848601cde0a48408
diff --git a/ap/os/linux/linux-3.4.x/drivers/cpko/cpko_main.c b/ap/os/linux/linux-3.4.x/drivers/cpko/cpko_main.c
index 9808d24..09782a0 100755
--- a/ap/os/linux/linux-3.4.x/drivers/cpko/cpko_main.c
+++ b/ap/os/linux/linux-3.4.x/drivers/cpko/cpko_main.c
@@ -82,31 +82,30 @@
__comm_modem_text_start;unsigned int modem_text_end;unsigned int cpko_data_start
;unsigned int cpko_bss_start;unsigned int cpko_text_offset;}cpko_section_layout;
cpko_section_layout cpko_ps_section;int raise(int signo){return
-(0x588+2748-0x1044);}extern unsigned int SysEntry(void);static int
+(0x1019+2285-0x1906);}extern unsigned int SysEntry(void);static int
ko_Main_Thread(void*data){struct sched_param param={.sched_priority=
-MAX_USER_RT_PRIO/(0x1aa2+478-0x1c7e)-(0xb4b+3818-0x1a32)};int ret=
-(0x985+3929-0x18de);sched_setscheduler(current,SCHED_FIFO,¶m);ret=SysEntry()
-;if(ret!=(0xacd+2158-0x133b))panic("Main_Thread\n");param.sched_priority=
-MAX_USER_RT_PRIO-(0x1194+2176-0x19e6);sched_setscheduler(kthreadd_task,
-SCHED_FIFO,¶m);return(0x1e56+83-0x1ea9);}int zte_modem_ko_start(void){
-kthread_run(ko_Main_Thread,NULL,
-"\x5a\x54\x45\x4d\x61\x69\x6e\x54\x68\x72\x65\x61\x64");return
-(0x1109+1789-0x1806);}static void cpko_sectioninfo_set(void){int ret;struct file
-*fp;mm_segment_t old_fs;loff_t cpko_pos=(0x16fb+814-0x1a29);struct
+MAX_USER_RT_PRIO/(0x1e05+849-0x2154)-(0x163f+244-0x1730)};int ret=
+(0x6a0+1651-0xd13);sched_setscheduler(current,SCHED_FIFO,¶m);ret=SysEntry();
+if(ret!=(0x1c59+477-0x1e36))panic("Main_Thread\n");param.sched_priority=
+MAX_USER_RT_PRIO-(0xa62+2452-0x13c8);sched_setscheduler(kthreadd_task,SCHED_FIFO
+,¶m);return(0x1874+837-0x1bb9);}int zte_modem_ko_start(void){kthread_run(
+ko_Main_Thread,NULL,"\x5a\x54\x45\x4d\x61\x69\x6e\x54\x68\x72\x65\x61\x64");
+return(0x16ed+2471-0x2094);}static void cpko_sectioninfo_set(void){int ret;
+struct file*fp;mm_segment_t old_fs;loff_t cpko_pos=(0x82b+1876-0xf7f);struct
cpps_globalModem globalVar;fp=filp_open(
"\x2f\x6c\x69\x62\x2f\x63\x70\x6b\x6f\x2f\x63\x70\x6b\x6f\x5f\x73\x65\x63\x69\x6e\x66\x6f\x2e\x62\x69\x6e"
-,(0x2fa+1778-0x9ec),(0x13ec+700-0x16a8));if(IS_ERR(fp)||fp==NULL)panic(
+,(0xe32+3591-0x1c39),(0xe13+5426-0x2345));if(IS_ERR(fp)||fp==NULL)panic(
"\x6f\x70\x65\x6e\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");old_fs=
get_fs();set_fs(KERNEL_DS);ret=vfs_read(fp,(char*)&cpko_ps_section,sizeof(
-cpko_section_layout),&cpko_pos);if(ret<=(0xd62+2480-0x1712))panic(
+cpko_section_layout),&cpko_pos);if(ret<=(0x16f9+2110-0x1f37))panic(
"\x72\x65\x61\x64\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");filp_close(
fp,NULL);
#ifdef CONFIG_MODEM_CODE_IS_MAPPING
fp=filp_open(
"\x2f\x6c\x69\x62\x2f\x63\x70\x6b\x6f\x2f\x63\x70\x6b\x6f\x2e\x6b\x6f",
-(0x172f+1154-0x1bb1),(0xc82+3654-0x1ac8));if(IS_ERR(fp)||fp==NULL)panic(
+(0xa57+3978-0x19e1),(0x1938+3499-0x26e3));if(IS_ERR(fp)||fp==NULL)panic(
"\x6f\x70\x65\x6e\x20\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");fp->f_ra.
-ra_pages=(0xb17+2019-0x12fa);
+ra_pages=(0x90+7657-0x1e79);
#endif
if(cpko_ps_section.cpko_text_start){globalVar.cpko_text_start=(unsigned long)
cpko_ps_section.cpko_text_start;globalVar.cpko_rodata_start=(unsigned long)
@@ -126,7 +125,7 @@
vfree_modem_section(globalVar.cpko_text_start,globalVar.modem_text_end);
#endif
}else panic("\x66\x69\x6c\x65\x20\x65\x72\x72\x6f\x72" "\n");}static int
-cpko_start(void){struct cpps_callbacks callback={(0x800+2644-0x1254)};callback.
+cpko_start(void){struct cpps_callbacks callback={(0xfcd+1554-0x15df)};callback.
zOss_ResetNVFactory=zOss_ResetNVFactory;callback.zOss_NvramFlush=zOss_NvramFlush
;callback.zOss_NvItemWrite=zOss_NvItemWrite;callback.zOss_NvItemWriteFactory=
zOss_NvItemWriteFactory;callback.zOss_NvItemRead=zOss_NvItemRead;callback.
@@ -196,5 +195,5 @@
psm_GetModemSleepFlagStatus=psm_GetModemSleepFlagStatus;
#endif
cpps_callbacks_register(&callback);cpko_sectioninfo_set();zte_modem_ko_start();
-return(0x990+7510-0x26e6);}static int cpko_stop(void){return(0xe70+451-0x1033);}
-module_init(cpko_start);module_exit(cpko_stop);
+return(0x99a+2509-0x1367);}static int cpko_stop(void){return(0xa38+1976-0x11f0);
+}module_init(cpko_start);module_exit(cpko_stop);
diff --git a/ap/os/linux/linux-3.4.x/drivers/mfd/zx234290-core.c b/ap/os/linux/linux-3.4.x/drivers/mfd/zx234290-core.c
old mode 100644
new mode 100755
index 7fc94b3..21fda0e
--- a/ap/os/linux/linux-3.4.x/drivers/mfd/zx234290-core.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mfd/zx234290-core.c
@@ -576,7 +576,9 @@
}
/*************************/
-#ifndef AP_NO_NEED_PMU_INT_FOR_PHONE
+#if (defined AP_NO_NEED_PMU_INT_FOR_PHONE) || (defined _USE_VEHICLE_DC)
+
+#else
//init_data->irq = pmic_plat_data->irq;
//init_data->irq_base = pmic_plat_data->irq_base;
irq = gpio_to_irq(pmic_plat_data->irq_gpio_num);
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/core/mmc.c b/ap/os/linux/linux-3.4.x/drivers/mmc/core/mmc.c
old mode 100644
new mode 100755
index 6d74683..d79088d
--- a/ap/os/linux/linux-3.4.x/drivers/mmc/core/mmc.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/core/mmc.c
@@ -22,6 +22,7 @@
#include "bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
+#include "pub_debug_info.h"
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
@@ -1537,6 +1538,8 @@
host->card = NULL;
err:
mmc_detach_bus(host);
+ sc_debug_info_record(MODULE_ID_AP_MMC, "%s: error %d whilst initialising MMC card\n",
+ mmc_hostname(host),err);
pr_err("%s: error %d whilst initialising MMC card\n",
mmc_hostname(host), err);
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/core/sdio.c b/ap/os/linux/linux-3.4.x/drivers/mmc/core/sdio.c
old mode 100644
new mode 100755
index 4bcac91..81a5f51
--- a/ap/os/linux/linux-3.4.x/drivers/mmc/core/sdio.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/core/sdio.c
@@ -30,6 +30,7 @@
#include "sd_ops.h"
#include "sdio_ops.h"
#include "sdio_cis.h"
+#include "pub_debug_info.h"
#ifdef CONFIG_MMC_EMBEDDED_SDIO
#include <linux/mmc/sdio_ids.h>
@@ -1267,6 +1268,9 @@
err:
mmc_printk("%s: Error resetting SDIO communications (%d)\n",
mmc_hostname(host), err); //xiu
+ sc_debug_info_record(MODULE_ID_AP_MMC, "%s: Error resetting SDIO communications (%d)\n",
+ mmc_hostname(host),err);
+
mmc_release_host(host);
return err;
}
diff --git a/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc.c b/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc.c
old mode 100644
new mode 100755
index b1a7a72..425d6a7
--- a/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc.c
+++ b/ap/os/linux/linux-3.4.x/drivers/mmc/host/zx29_mmc.c
@@ -50,6 +50,7 @@
#include <linux/gpio.h>
//#include "dw_mmc.h"
#include "zx29_mmc.h"
+#include "pub_debug_info.h"
#if (defined CONFIG_ARCH_ZX297520V2)||(defined CONFIG_ARCH_ZX297520V3)
#include <mach/gpio_cfg.h>
@@ -235,6 +236,9 @@
"ERROR dw_dto_timer_expired", host->dw_dto_timer_expired_cnt, host->completed_events, host->state);
xlog_mmc_log_opkey(host->host_id, opcode,
"ERROR dw_dto_timer_expired", mci_readl(host, STATUS), mci_readl(host,IDSTS), mci_readl(host, RINTSTS));
+ sc_debug_info_record(MODULE_ID_AP_MMC, "ERROR dw_dto_timer_expired cnt=%d,STATUS=0x%x\n",
+ host->dw_dto_timer_expired_cnt,mci_readl(host, STATUS));
+
/*
mmc_printk("ERROR %s,STATUS=0x%x,IDSTS=0x%x, RINTSTS=0x%x,MINTSTS=0x%x\n", __func__,
mci_readl(host, STATUS),mci_readl(host,IDSTS),mci_readl(host, RINTSTS),mci_readl(host, MINTSTS)); //xiu
@@ -1605,7 +1609,9 @@
if (!(cmd_status & SDMMC_CMD_START))
return 0;
}
- xlog_mmc_log(host->host_id, "INFO mci_send_cmd TIMEOUT", cmd, arg, cmd_status);
+ xlog_mmc_log(host->host_id, "INFO mci_send_cmd TIMEOUT", cmd, arg, cmd_status);
+ sc_debug_info_record(MODULE_ID_AP_MMC, "Timeout sending command (cmd %#x arg %#x status %#x)\n",
+ cmd, arg, cmd_status);
dev_err(&slot->mmc->class_dev,
"Timeout sending command (cmd %#x arg %#x status %#x)\n",
cmd, arg, cmd_status);
@@ -1713,6 +1719,7 @@
} while (time_before(jiffies, timeout));
dev_err(host->dev, "%s: SDIO device busy timeout,status is 0x%08x\n",__func__,status);
+ sc_debug_info_record(MODULE_ID_AP_MMC,"%s(%d): timeout,status is 0x%08x\n",__func__,host->host_id,status);
return -1;
@@ -2277,14 +2284,20 @@
}
}
- if (status & SDMMC_INT_HLE)
- cmd->error = -EIO;
+ if (status & SDMMC_INT_HLE){
+ cmd->error = -EIO;
+ sc_debug_info_record(MODULE_ID_AP_MMC,"%s(%d): cmd->error=0x%x\n",__func__,host->host_id,cmd->error);
+ }
else if (status & SDMMC_INT_RTO)
cmd->error = -ETIMEDOUT;
- else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
+ else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
cmd->error = -EILSEQ;
- else if (status & SDMMC_INT_RESP_ERR)
- cmd->error = -EIO;
+ sc_debug_info_record(MODULE_ID_AP_MMC,"%s(%d): cmd->error=0x%x\n",__func__,host->host_id,cmd->error);
+ }
+ else if (status & SDMMC_INT_RESP_ERR){
+ cmd->error = -EIO;
+ sc_debug_info_record(MODULE_ID_AP_MMC,"%s(%d): cmd->error=0x%x\n",__func__,host->host_id,cmd->error);
+ }
else
cmd->error = 0;
@@ -2330,7 +2343,8 @@
}
temp = mci_readl(host, STATUS);
- dev_err(host->dev, "DATA ERROR, MINTSTS 0x%X, STATUS 0x%X\n", status, temp);
+ dev_err(host->dev, "DATA ERROR, MINTSTS 0x%X, STATUS 0x%X\n", status, temp);
+ sc_debug_info_record(MODULE_ID_AP_MMC,"DATA ERROR, MINTSTS 0x%X, STATUS 0x%X\n", status, temp);
xlog_mmc_status(host->host_id, data->mrq->cmd->opcode, XLOG_TF_STATUS_DATA_COMPLETE, data->error, status, temp);
if (status & SDMMC_INT_FRUN) {
xlog_mmc_log_opkey(host->host_id, data->mrq->cmd->opcode,
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/ppp/ppp_generic.c b/ap/os/linux/linux-3.4.x/drivers/net/ppp/ppp_generic.c
old mode 100644
new mode 100755
index db2c894..7888719
--- a/ap/os/linux/linux-3.4.x/drivers/net/ppp/ppp_generic.c
+++ b/ap/os/linux/linux-3.4.x/drivers/net/ppp/ppp_generic.c
@@ -2282,8 +2282,9 @@
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
+ //205e1e255c479f3fd77446415706463b282f94e4
+ //put_net(pch->chan_net);
+ //pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
@@ -2890,6 +2891,9 @@
*/
static void ppp_destroy_channel(struct channel *pch)
{
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
+ //205e1e255c479f3fd77446415706463b282f94e4
atomic_dec(&channel_count);
if (!pch->file.dead) {
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/ppp/pppoe.c b/ap/os/linux/linux-3.4.x/drivers/net/ppp/pppoe.c
old mode 100644
new mode 100755
index 98e92cb..79090d0
--- a/ap/os/linux/linux-3.4.x/drivers/net/ppp/pppoe.c
+++ b/ap/os/linux/linux-3.4.x/drivers/net/ppp/pppoe.c
@@ -577,7 +577,9 @@
po = pppox_sk(sk);
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ //if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ //1acea4f6ce1b1c0941438aca75dd2e5c6b09db60
+ if (po->pppoe_dev) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/zvnet/zvnet_dev.c b/ap/os/linux/linux-3.4.x/drivers/net/zvnet/zvnet_dev.c
old mode 100644
new mode 100755
index 723810d..6e22779
--- a/ap/os/linux/linux-3.4.x/drivers/net/zvnet/zvnet_dev.c
+++ b/ap/os/linux/linux-3.4.x/drivers/net/zvnet/zvnet_dev.c
@@ -4,6 +4,10 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/sock.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/addrconf.h>
#include "zvnet_dev.h"
/*******************************************************************************
@@ -413,6 +417,7 @@
}
}
#endif
+extern int pcu_CoreIsActive(int core);
static netdev_tx_t zvnet_xmit(struct sk_buff *skb, struct net_device *net)
{
#ifdef USE_ZVNET_PACKET
@@ -426,7 +431,43 @@
kfree_skb(skb);
return NET_XMIT_SUCCESS;
}
-
+ if(!(skb->now_location & (FASTNAT_SUCC | FASTBR_SUCC))){
+ struct iphdr *iph;
+ iph = ip_hdr(skb);
+ if (iph->version == 4){
+ if (ipv4_is_multicast(iph->daddr) || ipv4_is_lbcast(iph->daddr)){
+ if(pcu_CoreIsActive(2) == 0){
+ net->stats.tx_errors++;
+ net->stats.tx_dropped++;
+ zv_err("cap psm zvnet drop v4!");
+ kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+ }
+ }else if (iph->version == 6){
+ if(ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)){
+ if(pcu_CoreIsActive(2) == 0){
+ net->stats.tx_errors++;
+ net->stats.tx_dropped++;
+ zv_err("cap psm zvnet drop v6!");
+ kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+ }
+ }else{
+#if 0
+ if(is_multicast_ether_addr(skb_mac_header(skb))){
+ if(pcu_CoreIsActive(2) == 0){
+ net->stats.tx_errors++;
+ net->stats.tx_dropped++;
+ zv_err("cap psm zvnet drop arp?");
+ kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+ }
+#endif
+ }
+ }
if(unlikely(skb->capHead || skb->next || skb->fclone || skb->cloned
|| (skb_shinfo(skb)->nr_frags) || (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
|| (skb_has_frag_list(skb)))){
diff --git a/ap/os/linux/linux-3.4.x/drivers/rtc/zx234290-rtc.c b/ap/os/linux/linux-3.4.x/drivers/rtc/zx234290-rtc.c
old mode 100644
new mode 100755
index ddfa780..54428db
--- a/ap/os/linux/linux-3.4.x/drivers/rtc/zx234290-rtc.c
+++ b/ap/os/linux/linux-3.4.x/drivers/rtc/zx234290-rtc.c
@@ -1111,6 +1111,9 @@
rtc->max_user_freq = 64; //32768;//32k clock
zx234290_rtc_irqno = zx234290_rtc.zx234290->chip_irq;
platform_set_drvdata(pdev, rtc);
+#ifdef _USE_VEHICLE_DC
+ printk("rtc get time only probe ok!\n");
+#else
ret = zx234290_rtc_request_irq(pdev, rtc);
if (ret)
{
@@ -1120,6 +1123,8 @@
zx234290_rtc_write_register(ZX234290_REG_ADDR_RTC_CTRL1, 0, 0xff);
zx234290_rtc_setuie(&pdev->dev, 0);
zx234290_rtc_enable(&pdev->dev, 0);
+#endif
+
sema_init(&timerSemaphore,0);
//zx234290_rtc_ioctl(NULL,ZX234290_SET_TIMER,20);
return 0;
diff --git a/ap/os/linux/linux-3.4.x/drivers/rtc/zx234290-rtc.h b/ap/os/linux/linux-3.4.x/drivers/rtc/zx234290-rtc.h
old mode 100644
new mode 100755
index 4e68c8e..3233c6e
--- a/ap/os/linux/linux-3.4.x/drivers/rtc/zx234290-rtc.h
+++ b/ap/os/linux/linux-3.4.x/drivers/rtc/zx234290-rtc.h
@@ -42,7 +42,7 @@
#define ZX234290_RTC_TIME_CENTURY_WID (1)
#define ZX234290_RTC_TIME_YEAR_LSH (0)
-#define ZX234290_RTC_TIME_YEAR_WID (7)
+#define ZX234290_RTC_TIME_YEAR_WID (8)
#define ZX234290_RTC_TIME_WEEKDAY_LSH (0)
#define ZX234290_RTC_TIME_WEEKDAY_WID (3)
diff --git a/ap/os/linux/linux-3.4.x/drivers/staging/android/Kconfig b/ap/os/linux/linux-3.4.x/drivers/staging/android/Kconfig
old mode 100644
new mode 100755
index 43d17c2..e66333a
--- a/ap/os/linux/linux-3.4.x/drivers/staging/android/Kconfig
+++ b/ap/os/linux/linux-3.4.x/drivers/staging/android/Kconfig
@@ -87,6 +87,10 @@
elapsed realtime, and a non-wakeup alarm on the monotonic clock.
Also exports the alarm interface to user-space.
+config ANDROID_BINDER_RPC
+ bool "Android Binder RPC Driver"
+ default n
+
endif # if ANDROID
endmenu
diff --git a/ap/os/linux/linux-3.4.x/drivers/staging/android/Makefile b/ap/os/linux/linux-3.4.x/drivers/staging/android/Makefile
old mode 100644
new mode 100755
index 8769e32..8f9caaf
--- a/ap/os/linux/linux-3.4.x/drivers/staging/android/Makefile
+++ b/ap/os/linux/linux-3.4.x/drivers/staging/android/Makefile
@@ -11,5 +11,6 @@
obj-$(CONFIG_ANDROID_SWITCH) += switch/
obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o
obj-$(CONFIG_PERSISTENT_TRACER) += trace_persistent.o
+obj-$(CONFIG_ANDROID_BINDER_RPC) += binder_rpc.o
CFLAGS_REMOVE_trace_persistent.o = -pg
diff --git a/ap/os/linux/linux-3.4.x/drivers/staging/android/binder.c b/ap/os/linux/linux-3.4.x/drivers/staging/android/binder.c
old mode 100644
new mode 100755
index c831dcc..aab6c15
--- a/ap/os/linux/linux-3.4.x/drivers/staging/android/binder.c
+++ b/ap/os/linux/linux-3.4.x/drivers/staging/android/binder.c
@@ -36,6 +36,13 @@
#include "binder.h"
#include "binder_trace.h"
+#ifdef CONFIG_ANDROID_BINDER_RPC
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <asm-generic/mman-common.h>
+
+#include "binder_rpc.h"
+#endif
static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
@@ -51,7 +58,38 @@
static uid_t binder_context_mgr_uid = -1;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
+#ifdef CONFIG_ANDROID_BINDER_RPC
+/***********************************************************************
+ BINDER RPC DEFINE START
+***********************************************************************/
+#define BINDER_RPC_IGNORE_PRIV (0xFFFFFFFF)
+struct binder_transaction;
+struct binder_proc;
+struct binder_thread;
+struct binder_transaction_entry {
+ struct list_head list;
+ struct binder_transaction *t;
+};
+struct binder_rpc_operations_struct binder_rpc_ops;
+
+struct binder_proc *rpc_own_proc = NULL;
+struct binder_proc *rpc_proc = NULL;
+struct binder_thread *rpc_thread = NULL;
+
+static int binder_init_rpc(struct binder_proc *own_proc);
+static int binder_deinit_rpc(void);
+static struct binder_proc *binder_get_rpcproc(void);
+static struct binder_thread *binder_get_rpcthread(void);
+static void binder_rpc_dequeue_todo(struct binder_transaction *t,
+ struct binder_thread *target_thread);
+static void binder_rpc_enqueue_todo(struct binder_transaction *t,
+ struct binder_thread *target_thread);
+
+/***********************************************************************
+ BINDER RPC DEFINE END
+***********************************************************************/
+#endif
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
@@ -177,6 +215,8 @@
int to_node;
int data_size;
int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
};
struct binder_transaction_log {
int next;
@@ -358,6 +398,10 @@
long priority;
long saved_priority;
uid_t sender_euid;
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ /*¹ØÁªremote tr,ÓÃÓÚÏìӦʱ²éÕÒremote target*/
+ binder_uintptr_t remote_priv;
+#endif
};
static void
@@ -1022,7 +1066,10 @@
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
list_del_init(&node->work.entry);
- list_add_tail(&node->work.entry, target_list);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(target_list != (&(binder_get_rpcthread()->todo)))
+#endif
+ list_add_tail(&node->work.entry, target_list);
}
} else {
if (!internal)
@@ -1033,7 +1080,10 @@
"for %d\n", node->debug_id);
return -EINVAL;
}
- list_add_tail(&node->work.entry, target_list);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(target_list != (&(binder_get_rpcthread()->todo)))
+#endif
+ list_add_tail(&node->work.entry, target_list);
}
}
return 0;
@@ -1253,7 +1303,11 @@
static void binder_pop_transaction(struct binder_thread *target_thread,
struct binder_transaction *t)
{
- if (target_thread) {
+ if (target_thread
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ && target_thread != binder_get_rpcthread()
+#endif
+ ) {
BUG_ON(target_thread->transaction_stack != t);
BUG_ON(target_thread->transaction_stack->from != target_thread);
target_thread->transaction_stack =
@@ -1290,7 +1344,21 @@
binder_pop_transaction(target_thread, t);
target_thread->return_error = error_code;
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(target_thread == binder_get_rpcthread()) {
+ struct binder_rpc_operations_info ops_info;
+
+ ops_info.tr = NULL;
+ ops_info.cmd = target_thread->return_error;
+ ops_info.priv = t->remote_priv;
+
+ /*send error to remote thread*/
+ binder_rpc_ops.binder_return_error(&ops_info, 1);
+ }
+#else
wake_up_interruptible(&target_thread->wait);
+#endif
+
} else {
printk(KERN_ERR "binder: reply failed, target "
"thread, %d:%d, has error code %d "
@@ -1411,7 +1479,11 @@
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
- uint32_t return_error;
+ uint32_t return_error;
+ uint32_t return_error_line;
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ u32 fp_handle = 0;
+#endif
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
@@ -1428,6 +1500,7 @@
"with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_empty_call_stack;
}
binder_set_nice(in_reply_to->saved_priority);
@@ -1441,6 +1514,7 @@
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
in_reply_to = NULL;
goto err_bad_call_stack;
}
@@ -1448,9 +1522,14 @@
target_thread = in_reply_to->from;
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
- if (target_thread->transaction_stack != in_reply_to) {
+ if (target_thread->transaction_stack != in_reply_to
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ && target_thread != binder_get_rpcthread()
+#endif
+ ) {
binder_user_error("binder: %d:%d got reply transaction "
"with bad target transaction stack %d, "
"expected %d\n",
@@ -1459,6 +1538,7 @@
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
@@ -1473,6 +1553,7 @@
"transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
target_node = ref->node;
@@ -1480,6 +1561,7 @@
target_node = binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
}
@@ -1487,9 +1569,19 @@
target_proc = target_node->proc;
if (target_proc == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
- if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(target_proc == binder_get_rpcproc()) {
+ target_thread = binder_get_rpcthread();
+ }
+#endif
+ if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ && (thread != binder_get_rpcthread())
+#endif
+ ) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
@@ -1501,6 +1593,7 @@
tmp->to_thread ?
tmp->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
@@ -1524,6 +1617,7 @@
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
@@ -1531,6 +1625,7 @@
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -1572,6 +1667,7 @@
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
@@ -1588,12 +1684,14 @@
binder_user_error("binder: %d:%d got transaction with invalid "
"data ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"offsets ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
@@ -1601,6 +1699,7 @@
"invalid offsets size, %zd\n",
proc->pid, thread->pid, tr->offsets_size);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
off_end = (void *)offp + tr->offsets_size;
@@ -1613,6 +1712,7 @@
"invalid offset, %zd\n",
proc->pid, thread->pid, *offp);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
@@ -1625,6 +1725,7 @@
node = binder_new_node(proc, fp->binder, fp->cookie);
if (node == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_binder_new_node_failed;
}
node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
@@ -1641,6 +1742,7 @@
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_binder_get_ref_for_node_failed;
}
if (fp->type == BINDER_TYPE_BINDER)
@@ -1656,6 +1758,39 @@
" node %d u%p -> ref %d desc %d\n",
node->debug_id, node->ptr, ref->debug_id,
ref->desc);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ /*1.service add (tr->target.handle == 0 && tr->code == 3)
+ *2.client add listen (target_thread == rpc_thread)
+ */
+ if((proc != binder_get_rpcproc() || thread != binder_get_rpcthread()) &&
+ NULL != binder_rpc_ops.binder_broadcast) {
+ struct binder_rpc_operations_info ops_info;
+
+ fp_handle = fp->handle;
+ /*tagret isn't rpc_proc. err?*/
+ /**/
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,"%s %d 0x%x debug_id:%d\n",__FUNCTION__,fp->handle, fp->type, node->debug_id);
+ if(NULL != node &&
+ ((tr->target.handle == 0 && tr->code == ADD_SERVICE_TRANSACTION))) {
+ ref = binder_get_ref_for_node(binder_get_rpcproc(), node);
+ if (NULL != ref) {
+ fp_handle = ref->desc;
+
+ ops_info.cmd = BC_TRANSACTION;
+ ops_info.tr = tr;
+ ops_info.priv = BINDER_RPC_IGNORE_PRIV;
+
+ if(0 != binder_rpc_ops.binder_broadcast(&ops_info, fp_handle)) {
+ binder_delete_ref(ref);
+ binder_user_error("%s binder_broadcast fail. \n",__FUNCTION__);
+ }
+ }
+ else {
+ binder_user_error("%s binder_get_ref_for_node fail,\n",__FUNCTION__);
+ }
+ }
+ }
+#endif
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
@@ -1666,6 +1801,7 @@
"handle, %ld\n", proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_binder_get_ref_failed;
}
if (ref->node->proc == target_proc) {
@@ -1686,6 +1822,7 @@
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_binder_get_ref_for_node_failed;
}
fp->handle = new_ref->desc;
@@ -1708,12 +1845,14 @@
binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_fd_not_allowed;
}
} else if (!target_node->accept_fds) {
binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_fd_not_allowed;
}
@@ -1722,12 +1861,14 @@
binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_fget_failed;
}
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
fput(file);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_get_unused_fd_failed;
}
task_fd_install(target_proc, target_fd, file);
@@ -1743,17 +1884,79 @@
"n with invalid object type, %lx\n",
proc->pid, thread->pid, fp->type);
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_bad_object_type;
}
}
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
- binder_pop_transaction(target_thread, in_reply_to);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(target_thread == binder_get_rpcthread() && target_proc == binder_get_rpcproc()) {
+ binder_rpc_dequeue_todo(in_reply_to, target_thread);
+ if(NULL != binder_rpc_ops.binder_reply) {
+ struct binder_rpc_operations_info ops_info;
+ struct binder_buffer *tmp_buffer = t->buffer;
+
+ ops_info.cmd = BC_REPLY;
+ ops_info.tr = tr;
+ ops_info.priv = in_reply_to->remote_priv;
+
+ /*free t*/
+ if (tmp_buffer->transaction) {
+ binder_debug(BINDER_DEBUG_TRANSACTION,"%s %d 0x%x\n",__FUNCTION__, __LINE__, tmp_buffer);
+ tmp_buffer->transaction->buffer = NULL;
+ tmp_buffer->transaction = NULL;
+ }
+ if (tmp_buffer->async_transaction && tmp_buffer->target_node) {
+ BUG_ON(!tmp_buffer->target_node->has_async_transaction);
+ if (list_empty(&tmp_buffer->target_node->async_todo))
+ tmp_buffer->target_node->has_async_transaction = 0;
+ else {
+ //list_move_tail(tmp_buffer->target_node->async_todo.next, &target_thread->todo);
+ binder_debug(BINDER_DEBUG_TRANSACTION,"%s %d\n",__FUNCTION__, __LINE__);
+ }
+ }
+ trace_binder_transaction_buffer_release(tmp_buffer);
+
+ //binder_transaction_buffer_release(target_proc, tmp_buffer, NULL);
+
+ //t->buffer->transaction = NULL;
+ binder_free_buf(target_proc, tmp_buffer);
+
+ binder_pop_transaction(NULL, t);
+
+ binder_rpc_ops.binder_reply(&ops_info, fp_handle);
+ }
+
+ binder_pop_transaction(NULL, in_reply_to);
+ }
+ else {
+#endif
+ binder_pop_transaction(target_thread, in_reply_to);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ }
+#endif
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if((target_proc == binder_get_rpcproc() && target_thread == binder_get_rpcthread()) &&
+ NULL != binder_rpc_ops.binder_transaction) {
+ struct binder_node *target_node = t->buffer->target_node;
+ struct binder_rpc_operations_info ops_info;
+
+ ops_info.cmd = BC_TRANSACTION;
+ ops_info.tr = tr;
+ ops_info.priv = (binder_uintptr_t)t;
+
+ binder_rpc_enqueue_todo(t, target_thread);
+ if(0 != binder_rpc_ops.binder_transaction(&ops_info,fp_handle, (void *)target_node->ptr, (void *)target_node->cookie)) {
+ binder_rpc_dequeue_todo(t, target_thread);
+ }
+ }
+#endif
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
@@ -1762,13 +1965,76 @@
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if((target_proc == binder_get_rpcproc() && target_thread == binder_get_rpcthread()) &&
+ NULL != binder_rpc_ops.binder_transaction) {
+ struct binder_node *target_node = t->buffer->target_node;
+ struct binder_rpc_operations_info ops_info;
+ struct binder_buffer *tmp_buffer = t->buffer;
+
+ ops_info.cmd = BC_TRANSACTION;
+ ops_info.tr = tr;
+ ops_info.priv = (binder_uintptr_t)t;
+
+ binder_rpc_enqueue_todo(t, target_thread);
+ if(0 != binder_rpc_ops.binder_transaction(&ops_info,fp_handle, (void *)target_node->ptr, (void *)target_node->cookie)) {
+ binder_rpc_dequeue_todo(t, target_thread);
+ }
+
+ /*free t*/
+ if (tmp_buffer->transaction) {
+ binder_debug(BINDER_DEBUG_TRANSACTION,"%s %d 0x%x\n",__FUNCTION__, __LINE__, tmp_buffer);
+ tmp_buffer->transaction->buffer = NULL;
+ tmp_buffer->transaction = NULL;
+ }
+ if (tmp_buffer->async_transaction && tmp_buffer->target_node) {
+ BUG_ON(!tmp_buffer->target_node->has_async_transaction);
+ if (list_empty(&tmp_buffer->target_node->async_todo))
+ tmp_buffer->target_node->has_async_transaction = 0;
+ else {
+ //list_move_tail(tmp_buffer->target_node->async_todo.next, &target_thread->todo);
+ binder_debug(BINDER_DEBUG_TRANSACTION,"%s %d\n",__FUNCTION__, __LINE__);
+ }
+ }
+ trace_binder_transaction_buffer_release(tmp_buffer);
+
+ binder_free_buf(target_proc, tmp_buffer);
+
+ binder_pop_transaction(NULL, t);
+ }
+#endif
}
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(target_thread != binder_get_rpcthread()) {
+#endif
+ t->work.type = BINDER_WORK_TRANSACTION;
+ list_add_tail(&t->work.entry, target_list);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ }
+#endif
+
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(thread != binder_get_rpcthread()) {
+#endif
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ }
+ else {
+ kfree(tcomplete);
+ }
+#endif
+
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(target_thread != binder_get_rpcthread()) {
+#endif
if (target_wait)
wake_up_interruptible(target_wait);
+
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ }
+#endif
return;
err_get_unused_fd_failed:
@@ -1802,12 +2068,16 @@
tr->data_size, tr->offsets_size);
{
- struct binder_transaction_log_entry *fe;
+ struct binder_transaction_log_entry *fe;
+
+ e->return_error = return_error;
+ e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
}
BUG_ON(thread->return_error != BR_OK);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,"return_error_line %d \n",return_error_line);
if (in_reply_to) {
thread->return_error = BR_TRANSACTION_COMPLETE;
binder_send_failed_reply(in_reply_to, return_error);
@@ -2822,6 +3092,11 @@
if (thread)
thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
binder_unlock(__func__);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(BINDER_SET_CONTEXT_MGR == cmd && NULL == binder_get_rpcproc()) {
+ binder_init_rpc(proc);
+ }
+#endif
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3023,6 +3298,11 @@
struct binder_proc *proc = filp->private_data;
debugfs_remove(proc->debugfs_entry);
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(proc == rpc_own_proc) {
+ binder_deinit_rpc();
+ }
+#endif
return 0;
}
@@ -3061,6 +3341,12 @@
rb_erase(&node->rb_node, &proc->nodes);
list_del_init(&node->work.entry);
binder_release_work(&node->async_todo);
+
+#ifdef CONFIG_ANDROID_BINDER_RPC
+ if(binder_rpc_ops.binder_release) {
+ binder_rpc_ops.binder_release((void *)proc, (void *)node);
+ }
+#endif
if (hlist_empty(&node->refs)) {
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
@@ -3206,6 +3492,854 @@
}
mutex_unlock(&binder_deferred_lock);
}
+#ifdef CONFIG_ANDROID_BINDER_RPC
+/***********************************************************************
+ BINDER RPC API START
+***********************************************************************/
+static int binder_init_rpc(struct binder_proc *own_proc)
+{
+ struct binder_proc *proc;
+ unsigned long unused;
+ unsigned long mmap_base;
+ unsigned long mmap_size;
+
+ struct file *pfile = filp_open("/dev/binder",O_RDWR,0);
+
+	if(IS_ERR_OR_NULL(pfile)) {
+ binder_user_error("%s: filp_open fail.\n", __func__);
+ return -1;
+ }
+ proc = pfile->private_data;
+
+ mmap_size = 16*PAGE_SIZE;
+ mmap_base = do_mmap(pfile, 0, mmap_size,
+ PROT_READ,
+ MAP_PRIVATE | MAP_NORESERVE, 0);
+
+ rpc_proc = proc;
+ rpc_own_proc = own_proc;
+ return binder_rpc_init((void *)proc, pfile, &binder_rpc_ops);
+}
+
+static int binder_deinit_rpc(void)
+{
+ struct binder_proc *proc = binder_get_rpcproc();
+ struct vm_area_struct *vma = proc->vma;
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,"%s rpc proc %d release.\n",__FUNCTION__, proc->pid);
+ binder_rpc_deinit(vma);
+
+ rpc_proc = NULL;
+ rpc_own_proc = NULL;
+ return 0;
+}
+
+static struct binder_proc *binder_get_rpcproc(void)
+{
+ return rpc_proc;
+}
+static struct binder_thread *binder_get_rpcthread(void)
+{
+ return rpc_thread;
+}
+
+/* Add an upstream binder_transaction to the target_thread->todo list. */
+static void binder_rpc_enqueue_todo(struct binder_transaction *t,
+ struct binder_thread *target_thread)
+{
+ struct binder_transaction_entry *entry;
+ struct list_head *target_list = &target_thread->todo;
+
+ BUG_ON(target_list == NULL || t == NULL);
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry == NULL) {
+ return;
+ }
+ entry->t = t;
+
+ //binder_inner_proc_lock(target_thread->proc);
+ list_add_tail(&entry->list, target_list);
+ //binder_inner_proc_unlock(target_thread->proc);
+ binder_debug(BINDER_DEBUG_TRANSACTION, "%s add %x to thread(%d) todo", __FUNCTION__, t, target_thread->pid);
+}
+/* Remove an upstream binder_transaction node from the todo list. */
+static void binder_rpc_dequeue_todo(struct binder_transaction *t,
+ struct binder_thread *target_thread)
+{
+ struct binder_transaction_entry *entry,*tmp_entry;
+ struct binder_transaction *tmp_t;
+ struct list_head *target_list = &target_thread->todo;
+
+ //binder_inner_proc_lock(target_thread->proc);
+ list_for_each_entry_safe(entry,tmp_entry,target_list,list) {
+ tmp_t = entry->t;
+ if(t == tmp_t) {
+ list_del(&entry->list);
+ kfree(entry);
+ binder_debug(BINDER_DEBUG_TRANSACTION, "%s del %x from thread(%d) todo", __FUNCTION__, t, target_thread->pid);
+ return;
+ }
+ }
+ //binder_inner_proc_unlock(target_thread->proc);
+}
+
+/* Find the binder_transaction matching a downstream BC_REPLY command (by priv). */
+static struct binder_transaction *binder_rpc_find_todo_by_priv(binder_uintptr_t priv,
+ struct binder_thread *target_thread)
+{
+ struct binder_transaction_entry *entry,*tmp_entry;
+ struct binder_transaction *tmp_t;
+ struct list_head *target_list = &target_thread->todo;
+
+ //binder_inner_proc_lock(target_thread->proc);
+ list_for_each_entry_safe(entry,tmp_entry,target_list,list) {
+ tmp_t = entry->t;
+ if(priv == (binder_uintptr_t)tmp_t) {
+ list_del(&entry->list);
+ kfree(entry);
+ //binder_inner_proc_unlock(target_thread->proc);
+ binder_debug(BINDER_DEBUG_TRANSACTION, "%s del %x from thread(%d) todo", __FUNCTION__, priv, target_thread->pid);
+ return tmp_t;
+ }
+ }
+
+ //binder_inner_proc_unlock(target_thread->proc);
+ return NULL;
+}
+
+
+void *binder_rpc_get_thread(void *brpc_proc)
+{
+ rpc_thread = binder_get_thread(brpc_proc);
+
+ return (void *)rpc_thread;
+}
+int binder_rpc_thread_write(void *brpc_proc,
+ binder_uintptr_t brpc_buffer, size_t size,
+ binder_size_t *consumed)
+{
+ struct binder_proc *proc = brpc_proc;
+ struct binder_thread *thread = binder_get_thread(proc);
+ int ret = 0;
+
+ binder_lock(__func__);
+ ret= binder_thread_write(proc, thread, brpc_buffer, size, consumed);
+ binder_unlock(__func__);
+
+ return ret;
+}
+/* Downstream transaction handler; upstream transactions are handled by rpc_transaction. */
+void binder_rpc_transaction(void *prpc_proc,
+ void *prpc_thread,
+ struct binder_transaction_data *tr, int reply,
+ binder_size_t extra_buffers_size,
+ binder_uintptr_t priv)
+{
+ struct binder_proc *proc = (struct binder_proc *)prpc_proc;
+ struct binder_thread *thread = (struct binder_thread *)prpc_thread;
+ struct binder_transaction *t;
+ struct binder_work *tcomplete;
+ size_t *offp, *off_end;
+ struct binder_proc *target_proc;
+ struct binder_thread *target_thread = NULL;
+ struct binder_node *target_node = NULL;
+ struct list_head *target_list;
+ wait_queue_head_t *target_wait;
+ struct binder_transaction *in_reply_to = NULL;
+ struct binder_transaction_log_entry *e;
+ uint32_t return_error;
+ uint32_t return_error_line;
+
+ binder_lock(__func__);
+
+ /*ignore binder_rpc_ops.binder_broadcast reply*/
+ if(reply && (priv == BINDER_RPC_IGNORE_PRIV)) {
+ binder_unlock(__func__);
+ return;
+ }
+
+ e = binder_transaction_log_add(&binder_transaction_log);
+ e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+ e->from_proc = proc->pid;
+ e->from_thread = thread->pid;
+ e->target_handle = tr->target.handle;
+ e->data_size = tr->data_size;
+ e->offsets_size = tr->offsets_size;
+
+ if (reply) {
+ if((thread == binder_get_rpcthread() && proc == binder_get_rpcproc())) {
+ in_reply_to = binder_rpc_find_todo_by_priv(priv, thread);
+ }
+ else {
+ in_reply_to = thread->transaction_stack;
+ }
+
+ if (in_reply_to == NULL) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with no transaction stack\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_empty_call_stack;
+ }
+ binder_set_nice(in_reply_to->saved_priority);
+ if (in_reply_to->to_thread != thread) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad transaction stack,"
+ " transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, in_reply_to->debug_id,
+ in_reply_to->to_proc ?
+ in_reply_to->to_proc->pid : 0,
+ in_reply_to->to_thread ?
+ in_reply_to->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ in_reply_to = NULL;
+ goto err_bad_call_stack;
+ }
+ thread->transaction_stack = in_reply_to->to_parent;
+ target_thread = in_reply_to->from;
+ if (target_thread == NULL) {
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
+ goto err_dead_binder;
+ }
+ if (target_thread->transaction_stack != in_reply_to) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad target transaction stack %d, "
+ "expected %d\n",
+ proc->pid, thread->pid,
+ target_thread->transaction_stack ?
+ target_thread->transaction_stack->debug_id : 0,
+ in_reply_to->debug_id);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ in_reply_to = NULL;
+ target_thread = NULL;
+ goto err_dead_binder;
+ }
+ target_proc = target_thread->proc;
+ } else {
+ if (tr->target.handle) {
+ struct binder_ref *ref;
+ ref = binder_get_ref(proc, tr->target.handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction to invalid handle\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ //panic("binder_get_ref fail.\n");
+ goto err_invalid_target_handle;
+ }
+ target_node = ref->node;
+ } else {
+ target_node = binder_context_mgr_node;
+ if (target_node == NULL) {
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
+ goto err_no_context_mgr_node;
+ }
+ }
+ e->to_node = target_node->debug_id;
+ target_proc = target_node->proc;
+ if (target_proc == NULL) {
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
+ goto err_dead_binder;
+ }
+ if(target_proc == binder_get_rpcproc()) {
+ target_thread = binder_get_rpcthread();
+ }
+ if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack && (thread != binder_get_rpcthread())) {
+ struct binder_transaction *tmp;
+ tmp = thread->transaction_stack;
+ if (tmp->to_thread != thread) {
+ binder_user_error("binder: %d:%d got new "
+ "transaction with bad transaction stack"
+ ", transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, tmp->debug_id,
+ tmp->to_proc ? tmp->to_proc->pid : 0,
+ tmp->to_thread ?
+ tmp->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_bad_call_stack;
+ }
+ while (tmp) {
+ if (tmp->from && tmp->from->proc == target_proc)
+ target_thread = tmp->from;
+ tmp = tmp->from_parent;
+ }
+ }
+ }
+ if (target_thread) {
+ e->to_thread = target_thread->pid;
+ target_list = &target_thread->todo;
+ target_wait = &target_thread->wait;
+ } else {
+ target_list = &target_proc->todo;
+ target_wait = &target_proc->wait;
+ }
+ e->to_proc = target_proc->pid;
+
+ /* TODO: reuse incoming transaction for reply */
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL) {
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_alloc_t_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+
+ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+ if (tcomplete == NULL) {
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_alloc_tcomplete_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
+
+ t->debug_id = ++binder_last_id;
+ e->debug_id = t->debug_id;
+
+ if (reply)
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_REPLY %d -> %d:%d, "
+ "data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_thread->pid,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+ else
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_TRANSACTION %d -> "
+ "%d - node %d, data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_node->debug_id,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+ else
+ t->from = NULL;
+ t->sender_euid = proc->tsk->cred->euid;
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+
+ trace_binder_transaction(reply, t, target_node);
+
+ t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+ if (t->buffer == NULL) {
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_binder_alloc_buf_failed;
+ }
+ t->buffer->allow_user_free = 0;
+ t->buffer->debug_id = t->debug_id;
+ t->buffer->transaction = t;
+ t->buffer->target_node = target_node;
+ trace_binder_transaction_alloc_buf(t->buffer);
+ if (target_node)
+ binder_inc_node(target_node, 1, 0, NULL);
+
+ offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+
+ if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "data ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+ if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "offsets ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+ if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offsets size, %zd\n",
+ proc->pid, thread->pid, tr->offsets_size);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_bad_offset;
+ }
+ off_end = (void *)offp + tr->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > t->buffer->data_size - sizeof(*fp) ||
+ t->buffer->data_size < sizeof(*fp) ||
+ !IS_ALIGNED(*offp, sizeof(void *))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offset, %zd\n",
+ proc->pid, thread->pid, *offp);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_bad_offset;
+ }
+ fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_ref *ref;
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ node = binder_new_node(proc, fp->binder, fp->cookie);
+ if (node == NULL) {
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_binder_new_node_failed;
+ }
+ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ }
+ if (fp->cookie != node->cookie) {
+ binder_user_error("binder: %d:%d sending u%p "
+ "node %d, cookie mismatch %p != %p\n",
+ proc->pid, thread->pid,
+ fp->binder, node->debug_id,
+ fp->cookie, node->cookie);
+ goto err_binder_get_ref_for_node_failed;
+ }
+ ref = binder_get_ref_for_node(target_proc, node);
+ if (ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ if (fp->type == BINDER_TYPE_BINDER)
+ fp->type = BINDER_TYPE_HANDLE;
+ else
+ fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->handle = ref->desc;
+ binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+ &thread->todo);
+
+ trace_binder_transaction_node_to_ref(t, node, ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%p -> ref %d desc %d\n",
+ node->debug_id, node->ptr, ref->debug_id,
+ ref->desc);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction with invalid "
+ "handle, %ld\n", proc->pid,
+ thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_binder_get_ref_failed;
+ }
+ if (ref->node->proc == target_proc) {
+ if (fp->type == BINDER_TYPE_HANDLE)
+ fp->type = BINDER_TYPE_BINDER;
+ else
+ fp->type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = ref->node->ptr;
+ fp->cookie = ref->node->cookie;
+ binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+ trace_binder_transaction_ref_to_node(t, ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> node %d u%p\n",
+ ref->debug_id, ref->desc, ref->node->debug_id,
+ ref->node->ptr);
+ } else {
+ struct binder_ref *new_ref;
+ new_ref = binder_get_ref_for_node(target_proc, ref->node);
+ if (new_ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ fp->handle = new_ref->desc;
+ binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+ trace_binder_transaction_ref_to_ref(t, ref,
+ new_ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, new_ref->debug_id,
+ new_ref->desc, ref->node->debug_id);
+ }
+ } break;
+
+ case BINDER_TYPE_FD: {
+ int target_fd;
+ struct file *file;
+
+ if (reply) {
+ if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+ binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_fd_not_allowed;
+ }
+ } else if (!target_node->accept_fds) {
+ binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_fd_not_allowed;
+ }
+
+ file = fget(fp->handle);
+ if (file == NULL) {
+ binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_fget_failed;
+ }
+ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+ if (target_fd < 0) {
+ fput(file);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_get_unused_fd_failed;
+ }
+ task_fd_install(target_proc, target_fd, file);
+ trace_binder_transaction_fd(t, fp->handle, target_fd);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %ld -> %d\n", fp->handle, target_fd);
+ /* TODO: fput? */
+ fp->handle = target_fd;
+ } break;
+
+ default:
+ binder_user_error("binder: %d:%d got transactio"
+ "n with invalid object type, %lx\n",
+ proc->pid, thread->pid, fp->type);
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_bad_object_type;
+ }
+ }
+ if (reply) {
+ BUG_ON(t->buffer->async_transaction != 0);
+
+ if(in_reply_to->to_proc == binder_get_rpcproc() && in_reply_to->to_thread == binder_get_rpcthread()) {
+ struct binder_buffer *tmp_buffer = in_reply_to->buffer;
+
+ binder_debug(BINDER_DEBUG_TRANSACTION, "%s %d \n", __FUNCTION__, __LINE__);
+
+ /*free in_reply_to*/
+ if (tmp_buffer->transaction) {
+ binder_debug(BINDER_DEBUG_TRANSACTION,"%s %d 0x%x\n",__FUNCTION__, __LINE__, tmp_buffer);
+ tmp_buffer->transaction->buffer = NULL;
+ tmp_buffer->transaction = NULL;
+ }
+ if (tmp_buffer->async_transaction && tmp_buffer->target_node) {
+ BUG_ON(!tmp_buffer->target_node->has_async_transaction);
+ if (list_empty(&tmp_buffer->target_node->async_todo))
+ tmp_buffer->target_node->has_async_transaction = 0;
+ else {
+ //list_move_tail(tmp_buffer->target_node->async_todo.next, &thread->todo);
+ binder_debug(BINDER_DEBUG_TRANSACTION,"%s %d\n",__FUNCTION__, __LINE__);
+ }
+ }
+ trace_binder_transaction_buffer_release(tmp_buffer);
+
+ //binder_transaction_buffer_release(in_reply_to->to_proc, tmp_buffer, NULL);
+
+ binder_free_buf(in_reply_to->to_proc, tmp_buffer);
+ }
+ binder_pop_transaction(target_thread, in_reply_to);
+ } else if (!(t->flags & TF_ONE_WAY)) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ t->need_reply = 1;
+ t->from_parent = thread->transaction_stack;
+ thread->transaction_stack = t;
+ if(thread == binder_get_rpcthread() && proc == binder_get_rpcproc()) {
+ t->remote_priv = priv;
+ binder_rpc_enqueue_todo(t, thread);
+ }
+ } else {
+ BUG_ON(target_node == NULL);
+ BUG_ON(t->buffer->async_transaction != 1);
+ if (target_node->has_async_transaction) {
+ target_list = &target_node->async_todo;
+ target_wait = NULL;
+ } else
+ target_node->has_async_transaction = 1;
+
+ if(thread == binder_get_rpcthread() && proc == binder_get_rpcproc()) {
+ binder_debug(BINDER_DEBUG_TRANSACTION, "%s %d \n", __FUNCTION__, __LINE__);
+
+ //t->remote_priv = priv;
+ //binder_rpc_enqueue_todo(t, thread);
+ }
+ }
+ if(target_thread != binder_get_rpcthread()) {
+ t->work.type = BINDER_WORK_TRANSACTION;
+ list_add_tail(&t->work.entry, target_list);
+ }
+
+ if(thread != binder_get_rpcthread()) {
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+ }
+ else {
+ kfree(tcomplete);
+ }
+
+ if(target_thread != binder_get_rpcthread()) {
+ if (target_wait)
+ wake_up_interruptible(target_wait);
+ }
+ binder_unlock(__func__);
+ return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+ trace_binder_transaction_failed_buffer_release(t->buffer);
+ binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ t->buffer->transaction = NULL;
+ binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+ kfree(tcomplete);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+err_alloc_tcomplete_failed:
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: %d:%d transaction failed %d, size %zd-%zd\n",
+ proc->pid, thread->pid, return_error,
+ tr->data_size, tr->offsets_size);
+
+ {
+ struct binder_transaction_log_entry *fe;
+ fe = binder_transaction_log_add(&binder_transaction_log_failed);
+ *fe = *e;
+ }
+
+ BUG_ON(thread->return_error != BR_OK);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,"return_error_line %d \n",return_error_line);
+ if(thread == binder_get_rpcthread() && proc == binder_get_rpcproc()) {
+ if(NULL != binder_rpc_ops.binder_return_error) {
+ struct binder_rpc_operations_info ops_info;
+
+ ops_info.tr = NULL;
+
+ if (in_reply_to) {
+ ops_info.cmd = BR_TRANSACTION_COMPLETE;
+ /*this cmd will be ignored on remote thread.*/
+ ops_info.priv = BINDER_RPC_IGNORE_PRIV;
+
+ /*send target_thread fail reply*/
+ binder_send_failed_reply(in_reply_to, return_error);
+ }
+ else {
+ ops_info.cmd = return_error;
+ ops_info.priv = priv;
+ }
+ /*send error to remote thread*/
+ binder_rpc_ops.binder_return_error(&ops_info, reply);
+ }
+ }
+ else {
+ if (in_reply_to) {
+ thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_send_failed_reply(in_reply_to, return_error);
+ } else
+ thread->return_error = return_error;
+ }
+
+ binder_unlock(__func__);
+}
+
+void binder_rpc_return_error(void *brpc_proc,
+ void *prpc_thread,
+ uint32_t return_error,
+ binder_uintptr_t priv,
+ int reply)
+{
+ struct binder_proc *proc = (struct binder_proc *)brpc_proc;
+ struct binder_thread *thread = (struct binder_thread *)prpc_thread;
+
+ struct binder_proc *target_proc = NULL;
+ struct binder_thread *target_thread = NULL;
+ struct binder_transaction *in_reply_to = NULL;
+
+ binder_lock(__func__);
+ /*ignore binder_rpc_ops.binder_broadcast reply*/
+ if(priv == BINDER_RPC_IGNORE_PRIV) {
+ binder_unlock(__func__);
+ return;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,"%s %d \n",__FUNCTION__,__LINE__);
+ if((thread == binder_get_rpcthread() && proc == binder_get_rpcproc())) {
+ in_reply_to = binder_rpc_find_todo_by_priv(priv, thread);
+ }
+
+ if (in_reply_to == NULL) {
+ binder_user_error("%s binder: %d:%d got reply transaction "
+ "with no transaction stack\n", __FUNCTION__, proc->pid, thread->pid);
+ binder_unlock(__func__);
+ return;
+ }
+ /*no reply*/
+ if(in_reply_to->flags & TF_ONE_WAY) {
+ binder_unlock(__func__);
+ return;
+ }
+
+ binder_set_nice(in_reply_to->saved_priority);
+ if (in_reply_to->to_thread != thread) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad transaction stack,"
+ " transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, in_reply_to->debug_id,
+ in_reply_to->to_proc ?
+ in_reply_to->to_proc->pid : 0,
+ in_reply_to->to_thread ?
+ in_reply_to->to_thread->pid : 0);
+ binder_unlock(__func__);
+ return;
+ }
+
+ target_thread = in_reply_to->from;
+ if (target_thread == NULL) {
+ binder_unlock(__func__);
+ return;
+ }
+ if (target_thread->transaction_stack != in_reply_to) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad target transaction stack %d, "
+ "expected %d\n",
+ proc->pid, thread->pid,
+ target_thread->transaction_stack ?
+ target_thread->transaction_stack->debug_id : 0,
+ in_reply_to->debug_id);
+ binder_unlock(__func__);
+ return;
+ }
+
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,"%s %d:%d transaction failed %d \n",
+ __FUNCTION__, target_thread->proc->pid, target_thread->pid, return_error);
+
+ if(reply) {
+ //target_thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_send_failed_reply(in_reply_to, return_error);
+ }
+ else {
+ target_thread->return_error = return_error;
+ wake_up_interruptible(&target_thread->wait);//need ?
+ }
+
+ binder_unlock(__func__);
+}
+
+void binder_rpc_node_release(void *node)
+{
+ struct binder_proc *proc = binder_get_rpcproc();
+ struct rb_node *n;
+ struct binder_node *releasing_node = (struct binder_node *)node;
+
+ binder_lock(__func__);
+ for (n = rb_first(&proc->refs_by_desc);
+ n != NULL;
+ n = rb_next(n)) {
+
+ struct binder_ref *ref;
+
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+ if(ref->node == releasing_node) {
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,"%s %d debug_id:%d desc:%d strong:%d weak:%d \n",__FUNCTION__,__LINE__,
+ ref->debug_id,ref->desc,ref->strong,ref->weak);
+ if(binder_rpc_ops.binder_dead_notify) {
+ binder_rpc_ops.binder_dead_notify(ref->desc);
+ }
+
+ binder_delete_ref(ref);
+ }
+ }
+ binder_unlock(__func__);
+}
+
+void binder_rpc_dead(void *brpc_proc, uint32_t handle)
+{
+ struct binder_proc *proc = (struct binder_proc *)brpc_proc;
+ struct binder_node *node;
+
+ struct hlist_node *pos, *n;
+ struct binder_transaction *t;
+ int incoming_refs = 0;
+
+ binder_lock(__func__);
+ node = binder_get_node(proc, handle);
+ if(!node) {
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,"binder_get_node(%d) fail\n",handle);
+ }
+ else {
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,"binder_get_node(%d) success\n",handle);
+ rb_erase(&node->rb_node, &proc->nodes);
+ list_del_init(&node->work.entry);
+ //binder_release_work(&node->async_todo);
+ if (hlist_empty(&node->refs)) {
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ } else {
+ struct binder_ref *ref;
+ int death = 0;
+
+ node->proc = NULL;
+ node->local_strong_refs = 0;
+ node->local_weak_refs = 0;
+ hlist_add_head(&node->dead_node, &binder_dead_nodes);
+ hlist_for_each_entry_safe(ref, pos, n, &node->refs, node_entry) {
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,"%s %d debug_id:%d desc:%d strong:%d weak:%d \n",
+ __FUNCTION__, ref->proc->pid, ref->debug_id,ref->desc,ref->strong,ref->weak);
+ incoming_refs++;
+ if (ref->death) {
+ death++;
+ if (list_empty(&ref->death->work.entry)) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ list_add_tail(&ref->death->work.entry, &ref->proc->todo);
+ wake_up_interruptible(&ref->proc->wait);
+ } else
+ BUG();
+ }
+ }
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: node %d now dead, "
+ "refs %d, death %d\n", node->debug_id,
+ incoming_refs, death);
+ }
+ }
+
+ binder_unlock(__func__);
+}
+
+/***********************************************************************
+ BINDER RPC API END
+***********************************************************************/
+#endif
static void print_binder_transaction(struct seq_file *m, const char *prefix,
struct binder_transaction *t)
@@ -3283,7 +4417,12 @@
size_t start_pos = m->count;
size_t header_pos;
- seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+#ifdef CONFIG_ANDROID_BINDER_RPC
+	if(thread == binder_get_rpcthread()) {
+ return;
+ }
+#endif
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
diff --git a/ap/os/linux/linux-3.4.x/drivers/staging/android/binder_rpc.c b/ap/os/linux/linux-3.4.x/drivers/staging/android/binder_rpc.c
new file mode 100755
index 0000000..1dc410e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/staging/android/binder_rpc.c
@@ -0,0 +1,752 @@
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+//#include <linux/sched/signal.h>
+//#include <linux/sched/mm.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/pid_namespace.h>
+#include <linux/security.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/syscalls.h>
+//#include <linux/task_work.h>
+//#include <linux/sizes.h>
+
+#include <linux/delay.h>
+//#include "binder_alloc.h"
+#include "binder_rpc.h"
+#include "binder_rpc_common.h"
+
+#define brpc_init_sema sema_init
+#define brpc_get_sema down_interruptible
+#define brpc_put_sema up
+
+enum binder_driver_command_protocol_ext {
+ BC_DEAD_BINDER_NOTIFY = _IOW('c', 100, struct brpc_dead_data),
+
+};
+
+struct binder_rpc_context brpc_ctx;
+
+static int brpc_rpctr_convert_tr(struct brpc_data *prpc_data, struct binder_transaction_data *tr);
+static struct brpc_data_entry *brpc_tr_convert_rpctr(struct binder_rpc_operations_info *ops_info,
+ void *ptr,
+ void *cookie);
+static int brpc_update_object_binder(struct brpc_data *prpc_data, uint32_t local_handle);
+static int brpc_chn_recv_proc(struct brpc_data *prpc_data);
+static int brpc_chn_write(struct brpc_rpmg_channel *chninfo, void *buf, unsigned int len);
+static int brpc_chn_receive_thread(void *argv);
+static int brpc_chn_create(struct binder_rpc_context *pbrpc_ctx);
+static int brpc_write_remote(struct brpc_data *data);
+static int brpc_send_trdata(struct brpc_data_entry *rpc_data_node);
+static int brpc_thread_enqueue_data(struct brpc_agent_thread_info *pagent_thread, struct brpc_data_entry *rpcdata_node);
+static int brpc_thread_write(void *brpc_proc,
+ binder_uintptr_t brpc_buffer, size_t size,
+ binder_size_t *consumed);
+static int brpc_agent_write_refs(struct binder_rpc_context *pbrpc_ctx);
+static void brpc_transaction(void *prpc_proc,
+ void *prpc_thread,
+ struct binder_transaction_data *tr, int reply,
+ binder_size_t extra_buffers_size,
+ binder_uintptr_t priv);
+
+static void brpc_uld_handle(struct binder_rpc_context *pbrpc_ctx, struct brpc_data *prpc_data);
+static void brpc_dld_handle(struct binder_rpc_context *pbrpc_ctx, struct brpc_data *prpc_data);
+static int brpc_agent_loop(void *argv);
+static int brpc_agent_create(struct binder_rpc_context *pbrpc_ctx);
+
+
+/*
+ * Copy a received RPC transaction into a local binder_transaction_data.
+ * Both data buffers are kzalloc'd here; on success the CALLER owns and
+ * must kfree() tr->data.ptr.buffer and tr->data.ptr.offsets.
+ * Returns 0 on success, -ENOMEM if either allocation fails (both
+ * pointers are then left NULL so the caller's kfree() stays safe).
+ */
+static int brpc_rpctr_convert_tr(struct brpc_data *prpc_data, struct binder_transaction_data *tr)
+{
+    struct brpc_transaction_data *prpc_tr = &prpc_data->rpc_tr;
+
+    tr->target.handle = prpc_tr->handle;
+    tr->cookie = prpc_tr->cookie;
+    tr->code = prpc_tr->code;
+    tr->flags = prpc_tr->flags;
+    tr->data_size = prpc_tr->data_size;
+    tr->offsets_size = prpc_tr->offsets_size;
+
+    tr->data.ptr.buffer = (void *)kzalloc(tr->data_size, GFP_KERNEL);
+    tr->data.ptr.offsets = (void *)kzalloc(tr->offsets_size, GFP_KERNEL);
+    /* BUGFIX: the allocations were previously used without a NULL check. */
+    if (NULL == tr->data.ptr.buffer || NULL == tr->data.ptr.offsets) {
+        kfree((void *)tr->data.ptr.buffer);
+        kfree((void *)tr->data.ptr.offsets);
+        tr->data.ptr.buffer = NULL;
+        tr->data.ptr.offsets = NULL;
+        return -ENOMEM;
+    }
+    memcpy(tr->data.ptr.buffer, prpc_tr->buffer, tr->data_size);
+    memcpy(tr->data.ptr.offsets, prpc_tr->offsets, tr->offsets_size);
+
+    return 0;
+}
+
+/*
+ * Convert a local binder transaction (whose data/offsets live in user
+ * space) into a freshly allocated, self-contained uplink queue entry
+ * that can be shipped over the rpmsg channel.
+ *
+ * ptr/cookie, when non-NULL, override the target handle/cookie taken
+ * from ops_info->tr.  Returns the new entry (caller owns and frees it),
+ * or NULL on allocation failure, on a payload larger than
+ * BRPC_TR_BUFFER_SIZE_MAX, or if the copy from user space fails.
+ */
+static struct brpc_data_entry *brpc_tr_convert_rpctr(struct binder_rpc_operations_info *ops_info,
+ void *ptr,
+ void *cookie)
+{
+ struct brpc_data_entry *rpc_data_node;
+ struct brpc_data *prpc_data;
+ struct brpc_transaction_data *prpc_tr;
+
+ uint32_t cmd = ops_info->cmd;
+ struct binder_transaction_data *tr = ops_info->tr;
+ binder_uintptr_t priv = ops_info->priv;
+
+ /* Construct the brpc_data envelope for the remote side. */
+ rpc_data_node = kzalloc(sizeof(*rpc_data_node), GFP_KERNEL);
+ if (rpc_data_node == NULL) {
+ brpc_err("kzalloc fial!!");
+ return NULL;
+ }
+
+ rpc_data_node->dir = BRPC_DIR_UPLINK;
+
+ prpc_data = &rpc_data_node->rpc_data;
+ prpc_data->cmd = cmd;
+
+ prpc_tr = &prpc_data->rpc_tr;
+ prpc_tr->priv = priv;
+ /* NOTE(review): ptr/cookie are void* stored into __u32-sized fields;
+  * this relies on 32-bit pointers on this target -- confirm. */
+ prpc_tr->handle = NULL == ptr ? tr->target.handle : ptr;
+ prpc_tr->cookie = NULL == cookie ? tr->cookie : cookie;
+ prpc_tr->code = tr->code;
+ prpc_tr->flags = tr->flags;
+ prpc_tr->data_size = tr->data_size;
+ prpc_tr->offsets_size = tr->offsets_size;
+
+ brpc_dbg("target.handle=%d code=%d ",prpc_tr->handle, prpc_tr->code);
+
+ /* Sizes are validated BEFORE copying into the fixed in-struct buffers. */
+ if(prpc_tr->data_size >= BRPC_TR_BUFFER_SIZE_MAX || prpc_tr->offsets_size >= BRPC_TR_BUFFER_SIZE_MAX) {
+ brpc_err("data_size=%d offsets_size=%d ",prpc_tr->data_size,prpc_tr->offsets_size);
+ kfree(rpc_data_node);
+ return NULL;
+ }
+
+ if (copy_from_user(prpc_tr->buffer, tr->data.ptr.buffer, prpc_tr->data_size)) {
+ brpc_err("copy_from_user fail\n");
+ kfree(rpc_data_node);
+ return NULL;
+ }
+
+ if (copy_from_user(prpc_tr->offsets, tr->data.ptr.offsets, prpc_tr->offsets_size)) {
+ brpc_dbg("copy_from_user fail\n");
+ kfree(rpc_data_node);
+ return NULL;
+ }
+
+ return rpc_data_node;
+}
+
+
+/*
+ * Walk the flattened binder objects referenced by the offsets array of
+ * an outgoing RPC transaction and rewrite each local BINDER/WEAK_BINDER
+ * object so the remote side sees our local handle instead of a raw
+ * kernel pointer.  FD objects cannot cross the RPC link and are
+ * rejected.  Returns 0 on success, -1 on a malformed transaction.
+ * (Removed four unused locals: buffer_offset, off_start_offset,
+ * off_end_offset, off_min.)
+ */
+static int brpc_update_object_binder(struct brpc_data *prpc_data, uint32_t local_handle)
+{
+    struct brpc_transaction_data *prpc_tr = &prpc_data->rpc_tr;
+    size_t *offp, *off_end;
+
+    offp = (size_t *)prpc_tr->offsets;
+    off_end = (void *)offp + prpc_tr->offsets_size;
+
+    for (; offp < off_end; offp++) {
+        struct flat_binder_object *fp;
+
+        /* Each offset must locate a whole, aligned object inside buffer. */
+        if (*offp > prpc_tr->data_size - sizeof(*fp) ||
+            prpc_tr->data_size < sizeof(*fp) ||
+            !IS_ALIGNED(*offp, sizeof(void *))) {
+            brpc_err("binder: got transaction with "
+                "invalid offset, %zd\n",
+                *offp);
+            return -1;
+        }
+        fp = (struct flat_binder_object *)(prpc_tr->buffer + *offp);
+        switch (fp->type) {
+        case BINDER_TYPE_BINDER:
+        case BINDER_TYPE_WEAK_BINDER: {
+            /* Substitute the local node reference with our handle. */
+            fp->binder = local_handle;
+            fp->cookie = 0;
+            brpc_info("fp->binder:%d",fp->binder);
+        } break;
+        case BINDER_TYPE_HANDLE:
+        case BINDER_TYPE_WEAK_HANDLE:
+            /* Remote handles pass through untouched. */
+            brpc_info("fp->handle:%d",fp->handle);
+            break;
+
+        case BINDER_TYPE_FD:
+        default:
+            brpc_err("got transaction with invalid object type, %x\n",fp->type);
+            return -1;
+        }
+    }
+
+    return 0;
+}
+/*
+ * Queue an uplink error/status reply toward the remote side.
+ * ops_info->priv identifies the remote transaction being answered and
+ * ops_info->cmd carries the BR_* status code.
+ * Returns 0 when queued, -1 on allocation failure.
+ */
+static int brpc_binder_return_error(struct binder_rpc_operations_info *ops_info, int reply)
+{
+    struct brpc_data_entry *rpc_data_node;
+
+    rpc_data_node = kzalloc(sizeof(*rpc_data_node), GFP_KERNEL);
+    if (rpc_data_node == NULL) {
+        brpc_err("kzalloc fail!!");  /* BUGFIX: was "fial" */
+        return -1;
+    }
+
+    rpc_data_node->dir = BRPC_DIR_UPLINK;
+    rpc_data_node->rpc_data.cmd = ops_info->cmd;
+    rpc_data_node->rpc_data.rpc_reply_error.priv = ops_info->priv;
+    rpc_data_node->rpc_data.rpc_reply_error.reply = reply;
+
+    /* BUGFIX: propagate the queueing result instead of discarding it. */
+    return brpc_send_trdata(rpc_data_node);
+}
+/*
+ * Queue a local work item asking the agent thread to release a binder
+ * node (deferred so the release happens off the caller's context).
+ * Returns 0 when queued, -1 on allocation failure.
+ */
+static int brpc_binder_release(void *proc, void *node)
+{
+    struct brpc_data_entry *rpc_data_node;
+
+    rpc_data_node = kzalloc(sizeof(*rpc_data_node), GFP_KERNEL);
+    if (rpc_data_node == NULL) {
+        brpc_err("kzalloc fail!!");  /* BUGFIX: was "fial" */
+        return -1;
+    }
+
+    rpc_data_node->dir = BRPC_DIR_LOCAL;
+    rpc_data_node->rpc_data.rpc_local.proc = proc;
+    rpc_data_node->rpc_data.rpc_local.node = node;
+
+    /* BUGFIX: propagate the queueing result instead of discarding it. */
+    return brpc_send_trdata(rpc_data_node);
+}
+/*
+ * Notify the remote side that the binder identified by 'handle' died
+ * (uplink BC_DEAD_BINDER_NOTIFY).
+ * Returns 0 when queued, -1 on allocation failure.
+ */
+static int brpc_binder_dead(uint32_t handle)
+{
+    struct brpc_data_entry *rpc_data_node;
+
+    rpc_data_node = kzalloc(sizeof(*rpc_data_node), GFP_KERNEL);
+    if (rpc_data_node == NULL) {
+        brpc_err("kzalloc fail!!");  /* BUGFIX: was "fial" */
+        return -1;
+    }
+
+    rpc_data_node->dir = BRPC_DIR_UPLINK;
+    rpc_data_node->rpc_data.cmd = BC_DEAD_BINDER_NOTIFY;
+    rpc_data_node->rpc_data.rpc_dead.handle = handle;
+
+    /* BUGFIX: propagate the queueing result instead of discarding it. */
+    return brpc_send_trdata(rpc_data_node);
+}
+
+/*
+ * Forward an outgoing binder transaction to the remote core.
+ * When local_handle is non-zero, flattened local binder objects are
+ * rewritten so the peer can address them; remote_ptr/remote_cookie,
+ * when set, override the target taken from ops_info->tr.
+ * Transactions aimed at the local context manager (handle 0) are not
+ * forwarded.  Returns 0 on success or when nothing needs forwarding,
+ * -1 on error.  (Removed six unused locals.)
+ */
+static int brpc_binder_transaction(struct binder_rpc_operations_info *ops_info,
+    uint32_t local_handle,
+    void *remote_ptr,
+    void *remote_cookie)
+{
+    struct brpc_data_entry *rpc_data_node;
+    struct binder_transaction_data *tr = ops_info->tr;
+
+    brpc_info("tr cmd:0x%x local_handle:%d handle:%d code:%d",ops_info->cmd, local_handle, tr->target.handle, tr->code);
+    /* BUGFIX: this log printed tr->flags but labelled it "cmd". */
+    brpc_info("tr flags:0x%x",tr->flags);
+
+    if (0 != tr->target.handle) {
+        rpc_data_node = brpc_tr_convert_rpctr(ops_info, remote_ptr, remote_cookie);
+        if (NULL == rpc_data_node) {
+            return -1;
+        }
+
+        if (0 != local_handle && brpc_update_object_binder(&rpc_data_node->rpc_data, local_handle)) {
+            brpc_err("brpc_update_object_binder(local_handle %d) fail.",local_handle);
+            kfree(rpc_data_node);
+            return -1;
+        }
+
+        /* BUGFIX: propagate the queueing result instead of discarding it. */
+        return brpc_send_trdata(rpc_data_node);
+    }
+
+    return 0;
+}
+
+/*
+ * Forward a binder reply to the remote core, rewriting flattened local
+ * binder objects with local_handle when it is non-zero.  The target
+ * handle/cookie are taken unchanged from ops_info->tr.
+ * Returns 0 on success, -1 on error.  (Removed unused locals.)
+ */
+static int brpc_binder_reply(struct binder_rpc_operations_info *ops_info, uint32_t local_handle)
+{
+    struct brpc_data_entry *rpc_data_node;
+    struct binder_transaction_data *tr = ops_info->tr;
+
+    brpc_info("tr cmd:0x%x local_handle:%d handle:%d code:%d",ops_info->cmd, local_handle, tr->target.handle, tr->code);
+
+    rpc_data_node = brpc_tr_convert_rpctr(ops_info, NULL ,NULL);
+    if (NULL == rpc_data_node) {
+        brpc_err("drop this tr.");
+        return -1;
+    }
+    if (0 != local_handle && brpc_update_object_binder(&rpc_data_node->rpc_data, local_handle)) {
+        brpc_err("brpc_update_object_binder(local_handle %d) fail.",local_handle);
+        kfree(rpc_data_node);
+        return -1;
+    }
+
+    /* BUGFIX: propagate the queueing result instead of discarding it. */
+    return brpc_send_trdata(rpc_data_node);
+}
+
+/*
+ * Broadcast ADD_SERVICE registrations aimed at the local context
+ * manager (handle 0) to the remote core so services become visible
+ * there too.  Any other transaction is ignored (returns 0).
+ * Returns 0 on success/ignore, -1 on error.  (Removed unused locals.)
+ */
+static int brpc_binder_broadcast(struct binder_rpc_operations_info *ops_info, uint32_t local_handle)
+{
+    struct brpc_data_entry *rpc_data_node;
+    struct binder_transaction_data *tr = ops_info->tr;
+
+    brpc_info("tr handle:%d code:%d",tr->target.handle, tr->code);
+    /* Only service registrations with the context manager are broadcast. */
+    if (0 == tr->target.handle && ADD_SERVICE_TRANSACTION == tr->code) {
+        rpc_data_node = brpc_tr_convert_rpctr(ops_info, NULL, NULL);
+        if (NULL == rpc_data_node) {
+            brpc_err("brpc_tr_convert_rpctr fail.");
+            return -1;
+        }
+        if (brpc_update_object_binder(&rpc_data_node->rpc_data, local_handle)) {
+            brpc_err("brpc_update_object_binder fail.");
+            kfree(rpc_data_node);
+            return -1;
+        }
+
+        return brpc_send_trdata(rpc_data_node);
+    }
+
+    return 0;
+}
+
+/*
+ * Wrap an incoming (remote -> local) brpc_data in a queue entry and
+ * hand it to the agent thread as downlink work.
+ * Returns 0 when queued, -1 if the entry cannot be allocated.
+ */
+static int brpc_chn_recv_proc(struct brpc_data *prpc_data)
+{
+    struct brpc_data_entry *entry;
+
+    entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+    if (NULL == entry)
+        return -1;
+
+    brpc_dbg("cmd=0x%x",prpc_data->cmd);
+    switch (prpc_data->cmd) {
+    case BC_TRANSACTION:
+    case BC_REPLY:
+        brpc_dbg("target.handle=%d code=%d",prpc_data->rpc_tr.handle, prpc_data->rpc_tr.code);
+        brpc_dbg("data_size=%d offsets_size=%d",prpc_data->rpc_tr.data_size,prpc_data->rpc_tr.offsets_size);
+        break;
+    case BC_DEAD_BINDER_NOTIFY:
+        brpc_dbg("BC_DEAD_BINDER_NOTIFY handle=%d",prpc_data->rpc_dead.handle);
+        break;
+    default:
+        break;
+    }
+
+    entry->dir = BRPC_DIR_DOWNLINK;
+    memcpy(&entry->rpc_data, prpc_data, sizeof(struct brpc_data));
+
+    return brpc_send_trdata(entry);
+}
+
+/* Write one buffer to the rpmsg channel.  A return value >= 0 means
+ * the channel write succeeded; a negative value means it failed. */
+static int brpc_chn_write(struct brpc_rpmg_channel *chninfo, void *buf, unsigned int len)
+{
+    T_ZDrvRpMsg_Msg rpmsg;
+
+    if (NULL == buf)
+        return -EINVAL;
+
+    memset(&rpmsg, 0, sizeof(rpmsg));
+    rpmsg.actorID = chninfo->core_id;
+    rpmsg.chID = chninfo->channel_id;
+    rpmsg.flag = RPMSG_WRITE_INT;
+    rpmsg.len = len;
+    rpmsg.buf = buf;
+
+    return binderWrite(&rpmsg);
+}
+
+/*
+ * Channel receive kthread: blocks in binderRead() and feeds every
+ * received brpc_data into the downlink queue via brpc_chn_recv_proc().
+ * Exits when chn.channel_stop is set; a failed read backs off for 10 s
+ * before retrying so a dead channel cannot spin the CPU.
+ */
+static int brpc_chn_receive_thread(void *argv)
+{
+ struct binder_rpc_context *pbrpc_ctx = (struct binder_rpc_context *)argv;
+ T_ZDrvRpMsg_Msg msg;
+ struct brpc_data data;
+
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.actorID = pbrpc_ctx->chn.core_id;
+ msg.chID = pbrpc_ctx->chn.channel_id;
+ msg.flag = 0;
+
+ while(!pbrpc_ctx->chn.channel_stop) {
+ /* buf/len are refreshed each pass; binderRead may modify them. */
+ msg.buf = &data;
+ msg.len = sizeof(data);
+
+ ret = binderRead(&msg);
+ if(ret <= 0) {
+ brpc_warn("binderRead ret=%d ", ret);
+ msleep(10000);
+ continue;
+ }
+
+ /* Re-check the stop flag: a shutdown may have happened while blocked. */
+ if(pbrpc_ctx->chn.channel_stop) {
+ break;
+ }
+
+ brpc_chn_recv_proc(&data);
+ }
+
+ return 0;
+}
+
+/* Create the binder RPC rpmsg channel (tolerating an already-existing
+ * one) and start its receive thread.  Returns 0 on success, a negative
+ * errno or rpmsg error code on failure. */
+static int brpc_chn_create(struct binder_rpc_context *pbrpc_ctx)
+{
+    struct task_struct *task;
+    int ret;
+
+    ret = binderCreateChannel(pbrpc_ctx->chn.core_id, pbrpc_ctx->chn.channel_id, pbrpc_ctx->chn.channel_size);
+    if (RPMSG_SUCCESS != ret && RPMSG_CHANNEL_ALREADY_EXIST != ret) {
+        brpc_err("binderCreateChannel fail.");
+        return ret;
+    }
+
+    task = kthread_run(brpc_chn_receive_thread, (void *)pbrpc_ctx, "brpc-chn%d", pbrpc_ctx->chn.channel_id);
+    if (IS_ERR(task)) {
+        brpc_err("kthread_run fail.");
+        return PTR_ERR(task);
+    }
+
+    pbrpc_ctx->chn.rcv_thread = task;
+    return 0;
+}
+
+/* Push one brpc_data to the remote core over the rpmsg channel.
+ * Returns 0 on success, -1 on a channel write failure. */
+static int brpc_write_remote(struct brpc_data *data)
+{
+    int ret;
+
+    ret = brpc_chn_write(&brpc_ctx.chn, data, sizeof(struct brpc_data));
+    if (ret < 0) {
+        brpc_err("brpc_channel_write fail.\n");
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Append one work entry to the agent thread's queue under the queue
+ * lock, then wake the agent via the counting semaphore.  Always 0. */
+static int brpc_thread_enqueue_data(struct brpc_agent_thread_info *pagent_thread, struct brpc_data_entry *rpcdata_node)
+{
+    unsigned long irq_flags;
+
+    spin_lock_irqsave(&pagent_thread->p_transaction_lock, irq_flags);
+    list_add_tail(&rpcdata_node->list, &pagent_thread->p_transaction_list);
+    spin_unlock_irqrestore(&pagent_thread->p_transaction_lock, irq_flags);
+
+    brpc_put_sema(&pagent_thread->p_transaction_sem);
+    return 0;
+}
+
+/* Convenience wrapper: queue an entry on the global agent thread. */
+static int brpc_send_trdata(struct brpc_data_entry *rpc_data_node)
+{
+    return brpc_thread_enqueue_data(&brpc_ctx.agent_thread, rpc_data_node);
+}
+
+/*
+ * Feed a kernel-space binder command buffer into the binder write path.
+ * The binder core expects user pointers, so the addressing limit is
+ * temporarily widened with set_fs(KERNEL_DS) around the call.
+ * Returns whatever binder_rpc_thread_write() returns.
+ * (Removed the unused local 'proc'.)
+ */
+static int brpc_thread_write(void *brpc_proc,
+    binder_uintptr_t brpc_buffer, size_t size,
+    binder_size_t *consumed)
+{
+    mm_segment_t old_fs;
+    int ret;
+
+    old_fs = get_fs();
+    set_fs(KERNEL_DS);
+
+    ret = binder_rpc_thread_write(brpc_proc, brpc_buffer, size, consumed);
+
+    set_fs(old_fs);
+
+    return ret;
+}
+
+
+/* Send BC_INCREFS then BC_ACQUIRE for handle 0 (the remote context
+ * manager) so the agent holds weak and strong references on it.
+ * Returns 0 on success, -1 if either write fails. */
+static int brpc_agent_write_refs(struct binder_rpc_context *pbrpc_ctx)
+{
+    binder_size_t consumed = 0;
+    int ret;
+    struct {
+        __u32 cmd;
+        __u32 handle;
+    } req = { .cmd = BC_INCREFS, .handle = 0 };
+
+    ret = brpc_thread_write(pbrpc_ctx->brpc_proc, (binder_uintptr_t)&req, sizeof(req), &consumed);
+    if (0 != ret) {
+        brpc_err("write BC_INCREFS fail, ret:%d",ret);
+        return -1;
+    }
+    brpc_dbg("BC_INCREFS consumed:%d ",consumed);
+
+    req.cmd = BC_ACQUIRE;
+    req.handle = 0;
+    ret = brpc_thread_write(pbrpc_ctx->brpc_proc, (binder_uintptr_t)&req, sizeof(req), &consumed);
+    if (0 != ret) {
+        brpc_err("write BC_ACQUIRE fail, ret:%d",ret);
+        return -1;
+    }
+    brpc_dbg("BC_ACQUIRE consumed:%d ",consumed);
+
+    return 0;
+}
+
+/*
+ * Inject a transaction assembled in kernel memory into the local binder
+ * core.  tr->data pointers are kernel buffers, so the call is bracketed
+ * with set_fs(KERNEL_DS) for binder's copy_from_user() path.
+ * extra_buffers_size is accepted for interface symmetry but 0 is always
+ * passed on.  (Removed the unused locals 'p' and 'q'.)
+ */
+static void brpc_transaction(void *prpc_proc,
+    void *prpc_thread,
+    struct binder_transaction_data *tr, int reply,
+    binder_size_t extra_buffers_size,
+    binder_uintptr_t priv)
+{
+    mm_segment_t old_fs;
+
+    old_fs = get_fs();
+    set_fs(KERNEL_DS);
+
+    binder_rpc_transaction(prpc_proc,
+        prpc_thread,
+        tr,
+        reply,
+        0,
+        priv);
+
+    set_fs(old_fs);
+}
+
+/* Uplink handler: log the command class, then forward the brpc_data to
+ * the remote core unconditionally (even for unrecognized commands, as
+ * before). */
+static void brpc_uld_handle(struct binder_rpc_context *pbrpc_ctx, struct brpc_data *prpc_data)
+{
+    switch (prpc_data->cmd) {
+    case BC_TRANSACTION:
+    case BC_REPLY:
+        brpc_dbg("BRPC_DIR_UPLINK cmd:0x%x priv:0x%x",prpc_data->cmd,prpc_data->rpc_tr.priv);
+        break;
+    case BC_DEAD_BINDER_NOTIFY:
+        brpc_dbg("BRPC_DIR_UPLINK cmd:0x%x handle:0x%x",prpc_data->cmd,prpc_data->rpc_dead.handle);
+        break;
+    case BR_ERROR:
+    case BR_DEAD_REPLY:
+    case BR_TRANSACTION_COMPLETE:
+    case BR_DEAD_BINDER:
+    case BR_CLEAR_DEATH_NOTIFICATION_DONE:
+    case BR_FAILED_REPLY:
+        brpc_dbg("BRPC_DIR_UPLINK cmd:0x%x priv:0x%x",prpc_data->cmd,prpc_data->rpc_reply_error.priv);
+        break;
+    default:
+        brpc_err("unknow cmd 0x%x",prpc_data->cmd);
+        break;
+    }
+
+    brpc_write_remote(prpc_data);
+}
+/*
+ * Downlink handler: dispatch a brpc_data received from the remote core.
+ * BC_TRANSACTION/BC_REPLY are rebuilt into a binder_transaction_data
+ * (brpc_rpctr_convert_tr allocates the data/offsets buffers, freed here
+ * after injection); BC_DEAD_BINDER_NOTIFY triggers local death
+ * delivery; BR_* codes complete or fail an earlier remote transaction.
+ */
+static void brpc_dld_handle(struct binder_rpc_context *pbrpc_ctx, struct brpc_data *prpc_data)
+{
+ struct binder_transaction_data tr;
+ struct brpc_transaction_data *prpc_tr = &prpc_data->rpc_tr;
+ binder_uintptr_t priv;  /* NOTE(review): unused local */
+
+ if(BC_TRANSACTION == prpc_data->cmd || BC_REPLY == prpc_data->cmd) {
+ brpc_dbg("BRPC_DIR_DOWNLINK cmd:0x%x priv:0x%x",prpc_data->cmd,prpc_data->rpc_tr.priv);
+ /* NOTE(review): the return value is ignored; on allocation failure
+  * tr's buffers may be unusable -- confirm convert_tr guarantees. */
+ brpc_rpctr_convert_tr(prpc_data, &tr);
+
+ brpc_transaction(pbrpc_ctx->brpc_proc,
+ pbrpc_ctx->brpc_thread,
+ &tr,
+ BC_REPLY == prpc_data->cmd,
+ 0,
+ prpc_tr->priv);
+
+ /* Free the kernel buffers allocated by brpc_rpctr_convert_tr(). */
+ if(0 != tr.data.ptr.buffer) {
+ kfree(tr.data.ptr.buffer);
+ }
+ if(0 != tr.data.ptr.offsets) {
+ kfree(tr.data.ptr.offsets);
+ }
+ }
+ else if(BC_DEAD_BINDER_NOTIFY == prpc_data->cmd) {
+ brpc_dbg("BRPC_DIR_DOWNLINK cmd:0x%x handle:0x%x",prpc_data->cmd,prpc_data->rpc_dead.handle);
+ binder_rpc_dead(pbrpc_ctx->brpc_proc, prpc_data->rpc_dead.handle);
+ }
+ else if(BR_ERROR == prpc_data->cmd ||
+ BR_DEAD_REPLY == prpc_data->cmd ||
+ BR_TRANSACTION_COMPLETE == prpc_data->cmd ||
+ BR_DEAD_BINDER == prpc_data->cmd ||
+ BR_CLEAR_DEATH_NOTIFICATION_DONE == prpc_data->cmd ||
+ BR_FAILED_REPLY == prpc_data->cmd) {
+ brpc_dbg("BRPC_DIR_DOWNLINK cmd:0x%x priv:0x%x",prpc_data->cmd,prpc_data->rpc_reply_error.priv);
+ binder_rpc_return_error(pbrpc_ctx->brpc_proc,
+ pbrpc_ctx->brpc_thread,
+ prpc_data->cmd,
+ prpc_data->rpc_reply_error.priv,
+ prpc_data->rpc_reply_error.reply);
+ }
+ else {
+ brpc_err("unknow cmd 0x%x",prpc_data->cmd);
+ }
+}
+
+/* Handle a BRPC_DIR_LOCAL work item: release the binder node captured
+ * by brpc_binder_release().  pbrpc_ctx is unused but kept so all three
+ * direction handlers share the same signature. */
+static void brpc_local_handle(struct binder_rpc_context *pbrpc_ctx, struct brpc_data *prpc_data)
+{
+ binder_rpc_node_release(prpc_data->rpc_local.node);
+}
+
+/*
+ * Agent kthread main loop: drain the work queue and dispatch each entry
+ * by direction (uplink -> remote channel, downlink -> local binder,
+ * local -> node cleanup).  Runs until bstop is set; any entries still
+ * queued at shutdown are freed without being processed.
+ */
+static int brpc_agent_loop(void *argv)
+{
+    struct binder_rpc_context *pbrpc_ctx = (struct binder_rpc_context *)argv;
+    struct brpc_agent_thread_info *pagent_thread = &pbrpc_ctx->agent_thread;
+    struct list_head tmp_list;
+    struct brpc_data_entry *entry, *tmp_entry;
+    unsigned long flags;
+
+    pbrpc_ctx->brpc_thread = binder_rpc_get_thread(pbrpc_ctx->brpc_proc);
+
+    if (brpc_agent_write_refs(pbrpc_ctx)) {
+        brpc_err("brpc_agent_write_refs fail.");
+        return -1;
+    }
+
+    INIT_LIST_HEAD(&tmp_list);
+    while (!pagent_thread->bstop) {
+        /* An interrupted wait is tolerated: the queue is simply re-checked. */
+        brpc_get_sema(&pagent_thread->p_transaction_sem);
+
+        spin_lock_irqsave(&pagent_thread->p_transaction_lock, flags);
+        if (list_empty(&pagent_thread->p_transaction_list)) {
+            spin_unlock_irqrestore(&pagent_thread->p_transaction_lock, flags);
+            continue;
+        }
+        /* Move the whole pending list onto tmp_list.  list_replace_init()
+         * already re-initializes the source head, so the old follow-up
+         * list_del_init() on it was a redundant no-op and is removed. */
+        list_replace_init(&pagent_thread->p_transaction_list, &tmp_list);
+        spin_unlock_irqrestore(&pagent_thread->p_transaction_lock, flags);
+
+        list_for_each_entry_safe(entry, tmp_entry, &tmp_list, list) {
+            if (!pagent_thread->bstop) {
+                if (BRPC_DIR_UPLINK == entry->dir) {
+                    brpc_uld_handle(pbrpc_ctx, &entry->rpc_data);
+                }
+                else if (BRPC_DIR_DOWNLINK == entry->dir) {
+                    brpc_dld_handle(pbrpc_ctx, &entry->rpc_data);
+                }
+                else if (BRPC_DIR_LOCAL == entry->dir) {
+                    brpc_local_handle(pbrpc_ctx, &entry->rpc_data);
+                }
+                else {
+                    brpc_warn("brpc data unknow dir(%d).", entry->dir);
+                }
+            }
+
+            list_del(&entry->list);
+            kfree(entry);
+        }
+    }
+
+    /* Shutdown: discard anything left on the queue. */
+    spin_lock_irqsave(&pagent_thread->p_transaction_lock, flags);
+    list_for_each_entry_safe(entry, tmp_entry, &pagent_thread->p_transaction_list, list) {
+        list_del(&entry->list);
+        kfree(entry);
+    }
+    spin_unlock_irqrestore(&pagent_thread->p_transaction_lock, flags);
+
+    brpc_err("the agent loop stop!\n");  /* BUGFIX: was "agnet" */
+
+    return 0;
+}
+
+/* Initialize the agent work queue (list, lock, semaphore) and start the
+ * agent kthread.  Returns 0 on success, a negative errno on failure. */
+static int brpc_agent_create(struct binder_rpc_context *pbrpc_ctx)
+{
+    struct brpc_agent_thread_info *pagent_thread = &pbrpc_ctx->agent_thread;
+    struct task_struct *task;
+
+    INIT_LIST_HEAD(&pagent_thread->p_transaction_list);
+    spin_lock_init(&pagent_thread->p_transaction_lock);
+    brpc_init_sema(&pagent_thread->p_transaction_sem, 0);
+
+    task = kthread_run(brpc_agent_loop, (void *)pbrpc_ctx, "brpc-agent");
+    if (IS_ERR(task)) {
+        brpc_err("kthread_run fail.");
+        return PTR_ERR(task);
+    }
+
+    pagent_thread->p_thread = task;
+    return 0;
+}
+
+/*
+ * Initialize the binder RPC layer for the given binder proc/file:
+ * create the rpmsg channel and its receive thread, start the agent
+ * thread, and publish the operation callbacks into *cb (if non-NULL).
+ * Returns 0 on success, -1 on failure.
+ *
+ * NOTE(review): 'ret' is unused, and on the brpc_agent_create() failure
+ * path the already-started channel receive thread is left running --
+ * confirm whether rollback is needed here.
+ */
+int binder_rpc_init(void *proc, struct file *pfile, struct binder_rpc_operations_struct *cb)
+{
+ struct binder_rpc_context *pbrpc_ctx = &brpc_ctx;
+ int ret = 0;
+
+ memset(pbrpc_ctx, 0, sizeof(struct binder_rpc_context));
+
+ pbrpc_ctx->brpc_proc = proc;
+ pbrpc_ctx->prpc_file = pfile;
+
+ pbrpc_ctx->chn.core_id = CAP_ID;
+ pbrpc_ctx->chn.channel_id = ICP_CHN_BINDER_RPC;
+ pbrpc_ctx->chn.channel_size = ICP_CHN_BINDER_RPC_SIZE;
+
+ if(brpc_chn_create(pbrpc_ctx)) {
+ brpc_err("brpc_chn_create fail.");
+ return -1;
+ }
+
+ if(brpc_agent_create(pbrpc_ctx)) {
+ brpc_err("brpc_agent_create fail.");
+ return -1;
+ }
+
+ /* Hand the RPC entry points back to the binder core. */
+ if(NULL != cb) {
+ memset(cb, 0, sizeof(struct binder_rpc_operations_struct));
+ cb->binder_transaction = brpc_binder_transaction;
+ cb->binder_broadcast = brpc_binder_broadcast;
+ cb->binder_reply = brpc_binder_reply;
+ cb->binder_return_error = brpc_binder_return_error;
+ cb->binder_release = brpc_binder_release;
+ cb->binder_dead_notify = brpc_binder_dead;
+ }
+
+ brpc_info("success!");
+
+ return 0;
+}
+
+/*
+ * Tear down the binder RPC layer: signal both worker threads to stop,
+ * close the binder file and unmap its vma (if given).  Always returns 0.
+ *
+ * NOTE(review): the threads are only flagged to stop, not joined, and
+ * filp_close()/do_munmap() operate on 'current' -- this assumes the
+ * caller runs in the owning process context; confirm.
+ */
+int binder_rpc_deinit(struct vm_area_struct *vma)
+{
+ struct binder_rpc_context *pbrpc_ctx = &brpc_ctx;
+
+ pbrpc_ctx->chn.channel_stop = 1;
+ pbrpc_ctx->agent_thread.bstop = 1;
+
+ if(pbrpc_ctx->prpc_file) {
+ filp_close(pbrpc_ctx->prpc_file, current->files);
+
+ if(NULL != vma) {
+ do_munmap(current->mm, vma->vm_start, vma->vm_end - vma->vm_start);
+ }
+ }
+
+ brpc_info("success!");
+
+ return 0;
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/staging/android/binder_rpc.h b/ap/os/linux/linux-3.4.x/drivers/staging/android/binder_rpc.h
new file mode 100755
index 0000000..7315494
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/staging/android/binder_rpc.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_BINDER_RPC_H
+#define _LINUX_BINDER_RPC_H
+
+/*
+ * Public interface between the binder core (binder.c) and the binder
+ * RPC bridge (binder_rpc.c) that forwards transactions to a remote
+ * core over rpmsg.
+ */
+
+#include "binder.h"
+
+/* NOTE(review): pointer-sized values are forced to 32 bits here; this
+ * assumes a 32-bit-only target -- confirm before any 64-bit port. */
+typedef __u32 binder_size_t;
+typedef __u32 binder_uintptr_t;
+
+/* Service-manager transaction codes (mirror Android's IServiceManager). */
+enum {
+ GET_SERVICE_TRANSACTION = 0x00000001,
+ CHECK_SERVICE_TRANSACTION,
+ ADD_SERVICE_TRANSACTION,
+ LIST_SERVICES_TRANSACTION,
+};
+
+/* Bundle describing one binder operation handed to the RPC callbacks. */
+struct binder_rpc_operations_info {
+ uint32_t cmd;
+ struct binder_transaction_data *tr;
+ binder_uintptr_t priv;
+};
+
+/* Callbacks the RPC bridge installs for the binder core to invoke. */
+struct binder_rpc_operations_struct {
+ int (*binder_broadcast)(struct binder_rpc_operations_info *ops_info, uint32_t local_handle);
+ int (*binder_reply)(struct binder_rpc_operations_info *ops_info, uint32_t local_handle);
+ int (*binder_transaction)(struct binder_rpc_operations_info *ops_info, uint32_t local_handle, void *remote_ptr, void *remote_cookie);
+ int (*binder_return_error)(struct binder_rpc_operations_info *ops_info, int reply);
+ int (*binder_release)(void *proc, void *node);
+ int (*binder_dead_notify)(uint32_t handle);
+};
+
+/* Lifecycle of the RPC bridge. */
+int binder_rpc_init(void *proc, struct file *pfile, struct binder_rpc_operations_struct *cb);
+int binder_rpc_deinit(struct vm_area_struct *vma);
+/* Entry points implemented by the binder core for the bridge to call. */
+int binder_rpc_thread_write(void *brpc_proc,
+ binder_uintptr_t brpc_buffer, size_t size,
+ binder_size_t *consumed);
+void *binder_rpc_get_thread(void *brpc_proc);
+void binder_rpc_transaction(void *prpc_proc,
+ void *prpc_thread,
+ struct binder_transaction_data *tr, int reply,
+ binder_size_t extra_buffers_size,
+ binder_uintptr_t priv);
+void binder_rpc_node_release(void *node);
+void binder_rpc_dead(void *brpc_proc, uint32_t handle);
+void binder_rpc_return_error(void *brpc_proc,
+ void *prpc_thread,
+ uint32_t return_error,
+ binder_uintptr_t priv,
+ int reply);
+
+
+#endif /* _LINUX_BINDER_RPC_H*/
diff --git a/ap/os/linux/linux-3.4.x/drivers/staging/android/binder_rpc_common.h b/ap/os/linux/linux-3.4.x/drivers/staging/android/binder_rpc_common.h
new file mode 100755
index 0000000..5541022
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/staging/android/binder_rpc_common.h
@@ -0,0 +1,126 @@
+#ifndef _LINUX_BINDER_RPC_COMM_H
+#define _LINUX_BINDER_RPC_COMM_H
+
+/*
+ * Shared types, logging macros and the rpmsg wire format used by the
+ * binder RPC bridge (binder_rpc.c).
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/soc/zte/rpm/rpmsg.h>
+
+#include <binder.h>
+
+/* NOTE(review): a non-static variable DEFINITION in a header causes a
+ * multiple-definition link error if this header is ever included from
+ * more than one translation unit -- confirm it is included only by
+ * binder_rpc.c, or move the definition into a .c file. */
+int brpc_debug = 0;
+module_param(brpc_debug,int,0644);
+
+/* Logging helpers; dbg/info are identical and gated by brpc_debug==1. */
+#define brpc_dbg(format, arg...) if(brpc_debug == 1) \
+ printk(KERN_INFO " [brpc]<%s>: " format "\n" , __func__ , ## arg)
+#define brpc_info(format, arg...) if(brpc_debug == 1) \
+ printk(KERN_INFO " [brpc]<%s>: " format "\n" , __func__ , ## arg)
+
+#define brpc_err(format, arg...) printk(KERN_ERR " [brpc]<%s>: " format "\n" , \
+ __func__ , ## arg)
+
+#define brpc_warn(format, arg...) printk(KERN_WARNING " [brpc]<%s>: " format "\n" , \
+ __func__ , ## arg)
+
+/* Local data bound for the remote core; written to the rpmsg channel. */
+#define BRPC_DIR_UPLINK 1
+/* Remote data bound for the local core; read from the rpmsg channel. */
+#define BRPC_DIR_DOWNLINK 2
+/* Purely local work (e.g. deferred node release); never hits the channel. */
+#define BRPC_DIR_LOCAL 3
+
+/* rpmsg channel identity/size and the channel API aliases. */
+#define ICP_CHN_BINDER_RPC 33
+#define ICP_CHN_BINDER_RPC_SIZE (8 * 1024 *2)
+#define binderCreateChannel zDrvRpMsg_CreateChannel_Cap
+#define binderWrite zDrvRpMsg_Write_Cap
+#define binderRead zDrvRpMsg_Read_Cap
+
+/* One rpmsg channel plus its receive thread and stop flag. */
+struct brpc_rpmg_channel {
+ T_ZDrvRpMsg_ActorID core_id;
+ T_ZDrvRpMsg_ChID channel_id;
+ unsigned int channel_size;
+ struct task_struct *rcv_thread;
+ int channel_stop;
+};
+
+/* Agent thread state: work list protected by the spinlock, with the
+ * semaphore counting queued entries. */
+struct brpc_agent_thread_info {
+ struct task_struct *p_thread;
+ struct list_head p_transaction_list;
+ struct spinlock p_transaction_lock;
+ struct semaphore p_transaction_sem;
+
+ int bstop;
+};
+
+/* Global bridge context: binder proc/thread hooks, agent and channel. */
+struct binder_rpc_context {
+ void *brpc_proc;
+ void *brpc_thread;
+ struct file *prpc_file;
+ struct brpc_agent_thread_info agent_thread;
+ struct brpc_rpmg_channel chn;
+ //struct list_head svc_list;
+};
+
+/* Wire-format transaction: a self-contained copy of a binder
+ * transaction with fixed-size inline payload buffers. */
+struct brpc_transaction_data {
+ union {
+ /*cmd == BC_TRANSACTION*/
+ binder_uintptr_t local_tr;
+ /*cmd == BC_REPLY*/
+ binder_uintptr_t remote_tr;
+
+ binder_uintptr_t priv;
+ };
+ __u32 handle; /* target descriptor of command transaction */
+ binder_uintptr_t cookie; /* target object cookie */
+ __u32 code; /* transaction command */
+
+ /* General information about the transaction. */
+ __u32 flags;
+
+ binder_size_t data_size; /* number of bytes of data */
+ binder_size_t offsets_size; /* number of bytes of offsets */
+#define BRPC_TR_BUFFER_SIZE_MAX 256
+ char buffer[BRPC_TR_BUFFER_SIZE_MAX];
+ char offsets[BRPC_TR_BUFFER_SIZE_MAX];
+};
+
+/* Payload variants for the non-transaction commands. */
+struct brpc_ref_data {
+ uint32_t target;
+};
+
+struct brpc_local_data {
+ void *proc;
+ void *node;
+};
+
+struct brpc_dead_data {
+ uint32_t handle;
+};
+
+struct brpc_reply_error_data {
+ binder_uintptr_t priv;
+ int reply;
+};
+
+/* One wire message: a BC_/BR_ command plus its payload. */
+struct brpc_data {
+ /*BC_## command*/
+ uint32_t cmd;
+ union {
+ struct brpc_ref_data rpc_ref;
+ struct brpc_transaction_data rpc_tr;
+ struct brpc_local_data rpc_local;
+ struct brpc_dead_data rpc_dead;
+ struct brpc_reply_error_data rpc_reply_error;
+ };
+};
+
+/* Queue entry linking one brpc_data into the agent work list. */
+struct brpc_data_entry {
+ struct list_head list;
+ /* BRPC_DIR_UPLINK / BRPC_DIR_DOWNLINK / BRPC_DIR_LOCAL */
+ uint32_t dir;
+ struct brpc_data rpc_data;
+};
+
+#endif /* _LINUX_BINDER_RPC_COMM_H*/
+
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/tty/serial/zx29_uart.c b/ap/os/linux/linux-3.4.x/drivers/tty/serial/zx29_uart.c
index eed941f..2c0496b 100755
--- a/ap/os/linux/linux-3.4.x/drivers/tty/serial/zx29_uart.c
+++ b/ap/os/linux/linux-3.4.x/drivers/tty/serial/zx29_uart.c
@@ -1825,10 +1825,10 @@
}
if(fifo_len){
- printk("qq >> fifo len %d.\n",fifo_len);
+ //printk("qq >> fifo len %d.\n",fifo_len);
fifo_count = tty_insert_flip_string(&zup->port.state->port,
fifo_buf, fifo_len);
- printk("qq >>fifo count %d,buf is %x %x %x .\n",fifo_count, fifo_buf[0],fifo_buf[1],fifo_buf[2]);
+ //printk("qq >>fifo count %d,buf is %x %x %x .\n",fifo_count, fifo_buf[0],fifo_buf[1],fifo_buf[2]);
fifo_buf[0] = '\0';
fifo_buf[1] = '\0';
fifo_buf[2] = '\0';
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/adb_server.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/adb_server.c
index ff872df..a71960d 100755
--- a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/adb_server.c
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/adb_server.c
@@ -439,7 +439,7 @@
adb_server_plug_notify(USB_RPMSG_NOTIFY_ADB_ONLINE);
dev->agent_start = 1;
dev->agt_error = 0;
- //wake_up(&dev->agt_start_wait);
+ wake_up(&dev->agt_start_wait);
//wakeup adb read and return
//dev->rx_done = 1;
//wake_up(&dev->read_wq);
@@ -476,6 +476,10 @@
printk("adb_enable_rpmsg_agent, adb is NULL, fail\n");
return -1;
}
+ if(g_adb_agent->adb_ready == 0){
+ printk("adb_enable_rpmsg_agent, adb has not init ok, return\n");
+ return;
+ }
g_adb_agent->agent_state = ((flag != 0) ? 1 : 0);
atomic_set(&g_adb_agent->agent_switch, 1);
printk("adb_enable_rpmsg_agent,now %s adb agent\n", (g_adb_agent->agent_state == 1) ? "start" : "stop");
@@ -552,7 +556,7 @@
pr_debug("rpmsg_recv: failed to queue req %p (%d)\n", req, ret);
r = -EIO;
dev->agt_error = 1;
- printk("rpmsg_recv ep-queue fail ret:%d", r);
+ USBSTACK_DBG("rpmsg_recv ep-queue fail ret:%d", r);
goto done;
} else {
USBSTACK_DBG("rx %p queue\n", req);
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_adb.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_adb.c
index 9561ca8..3a506fc 100755
--- a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_adb.c
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/f_adb.c
@@ -88,7 +88,9 @@
#endif
struct work_struct online_inform; /* online inform USB adb */
struct work_struct offline_inform; /* offline inform USB adb */
-
+
+ //this flag means adb is ok
+ int adb_ready;
#ifdef CONFIG_PM
u32 suspend_state;
#endif
@@ -734,9 +736,18 @@
usb_ep_disable(dev->ep_in);
return ret;
}
+
+ dev->adb_ready = 1;
+
#ifdef _USE_VEHICLE_DC
//maybe need clear adb server state
adb_agent_close();
+ if(usb_get_adb_agent() == 1){
+ //delay time is num of jiffies
+ printk("---adb_function_set_alt, switch to cap\n");
+ dev->online = 1;
+ schedule_delayed_work(&_adb_dev->agent_switch_work, 0);
+ }else
#endif
schedule_work(&dev->online_inform);
@@ -750,7 +761,6 @@
/* readers may be blocked waiting for us to go online */
wake_up(&dev->read_wq);
}
-
return 0;
}
@@ -761,6 +771,7 @@
USBSTACK_DBG("%s", __func__);
+ dev->adb_ready = 0;
if(atomic_read(&dev->enable_excl)==0)
return;
else
diff --git a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c
index 7e53a63..b2f470f 100755
--- a/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c
+++ b/ap/os/linux/linux-3.4.x/drivers/usb/gadget/usb_netlink.c
@@ -1020,6 +1020,11 @@
switch_mode = USB_SWITCH_DEBUG;
usb_record_dbginfo(USB_SWITCH_TO_DEBUG, 0, 0);
schedule_work(&switch_usbmode);
+ //if boot from user mode, this flag doesn't set
+ if(adb_agent_state == 0){
+ printk("usb_parse_cap_notify,set debug mode, set adb_agent_state \n");
+ adb_agent_state =1;
+ }
break;
case USB_RPMSG_SWITCH_USER_MODE:
if(switch_mode == USB_SWITCH_USER){