[Feature][API-751][GSW165/167/172/173][T8TSK-135][Data] RIL3.0 socket broadcast optimization, Data part
Change-Id: I298bc248cbfbad881b84dda5edd4eb28c3781f73
diff --git a/src/lynq/lib/liblynq-data/lynq_data_urc.cpp b/src/lynq/lib/liblynq-data/lynq_data_urc.cpp
new file mode 100755
index 0000000..aa38f0d
--- /dev/null
+++ b/src/lynq/lib/liblynq-data/lynq_data_urc.cpp
@@ -0,0 +1,327 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <string.h>
+#include <unistd.h>
+#include <binder/Parcel.h>
+#include <log/log.h>
+#include <cutils/jstring.h>
+#include <pthread.h>
+#include <new>
+#include <list>
+#include "lynq_data.h"
+#include "lynq_data_urc.h"
+#include "liblog/lynq_deflog.h"
+#include "lynq_shm.h"
+
+#define LYNQ_REC_BUF 8192
+
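+/*
+ * URC datagrams are handled by a two-thread pipeline: thread_urc_recv() reads
+ * each UDP datagram, wraps the payload in a Parcel, and appends it to this
+ * queue; thread_urc_process() drains the queue and dispatches each Parcel via
+ * urc_msg_process(). s_lynq_urc_process_mutex/_cond guard the queue.
+ */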
+static std::list<Parcel*> s_urc_recv_parcel_list;
+
+int lynq_len_urc_addr_serv = sizeof(struct sockaddr_in);
+struct sockaddr_in urc_local_addr;
+static int lynq_urc_sockfd = -1;
+bool data_urc_recive_status = 1;
+static pthread_mutex_t s_lynq_urc_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t s_lynq_urc_cond = PTHREAD_COND_INITIALIZER;
+
+/*recv*/
+pthread_t data_urc_recv_tid = -1;
+static pthread_mutex_t s_lynq_urc_recv_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t s_lynq_urc_recv_cond = PTHREAD_COND_INITIALIZER;
+
+/*process*/
+pthread_t data_urc_process_tid = -1;
+bool data_urc_process_status = 1;
+static pthread_mutex_t s_lynq_urc_process_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t s_lynq_urc_process_cond = PTHREAD_COND_INITIALIZER;
+
+#define SHM_BUFFER_INDEX_OFFSET 1
+#define SHM_BUFFER_SIZE_OFFSET 16
+#define SHM_BUFFER_INDEX_MASK 0x0000007F
+#define SHM_BUFFER_SIZE_MASK 0x0000FFFF
+
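+/*
+ * resp_type packs shared-memory routing info into one int32. The layout below
+ * is implied by the offset/mask macros above:
+ *
+ *   bits  1..7  : shm buffer index + 1 (0 means the payload is inline)
+ *   bits 16..31 : payload size in bytes
+ *
+ * e.g. index 4, size 0x0200 encodes as ((4 + 1) << 1) | (0x0200 << 16)
+ *      = 0x0200000A.
+ */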
+bool urc_data_is_in_shm_data(int responseType, int& level, int& index, int& size)
+{
+    int shm_index = ((responseType >> SHM_BUFFER_INDEX_OFFSET) & SHM_BUFFER_INDEX_MASK);
+    if (shm_index > 0)
+    {
+        index = shm_index - 1;
+        size = ((responseType >> SHM_BUFFER_SIZE_OFFSET) & SHM_BUFFER_SIZE_MASK);
+        if (size >= (int)(sizeof(int32_t) * 3) && get_shem_buffer_level(size, &level))
+        {
+            LYINFLOG("urc_data_is_in_shm_data level is %d, index is %d, size is %d", level, index, size);
+            return true;
+        }
+    }
+    LYINFLOG("urc_data_is_in_shm_data return false, responseType is %d", responseType);
+    return false;
+}
+
+void cleanup_urc_process_mutex(void *arg)
+{
+    (void)arg; /* unused */
+    pthread_mutex_unlock(&s_lynq_urc_process_mutex);
+}
+
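+/*
+ * Producer thread: blocks in recvfrom() on the URC socket. For each datagram
+ * it validates the lynq_head_t header, builds a Parcel either from the
+ * shared-memory buffer referenced by resp_type or from the inline payload,
+ * and queues it for thread_urc_process().
+ */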
+void *thread_urc_recv(void *arg)
+{
+    Parcel *urc_p = NULL;
+    char urc_data[LYNQ_REC_BUF];
+    ssize_t res = 0;
+    lynq_head_t* phead;
+    (void)arg; /* unused */
+
+ LYINFLOG("urc recv thread is running");
+ while(data_urc_recive_status)
+ {
+        memset(urc_data, 0, sizeof(urc_data));
+        lynq_len_urc_addr_serv = sizeof(urc_local_addr); /* value-result: reset before every recvfrom() */
+        res = recvfrom(lynq_urc_sockfd, urc_data, sizeof(urc_data), 0, (struct sockaddr *)&urc_local_addr, (socklen_t*)&lynq_len_urc_addr_serv);
+
+        if(res < (ssize_t)(sizeof(int32_t) * 2))
+ {
+ LYERRLOG("thread_urc_recv step2 fail: res is %d",res);
+ continue;
+ }
+
+ phead=(lynq_head_t*) urc_data;
+ if(is_support_urc(phead->urcid)==false)
+ {
+ continue;
+ }
+        urc_p = new (std::nothrow) Parcel(); /* plain new would throw, never return NULL */
+        if(urc_p == NULL)
+ {
+ LYERRLOG("new parcel failure!!!");
+ continue;
+ }
+ int level,index,size;
+ if(urc_data_is_in_shm_data(phead->resp_type,level,index,size))
+ {
+            urc_p->setData((uint8_t *)get_shem_buffer(level, index), size); /* payload was passed via shared memory */
+ }
+        else if(res >= (ssize_t)(sizeof(int32_t) * 3))
+        {
+            urc_p->setData((uint8_t *)urc_data, res); /* payload is inline in the datagram */
+ }
+ else
+ {
+ LYERRLOG("res %d error!!!", res);
+ delete urc_p;
+ urc_p = NULL;
+ continue;
+ }
+ urc_p->setDataPosition(0);
+ if(urc_p->dataAvail()>0)
+ {
+ pthread_mutex_lock(&s_lynq_urc_process_mutex);
+ s_urc_recv_parcel_list.push_back(urc_p);
+ pthread_cond_broadcast(&s_lynq_urc_process_cond);
+ pthread_mutex_unlock(&s_lynq_urc_process_mutex);
+ }
+ else
+ {
+ delete urc_p;
+ urc_p = NULL;
+ }
+ }
+ LYINFLOG("urc recv thread ended");
+ return NULL;
+}
+
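+/*
+ * Consumer thread: waits on s_lynq_urc_process_cond until the queue is
+ * non-empty, pops one Parcel at a time, and dispatches it under
+ * s_lynq_urc_mutex so urc_msg_process() callbacks are serialized.
+ */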
+void *thread_urc_process(void *arg)
+{
+    Parcel *urc_p = NULL;
+    std::list<Parcel*>::iterator iter;
+    (void)arg; /* unused */
+
+ LYINFLOG("urc process thread is running");
+    pthread_cleanup_push(cleanup_urc_process_mutex, NULL); /* unlock queue mutex if canceled inside pthread_cond_wait() */
+ while (data_urc_process_status)
+ {
+ pthread_mutex_lock(&s_lynq_urc_process_mutex);
+ while(s_urc_recv_parcel_list.empty())
+ {
+ pthread_cond_wait(&s_lynq_urc_process_cond,&s_lynq_urc_process_mutex);
+ }
+ iter=s_urc_recv_parcel_list.begin();
+ urc_p = (*iter);
+ s_urc_recv_parcel_list.erase(iter);
+ pthread_mutex_unlock(&s_lynq_urc_process_mutex);
+ urc_p->setDataPosition(0);
+ if(urc_p->dataAvail()>0)
+ {
+ pthread_mutex_lock(&s_lynq_urc_mutex);
+ urc_msg_process(urc_p);
+ pthread_mutex_unlock(&s_lynq_urc_mutex);
+ }
+ delete urc_p;
+ urc_p = NULL;
+ }
+ pthread_cleanup_pop(0);
+ LYINFLOG("urc process thread ended");
+ return NULL;
+}
+
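+/* Create and bind the UDP socket that receives URC broadcasts on LYNQ_URC_SERVICE_PORT. */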
+int lynq_socket_recv_start()
+{
+    int rt = 0;
+    int on = 1;
+
+    lynq_urc_sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+    if(lynq_urc_sockfd < 0)
+    {
+        perror("create socket for udp fail");
+        return -1;
+    }
+    memset(&urc_local_addr, 0, sizeof(urc_local_addr));
+    urc_local_addr.sin_family = AF_INET;
+    urc_local_addr.sin_port = htons(LYNQ_URC_SERVICE_PORT);
+    urc_local_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+    /* Allow reuse of the local address and port */
+    rt = setsockopt(lynq_urc_sockfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+ if(rt<0)
+ {
+ perror("SO_REUSEADDR fail\n");
+ close(lynq_urc_sockfd);
+ lynq_urc_sockfd = -1;
+ return -1;
+ }
+ rt = bind(lynq_urc_sockfd ,(struct sockaddr*)&urc_local_addr, sizeof(urc_local_addr));
+ if (rt == -1)
+ {
+ perror("bind failed");
+ close(lynq_urc_sockfd);
+ lynq_urc_sockfd = -1;
+ return -1;
+ }
+ return 0;
+}
+
+int lynq_socket_recv_stop()
+{
+ if (lynq_urc_sockfd >=0)
+ {
+ close(lynq_urc_sockfd);
+ lynq_urc_sockfd = -1;
+ }
+ return 0;
+}
+
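+/*
+ * Stop helpers: pthread_cancel() breaks the worker threads out of their
+ * blocking calls (recvfrom and pthread_cond_wait are cancellation points);
+ * the cleanup handler pushed in thread_urc_process() releases the queue
+ * mutex if cancellation lands inside the wait.
+ */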
+void lynq_urc_recv_thread_stop()
+{
+ int ret;
+
+ pthread_mutex_lock(&s_lynq_urc_process_mutex);
+ data_urc_recive_status = 0;
+ if (data_urc_recv_tid != -1)
+ {
+ ret = pthread_cancel(data_urc_recv_tid);
+ LYDBGLOG("pthread cancel ret = %d",ret);
+ }
+ pthread_mutex_unlock(&s_lynq_urc_process_mutex);
+
+ if (data_urc_recv_tid != -1)
+ {
+ ret = pthread_join(data_urc_recv_tid,NULL);
+ LYDBGLOG("pthread join ret = %d",ret);
+ data_urc_recv_tid = -1;
+ }
+}
+
+void lynq_urc_process_thread_stop()
+{
+ int ret;
+
+ pthread_mutex_lock(&s_lynq_urc_process_mutex);
+ pthread_mutex_lock(&s_lynq_urc_mutex);
+
+ data_urc_process_status = 0;
+ if (data_urc_process_tid != -1)
+ {
+ ret = pthread_cancel(data_urc_process_tid);
+ LYDBGLOG("pthread cancel ret = %d",ret);
+ }
+ pthread_mutex_unlock(&s_lynq_urc_mutex);
+ pthread_mutex_unlock(&s_lynq_urc_process_mutex);
+
+ if (data_urc_process_tid != -1)
+ {
+ ret = pthread_join(data_urc_process_tid,NULL);
+ LYDBGLOG("pthread join ret = %d",ret);
+ data_urc_process_tid = -1;
+ }
+}
+
+
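+/*
+ * Bring-up order: shared memory first (ril_init_mem), then the socket, then
+ * the two worker threads; lynq_deinit_data_urc_thread() tears everything
+ * down again. A minimal usage sketch (illustrative; the real caller lives
+ * elsewhere in liblynq-data):
+ *
+ *   if (lynq_init_data_urc_thread() != 0) { / * handle error * / }
+ *   ... URCs are delivered via urc_msg_process() ...
+ *   lynq_deinit_data_urc_thread();
+ */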
+int lynq_init_data_urc_thread()
+{
+ int ret = 0;
+ if(ril_init_mem()!=0)
+ {
+ LYERRLOG("ril_init_mem fail");
+ return -1;
+ }
+
+ ret = lynq_socket_recv_start();
+ if (ret != 0)
+ {
+ LYERRLOG("lynq_socket_recv_start fail");
+ ril_deinit_mem();
+ return -1;
+ }
+
+ pthread_mutex_lock(&s_lynq_urc_process_mutex);
+ std::list<Parcel*>::iterator iter;
+ for (iter=s_urc_recv_parcel_list.begin();iter!=s_urc_recv_parcel_list.end();++iter)
+ {
+ delete(*iter);
+ }
+ s_urc_recv_parcel_list.clear();
+ pthread_mutex_unlock(&s_lynq_urc_process_mutex);
+
+    data_urc_recive_status = 1;
+
+    ret = pthread_create(&data_urc_recv_tid,NULL,thread_urc_recv,NULL);
+    if (ret != 0) /* pthread_create returns 0 on success, a positive errno on failure */
+ {
+ LYERRLOG("urc recv pthread create error");
+ data_urc_recive_status = 0;
+ lynq_socket_recv_stop();
+ ril_deinit_mem();
+ return -1;
+ }
+
+ data_urc_process_status = 1;
+    ret = pthread_create(&data_urc_process_tid,NULL,thread_urc_process,NULL);
+    if (ret != 0)
+    {
+        LYERRLOG("urc process pthread create error");
+
+ data_urc_process_status = 0;
+ lynq_socket_recv_stop();
+ lynq_urc_recv_thread_stop();
+
+ return -1;
+ }
+ return 0;
+}
+
+int lynq_deinit_data_urc_thread()
+{
+ lynq_socket_recv_stop();
+ lynq_urc_recv_thread_stop();
+ lynq_urc_process_thread_stop();
+
+ pthread_mutex_lock(&s_lynq_urc_process_mutex);
+ std::list<Parcel*>::iterator iter;
+ for (iter=s_urc_recv_parcel_list.begin();iter!=s_urc_recv_parcel_list.end();++iter)
+ {
+ delete(*iter);
+ }
+ s_urc_recv_parcel_list.clear();
+ pthread_mutex_unlock(&s_lynq_urc_process_mutex);
+
+    ril_deinit_mem();
+    return 0;
+}
\ No newline at end of file