[Feature] add GA346 baseline version

Change-Id: Ic62933698569507dcf98240cdf5d9931ae34348f
diff --git a/framework/lynq-framework-service/src/stateManager/link.cpp b/framework/lynq-framework-service/src/stateManager/link.cpp
new file mode 100644
index 0000000..1456eb4
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/link.cpp
@@ -0,0 +1,110 @@
+/* ********************************
+ * Author:       Warren
+ * License:      MobileTek
+ *//** @file link.h*//*
+ *
+ ********************************/
+#include "link.h"
+#include <stdlib.h>
+#include <string.h>
+#include <log/log.h>
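+
+/*
+ * Singly linked list mapping a request token to the pthread condition variable
+ * (and its cond_array slot index) associated with that request. Nodes are
+ * prepended, so the sentinel created by createCondLinkHead() stays at the tail.
+ */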
+COND_NODE* createCondLinkHead()
+{
+    COND_NODE *head = (COND_NODE *)malloc(sizeof(COND_NODE));
+    if(head==NULL)
+    {
+         return NULL;
+    }
+    memset(head,0,sizeof(COND_NODE));
+    head->next=NULL;
+    return head;
+}
+
+COND_NODE* initConditionLink()
+{
+    COND_NODE* head = createCondLinkHead();
+    if(head==NULL)
+    {
+        return NULL;
+    }
+    return head;
+}
+
+COND_NODE * addCondLinkNode(COND_NODE *head,int32_t token,int index,pthread_cond_t cond)
+{
+    COND_NODE* Node = (COND_NODE *)malloc(sizeof(COND_NODE));
+    if (Node)
+    {
+        /* only touch the node once the allocation is known to have succeeded */
+        memset(Node,0,sizeof(COND_NODE));
+        Node->token = token;
+        Node->cond = cond;
+        Node->cond_index=index;
+        Node->next = head;
+        head = Node;
+        //LYDBGLOG("[%s] node->token is %x,request is %d\n",__FUNCTION__,Node->token,Node->request);
+    }
+    else 
+    {
+        //LYDBGLOG("[%s] malloc Node failed!\n",__FUNCTION__);
+        return head;
+    }
+    return head;
+}
+
+COND_NODE * DeleteLinkNode(int32_t token,COND_NODE *head)
+{
+    COND_NODE *p,*temp;
+    p = head;
+    if((p == NULL)||(p->next == NULL))
+    {
+       // LYDBGLOG("[%s] lynqDeQueue head is NULL\n",__FUNCTION__);
+        return head;
+    }
+    //delete head node
+    if(p->token == token)
+    {
+        temp=p->next;
+        free(p);
+        p =NULL;
+       // LYDBGLOG("[%s] delete head note!!\n",__FUNCTION__);
+        return temp;
+        //return head;
+    }
+    //delete intermediate node
+    do
+    {
+        temp = p;
+        p=p->next;
+        if(p->token==token)
+        {
+            temp->next=p->next;
+            free(p);
+            p=NULL;
+           // LYDBGLOG("[%s] delete intermediate node!!\n",__FUNCTION__);
+            return head;
+        }
+    }while(p->next!=NULL); /* stop at the tail; the old condition dereferenced p->next even when it was NULL */
+   // LYDBGLOG("[%s] Not find this token,token is %d!!\n",__FUNCTION__,token);
+    return head;
+}
+
+COND_NODE * searchRequestinCondLink(int32_t token,COND_NODE *head)
+{
+    COND_NODE *p;
+    p=head;
+    if(p!=NULL)
+    {
+       do
+       {
+           if(p->token == token)
+           {
+               RLOGD("[searchRequestinCondLink] search  request %x success",token);
+               return p;
+           }
+           p = p->next;
+       } while (p != NULL);
+    }
+    return NULL;
+}
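+
+/*
+ * Typical usage (sketch only, not exercised in this file): the caller owns the
+ * head pointer and threads the return value back through it on every call.
+ *
+ *   COND_NODE *head = initConditionLink();
+ *   head = addCondLinkNode(head, token, index, cond);
+ *   COND_NODE *hit = searchRequestinCondLink(token, head);
+ *   head = DeleteLinkNode(token, head);
+ */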
+
diff --git a/framework/lynq-framework-service/src/stateManager/link.h b/framework/lynq-framework-service/src/stateManager/link.h
new file mode 100644
index 0000000..daa231b
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/link.h
@@ -0,0 +1,27 @@
+#include <pthread.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct cond_node
+{
+    pthread_cond_t cond;
+    int32_t token;
+    int cond_index;
+    struct cond_node* next;
+}COND_NODE;
+
+COND_NODE* createCondLinkHead();
+COND_NODE* initConditionLink();
+COND_NODE * addCondLinkNode(COND_NODE *head,int32_t token,int index,pthread_cond_t cond);
+COND_NODE * DeleteLinkNode(int32_t token,COND_NODE *head);
+COND_NODE * searchRequestinCondLink(int32_t token,COND_NODE *head);
+#ifdef __cplusplus
+}
+#endif
+
+
+
diff --git a/framework/lynq-framework-service/src/stateManager/main.c b/framework/lynq-framework-service/src/stateManager/main.c
new file mode 100644
index 0000000..6061208
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/main.c
@@ -0,0 +1,54 @@
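+/*
+ * Disabled (#if 0) sample driver: exercises the thread pool together with the
+ * time-management condition array by queuing a few wait jobs and then
+ * signalling individual slots. Kept for reference only.
+ */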
+#include <stdio.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <semaphore.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include "thpool.h"
+#include "timeManager/timeManagement.h"
+#if 0
+int main_test(int argc,char **argv)
+{
+    timerArgv *timeMan=(timerArgv*)malloc(sizeof(timerArgv));
+    initTimeManagement(10,timeMan);
+    cond_used_state *node=NULL;
+    for (int i=0;i<5;i++)
+    {
+        request_cond *requestCond=(request_cond*)malloc(sizeof(request_cond));
+        printf("[Warren test] add task %d\n",i);
+        node = findUnusedCond(timeMan->cond_array,CONDARRAYMAX);
+        requestCond->cond_index=node->index;
+        requestCond->requestID=i;
+        printf("[Warren test] cond_index=%d,requestID=%d task %d\n",requestCond->cond_index,requestCond->requestID,i);
+        thpool_add_work(timeMan->pool,startWaitResp,(void*)requestCond);
+        //millli_sleep_with_restart(1);
+        node->use_state = 1;
+    }
+    printf("[Warren test] wait 5s....\n");
+    sleep(5);
+    printf("Sending signal....\n");
+    pthread_mutex_lock(&timeMan->cond_array[0].mutex);
+    pthread_cond_signal(&timeMan->cond_array[0].cond);
+    pthread_mutex_unlock(&timeMan->cond_array[0].mutex);
+    printf("end cond_0....\n");
+    sleep(3);
+    printf("Sending signal_2....\n");
+    pthread_mutex_lock(&timeMan->cond_array[2].mutex);
+    pthread_cond_signal(&timeMan->cond_array[2].cond);
+    pthread_mutex_unlock(&timeMan->cond_array[2].mutex);
+    printf("end cond_2\n");
+    printf("Sending signal_1....\n");
+    sleep(3);
+    pthread_mutex_lock(&timeMan->cond_array[1].mutex);
+    pthread_cond_signal(&timeMan->cond_array[1].cond);
+    pthread_mutex_unlock(&timeMan->cond_array[1].mutex);
+    printf("end cond_1\n");
+    thpool_wait(timeMan->pool);
+    puts("Killing threadpool");
+    thpool_destroy(timeMan->pool);
+    free(timeMan);
+    return 0;
+}
+#endif
diff --git a/framework/lynq-framework-service/src/stateManager/stateManager.cpp b/framework/lynq-framework-service/src/stateManager/stateManager.cpp
new file mode 100644
index 0000000..6407939
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/stateManager.cpp
@@ -0,0 +1,388 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein
+ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
+ * Without the prior written permission of MediaTek inc. and/or its licensors,
+ * any reproduction, modification, use or disclosure of MediaTek Software,
+ * and information contained herein, in whole or in part, shall be strictly prohibited.
+ */
+/* MediaTek Inc. (C) 2010. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ *
+ * The following software/firmware and/or related documentation ("MediaTek Software")
+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
+ * applicable license agreements with MediaTek Inc.
+ */
+#include "stateManager/stateManager.h"
+
+#include <string>
+#include <alloca.h>
+#include <stdlib.h>
+#include <vector>
+#include <arpa/inet.h>
+#include <string.h>
+
+#include "../util/AtLine.h"
+#include "powerManager.h"
+#include "util/utils.h"
+
+#undef LOG_TAG
+#define LOG_TAG "DEMO_MANAGER"
+
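+/*
+ * Each handler below packs its command-line arguments into an android::Parcel
+ * (typically an element count followed by the values), rewinds the data
+ * position and forwards the parcel to the dispatch function attached to the
+ * request (pRI->pCI->dispatchFunction). Handlers that bail out early free the
+ * RequestInfo themselves.
+ */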
+//RIL_REQUEST_DEVICE_IDENTITY
+int getDeviceIdentity(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+    android::Parcel p;
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+//  RIL_REQUEST_GET_IMEI
+int getIMEI(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+    android::Parcel p;
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+
+//RIL_REQUEST_GET_IMEISV
+int getIMEISV(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+    android::Parcel p;
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+
+//RIL_REQUEST_BASEBAND_VERSION
+int getBasebandVersion(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+    android::Parcel p;
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+
+//RIL_REQUEST_RESET_RADIO
+int resetRadio(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+    android::Parcel p;
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+
+//RIL_REQUEST_SCREEN_STATE
+int getScreenState(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+    android::Parcel p;
+    size_t pos = p.dataPosition();
+
+    p.writeInt32(1);
+    p.writeInt32(atoi(argv[1]));
+
+    p.setDataPosition(pos);
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+
+//RIL_REQUEST_SET_TRM
+int setTRM(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+//  android::Parcel p;
+
+//  pRI->pCI->dispatchFunction(p, pRI);
+    free(pRI);
+    return 0;
+}
+//RIL_REQUEST_SET_IMS_ENABLE
+int setIMSEnable(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+    android::Parcel p;
+    size_t pos = p.dataPosition();
+
+    p.writeInt32(1);
+    p.writeInt32(atoi(argv[1]));
+    p.setDataPosition(pos);
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+//RIL_REQUEST_OEM_HOOK_RAW
+int sendATCMD(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+    android::Parcel p;
+    char *cmd = (char *)argv[1];
+    size_t pos = p.dataPosition();
+    if (cmd == NULL){
+        RLOGD("sendATCMD:cmd is null\n");
+        free(pRI);
+        return -1;
+    }
+    int len = strlen(cmd);
+    p.writeInt32(len);
+    p.write((const void*)cmd,len);
+    RLOGD("sendATCMD: %s %d",cmd,strlen(cmd));
+
+    p.setDataPosition(pos);
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+#ifdef KEEP_ALIVE
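+/*
+ * Keep-alive helpers: tranferToNetByteOrder() converts a presentation-format
+ * IPv4/IPv6 address string into network-byte-order bytes appended to dest,
+ * while startKeepAlivePro()/stopKeepAlivePro() pack the command-line arguments
+ * into a Parcel before dispatching the request.
+ */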
+//RIL_REQUEST_START_KEEPALIVE_PRO
+void tranferToNetByteOrder(int type, char* addr, std::vector<uint8_t> & dest) {
+    RLOGD("type is %d, addr: %s", type ,addr);
+    int ret;
+    int len = 0;
+    int domain;
+    if(type == static_cast<int>(RIL_PacketType::IPV4_TCP) || type == static_cast<int>(RIL_PacketType::IPV4_UDP)) {
+        len = sizeof(struct in_addr);
+        domain = AF_INET;
+    } else if(type == static_cast<int>(RIL_PacketType::IPV6_TCP) || type == static_cast<int>(RIL_PacketType::IPV6_UDP)) {
+        len = sizeof(struct in6_addr);   /* assign the outer len; a shadowing local would leave it 0 for IPv6 */
+        domain = AF_INET6;
+    }
+    if (len > 0) {
+        unsigned char buf[len];
+        ret = inet_pton(domain, addr, &buf);
+        if (ret <= 0) {
+            if (ret == 0)
+                RLOGE("Not in presentation format");
+            else
+                RLOGE("inet_pton");
+            return;
+        }
+        for (int i = 0 ; i < len; i++ ) {
+            dest.push_back(buf[i]);
+            RLOGD("tranferToNetByteOrder[%d]: %d", i,buf[i]);
+        }
+    }
+
+}
+
+int startKeepAlivePro(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI) {
+    if (argc != 10){
+        RLOGD("startKeepAlivePro parameters  number isn't enough");
+        free(pRI);
+        return -1;
+    }
+    RLOGD("startKeepAlivePro");
+    std::vector<uint8_t> sourceAddress;
+    std::vector<uint8_t> destinationAddress;
+    int type = atoi(argv[1]);
+    tranferToNetByteOrder(type, argv[2], sourceAddress);
+    int sourcePort = atoi(argv[3]);
+    tranferToNetByteOrder(type, argv[4], destinationAddress);
+    int destinationPort = atoi(argv[5]);
+    int netif_id = atoi(argv[6]);
+    int keepIdleTime = atoi(argv[7]);
+    int keepIntervalTime = atoi(argv[8]);
+    int retryCount = atoi(argv[9]);
+
+    android::Parcel p;
+    size_t pos = p.dataPosition();
+
+    p.writeInt32(type);
+    p.writeByteVector(sourceAddress);
+    p.writeInt32(sourcePort);
+    p.writeByteVector(destinationAddress);
+    p.writeInt32(destinationPort);
+    p.writeInt32(netif_id);
+    p.writeInt32(keepIdleTime);
+    p.writeInt32(keepIntervalTime);
+    p.writeInt32(retryCount);
+
+    p.setDataPosition(pos);
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+
+//RIL_REQUEST_STOP_KEEPALIVE_PRO
+int stopKeepAlivePro(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI) {
+    if (argc != 2){
+        RLOGD("stopKeepAlivePro parameters  number isn't enough");
+        free(pRI);
+        return -1;
+    }
+    RLOGD("stopKeepAlivePro");
+    android::Parcel p;
+    uint32_t id = atoi(argv[1]);
+    RLOGD("stopKeepAlivePro sesssion id:%d", id);
+    size_t pos = p.dataPosition();
+    p.writeInt32(1);
+    p.writeInt32(id);
+
+    p.setDataPosition(pos);
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+
+void composeMsg(int request,const void* data, size_t datalen) {
+    int* p_int = (int*) (data);
+    int numInts = datalen / sizeof(int);
+    if (numInts < 2) {
+        RLOGD("%s error.", android::requestToString(request));
+        std::string fail(android::requestToString(request));
+        fail.append(",fail");
+        sendKeepAlive(fail.c_str());
+        return;
+    }
+    std::string msg(android::requestToString(request));
+    if(request == RIL_REQUEST_START_KEEPALIVE_PRO) {
+        msg.append(",ok");
+    }
+    int sessionHandle = p_int[0];
+    int code = p_int[1];
+    msg.append(",");
+    msg.append(std::to_string(sessionHandle));
+    msg.append(",");
+    msg.append(std::to_string(code));
+    RLOGD("%s response(%s)", android::requestToString(request),msg.c_str());
+    sendKeepAlive(msg.c_str());
+}
+
+void handleKeepAliveResponse(int request, const void* data, size_t datalen, RIL_SOCKET_ID soc_id, bool is_error) {
+    RLOGD("handleKeepAliveResponse(%s) is_error: %d", android::requestToString(request),is_error);
+    if(is_error) {
+        if(request == RIL_REQUEST_START_KEEPALIVE_PRO) {
+            sendKeepAlive("RIL_REQUEST_START_KEEPALIVE_PRO,fail");
+        } else if(request == RIL_REQUEST_STOP_KEEPALIVE_PRO) {
+            sendKeepAlive("RIL_REQUEST_STOP_KEEPALIVE_PRO,fail");
+        }
+    } else {
+        if(request == RIL_REQUEST_START_KEEPALIVE_PRO) {
+            composeMsg(request, data, datalen);
+        } else if(request == RIL_REQUEST_STOP_KEEPALIVE_PRO) {
+            sendKeepAlive("RIL_REQUEST_STOP_KEEPALIVE_PRO,ok");
+        } else if (request == RIL_UNSOL_KEEPALIVE_STATUS_PRO) {
+            composeMsg(request, data, datalen);
+        }
+    }
+}
+#endif /*KEEP_ALIVE*/
+
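+/*
+ * parseAtCmd() handles two unsolicited AT lines: "+ETHERMAL" (rat, temperature
+ * and tx_power) and "+ECAL" (calibration-data-downloaded flag), tokenised with
+ * the AtLine helper.
+ */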
+void parseAtCmd(const char* line) {
+    if (strstr(line, "+ETHERMAL") != NULL) {
+        RLOGD("parse at command: ETHERMAL");
+        AtLine* atLine = new AtLine(line, NULL);
+        int err;
+        atLine->atTokStart(&err);
+        if (err < 0) {
+            delete atLine;
+            RLOGW("this is not a valid response string");
+            return;
+        }
+        int rat = atLine->atTokNextint(&err);
+        if (err < 0) {
+            delete atLine;
+            RLOGW("parse rat fail");
+            return;
+        }
+        int temperature = atLine->atTokNextint(&err);
+        if (err < 0) {
+            delete atLine;
+            RLOGW("parse temperature fail");
+            return;
+        }
+        int tx_power = atLine->atTokNextint(&err);
+        if (err < 0) {
+            delete atLine;
+            RLOGW("parse tx_power fail");
+            return;
+        }
+        RLOGD("[tx_power]rat: %d, temperature: %d, tx_power: %d", rat, temperature, tx_power);
+        printf("[tx_power]rat: %d, temperature: %d, tx_power: %d\n", rat, temperature, tx_power);
+        delete atLine;
+    } else if (strstr(line, "+ECAL") != NULL) {
+        RLOGD("parse at command: ECAL");
+        AtLine* atLine = new AtLine(line, NULL);
+        int err;
+        atLine->atTokStart(&err);
+        if (err < 0) {
+            delete atLine;
+            RLOGW("this is not a valid response string");
+            return;
+        }
+        int cal = atLine->atTokNextint(&err);
+        if (err < 0) {
+            delete atLine;
+            RLOGW("parse rat fail");
+            return;
+        }
+        RLOGD("calibration data is %s", cal == 1 ? "download" : "not download");
+        if (cal == 0) {
+            printf(
+                    "************************************************\n*** NOTICE: calibration data is not download ***\n************************************************\n");
+        }
+        delete atLine;
+    }
+}
+
+//RIL_REQUEST_SET_IMSCFG
+int setIMSCfg(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI)
+{
+    android::Parcel p;
+    size_t pos = p.dataPosition();
+
+    p.writeInt32(6);
+    p.writeInt32(atoi(argv[1]));
+    p.writeInt32(atoi(argv[2]));
+    p.writeInt32(atoi(argv[3]));
+    p.writeInt32(atoi(argv[4]));
+    p.writeInt32(atoi(argv[5]));
+    p.writeInt32(atoi(argv[6]));
+    p.setDataPosition(pos);
+    pRI->pCI->dispatchFunction(p, pRI);
+    return 0;
+}
+/*mobiletek add*/
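+/* g_framework_state is a process-wide singleton that currently only tracks the
+ * call state; it is created once via initFrameworkState(). */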
+frameworkState *g_framework_state=NULL;
+frameworkState::frameworkState()
+{
+    RLOGD("frameworkState init success!!");
+    return;
+}
+
+frameworkState::~frameworkState()
+{
+    RLOGD("frameworkState clean success!!");
+    return;
+}
+
+int frameworkState::getCallState(void)
+{
+    return callState;
+}
+int frameworkState::setCallState(const int state)
+{
+    callState = state;
+    return 0;
+}
+int initFrameworkState()
+{
+    g_framework_state = new frameworkState();
+    if(g_framework_state==NULL)
+    {
+        RLOGE("init class framework fial!!!");
+        exit(EXIT_FAILURE);
+    }
+    RLOGD("init class framework success!!!");
+    return 0;
+}
+
+/*mobiletek end*/
+
diff --git a/framework/lynq-framework-service/src/stateManager/stateManager.h b/framework/lynq-framework-service/src/stateManager/stateManager.h
new file mode 100644
index 0000000..f7990c8
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/stateManager.h
@@ -0,0 +1,71 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein
+ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
+ * Without the prior written permission of MediaTek inc. and/or its licensors,
+ * any reproduction, modification, use or disclosure of MediaTek Software,
+ * and information contained herein, in whole or in part, shall be strictly prohibited.
+ */
+/* MediaTek Inc. (C) 2010. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ *
+ * The following software/firmware and/or related documentation ("MediaTek Software")
+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
+ * applicable license agreements with MediaTek Inc.
+ */
+#ifndef __RIL_STAM__
+#define __RIL_STAM__
+#include  <vendor-ril/telephony/ril.h>
+#include  "common.h"
+
+int getDeviceIdentity(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+int getIMEI(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+int getIMEISV(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+int getBasebandVersion(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+int resetRadio(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+int getScreenState(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+int setTRM(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+int setIMSEnable(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+int sendATCMD(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+void parseAtCmd(const char* line);
+int setIMSCfg(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+#ifdef KEEP_ALIVE
+int startKeepAlivePro(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+int stopKeepAlivePro(int argc, char **argv, RIL_SOCKET_ID socket_id, RequestInfo *pRI);
+void handleKeepAliveResponse(int request, const void* data, size_t datalen, RIL_SOCKET_ID soc_id, bool is_error);
+#endif /*KEEP_ALIVE*/
+
+/*mobiletek add*/
+class frameworkState
+{
+    public:
+        frameworkState();
+        virtual ~frameworkState();
+        int getCallState(void);
+        int setCallState(const int state);
+    private:
+        int callState;
+};
+extern frameworkState *g_framework_state;
+int initFrameworkState();
+/*mobiletek end*/
+
+#endif
diff --git a/framework/lynq-framework-service/src/stateManager/thpool.c b/framework/lynq-framework-service/src/stateManager/thpool.c
new file mode 100644
index 0000000..d139df7
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/thpool.c
@@ -0,0 +1,547 @@
+/* ********************************
+ * Author:       Johan Hanssen Seferidis
+ * License:      MIT
+ * Description:  Library providing a threading pool where you can add
+ *               work. For usage, check the thpool.h file or README.md
+ *
+ *//** @file thpool.h *//*
+ *
+ ********************************/
+
+#define _POSIX_C_SOURCE 200809L
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <errno.h>
+#include <time.h>
+#if defined(__linux__)
+#include <sys/prctl.h>
+#endif
+
+#include "thpool.h"
+
+#ifdef THPOOL_DEBUG
+#define THPOOL_DEBUG 1
+#else
+#define THPOOL_DEBUG 0
+#endif
+
+#if !defined(DISABLE_PRINT) || defined(THPOOL_DEBUG)
+#define err(str) fprintf(stderr, str)
+#else
+#define err(str)
+#endif
+
+static volatile int threads_keepalive;
+static volatile int threads_on_hold;
+
+
+
+/* ========================== STRUCTURES ============================ */
+
+
+/* Binary semaphore */
+typedef struct bsem {
+    pthread_mutex_t mutex;
+    pthread_cond_t   cond;
+    int v;
+} bsem;
+
+
+/* Job */
+typedef struct job{
+    struct job*  prev;                   /* pointer to previous job   */
+    void   (*function)(void* arg);       /* function pointer          */
+    void*  arg;                          /* function's argument       */
+} job;
+
+
+/* Job queue */
+typedef struct jobqueue{
+    pthread_mutex_t rwmutex;             /* used for queue r/w access */
+    job  *front;                         /* pointer to front of queue */
+    job  *rear;                          /* pointer to rear  of queue */
+    bsem *has_jobs;                      /* flag as binary semaphore  */
+    int   len;                           /* number of jobs in queue   */
+} jobqueue;
+
+
+/* Thread */
+typedef struct thread{
+    int       id;                        /* friendly id               */
+    pthread_t pthread;                   /* pointer to actual thread  */
+    struct thpool_* thpool_p;            /* access to thpool          */
+} thread;
+
+
+/* Threadpool */
+typedef struct thpool_{
+    thread**   threads;                  /* pointer to threads        */
+    volatile int num_threads_alive;      /* threads currently alive   */
+    volatile int num_threads_working;    /* threads currently working */
+    pthread_mutex_t  thcount_lock;       /* used for thread count etc */
+    pthread_cond_t  threads_all_idle;    /* signal to thpool_wait     */
+    jobqueue  jobqueue_t;                  /* job queue                 */
+} thpool_;
+
+
+
+
+
+/* ========================== PROTOTYPES ============================ */
+
+
+static int  thread_init(thpool_* thpool_p, struct thread** thread_p, int id);
+static void* thread_do(struct thread* thread_p);
+static void  thread_hold(int sig_id);
+static void  thread_destroy(struct thread* thread_p);
+
+static int   jobqueue_init(jobqueue* jobqueue_p);
+static void  jobqueue_clear(jobqueue* jobqueue_p);
+static void  jobqueue_push(jobqueue* jobqueue_p, struct job* newjob_p);
+static struct job* jobqueue_pull(jobqueue* jobqueue_p);
+static void  jobqueue_destroy(jobqueue* jobqueue_p);
+
+static void  bsem_init(struct bsem *bsem_p, int value);
+static void  bsem_reset(struct bsem *bsem_p);
+static void  bsem_post(struct bsem *bsem_p);
+static void  bsem_post_all(struct bsem *bsem_p);
+static void  bsem_wait(struct bsem *bsem_p);
+
+
+
+
+
+/* ========================== THREADPOOL ============================ */
+
+
+/* Initialise thread pool */
+struct thpool_* thpool_init(int num_threads){
+
+    threads_on_hold   = 0;
+    threads_keepalive = 1;
+
+    if (num_threads < 0){
+        num_threads = 0;
+    }
+
+    /* Make new thread pool */
+    thpool_* thpool_p;
+    thpool_p = (struct thpool_*)malloc(sizeof(struct thpool_));
+    if (thpool_p == NULL){
+        err("thpool_init(): Could not allocate memory for thread pool\n");
+        return NULL;
+    }
+    thpool_p->num_threads_alive   = 0;
+    thpool_p->num_threads_working = 0;
+
+    /* Initialise the job queue */
+    if (jobqueue_init(&thpool_p->jobqueue_t) == -1){
+        err("thpool_init(): Could not allocate memory for job queue\n");
+        free(thpool_p);
+        return NULL;
+    }
+
+    /* Make threads in pool */
+    thpool_p->threads = (struct thread**)malloc(num_threads * sizeof(struct thread *));
+    if (thpool_p->threads == NULL){
+        err("thpool_init(): Could not allocate memory for threads\n");
+        jobqueue_destroy(&thpool_p->jobqueue_t);
+        free(thpool_p);
+        return NULL;
+    }
+
+    pthread_mutex_init(&(thpool_p->thcount_lock), NULL);
+    pthread_cond_init(&thpool_p->threads_all_idle, NULL);
+
+    /* Thread init */
+    int n;
+    for (n=0; n<num_threads; n++){
+        thread_init(thpool_p, &thpool_p->threads[n], n);
+#if THPOOL_DEBUG
+            printf("THPOOL_DEBUG: Created thread %d in pool \n", n);
+#endif
+    }
+
+    /* Wait for threads to initialize */
+    while (thpool_p->num_threads_alive != num_threads) {}
+
+    return thpool_p;
+}
+
+
+/* Add work to the thread pool */
+int thpool_add_work(thpool_* thpool_p, void (*function_p)(void*), void* arg_p){
+    job* newjob;
+
+    newjob=(struct job*)malloc(sizeof(struct job));
+    if (newjob==NULL){
+        err("thpool_add_work(): Could not allocate memory for new job\n");
+        return -1;
+    }
+
+    /* add function and argument */
+    newjob->function=function_p;
+    newjob->arg=arg_p;
+
+    /* add job to queue */
+    jobqueue_push(&thpool_p->jobqueue_t, newjob);
+
+    return 0;
+}
+
+
+/* Wait until all jobs have finished */
+void thpool_wait(thpool_* thpool_p){
+    pthread_mutex_lock(&thpool_p->thcount_lock);
+    while (thpool_p->jobqueue_t.len || thpool_p->num_threads_working) {
+        pthread_cond_wait(&thpool_p->threads_all_idle, &thpool_p->thcount_lock);
+    }
+    pthread_mutex_unlock(&thpool_p->thcount_lock);
+}
+
+
+/* Destroy the threadpool */
+void thpool_destroy(thpool_* thpool_p){
+    /* No need to destroy if it's NULL */
+    if (thpool_p == NULL) return ;
+
+    volatile int threads_total = thpool_p->num_threads_alive;
+
+    /* End each thread's infinite loop */
+    threads_keepalive = 0;
+
+    /* Give one second to kill idle threads */
+    double TIMEOUT = 1.0;
+    time_t start, end;
+    double tpassed = 0.0;
+    time (&start);
+    while (tpassed < TIMEOUT && thpool_p->num_threads_alive){
+        bsem_post_all(thpool_p->jobqueue_t.has_jobs);
+        time (&end);
+        tpassed = difftime(end,start);
+    }
+
+    /* Poll remaining threads */
+    while (thpool_p->num_threads_alive){
+        bsem_post_all(thpool_p->jobqueue_t.has_jobs);
+        sleep(1);
+    }
+
+    /* Job queue cleanup */
+    jobqueue_destroy(&thpool_p->jobqueue_t);
+    /* Deallocs */
+    int n;
+    for (n=0; n < threads_total; n++){
+        thread_destroy(thpool_p->threads[n]);
+    }
+    free(thpool_p->threads);
+    free(thpool_p);
+}
+
+
+/* Pause all threads in threadpool */
+void thpool_pause(thpool_* thpool_p) {
+    int n;
+    for (n=0; n < thpool_p->num_threads_alive; n++){
+        pthread_kill(thpool_p->threads[n]->pthread, SIGUSR1);
+    }
+}
+
+
+/* Resume all threads in threadpool */
+void thpool_resume(thpool_* thpool_p) {
+    // resuming a single threadpool hasn't been
+    // implemented yet, meanwhile this suppresses
+    // the warnings
+    (void)thpool_p;
+
+    threads_on_hold = 0;
+}
+
+
+int thpool_num_threads_working(thpool_* thpool_p){
+    return thpool_p->num_threads_working;
+}
+
+
+
+
+
+/* ============================ THREAD ============================== */
+
+
+/* Initialize a thread in the thread pool
+ *
+ * @param thread        address to the pointer of the thread to be created
+ * @param id            id to be given to the thread
+ * @return 0 on success, -1 otherwise.
+ */
+static int thread_init (thpool_* thpool_p, struct thread** thread_p, int id){
+
+    *thread_p = (struct thread*)malloc(sizeof(struct thread));
+    if (*thread_p == NULL){
+        err("thread_init(): Could not allocate memory for thread\n");
+        return -1;
+    }
+
+    (*thread_p)->thpool_p = thpool_p;
+    (*thread_p)->id       = id;
+
+    pthread_create(&(*thread_p)->pthread, NULL, (void * (*)(void *)) thread_do, (*thread_p));
+    pthread_detach((*thread_p)->pthread);
+    return 0;
+}
+
+
+/* Sets the calling thread on hold */
+static void thread_hold(int sig_id) {
+    (void)sig_id;
+    threads_on_hold = 1;
+    while (threads_on_hold){
+        sleep(1);
+    }
+}
+
+
+/* What each thread is doing
+*
+* In principle this is an endless loop. The only time this loop gets interrupted is once
+* thpool_destroy() is invoked or the program exits.
+*
+* @param  thread        thread that will run this function
+* @return nothing
+*/
+static void* thread_do(struct thread* thread_p){
+
+    /* Set thread name for profiling and debugging */
+    char thread_name[32] = {0};
+    snprintf(thread_name, 32, "thread-pool-%d", thread_p->id);
+
+#if defined(__linux__)
+    /* Use prctl instead to prevent using _GNU_SOURCE flag and implicit declaration */
+    prctl(PR_SET_NAME, thread_name);
+#elif defined(__APPLE__) && defined(__MACH__)
+    pthread_setname_np(thread_name);
+#else
+    err("thread_do(): pthread_setname_np is not supported on this system");
+#endif
+
+    /* Assure all threads have been created before starting serving */
+    thpool_* thpool_p = thread_p->thpool_p;
+
+    /* Register signal handler */
+    struct sigaction act;
+    sigemptyset(&act.sa_mask);
+    act.sa_flags = 0;
+    act.sa_handler = thread_hold;
+    if (sigaction(SIGUSR1, &act, NULL) == -1) {
+        err("thread_do(): cannot handle SIGUSR1");
+    }
+
+    /* Mark thread as alive (initialized) */
+    pthread_mutex_lock(&thpool_p->thcount_lock);
+    thpool_p->num_threads_alive += 1;
+    pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+    while(threads_keepalive){
+
+        bsem_wait(thpool_p->jobqueue_t.has_jobs);
+
+        if (threads_keepalive){
+
+            pthread_mutex_lock(&thpool_p->thcount_lock);
+            thpool_p->num_threads_working++;
+            pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+            /* Read job from queue and execute it */
+            void (*func_buff)(void*);
+            void*  arg_buff;
+            job* job_p = jobqueue_pull(&thpool_p->jobqueue_t);
+            if (job_p) {
+                func_buff = job_p->function;
+                arg_buff  = job_p->arg;
+                func_buff(arg_buff);
+                free(job_p);
+            }
+
+            pthread_mutex_lock(&thpool_p->thcount_lock);
+            thpool_p->num_threads_working--;
+            if (!thpool_p->num_threads_working) {
+                pthread_cond_signal(&thpool_p->threads_all_idle);
+            }
+            pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+        }
+    }
+    pthread_mutex_lock(&thpool_p->thcount_lock);
+    thpool_p->num_threads_alive --;
+    pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+    return NULL;
+}
+
+
+/* Frees a thread  */
+static void thread_destroy (thread* thread_p){
+    free(thread_p);
+}
+
+
+
+
+
+/* ============================ JOB QUEUE =========================== */
+
+
+/* Initialize queue */
+static int jobqueue_init(jobqueue* jobqueue_p){
+    jobqueue_p->len = 0;
+    jobqueue_p->front = NULL;
+    jobqueue_p->rear  = NULL;
+
+    jobqueue_p->has_jobs = (struct bsem*)malloc(sizeof(struct bsem));
+    if (jobqueue_p->has_jobs == NULL){
+        return -1;
+    }
+
+    pthread_mutex_init(&(jobqueue_p->rwmutex), NULL);
+    bsem_init(jobqueue_p->has_jobs, 0);
+
+    return 0;
+}
+
+
+/* Clear the queue */
+static void jobqueue_clear(jobqueue* jobqueue_p){
+
+    while(jobqueue_p->len){
+        free(jobqueue_pull(jobqueue_p));
+    }
+
+    jobqueue_p->front = NULL;
+    jobqueue_p->rear  = NULL;
+    bsem_reset(jobqueue_p->has_jobs);
+    jobqueue_p->len = 0;
+
+}
+
+
+/* Add (allocated) job to queue
+ */
+static void jobqueue_push(jobqueue* jobqueue_p, struct job* newjob){
+
+    pthread_mutex_lock(&jobqueue_p->rwmutex);
+    newjob->prev = NULL;
+
+    switch(jobqueue_p->len){
+
+        case 0:  /* if no jobs in queue */
+                    jobqueue_p->front = newjob;
+                    jobqueue_p->rear  = newjob;
+                    break;
+
+        default: /* if jobs in queue */
+                    jobqueue_p->rear->prev = newjob;
+                    jobqueue_p->rear = newjob;
+
+    }
+    jobqueue_p->len++;
+
+    bsem_post(jobqueue_p->has_jobs);
+    pthread_mutex_unlock(&jobqueue_p->rwmutex);
+}
+
+
+/* Get first job from queue(removes it from queue)
+ * Notice: Caller MUST hold a mutex
+ */
+static struct job* jobqueue_pull(jobqueue* jobqueue_p){
+
+    pthread_mutex_lock(&jobqueue_p->rwmutex);
+    job* job_p = jobqueue_p->front;
+
+    switch(jobqueue_p->len){
+
+        case 0:  /* if no jobs in queue */
+                      break;
+
+        case 1:  /* if one job in queue */
+                    jobqueue_p->front = NULL;
+                    jobqueue_p->rear  = NULL;
+                    jobqueue_p->len = 0;
+                    break;
+
+        default: /* if >1 jobs in queue */
+                    jobqueue_p->front = job_p->prev;
+                    jobqueue_p->len--;
+                    /* more than one job in queue -> post it */
+                    bsem_post(jobqueue_p->has_jobs);
+
+    }
+
+    pthread_mutex_unlock(&jobqueue_p->rwmutex);
+    return job_p;
+}
+
+
+/* Free all queue resources back to the system */
+static void jobqueue_destroy(jobqueue* jobqueue_p){
+    jobqueue_clear(jobqueue_p);
+    free(jobqueue_p->has_jobs);
+}
+
+
+
+
+
+/* ======================== SYNCHRONISATION ========================= */
+
+
+/* Init semaphore to 1 or 0 */
+static void bsem_init(bsem *bsem_p, int value) {
+    if (value < 0 || value > 1) {
+        err("bsem_init(): Binary semaphore can take only values 1 or 0");
+        exit(1);
+    }
+    pthread_mutex_init(&(bsem_p->mutex), NULL);
+    pthread_cond_init(&(bsem_p->cond), NULL);
+    bsem_p->v = value;
+}
+
+
+/* Reset semaphore to 0 */
+static void bsem_reset(bsem *bsem_p) {
+    bsem_init(bsem_p, 0);
+}
+
+
+/* Post to at least one thread */
+static void bsem_post(bsem *bsem_p) {
+    pthread_mutex_lock(&bsem_p->mutex);
+    bsem_p->v = 1;
+    pthread_cond_signal(&bsem_p->cond);
+    pthread_mutex_unlock(&bsem_p->mutex);
+}
+
+
+/* Post to all threads */
+static void bsem_post_all(bsem *bsem_p) {
+    pthread_mutex_lock(&bsem_p->mutex);
+    bsem_p->v = 1;
+    pthread_cond_broadcast(&bsem_p->cond);
+    pthread_mutex_unlock(&bsem_p->mutex);
+}
+
+
+/* Wait on semaphore until semaphore has value 0 */
+static void bsem_wait(bsem* bsem_p) {
+    pthread_mutex_lock(&bsem_p->mutex);
+    while (bsem_p->v != 1) {
+        pthread_cond_wait(&bsem_p->cond, &bsem_p->mutex);
+    }
+    bsem_p->v = 0;
+    pthread_mutex_unlock(&bsem_p->mutex);
+}
diff --git a/framework/lynq-framework-service/src/stateManager/thpool.cpp b/framework/lynq-framework-service/src/stateManager/thpool.cpp
new file mode 100644
index 0000000..d139df7
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/thpool.cpp
@@ -0,0 +1,547 @@
+/* ********************************
+ * Author:       Johan Hanssen Seferidis
+ * License:      MIT
+ * Description:  Library providing a threading pool where you can add
+ *               work. For usage, check the thpool.h file or README.md
+ *
+ *//** @file thpool.h *//*
+ *
+ ********************************/
+
+#define _POSIX_C_SOURCE 200809L
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <errno.h>
+#include <time.h>
+#if defined(__linux__)
+#include <sys/prctl.h>
+#endif
+
+#include "thpool.h"
+
+#ifdef THPOOL_DEBUG
+#define THPOOL_DEBUG 1
+#else
+#define THPOOL_DEBUG 0
+#endif
+
+#if !defined(DISABLE_PRINT) || defined(THPOOL_DEBUG)
+#define err(str) fprintf(stderr, str)
+#else
+#define err(str)
+#endif
+
+static volatile int threads_keepalive;
+static volatile int threads_on_hold;
+
+
+
+/* ========================== STRUCTURES ============================ */
+
+
+/* Binary semaphore */
+typedef struct bsem {
+    pthread_mutex_t mutex;
+    pthread_cond_t   cond;
+    int v;
+} bsem;
+
+
+/* Job */
+typedef struct job{
+    struct job*  prev;                   /* pointer to previous job   */
+    void   (*function)(void* arg);       /* function pointer          */
+    void*  arg;                          /* function's argument       */
+} job;
+
+
+/* Job queue */
+typedef struct jobqueue{
+    pthread_mutex_t rwmutex;             /* used for queue r/w access */
+    job  *front;                         /* pointer to front of queue */
+    job  *rear;                          /* pointer to rear  of queue */
+    bsem *has_jobs;                      /* flag as binary semaphore  */
+    int   len;                           /* number of jobs in queue   */
+} jobqueue;
+
+
+/* Thread */
+typedef struct thread{
+    int       id;                        /* friendly id               */
+    pthread_t pthread;                   /* pointer to actual thread  */
+    struct thpool_* thpool_p;            /* access to thpool          */
+} thread;
+
+
+/* Threadpool */
+typedef struct thpool_{
+    thread**   threads;                  /* pointer to threads        */
+    volatile int num_threads_alive;      /* threads currently alive   */
+    volatile int num_threads_working;    /* threads currently working */
+    pthread_mutex_t  thcount_lock;       /* used for thread count etc */
+    pthread_cond_t  threads_all_idle;    /* signal to thpool_wait     */
+    jobqueue  jobqueue_t;                  /* job queue                 */
+} thpool_;
+
+
+
+
+
+/* ========================== PROTOTYPES ============================ */
+
+
+static int  thread_init(thpool_* thpool_p, struct thread** thread_p, int id);
+static void* thread_do(struct thread* thread_p);
+static void  thread_hold(int sig_id);
+static void  thread_destroy(struct thread* thread_p);
+
+static int   jobqueue_init(jobqueue* jobqueue_p);
+static void  jobqueue_clear(jobqueue* jobqueue_p);
+static void  jobqueue_push(jobqueue* jobqueue_p, struct job* newjob_p);
+static struct job* jobqueue_pull(jobqueue* jobqueue_p);
+static void  jobqueue_destroy(jobqueue* jobqueue_p);
+
+static void  bsem_init(struct bsem *bsem_p, int value);
+static void  bsem_reset(struct bsem *bsem_p);
+static void  bsem_post(struct bsem *bsem_p);
+static void  bsem_post_all(struct bsem *bsem_p);
+static void  bsem_wait(struct bsem *bsem_p);
+
+
+
+
+
+/* ========================== THREADPOOL ============================ */
+
+
+/* Initialise thread pool */
+struct thpool_* thpool_init(int num_threads){
+
+    threads_on_hold   = 0;
+    threads_keepalive = 1;
+
+    if (num_threads < 0){
+        num_threads = 0;
+    }
+
+    /* Make new thread pool */
+    thpool_* thpool_p;
+    thpool_p = (struct thpool_*)malloc(sizeof(struct thpool_));
+    if (thpool_p == NULL){
+        err("thpool_init(): Could not allocate memory for thread pool\n");
+        return NULL;
+    }
+    thpool_p->num_threads_alive   = 0;
+    thpool_p->num_threads_working = 0;
+
+    /* Initialise the job queue */
+    if (jobqueue_init(&thpool_p->jobqueue_t) == -1){
+        err("thpool_init(): Could not allocate memory for job queue\n");
+        free(thpool_p);
+        return NULL;
+    }
+
+    /* Make threads in pool */
+    thpool_p->threads = (struct thread**)malloc(num_threads * sizeof(struct thread *));
+    if (thpool_p->threads == NULL){
+        err("thpool_init(): Could not allocate memory for threads\n");
+        jobqueue_destroy(&thpool_p->jobqueue_t);
+        free(thpool_p);
+        return NULL;
+    }
+
+    pthread_mutex_init(&(thpool_p->thcount_lock), NULL);
+    pthread_cond_init(&thpool_p->threads_all_idle, NULL);
+
+    /* Thread init */
+    int n;
+    for (n=0; n<num_threads; n++){
+        thread_init(thpool_p, &thpool_p->threads[n], n);
+#if THPOOL_DEBUG
+            printf("THPOOL_DEBUG: Created thread %d in pool \n", n);
+#endif
+    }
+
+    /* Wait for threads to initialize */
+    while (thpool_p->num_threads_alive != num_threads) {}
+
+    return thpool_p;
+}
+
+
+/* Add work to the thread pool */
+int thpool_add_work(thpool_* thpool_p, void (*function_p)(void*), void* arg_p){
+    job* newjob;
+
+    newjob=(struct job*)malloc(sizeof(struct job));
+    if (newjob==NULL){
+        err("thpool_add_work(): Could not allocate memory for new job\n");
+        return -1;
+    }
+
+    /* add function and argument */
+    newjob->function=function_p;
+    newjob->arg=arg_p;
+
+    /* add job to queue */
+    jobqueue_push(&thpool_p->jobqueue_t, newjob);
+
+    return 0;
+}
+
+
+/* Wait until all jobs have finished */
+void thpool_wait(thpool_* thpool_p){
+    pthread_mutex_lock(&thpool_p->thcount_lock);
+    while (thpool_p->jobqueue_t.len || thpool_p->num_threads_working) {
+        pthread_cond_wait(&thpool_p->threads_all_idle, &thpool_p->thcount_lock);
+    }
+    pthread_mutex_unlock(&thpool_p->thcount_lock);
+}
+
+
+/* Destroy the threadpool */
+void thpool_destroy(thpool_* thpool_p){
+    /* No need to destroy if it's NULL */
+    if (thpool_p == NULL) return ;
+
+    volatile int threads_total = thpool_p->num_threads_alive;
+
+    /* End each thread's infinite loop */
+    threads_keepalive = 0;
+
+    /* Give one second to kill idle threads */
+    double TIMEOUT = 1.0;
+    time_t start, end;
+    double tpassed = 0.0;
+    time (&start);
+    while (tpassed < TIMEOUT && thpool_p->num_threads_alive){
+        bsem_post_all(thpool_p->jobqueue_t.has_jobs);
+        time (&end);
+        tpassed = difftime(end,start);
+    }
+
+    /* Poll remaining threads */
+    while (thpool_p->num_threads_alive){
+        bsem_post_all(thpool_p->jobqueue_t.has_jobs);
+        sleep(1);
+    }
+
+    /* Job queue cleanup */
+    jobqueue_destroy(&thpool_p->jobqueue_t);
+    /* Deallocs */
+    int n;
+    for (n=0; n < threads_total; n++){
+        thread_destroy(thpool_p->threads[n]);
+    }
+    free(thpool_p->threads);
+    free(thpool_p);
+}
+
+
+/* Pause all threads in threadpool */
+void thpool_pause(thpool_* thpool_p) {
+    int n;
+    for (n=0; n < thpool_p->num_threads_alive; n++){
+        pthread_kill(thpool_p->threads[n]->pthread, SIGUSR1);
+    }
+}
+
+
+/* Resume all threads in threadpool */
+void thpool_resume(thpool_* thpool_p) {
+    // resuming a single threadpool hasn't been
+    // implemented yet, meanwhile this suppresses
+    // the warnings
+    (void)thpool_p;
+
+    threads_on_hold = 0;
+}
+
+
+int thpool_num_threads_working(thpool_* thpool_p){
+    return thpool_p->num_threads_working;
+}
+
+
+
+
+
+/* ============================ THREAD ============================== */
+
+
+/* Initialize a thread in the thread pool
+ *
+ * @param thread        address to the pointer of the thread to be created
+ * @param id            id to be given to the thread
+ * @return 0 on success, -1 otherwise.
+ */
+static int thread_init (thpool_* thpool_p, struct thread** thread_p, int id){
+
+    *thread_p = (struct thread*)malloc(sizeof(struct thread));
+    if (*thread_p == NULL){
+        err("thread_init(): Could not allocate memory for thread\n");
+        return -1;
+    }
+
+    (*thread_p)->thpool_p = thpool_p;
+    (*thread_p)->id       = id;
+
+    pthread_create(&(*thread_p)->pthread, NULL, (void * (*)(void *)) thread_do, (*thread_p));
+    pthread_detach((*thread_p)->pthread);
+    return 0;
+}
+
+
+/* Sets the calling thread on hold */
+static void thread_hold(int sig_id) {
+    (void)sig_id;
+    threads_on_hold = 1;
+    while (threads_on_hold){
+        sleep(1);
+    }
+}
+
+
+/* What each thread is doing
+*
+* In principle this is an endless loop. The only time this loop gets interrupted is once
+* thpool_destroy() is invoked or the program exits.
+*
+* @param  thread        thread that will run this function
+* @return nothing
+*/
+static void* thread_do(struct thread* thread_p){
+
+    /* Set thread name for profiling and debugging */
+    char thread_name[32] = {0};
+    snprintf(thread_name, 32, "thread-pool-%d", thread_p->id);
+
+#if defined(__linux__)
+    /* Use prctl instead to prevent using _GNU_SOURCE flag and implicit declaration */
+    prctl(PR_SET_NAME, thread_name);
+#elif defined(__APPLE__) && defined(__MACH__)
+    pthread_setname_np(thread_name);
+#else
+    err("thread_do(): pthread_setname_np is not supported on this system");
+#endif
+
+    /* Assure all threads have been created before starting serving */
+    thpool_* thpool_p = thread_p->thpool_p;
+
+    /* Register signal handler */
+    struct sigaction act;
+    sigemptyset(&act.sa_mask);
+    act.sa_flags = 0;
+    act.sa_handler = thread_hold;
+    if (sigaction(SIGUSR1, &act, NULL) == -1) {
+        err("thread_do(): cannot handle SIGUSR1");
+    }
+
+    /* Mark thread as alive (initialized) */
+    pthread_mutex_lock(&thpool_p->thcount_lock);
+    thpool_p->num_threads_alive += 1;
+    pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+    while(threads_keepalive){
+
+        bsem_wait(thpool_p->jobqueue_t.has_jobs);
+
+        if (threads_keepalive){
+
+            pthread_mutex_lock(&thpool_p->thcount_lock);
+            thpool_p->num_threads_working++;
+            pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+            /* Read job from queue and execute it */
+            void (*func_buff)(void*);
+            void*  arg_buff;
+            job* job_p = jobqueue_pull(&thpool_p->jobqueue_t);
+            if (job_p) {
+                func_buff = job_p->function;
+                arg_buff  = job_p->arg;
+                func_buff(arg_buff);
+                free(job_p);
+            }
+
+            pthread_mutex_lock(&thpool_p->thcount_lock);
+            thpool_p->num_threads_working--;
+            if (!thpool_p->num_threads_working) {
+                pthread_cond_signal(&thpool_p->threads_all_idle);
+            }
+            pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+        }
+    }
+    pthread_mutex_lock(&thpool_p->thcount_lock);
+    thpool_p->num_threads_alive --;
+    pthread_mutex_unlock(&thpool_p->thcount_lock);
+
+    return NULL;
+}
+
+
+/* Frees a thread  */
+static void thread_destroy (thread* thread_p){
+    free(thread_p);
+}
+
+
+
+
+
+/* ============================ JOB QUEUE =========================== */
+
+
+/* Initialize queue */
+static int jobqueue_init(jobqueue* jobqueue_p){
+    jobqueue_p->len = 0;
+    jobqueue_p->front = NULL;
+    jobqueue_p->rear  = NULL;
+
+    jobqueue_p->has_jobs = (struct bsem*)malloc(sizeof(struct bsem));
+    if (jobqueue_p->has_jobs == NULL){
+        return -1;
+    }
+
+    pthread_mutex_init(&(jobqueue_p->rwmutex), NULL);
+    bsem_init(jobqueue_p->has_jobs, 0);
+
+    return 0;
+}
+
+
+/* Clear the queue */
+static void jobqueue_clear(jobqueue* jobqueue_p){
+
+    while(jobqueue_p->len){
+        free(jobqueue_pull(jobqueue_p));
+    }
+
+    jobqueue_p->front = NULL;
+    jobqueue_p->rear  = NULL;
+    bsem_reset(jobqueue_p->has_jobs);
+    jobqueue_p->len = 0;
+
+}
+
+
+/* Add (allocated) job to queue
+ */
+static void jobqueue_push(jobqueue* jobqueue_p, struct job* newjob){
+
+    pthread_mutex_lock(&jobqueue_p->rwmutex);
+    newjob->prev = NULL;
+
+    switch(jobqueue_p->len){
+
+        case 0:  /* if no jobs in queue */
+                    jobqueue_p->front = newjob;
+                    jobqueue_p->rear  = newjob;
+                    break;
+
+        default: /* if jobs in queue */
+                    jobqueue_p->rear->prev = newjob;
+                    jobqueue_p->rear = newjob;
+
+    }
+    jobqueue_p->len++;
+
+    bsem_post(jobqueue_p->has_jobs);
+    pthread_mutex_unlock(&jobqueue_p->rwmutex);
+}
+
+
+/* Get first job from queue(removes it from queue)
+ * Notice: Caller MUST hold a mutex
+ */
+static struct job* jobqueue_pull(jobqueue* jobqueue_p){
+
+    pthread_mutex_lock(&jobqueue_p->rwmutex);
+    job* job_p = jobqueue_p->front;
+
+    switch(jobqueue_p->len){
+
+        case 0:  /* if no jobs in queue */
+                      break;
+
+        case 1:  /* if one job in queue */
+                    jobqueue_p->front = NULL;
+                    jobqueue_p->rear  = NULL;
+                    jobqueue_p->len = 0;
+                    break;
+
+        default: /* if >1 jobs in queue */
+                    jobqueue_p->front = job_p->prev;
+                    jobqueue_p->len--;
+                    /* more than one job in queue -> post it */
+                    bsem_post(jobqueue_p->has_jobs);
+
+    }
+
+    pthread_mutex_unlock(&jobqueue_p->rwmutex);
+    return job_p;
+}
+
+
+/* Free all queue resources back to the system */
+static void jobqueue_destroy(jobqueue* jobqueue_p){
+    jobqueue_clear(jobqueue_p);
+    free(jobqueue_p->has_jobs);
+}
+
+
+
+
+
+/* ======================== SYNCHRONISATION ========================= */
+
+
+/* Init semaphore to 1 or 0 */
+static void bsem_init(bsem *bsem_p, int value) {
+    if (value < 0 || value > 1) {
+        err("bsem_init(): Binary semaphore can take only values 1 or 0");
+        exit(1);
+    }
+    pthread_mutex_init(&(bsem_p->mutex), NULL);
+    pthread_cond_init(&(bsem_p->cond), NULL);
+    bsem_p->v = value;
+}
+
+
+/* Reset semaphore to 0 */
+static void bsem_reset(bsem *bsem_p) {
+    bsem_init(bsem_p, 0);
+}
+
+
+/* Post to at least one thread */
+static void bsem_post(bsem *bsem_p) {
+    pthread_mutex_lock(&bsem_p->mutex);
+    bsem_p->v = 1;
+    pthread_cond_signal(&bsem_p->cond);
+    pthread_mutex_unlock(&bsem_p->mutex);
+}
+
+
+/* Post to all threads */
+static void bsem_post_all(bsem *bsem_p) {
+    pthread_mutex_lock(&bsem_p->mutex);
+    bsem_p->v = 1;
+    pthread_cond_broadcast(&bsem_p->cond);
+    pthread_mutex_unlock(&bsem_p->mutex);
+}
+
+
+/* Wait on semaphore until semaphore has value 0 */
+static void bsem_wait(bsem* bsem_p) {
+    pthread_mutex_lock(&bsem_p->mutex);
+    while (bsem_p->v != 1) {
+        pthread_cond_wait(&bsem_p->cond, &bsem_p->mutex);
+    }
+    bsem_p->v = 0;
+    pthread_mutex_unlock(&bsem_p->mutex);
+}
diff --git a/framework/lynq-framework-service/src/stateManager/thpool.h b/framework/lynq-framework-service/src/stateManager/thpool.h
new file mode 100644
index 0000000..af3e68d
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/thpool.h
@@ -0,0 +1,187 @@
+/**********************************
+ * @author      Johan Hanssen Seferidis
+ * License:     MIT
+ *
+ **********************************/
+
+#ifndef _THPOOL_
+#define _THPOOL_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* =================================== API ======================================= */
+
+
+typedef struct thpool_* threadpool;
+
+
+/**
+ * @brief  Initialize threadpool
+ *
+ * Initializes a threadpool. This function will not return until all
+ * threads have initialized successfully.
+ *
+ * @example
+ *
+ *    ..
+ *    threadpool thpool;                     //First we declare a threadpool
+ *    thpool = thpool_init(4);               //then we initialize it to 4 threads
+ *    ..
+ *
+ * @param  num_threads   number of threads to be created in the threadpool
+ * @return threadpool    created threadpool on success,
+ *                       NULL on error
+ */
+threadpool thpool_init(int num_threads);
+
+
+/**
+ * @brief Add work to the job queue
+ *
+ * Takes an action and its argument and adds it to the threadpool's job queue.
+ * If you want to add as work a function with more than one argument then
+ * a way to implement this is by passing a pointer to a structure.
+ *
+ * NOTICE: You have to cast both the function and argument to not get warnings.
+ *
+ * @example
+ *
+ *    void print_num(int num){
+ *       printf("%d\n", num);
+ *    }
+ *
+ *    int main() {
+ *       ..
+ *       int a = 10;
+ *       thpool_add_work(thpool, (void*)print_num, (void*)a);
+ *       ..
+ *    }
+ *
+ * @param  threadpool    threadpool to which the work will be added
+ * @param  function_p    pointer to function to add as work
+ * @param  arg_p         pointer to an argument
+ * @return 0 on success, -1 otherwise.
+ */
+int thpool_add_work(threadpool, void (*function_p)(void*), void* arg_p);
+
+
+/**
+ * @brief Wait for all queued jobs to finish
+ *
+ * Will wait for all jobs - both queued and currently running to finish.
+ * Once the queue is empty and all work has completed, the calling thread
+ * (probably the main program) will continue.
+ *
+ * Smart polling is used in wait. The polling is initially 0 - meaning that
+ * there is virtually no polling at all. If after 1 second the threads
+ * haven't finished, the polling interval starts growing exponentially
+ * until it reaches max_secs seconds. Then it jumps down to a maximum polling
+ * interval assuming that heavy processing is being used in the threadpool.
+ *
+ * @example
+ *
+ *    ..
+ *    threadpool thpool = thpool_init(4);
+ *    ..
+ *    // Add a bunch of work
+ *    ..
+ *    thpool_wait(thpool);
+ *    puts("All added work has finished");
+ *    ..
+ *
+ * @param threadpool     the threadpool to wait for
+ * @return nothing
+ */
+void thpool_wait(threadpool);
+
+
+/**
+ * @brief Pauses all threads immediately
+ *
+ * The threads will be paused whether they are idle or working.
+ * The threads return to their previous states once thpool_resume
+ * is called.
+ *
+ * While the threads are paused, new work can still be added.
+ *
+ * @example
+ *
+ *    threadpool thpool = thpool_init(4);
+ *    thpool_pause(thpool);
+ *    ..
+ *    // Add a bunch of work
+ *    ..
+ *    thpool_resume(thpool); // Let the threads start their magic
+ *
+ * @param threadpool    the threadpool where the threads should be paused
+ * @return nothing
+ */
+void thpool_pause(threadpool);
+
+
+/**
+ * @brief Unpauses all threads if they are paused
+ *
+ * @example
+ *    ..
+ *    thpool_pause(thpool);
+ *    sleep(10);              // Delay execution 10 seconds
+ *    thpool_resume(thpool);
+ *    ..
+ *
+ * @param threadpool     the threadpool where the threads should be unpaused
+ * @return nothing
+ */
+void thpool_resume(threadpool);
+
+
+/**
+ * @brief Destroy the threadpool
+ *
+ * This will wait for the currently active threads to finish and then 'kill'
+ * the whole threadpool to free up memory.
+ *
+ * @example
+ * int main() {
+ *    threadpool thpool1 = thpool_init(2);
+ *    threadpool thpool2 = thpool_init(2);
+ *    ..
+ *    thpool_destroy(thpool1);
+ *    ..
+ *    return 0;
+ * }
+ *
+ * @param threadpool     the threadpool to destroy
+ * @return nothing
+ */
+void thpool_destroy(threadpool);
+
+
+/**
+ * @brief Show currently working threads
+ *
+ * Working threads are the threads that are performing work (not idle).
+ *
+ * @example
+ * int main() {
+ *    threadpool thpool1 = thpool_init(2);
+ *    threadpool thpool2 = thpool_init(2);
+ *    ..
+ *    printf("Working threads: %d\n", thpool_num_threads_working(thpool1));
+ *    ..
+ *    return 0;
+ * }
+ *
+ * @param threadpool     the threadpool of interest
+ * @return integer       number of threads working
+ */
+int thpool_num_threads_working(threadpool);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/framework/lynq-framework-service/src/stateManager/timeManagement.cpp b/framework/lynq-framework-service/src/stateManager/timeManagement.cpp
new file mode 100644
index 0000000..654b85d
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/timeManagement.cpp
@@ -0,0 +1,234 @@
+/* ********************************
+ * Author:       Warren
+ * License:      MobileTek
+ *//** @file timeManagement.h  link.h*//*
+ *
+ ********************************/
+#include <stdio.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <semaphore.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>   /* EINTR, for the sleep-with-restart helpers */
+#include <time.h>    /* nanosleep */
+#include <string.h>
+#include <log/log.h>
+#include "thpool.h"
+#include "timeManagement.h"
+#include "link.h"
+#define LOG_TAG "LYNQ_TIME_MAN"
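+
+/*
+ * Overview (added for clarity): each pending request is given a slot in
+ * cond_array[] and a node in the COND_HEAD list keyed by its token.
+ * startWaitResp() runs on the thread pool and blocks on the slot's condition
+ * variable until the timeout expires or sendSignalToTimer() finds the token
+ * in the list and signals that condition variable.
+ */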
+
+int cond_length = 0;
+cond_used_state cond_array[CONDARRAYMAX]={0};
+COND_NODE* COND_HEAD;
+pthread_mutex_t len_mutex = PTHREAD_MUTEX_INITIALIZER;
+timerArgv *timerArgv_t = NULL;   /* definition; declared extern in timeManagement.h */
+int taskWorking = 0;
+pthread_mutex_t task_Mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t task_Cond = PTHREAD_COND_INITIALIZER;
+
+int initCondArray(void)
+{
+    for(int i = 0;i<CONDARRAYMAX;i++)
+    {
+        cond_array[i].state = 0;
+        cond_array[i].index = i;
+        cond_array[i].use_state = 0;
+        pthread_mutex_init(&(cond_array[i].mutex), NULL);
+        pthread_cond_init(&(cond_array[i].cond), NULL);
+    }
+    return 0;
+}
+cond_used_state* findUnusedCond(cond_used_state cond_array[],int arraLength)
+{
+    for(int i =0;i<arraLength;i++)
+    {
+        if((cond_array[i].state==0)&&(cond_array[i].use_state==0))
+        {
+            return &cond_array[i];
+        }
+    }
+    return NULL;
+}
+
+int millli_sleep_with_restart(int millisecond)
+{
+    struct timespec req;
+    struct timespec rem;
+    req.tv_sec  = millisecond / 1000;
+    req.tv_nsec = (long)(millisecond % 1000) * 1000000L;
+    /* nanosleep() reports the unslept time in 'rem' when interrupted by a
+     * signal, so the sleep can be restarted for the remaining duration
+     * (usleep() does not return the remaining time, so it cannot restart). */
+    while (nanosleep(&req, &rem) == -1 && errno == EINTR)
+    {
+        req = rem;
+    }
+    return 0;
+}
+int micro_sleep_with_restart(int microseconds)
+{
+    struct timespec req;
+    struct timespec rem;
+    req.tv_sec  = microseconds / 1000000;
+    req.tv_nsec = (long)(microseconds % 1000000) * 1000L;
+    while (nanosleep(&req, &rem) == -1 && errno == EINTR)
+    {
+        req = rem;
+    }
+    return 0;
+}
+
+void startWaitResp(void *arg)
+{
+    struct timeval now;
+    struct timespec outtime;
+    request_cond* param = (request_cond*)arg;
+    int num = param->cond_index;
+    int time = param->waitTime;
+    RLOGD("[Warren test] index is %d\n",num);
+    RLOGD("[Warren test] thread 0x%x working on task %d\n",(unsigned int)pthread_self(),num);
+    int ret = 0;
+    gettimeofday(&now, NULL);
+    outtime.tv_sec = now.tv_sec + time;
+    outtime.tv_nsec = now.tv_usec * 1000; /* must be set; an uninitialized tv_nsec makes the timedwait undefined */
+    while (cond_array[num].state==0)
+    {
+        RLOGD("[Warren test]cond_length is %d\n",cond_length);
+        if(cond_length<CONDARRAYMAX)
+        {
+            pthread_mutex_lock(&len_mutex);
+            cond_length++;
+            COND_HEAD = addCondLinkNode(COND_HEAD,param->token, cond_array[num].index,cond_array[num].cond);
+            taskWorking=0;
+            pthread_cond_signal(&task_Cond);
+            pthread_mutex_unlock(&len_mutex);
+            pthread_mutex_lock(&cond_array[num].mutex);
+            cond_array[num].state = 1;
+            /* The node was already added to COND_HEAD above; do not add it twice. */
+            RLOGD("[Warren test] start waiting, state = %d....\n",cond_array[num].state);
+            ret = pthread_cond_timedwait(&cond_array[num].cond,&cond_array[num].mutex,&outtime);
+        }
+        else
+        {
+            /* All slots are busy: back off exponentially before retrying.
+             * Total back-off = 10*(2^0 + 2^1 + ... + 2^6) = 10*(2^7 - 1) = 1270 ms. */
+            int w_status = 0;
+            int waitTime = 10;
+            int count = 0;
+            do
+            {
+                if(count < 7)
+                {
+                    count++;
+                    millli_sleep_with_restart(waitTime);
+                    waitTime = waitTime*2;
+                }
+                else
+                {
+                    w_status = 1;
+                }
+            }while((cond_length>=CONDARRAYMAX)&&(w_status==0));
+
+            if(w_status==1)
+            {
+                RLOGD("waiting for a free cond slot failed!!! And callback timeout function\n");
+                /*call time out function
+                *
+                */
+                cond_array[num].use_state = 0; /* release the slot reserved in addTaskToTimerMan */
+                free(param);                   /* avoid leaking the heap-allocated argument */
+                return;
+            }
+            pthread_mutex_lock(&len_mutex);
+            cond_length++;
+            COND_HEAD = addCondLinkNode(COND_HEAD,param->token, cond_array[num].index,cond_array[num].cond);
+            taskWorking = 0;
+            pthread_cond_signal(&task_Cond);
+            pthread_mutex_unlock(&len_mutex); /* unlock exactly once; the second unlock here was a bug */
+
+            pthread_mutex_lock(&cond_array[num].mutex);
+            cond_array[num].state = 1;
+            /* The node was already added to COND_HEAD above; do not add it twice. */
+            RLOGD("[Warren test] start waiting, state = %d....\n",cond_array[num].state);
+            ret = pthread_cond_timedwait(&cond_array[num].cond,&cond_array[num].mutex,&outtime);
+        }
+
+    }
+    cond_array[num].state = 0;//reset cond state.
+    cond_array[num].use_state = 0;
+    RLOGD("cond_array[%d] state=%d, use_state = %d",num,cond_array[num].state,cond_array[num].use_state);
+    pthread_mutex_unlock(&cond_array[num].mutex);
+    if(ret!=0)
+    {
+        //DeleteLinkNode(param->requestID);
+        COND_HEAD = DeleteLinkNode(param->token,COND_HEAD);
+        pthread_mutex_lock(&len_mutex);
+        cond_length--;
+        pthread_mutex_unlock(&len_mutex);
+        RLOGD("[Warren test] ret= %d,cond_index %d cond_length %d, token = %x,request %d is end\n",ret,param->cond_index,cond_length,param->token,param->requestID);
+        //some call back
+        //
+        free(param); /* free the heap-allocated argument on the timeout path as well */
+        return;
+    }
+    COND_HEAD = DeleteLinkNode(param->token,COND_HEAD);
+    pthread_mutex_lock(&len_mutex);
+    cond_length--;
+    pthread_mutex_unlock(&len_mutex);
+    RLOGD("[Warren test] ret= %d,cond_index %d cond_length %d, token = %x,request %d is end\n",ret,param->cond_index,cond_length,param->token,param->requestID);
+    free(param);
+    return;
+}
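+
+/*
+ * addTaskToTimerMan (comment added for clarity): reserves a free slot from
+ * cond_array, packages the token/request/timeout into a request_cond and
+ * hands startWaitResp to the thread pool. Returns 0 on success, 1 on failure.
+ */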
+int addTaskToTimerMan(int time,int32_t token,int request)
+{
+    cond_used_state *node=NULL;
+    request_cond *requestCond=(request_cond*)malloc(sizeof(request_cond));
+    if(requestCond==NULL)
+    {
+        return 1;
+    }
+    if(timerArgv_t==NULL)
+    {
+        free(requestCond); /* do not leak the argument on early return */
+        return 1;
+    }
+    RLOGD("[Warren test] add token = %x, request %d\n",token,request);
+    node = findUnusedCond(timerArgv_t->cond_array,CONDARRAYMAX);
+    if(node==NULL)
+    {
+        RLOGD("[Warren test] no unused cond slot available\n");
+        free(requestCond);
+        return 1;
+    }
+    node->use_state = 1; /* reserve the slot before dispatching the waiter */
+    requestCond->cond_index=node->index;
+    requestCond->token=token;
+    requestCond->waitTime = time;
+    requestCond->requestID = request;
+    RLOGD("[Warren test] cond_index=%d,token = %x request %d\n",requestCond->cond_index,requestCond->token,request);
+    thpool_add_work(timerArgv_t->pool,startWaitResp,(void*)requestCond);
+    return 0;
+}
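+
+/*
+ * sendSignalToTimer (comment added for clarity): looks the token up in the
+ * pending-request list and signals the condition variable of its slot so the
+ * matching startWaitResp waiter can finish. Returns 0 if a waiter was found,
+ * 1 otherwise.
+ */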
+int sendSignalToTimer(int32_t token)
+{
+    COND_NODE *node=NULL;
+    int cond_index = 0;
+    if(!(node = searchRequestinCondLink(token,COND_HEAD)))
+    {
+        //something to be done here (token not found in the pending list)
+        return 1;
+    }
+    cond_index = node->cond_index;
+    RLOGD("[Warren test] 正在发送信号....\n");
+    pthread_mutex_lock(&timerArgv_t->cond_array[cond_index].mutex);
+    pthread_cond_signal(&timerArgv_t->cond_array[cond_index].cond);
+    pthread_mutex_unlock(&timerArgv_t->cond_array[cond_index].mutex);
+    RLOGD("[Warren test] end cond_0....\n");
+    return 0;
+}
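+
+/*
+ * initTimeManagement (comment added for clarity): allocates the global
+ * timerArgv context, initializes the cond_array slots and the pending-request
+ * list, and creates a thread pool with max_thread workers for the waiters.
+ */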
+int initTimeManagement(int max_thread)
+{
+    timerArgv_t=(timerArgv*)malloc(sizeof(timerArgv));
+    if(timerArgv_t==NULL)
+    {
+        printf("timerArgv_t init fail!!!\n");
+        exit(EXIT_FAILURE);
+    }
+    initCondArray();
+    timerArgv_t->cond_array=cond_array;
+    COND_HEAD = initConditionLink();
+    if(!COND_HEAD)
+    {
+        printf("COND_HEAD init fail!!!\n");
+        exit(EXIT_FAILURE);
+    }
+    printf("Making threadpool with %d threads\n",max_thread);
+    threadpool thpool = thpool_init(max_thread);
+    timerArgv_t->pool = thpool;
+    return 0;
+}
diff --git a/framework/lynq-framework-service/src/stateManager/timeManagement.h b/framework/lynq-framework-service/src/stateManager/timeManagement.h
new file mode 100644
index 0000000..e5f8a12
--- /dev/null
+++ b/framework/lynq-framework-service/src/stateManager/timeManagement.h
@@ -0,0 +1,47 @@
+#ifndef _TIMEMANAGEMENT_H_
+#define _TIMEMANAGEMENT_H_
+
+#include <pthread.h>
+#include <stdint.h>
+#include "thpool.h"
+
+//#ifdef __cplusplus
+//extern "C" {
+//#endif
+
+#define CONDARRAYMAX 20
+
+typedef struct
+{
+    pthread_cond_t cond;
+    pthread_mutex_t mutex;
+    int state;      /* 1 while a waiter is blocked on this slot, 0 otherwise */
+    int index;      /* position of this slot in cond_array[] */
+    int use_state;  /* 1 once the slot has been reserved for a request */
+}cond_used_state;
+
+typedef struct
+{
+    int requestID;
+    int32_t token;
+    int cond_index; /* slot in cond_array[] assigned to this request */
+    int waitTime;   /* timeout in seconds */
+}request_cond;
+
+typedef struct
+{
+    threadpool pool;
+    cond_used_state *cond_array;
+}timerArgv;
+
+extern timerArgv * timerArgv_t;
+extern int taskWorking;
+extern pthread_mutex_t task_Mutex;
+extern pthread_cond_t task_Cond;
+
+int millli_sleep_with_restart(int millisecond);
+int micro_sleep_with_restart(int microseconds);
+cond_used_state* findUnusedCond(cond_used_state cond_array[],int arraLength);
+void startWaitResp(void *arg);
+int addTaskToTimerMan(int time,int32_t token,int request);
+int sendSignalToTimer(int32_t token);
+int initTimeManagement(int max_thread);
+
+//#ifdef __cplusplus
+//}
+//#endif
+
+#endif /* _TIMEMANAGEMENT_H_ */
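+
+/*
+ * Usage sketch (added for illustration; not part of the original file).
+ * How a caller would typically drive this module -- the token value and the
+ * 4-thread pool size below are hypothetical:
+ *
+ *    initTimeManagement(4);                 // create cond slots + thread pool
+ *
+ *    int32_t token = 0x1001;
+ *    addTaskToTimerMan(30, token, 12);      // wait up to 30 s for request 12
+ *
+ *    // ... when the response for 'token' arrives on another thread:
+ *    sendSignalToTimer(token);              // wakes the matching waiter
+ */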