[Feature][Modem]Update MTK MODEM V1.6 baseline version: MOLY.NR15.R3.MD700.IVT.MP1MR3.MP.V1.6

MTK modem version: MT2735_IVT_MOLY.NR15.R3.MD700.IVT.MP1MR3.MP.V1.6.tar.gz
RF  modem version: NA

Change-Id: I45a4c2752fa9d1a618beacd5d40737fb39ab64fb
diff --git a/mcu/protocol/dispatcher/src/dispatcher_control.c b/mcu/protocol/dispatcher/src/dispatcher_control.c
new file mode 100644
index 0000000..8161a01
--- /dev/null
+++ b/mcu/protocol/dispatcher/src/dispatcher_control.c
@@ -0,0 +1,611 @@
+/*****************************************************************************
+*  Copyright Statement:
+*  --------------------
+*  This software is protected by Copyright and the information contained
+*  herein is confidential. The software may not be copied and the information
+*  contained herein may not be used or disclosed except with the written
+*  permission of MediaTek Inc. (C) 2005
+*
+*  BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+*  THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+*  RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
+*  AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+*  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+*  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+*  NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+*  SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+*  SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
+*  THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
+*  NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
+*  SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
+*
+*  BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
+*  LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+*  AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+*  OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
+*  MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+*
+*  THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
+*  WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
+*  LAWS PRINCIPLES.  ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
+*  RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
+*  THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
+*
+*****************************************************************************/
+
+/*******************************************************************************
+ * Filename:
+ * ---------
+ *   dispatcher_control.c
+ *
+ * Project:
+ * --------
+ *   VMOLY
+ *
+ * Description:
+ * ------------
+ *   Dispatcher control information management.
+ *
+ * Author:
+ * -------
+ * -------
+ *
+ *******************************************************************************/
+
+#include "dispatcher_struct.h"
+#include "dispatcher_msgid.h"
+#include "md_sap.h"
+#include "dispatcher_debug.h"
+#include "dispatcher_data_path_trace_utmd.h"
+/**
+ * dispatcher global control context holding bearer & context information
+ */
+static dispatcher_control_cntx g_dispatcher_cntx[MAX_SIM_NUM];
+
+/**
+ * dispatcher's current protocol index for the ILM being handled
+ */
+kal_uint8 g_dispatcher_curr_protoidx;
+
+
+void dispatcher_ctrl_set_current_protoidx(kal_uint8 protoidx)
+{
+    g_dispatcher_curr_protoidx = protoidx;
+}
+
+
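+/**
+ * reset the per-SIM dispatcher control contexts (bearer & cid info) to zero
+ */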
+void dispatcher_init_control_cntx(void)
+{
+    kal_mem_set(g_dispatcher_cntx, 0, sizeof(g_dispatcher_cntx));
+}
+
+#ifdef ATEST_SYS_DISPATCHER
+    extern kal_bool dispatcher_ut_assert_flag;
+
+    #ifdef ASSERT
+    #undef ASSERT
+    #endif
+    //#define ASSERT(x) do{ if (x) ut_assert_flag = KAL_TRUE; }while(0)
+    void ASSERT(int x) {
+        if (!x) {
+            dispatcher_ut_assert_flag = KAL_TRUE;
+        }
+    }
+#endif
+
+/******************************************************************
+****************Control path functions****************************
+*****************************************************************/
+
+void
+n3epc_dispatcher_bearer_act_req(n3epc_dispatcher_bearer_act_req_struct *p_local)
+{
+    kal_uint8 proto_idx = g_dispatcher_curr_protoidx;
+    dispatcher_control_cntx *p_dispatcher_ctrl_cntx;
+    dispatcher_bearer_info_struct *p_bearer_info;
+    dispatcher_cid_info_struct *p_cid_info;
+    kal_uint8 bearer_id, cid;
+
+    MD_TRC_DISPATCHER_TR_BEARER_ACT_REQ(p_local->bearer_id, p_local->linked_bearer_id,
+        p_local->context_id, proto_idx);
+
+    bearer_id = p_local->bearer_id;
+    /*as per WO, bearer_id ranges from 1~15*/
+    if (0 == bearer_id || bearer_id > 15) {
+        //BEARER_ID_INVALID_VALUE
+        ASSERT(0);
+        return;
+    }
+    cid = p_local->context_id;
+    p_dispatcher_ctrl_cntx = &g_dispatcher_cntx[proto_idx];
+    p_bearer_info = &p_dispatcher_ctrl_cntx->bearer_info[bearer_id];
+    p_cid_info = &p_dispatcher_ctrl_cntx->cid_info[cid];
+
+    /*if the ebi context already holds valid values, the bearer is already active*/
+    if (p_bearer_info->defult_bearer_id != 0) {
+        ASSERT(0);
+        return;
+    }
+    p_bearer_info->bearer_id = bearer_id;
+    p_bearer_info->defult_bearer_id = p_local->linked_bearer_id;
+    //for R14 this will always be true
+    if (p_bearer_info->bearer_id == p_local->linked_bearer_id)
+    {
+        // the cid info must not yet have a bearer_id set at bearer act req;
+        // a non-zero value means a bearer is already active for this cid
+        ASSERT(p_cid_info->bearer_id == 0);
+        p_bearer_info->context_id = cid;
+
+        // save ip_addr and dns for the ipcore bind process
+        p_cid_info->ip_addr = p_local->ip_addr;
+        p_cid_info->dns = p_local->dns;
+
+        MD_TRC_DISPATCHER_TR_BEARER_ACT_REQ_BIND_STATE(p_bearer_info->context_id, p_cid_info->bind_state);
+
+        if (DISPATCHER_PDN_ST_UNBIND == p_cid_info->bind_state)
+        {
+            //reset old bearer id here
+            p_cid_info->old_bearer_id = 0;
+        }
+        /* not an acceptable state to handle bearer activation */
+        if ((DISPATCHER_PDN_ST_BIND == p_cid_info->bind_state) ||
+            (DISPATCHER_PDN_ST_BINDING == p_cid_info->bind_state) ||
+            (DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND == p_cid_info->bind_state))
+        {
+            ASSERT(0);
+            return;
+        }
+        p_cid_info->bearer_id = p_bearer_info->defult_bearer_id;
+    }
+    /*currently bearer_id & linked_bearer_id will be the same*/
+    else
+    {
+        ASSERT(0);
+        return;
+    }
+
+}
+
+
+
+void
+n3epc_dispatcher_bearer_deact_req(n3epc_dispatcher_bearer_deact_req_struct *p_local)
+{
+    kal_uint8 proto_idx = g_dispatcher_curr_protoidx;
+    dispatcher_control_cntx *p_dispatcher_ctrl_cntx;
+    dispatcher_bearer_info_struct *p_bearer_info;
+    dispatcher_cid_info_struct *p_cid_info;
+    kal_uint8 bearer_id, cid;
+
+    p_dispatcher_ctrl_cntx = &g_dispatcher_cntx[proto_idx];
+    bearer_id = p_local->bearer_id;
+    cid = p_local->context_id;
+
+    MD_TRC_DISPATCHER_TR_BEARER_DEACT_REQ(bearer_id, cid, proto_idx);
+
+    if (0 == bearer_id || bearer_id > 15) {
+        //BEARER_ID_INVALID_VALUE
+        ASSERT(0);
+        return;
+    }
+
+    p_bearer_info = &p_dispatcher_ctrl_cntx->bearer_info[bearer_id];
+    p_cid_info = &p_dispatcher_ctrl_cntx->cid_info[cid];
+
+    if (p_bearer_info->defult_bearer_id == bearer_id)
+    {
+        /*check that this bearer info has the same cid*/
+        ASSERT(p_bearer_info->context_id == cid);
+        p_bearer_info->defult_bearer_id = 0;
+        p_cid_info->bearer_id = 0;
+    }
+
+    /*the bearer can be deactivated before the pdn unbind*/
+    /*the unbind still needs the bearer_id, so keep it in old_bearer_id of the cid info*/
+    if ((DISPATCHER_PDN_ST_BIND == p_cid_info->bind_state) ||
+        (DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND == p_cid_info->bind_state) ||
+        (DISPATCHER_PDN_ST_BINDING == p_cid_info->bind_state))
+    {
+        p_cid_info->old_bearer_id = bearer_id;
+    }
+
+}
+
+void n3epc_dispatcher_pdn_bind_req(n3epc_dispatcher_pdn_bind_req_struct *p_local,
+                                kal_uint32 src_mod_id)
+{
+    kal_uint8 proto_idx = g_dispatcher_curr_protoidx;
+    dispatcher_control_cntx *p_dispatcher_ctrl_cntx;
+    dispatcher_bearer_info_struct *p_bearer_info;
+    dispatcher_cid_info_struct *p_cid_info;
+    kal_uint8 bearer_id;
+    dispatcher_bind_state_enum bind_state;
+#ifndef ATEST_SYS_DISPATCHER
+    kal_uint8 pdn_id;
+    ipcore_dispatcher_pdn_bind_ind_struct *p_pdn_bind_ind;
+#endif
+    ps_cause_enum error_result = HIF_IPC_OK;
+
+    MD_TRC_DISPATCHER_TR_PDN_BIND_REQ(p_local->context_id,
+        p_local->network_interface_id, proto_idx);
+
+    p_dispatcher_ctrl_cntx = &g_dispatcher_cntx[proto_idx];
+    p_cid_info = &p_dispatcher_ctrl_cntx->cid_info[p_local->context_id];
+    bearer_id = p_cid_info->bearer_id;
+
+    p_bearer_info = &p_dispatcher_ctrl_cntx->bearer_info[bearer_id];
+    /*the bearer must already be active before a bind; checked here as a mandatory condition*/
+    /*******handling added for assert removal*************/
+    if ((p_cid_info->bearer_id == 0) ||
+        (p_bearer_info->defult_bearer_id != bearer_id))
+    {  
+        error_result = N3EPC_DISPATCHER_BIND_ERROR_AS_BEARER_NOT_ACTIVATED_BEFORE;
+        dispatcher_report_bind_unbind_error_cnf(KAL_TRUE, error_result, p_local->context_id, src_mod_id);
+        return; 
+    }
+    bind_state = p_cid_info->bind_state;
+    /*a bind req is valid only in the unbind state*/
+    if (DISPATCHER_PDN_ST_UNBIND == bind_state)
+    {
+        /*update pdn bind state*/
+        p_cid_info->bind_state = DISPATCHER_PDN_ST_BINDING;
+        p_cid_info->apn_type_info = p_local->apn_type_info;
+
+        /* forward to IPCORE */
+#ifndef ATEST_SYS_DISPATCHER
+        p_pdn_bind_ind = construct_local_para(sizeof(ipcore_dispatcher_pdn_bind_ind_struct), 0);
+
+#if defined(__SENSITIVE_DATA_MOSAIC__) && defined(__MTK_TARGET__)
+      	kal_set_sensitive_buff(p_pdn_bind_ind);
+#endif
+
+        p_pdn_bind_ind->network_interface_id = p_local->network_interface_id;
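+        /* IPCORE addresses contexts by pdn_id, so translate the bearer id before forwarding */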
+        DISPATCHER_CONVERT_BEARERID_TO_PDNID(bearer_id, pdn_id);
+        p_pdn_bind_ind->pdn_id = pdn_id;
+        p_pdn_bind_ind->apn_type_info = p_local->apn_type_info;
+        p_pdn_bind_ind->ip_addr = p_cid_info->ip_addr;
+        p_pdn_bind_ind->dns     = p_cid_info->dns; 
+
+        p_pdn_bind_ind->back_info.reply_dest_mod_id = src_mod_id;
+        p_pdn_bind_ind->back_info.context_id = p_local->context_id;
+
+        msg_send6(
+            MOD_DISPATCHER + proto_idx,
+            MOD_IPCORE,
+#if defined(__SENSITIVE_DATA_MOSAIC__) && defined(__MTK_TARGET__)
+            DISPATCHER_USER_SENSITIVE_SAP,
+#else
+            DISPATCHER_SAP,
+#endif
+            MSG_ID_IPCORE_DISPATCHER_PDN_BIND_IND,
+            (local_para_struct*) p_pdn_bind_ind,
+            NULL);
+#endif
+
+    } else if (DISPATCHER_PDN_ST_BIND == bind_state) {
+        error_result = N3EPC_DISPATCHER_BIND_ERROR_AS_PDN_ALREADY_BIND;
+        dispatcher_report_bind_unbind_error_cnf(KAL_TRUE, error_result, p_local->context_id, src_mod_id);
+    } else if (DISPATCHER_PDN_ST_BINDING == bind_state) {
+        error_result = N3EPC_DISPATCHER_BIND_ERROR_AS_PREV_BIND_IN_PROGRESS;
+        dispatcher_report_bind_unbind_error_cnf(KAL_TRUE, error_result, p_local->context_id, src_mod_id);
+    } else /*if (DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND == bind_state)*/ {
+        error_result = N3EPC_DISPATCHER_BINDING_GOING_TO_UNBIND_ERROR_AS_PREV_BIND_IN_PROGRESS;
+        dispatcher_report_bind_unbind_error_cnf(KAL_TRUE, error_result, p_local->context_id, src_mod_id);
+    }
+    MD_TRC_DISPATCHER_TR_PDN_BIND_REQ_BIND_STATE(p_local->context_id,
+        bind_state, p_cid_info->bind_state);
+}
+
+
+void n3epc_dispatcher_pdn_unbind_req(n3epc_dispatcher_pdn_unbind_req_struct *p_local,
+                                  kal_uint32 src_mod_id)
+{
+
+    kal_uint8 proto_idx = g_dispatcher_curr_protoidx;
+    dispatcher_control_cntx *p_dispatcher_ctrl_cntx;
+    dispatcher_bearer_info_struct *p_bearer_info;
+    dispatcher_cid_info_struct *p_cid_info;
+    kal_uint8 bearer_id;
+    kal_uint8 cid = p_local->context_id;
+    dispatcher_bind_state_enum bind_state;
+    ps_cause_enum error_result = HIF_IPC_OK;
+
+    MD_TRC_DISPATCHER_TR_PDN_UNBIND_REQ(p_local->context_id, proto_idx);
+
+    p_dispatcher_ctrl_cntx = &g_dispatcher_cntx[proto_idx];
+    p_cid_info = &p_dispatcher_ctrl_cntx->cid_info[cid];
+    bind_state = p_cid_info->bind_state;
+    bearer_id = p_cid_info->bearer_id;	
+
+    /* the unbind may arrive after the bearer deact;
+    * on bearer deact the bearer_id is kept in old_bearer_id of the cid info
+    */
+    if (!bearer_id) {
+        bearer_id = p_cid_info->old_bearer_id;
+    }
+
+    p_bearer_info = &p_dispatcher_ctrl_cntx->bearer_info[bearer_id];
+    ASSERT(p_bearer_info->context_id == cid);
+
+    if (DISPATCHER_PDN_ST_BIND == bind_state) {
+        /* wo unbind cnf + ipc unbind ind!! */
+        dispatcher_unbind_cnf_optional_ipc_ind(p_bearer_info, KAL_TRUE, src_mod_id, proto_idx);
+        p_cid_info->bind_state = DISPATCHER_PDN_ST_UNBIND;
+    } else if (DISPATCHER_PDN_ST_BINDING == bind_state) {
+        // update bind_state
+        p_cid_info->bind_state = DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND;
+    } else {
+        // Not expected state                
+        //ASSERT(0);
+        error_result = N3EPC_DISPATCHER_UNBIND_ERROR_AS_DUPLICATE_UNBIND_REQ;
+        dispatcher_report_bind_unbind_error_cnf(KAL_FALSE, error_result, p_local->context_id, src_mod_id);
+    }
+    MD_TRC_DISPATCHER_TR_PDN_UNBIND_REQ_BIND_STATE(cid, bind_state, p_cid_info->bind_state);
+}
+
+
+void
+ipcore_dispatcher_pdn_bind_rsp(ipcore_dispatcher_pdn_bind_rsp_struct *p_local)
+{
+    kal_uint8 proto_idx = g_dispatcher_curr_protoidx;
+    kal_uint8 bearer_id = 0;
+    kal_uint8 context_id = p_local->back_info.context_id;
+    n3epc_dispatcher_pdn_bind_cnf_struct *p_pdn_bind_cnf;
+    dispatcher_bind_state_enum bind_state;
+    dispatcher_control_cntx *p_dispatcher_ctrl_cntx;
+    dispatcher_bearer_info_struct *p_bearer_info;
+    dispatcher_cid_info_struct *p_cid_info;
+
+    p_dispatcher_ctrl_cntx = &g_dispatcher_cntx[proto_idx];
+    DISPATCHER_CONVERT_PDNID_TO_BEARERID(p_local->pdn_id, bearer_id);
+    p_bearer_info = &p_dispatcher_ctrl_cntx->bearer_info[bearer_id];
+    p_cid_info = &p_dispatcher_ctrl_cntx->cid_info[context_id];
+    bind_state = p_cid_info->bind_state;
+    if (DISPATCHER_PDN_ST_BINDING == bind_state) {
+        /* state is binding (waiting for bind rsp): send cnf to WO!! */
+        if (HIF_IPC_OK == p_local->result) {
+            p_cid_info->bind_state = DISPATCHER_PDN_ST_BIND;
+            p_cid_info->network_interface_id = p_local->network_interface_id;
+        } else {
+            p_cid_info->bind_state = DISPATCHER_PDN_ST_UNBIND;
+        }
+    } else if (DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND == bind_state) {
+        /*send bind cnf to WO + unbind to ipc based upon bind result*/
+        if (HIF_IPC_OK == p_local->result) {
+            p_cid_info->pending_req = DISPATCHER_PENDING_REQ_UNBIND_IND_N_CNF ;
+        } else {
+            p_cid_info->pending_req = DISPATCHER_PENDING_REQ_UNBIND_CNF;
+        }
+    } else {
+        ASSERT(0);
+    }
+
+    MD_TRC_DISPATCHER_TR_PDN_IPC_BIND_RSP(p_local->result, context_id,
+        bind_state, p_cid_info->bind_state, proto_idx);
+    /*as this is the bind rsp, first send the bind cnf to WO!! */
+
+    p_pdn_bind_cnf = construct_local_para(sizeof(n3epc_dispatcher_pdn_bind_cnf_struct), TD_RESET);
+    p_pdn_bind_cnf->context_id = p_local->back_info.context_id;
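+    /* is_successful is only set when the cid reached the BIND state; otherwise it keeps the reset default */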
+    if (DISPATCHER_PDN_ST_BIND == p_cid_info->bind_state) {
+        p_pdn_bind_cnf->is_successful = p_local->result == HIF_IPC_OK ? KAL_TRUE : KAL_FALSE;
+    }
+    p_pdn_bind_cnf->error_result  = p_local->result;
+
+    msg_send6(
+#ifndef ATEST_SYS_DISPATCHER
+        MOD_DISPATCHER + proto_idx,
+#else
+        MOD_NIL,
+#endif
+        p_local->back_info.reply_dest_mod_id,
+        DISPATCHER_SAP,
+        MSG_ID_N3EPC_DISPATCHER_PDN_BIND_CNF,
+        (local_para_struct*) p_pdn_bind_cnf,
+        NULL);
+
+    /*only send unbind cnf to WO*/
+    if (DISPATCHER_PENDING_REQ_UNBIND_CNF == p_cid_info->pending_req) {
+        //KAL_FALSE : as bind rsp failed so no need to send unbind ind to ipcore
+        dispatcher_unbind_cnf_optional_ipc_ind(p_bearer_info, KAL_FALSE,
+            p_local->back_info.reply_dest_mod_id, proto_idx);
+        p_cid_info->bind_state = DISPATCHER_PDN_ST_UNBIND;
+        p_cid_info->pending_req = DISPATCHER_PENDING_REQ_NONE;
+    } else if (DISPATCHER_PENDING_REQ_UNBIND_IND_N_CNF == p_cid_info->pending_req) {
+        //KAL_TRUE: as bind rsp success, so send unbind ind to ipcore
+        dispatcher_unbind_cnf_optional_ipc_ind(p_bearer_info, KAL_TRUE,
+            p_local->back_info.reply_dest_mod_id, proto_idx);
+        p_cid_info->bind_state = DISPATCHER_PDN_ST_UNBIND;
+        p_cid_info->pending_req = DISPATCHER_PENDING_REQ_NONE;
+    }
+}
+
+
+/******************************************************************
+*helper functions to check binding state & bearer activation state*
+*****************************************************************/
+
+dispatcher_bind_state_enum 
+dispatcher_get_bind_state_by_bearerid(kal_uint8 bearer_id, kal_uint8 proto_idx)
+{
+    dispatcher_cid_info_struct *p_cid_info;
+    dispatcher_bearer_info_struct *p_bearer_info;
+    kal_uint8 cid = 0;
+    dispatcher_control_cntx *p_dispatcher_ctrl_cntx = &g_dispatcher_cntx[proto_idx];
+    p_bearer_info = &p_dispatcher_ctrl_cntx->bearer_info[bearer_id];
+    cid = p_bearer_info->context_id;
+    p_cid_info = &p_dispatcher_ctrl_cntx->cid_info[cid];
+
+    MD_TRC_DISPATCHER_DATA_TRACE_BINDING_INFO(bearer_id, cid, p_cid_info->bind_state, proto_idx);
+    return p_cid_info->bind_state;
+
+}
+
+
+dispatcher_bind_state_enum
+dispatcher_get_bind_state_by_pdnid(kal_uint8 pdn_id, kal_uint8 proto_idx)
+{
+    kal_uint8 bearer_id = 0;
+    DISPATCHER_CONVERT_PDNID_TO_BEARERID(pdn_id, bearer_id);
+    if (IS_VALID_BEARERID(bearer_id)) {
+        return dispatcher_get_bind_state_by_bearerid(bearer_id, proto_idx);
+    }
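+    /* invalid pdn_id: fall back to the initial (zero-valued) bind state */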
+    return 0;
+}
+
+
+kal_bool dispatcher_check_is_bearer_active_by_pdnid(kal_uint8 pdn_id,
+                                                    kal_uint8 proto_idx)
+{
+    kal_uint8 bearer_id = 0;
+    DISPATCHER_CONVERT_PDNID_TO_BEARERID(pdn_id, bearer_id);
+    if (IS_VALID_BEARERID(bearer_id)) {
+        return dispatcher_check_is_bearer_active_by_bearerid(bearer_id, proto_idx);
+    }
+    return KAL_FALSE;
+}
+
+
+kal_bool dispatcher_check_is_bearer_active_by_bearerid(kal_uint8 bearer_id,
+                                                       kal_uint8 proto_idx)
+{
+    dispatcher_control_cntx *p_dispatcher_ctrl_cntx = &g_dispatcher_cntx[proto_idx];
+    dispatcher_bearer_info_struct *p_bearer_info = &p_dispatcher_ctrl_cntx->bearer_info[bearer_id];
+    kal_bool is_active = (0 != p_bearer_info->defult_bearer_id) ? KAL_TRUE : KAL_FALSE;
+
+    MD_TRC_DISPATCHER_DATA_TRACE_BEARER_INFO(bearer_id, is_active, proto_idx);
+    return is_active;
+}
+
+/**
+ * dispatcher_report_bind_unbind_error_cnf
+ * report a bind/unbind error to WO for the pre-check
+ */
+void dispatcher_report_bind_unbind_error_cnf(kal_bool is_bindreq,
+                                             ps_cause_enum error_code,
+                                             kal_uint8 cid,
+                                             kal_uint32 reply_mod_id)
+{
+    kal_bool success = KAL_FALSE;
+    kal_uint8 proto_idx = g_dispatcher_curr_protoidx;
+    if (is_bindreq)
+    {
+        n3epc_dispatcher_pdn_bind_cnf_struct *p_pdn_bind_cnf;
+        p_pdn_bind_cnf = construct_local_para(sizeof(n3epc_dispatcher_pdn_bind_cnf_struct), TD_RESET);
+        p_pdn_bind_cnf->context_id = cid;
+        p_pdn_bind_cnf->is_successful = success;
+        p_pdn_bind_cnf->error_result = error_code;
+
+        msg_send6(
+#ifndef ATEST_SYS_DISPATCHER
+            MOD_DISPATCHER + proto_idx,
+#else
+            MOD_NIL,
+#endif
+            reply_mod_id,
+            DISPATCHER_SAP,
+            MSG_ID_N3EPC_DISPATCHER_PDN_BIND_CNF,
+            (local_para_struct*) p_pdn_bind_cnf,
+            NULL);
+
+    }
+    else
+    {
+        n3epc_dispatcher_pdn_unbind_cnf_struct *p_pdn_unbind_cnf;
+        p_pdn_unbind_cnf = construct_local_para(sizeof(n3epc_dispatcher_pdn_unbind_cnf_struct), TD_RESET);
+        p_pdn_unbind_cnf->context_id = cid;
+        p_pdn_unbind_cnf->is_successful = success;
+        p_pdn_unbind_cnf->error_result = error_code;
+
+        msg_send6(
+#ifndef ATEST_SYS_DISPATCHER
+            MOD_DISPATCHER + proto_idx,
+#else
+            MOD_NIL,
+#endif
+            reply_mod_id,
+            DISPATCHER_SAP,
+            MSG_ID_N3EPC_DISPATCHER_PDN_UNBIND_CNF,
+            (local_para_struct*) p_pdn_unbind_cnf,
+            NULL);
+    }
+}
+
+
+/**
+ * dispatcher_unbind_cnf_optional_ipc_ind
+ * mandatory unbind CNF to WO + optional IPC unbind ind
+ */
+void dispatcher_unbind_cnf_optional_ipc_ind(
+    dispatcher_bearer_info_struct *p_bearer_info,
+    kal_bool ipc_unbind_required,
+    kal_uint32 dest_mod_id,
+    kal_uint8 proto_idx) 
+{
+#ifndef ATEST_SYS_DISPATCHER
+    ipcore_dispatcher_pdn_unbind_ind_struct *p_pdn_unbind_ind;
+#endif
+    n3epc_dispatcher_pdn_unbind_cnf_struct *p_pdn_unbind_cnf;
+
+    kal_uint8 bearer_id = p_bearer_info->bearer_id;
+    kal_uint8 cid = p_bearer_info->context_id;
+
+    MD_TRC_DISPATCHER_TR_PDN_UNBIND_CNF_N_IND_OPTIONAL(bearer_id,
+        p_bearer_info->context_id, ipc_unbind_required, proto_idx);
+
+    /* forward to IPCORE */
+    if (ipc_unbind_required) {
+#ifndef ATEST_SYS_DISPATCHER
+        kal_uint8 pdn_id = 0;
+        p_pdn_unbind_ind = construct_local_para(sizeof(ipcore_dispatcher_pdn_unbind_ind_struct), TD_RESET);       
+        DISPATCHER_CONVERT_BEARERID_TO_PDNID(bearer_id, pdn_id);
+        p_pdn_unbind_ind->pdn_id = pdn_id;
+
+        msg_send6(
+            MOD_DISPATCHER + proto_idx,
+            MOD_IPCORE,
+            DISPATCHER_SAP,
+            MSG_ID_IPCORE_DISPATCHER_PDN_UNBIND_IND,
+            (local_para_struct*) p_pdn_unbind_ind,
+            NULL);
+#endif
+    }
+
+    /* CNF to WO */
+
+    p_pdn_unbind_cnf = construct_local_para(sizeof(n3epc_dispatcher_pdn_unbind_cnf_struct), TD_RESET);
+    p_pdn_unbind_cnf->context_id  = cid;
+    p_pdn_unbind_cnf->is_successful = KAL_TRUE;
+    p_pdn_unbind_cnf->error_result = HIF_IPC_OK;
+    msg_send6(
+#ifndef ATEST_SYS_DISPATCHER
+        MOD_DISPATCHER + proto_idx,
+#else
+        MOD_NIL,
+#endif
+        dest_mod_id,
+        DISPATCHER_SAP,
+        MSG_ID_N3EPC_DISPATCHER_PDN_UNBIND_CNF,
+        (local_para_struct*) p_pdn_unbind_cnf,
+        NULL);
+}
+
+
+#if defined(__SENSITIVE_DATA_MOSAIC__)
+void dispatcher_clean_private_data(void)
+{
+    //kal_mem_set(g_dispatcher_cntx, 0, sizeof(g_dispatcher_cntx));
+    //or just clean the ip_addr from global
+    kal_uint8 sim_id, cid;
+    dispatcher_control_cntx *p_dispatcher_ctrl_cntx;
+    dispatcher_cid_info_struct *p_cid_info;
+
+    for (sim_id = 0; sim_id < MAX_SIM_NUM; sim_id++) {
+        p_dispatcher_ctrl_cntx = &g_dispatcher_cntx[sim_id];    
+        for (cid = 0; cid < 200; cid++) {
+            p_cid_info = &p_dispatcher_ctrl_cntx->cid_info[cid];
+            kal_mem_set(&p_cid_info->ip_addr,0, sizeof(ip_addr_struct));
+            kal_mem_set(&p_cid_info->dns, 0, sizeof(dns_struct));
+        }
+    }
+}
+#endif
diff --git a/mcu/protocol/dispatcher/src/dispatcher_data_path_trace_utmd.json b/mcu/protocol/dispatcher/src/dispatcher_data_path_trace_utmd.json
new file mode 100755
index 0000000..fc50511
--- /dev/null
+++ b/mcu/protocol/dispatcher/src/dispatcher_data_path_trace_utmd.json
@@ -0,0 +1,130 @@
+{
+  "legacyParameters": {
+    "codeSection": "TCMFORCE", 
+    "l2BufferSetting": "L2_BUFFER_EL2", 
+    "l2MaxArg": 4, 
+    "modemType": "Data_Path"
+  }, 
+  "module": "DISPATCHER_L2", 
+  "stringTranslationDefs": [], 
+  "startGen": "97",
+  "endGen": "-",
+  "traceClassDefs": [ 
+    {
+      "DISPATCHER_UL": { 
+        "debugLevel": "High", 
+        "filterDefaultValue": "ON", 
+        "tag": [
+          "Baseline"
+        ], 
+        "traceType": "InternalDesign"
+      }
+    },
+    {
+      "DISPATCHER_DL": { 
+        "debugLevel": "Ultra-High", 
+        "filterDefaultValue": "ON", 
+        "tag": [
+          "Baseline"
+        ], 
+        "traceType": "InternalDesign"
+      }
+    },
+    {
+      "DISPATCHER_GEN": { 
+        "debugLevel": "Low", 
+        "filterDefaultValue": "ON", 
+        "tag": [
+          "Baseline"
+        ], 
+        "traceType": "InternalDesign"
+      }
+    },
+    {
+      "DISPATCHER_GE": { 
+        "debugLevel": "Medium", 
+        "filterDefaultValue": "ON", 
+        "tag": [
+          "Baseline"
+        ], 
+        "traceType": "CoreDesign"
+      }
+    }
+  ], 
+  "traceDefs": [ 
+    {
+      "DISPATCHER_DATA_TRACE_RCV_UL_META": {
+        "format": "[DISPATCHER][UL] recv UL META Q: q_type=%ub, start_end_idx=[%ud, %ud)", 
+        "traceClass": "DISPATCHER_UL"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_RCV_UL_GPD": {
+        "format": "[DISPATCHER][UL] recv UL GPD : pdn_id(%b), gpd_head(%xl), gpd_tail(%xl), proto_idx(%ub)", 
+        "traceClass": "DISPATCHER_UL"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_RCV_DL_DID": {
+        "format": "[DISPATCHER][DL] recv DL DID bearer_id(%b), DID_head(%xl), DID_tail(%xl), proto_idx(%ub)", 
+        "traceClass": "DISPATCHER_DL"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_FORWARD_DL_DID": {
+        "format": "[DISPATCHER][UL] forward DL DID : pdn_id(%b), DID_head(%xl), DID_tail(%xl), proto_idx(%ub)", 
+        "traceClass": "DISPATCHER_DL"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_FORWARD_UL_GPD_WTUNNEL": {
+        "format": "[DISPATCHER][UL] forward UL GPD to wtunnel : bearer_id(%b), gpd_head(%xl), gpd_tail(%xl), proto_idx(%ub)", 
+        "traceClass": "DISPATCHER_UL"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_FORWARD_UL_GPD_UPCM": {
+        "format": "[DISPATCHER][UL] forward UL GPD to UPCM : pdn_id(%b), gpd_head(%xl), gpd_tail(%xl), proto_idx(%ub)", 
+        "traceClass": "DISPATCHER_UL"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_FORWARD_UL_META": {
+        "format": "[DISPATCHER][UL] forward UL META Q to UPCM: q_type=%ub, start_end_idx=[%ud, %ud)", 
+        "traceClass": "DISPATCHER_UL"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_ALLOC_GPD_INFO": {
+        "format": "[DISPATCHER][UL] process meta_idx=(%ud) : pdn_id=(%b), GPD alloc=(%xl)", 
+        "traceClass": "DISPATCHER_UL"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_CLUB_GPD_INFO": {
+        "format": "[DISPATCHER][UL] append GPD updated gpd_head(%xl), updated_gpd_tail(%xl)", 
+        "traceClass": "DISPATCHER_GEN"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_BEARER_INFO": {
+        "format": "[DISPATCHER][GE] bearer info: bearer_id(%b), is_active(%Mkal_bool), proto_idx(%ub)", 
+        "traceClass": "DISPATCHER_GE"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_BINDING_INFO": {
+        "format": "[DISPATCHER][GE] bind info: bearer_id(%b), context_id(%ub), bind_state(%Mdispatcher_bind_state_enum), proto_idx(%ub)", 
+        "traceClass": "DISPATCHER_GE"
+      }
+    }, 
+    {
+      "DISPATCHER_DATA_TRACE_RCV_UL_GPD_BY_EBI": {
+        "format": "[DISPATCHER][UL] recv UL GPD by EBI : ebi(%b), gpd_head(%xl), gpd_tail(%xl), proto_idx(%ub)", 
+        "traceClass": "DISPATCHER_GEN"
+      }
+    }
+  ], 
+  "traceFamily": "L2",
+  "userModule": "MOD_DISPATCHER"
+}
diff --git a/mcu/protocol/dispatcher/src/dispatcher_if.c b/mcu/protocol/dispatcher/src/dispatcher_if.c
new file mode 100644
index 0000000..24ed82f
--- /dev/null
+++ b/mcu/protocol/dispatcher/src/dispatcher_if.c
@@ -0,0 +1,477 @@
+/*****************************************************************************
+*  Copyright Statement:
+*  --------------------
+*  This software is protected by Copyright and the information contained
+*  herein is confidential. The software may not be copied and the information
+*  contained herein may not be used or disclosed except with the written
+*  permission of MediaTek Inc. (C) 2005
+*
+*  BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+*  THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+*  RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
+*  AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+*  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+*  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+*  NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+*  SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+*  SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
+*  THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
+*  NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
+*  SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
+*
+*  BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
+*  LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+*  AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+*  OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
+*  MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+*
+*  THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
+*  WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
+*  LAWS PRINCIPLES.  ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
+*  RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
+*  THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
+*
+*****************************************************************************/
+
+/*******************************************************************************
+ * Filename:
+ * ---------
+ *   dispatcher_if.c
+ *
+ * Project:
+ * --------
+ *   VMOLY
+ *
+ * Description:
+ * ------------
+ *   Dispatcher public interface implementation.
+ *
+ * Author:
+ * -------
+ * -------
+ *
+ *******************************************************************************/
+
+#include "dispatcher_if.h"
+#include "dispatcher_struct.h"
+#include "global_def.h"
+#include "dispatcher_msgid.h"
+#include "qmu_bm_util.h"
+#include "md_sap.h"
+#include "dispatcher_debug.h"
+#include "dispatcher_data_path_trace_utmd.h"
+/**
+ * DL DID callback registered by IPCORE with the dispatcher
+ */
+static dispatcher_dlvr_dl_did_f  g_funcptr_ipcore_dl_did_cbk;
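+/**
+ * set to KAL_TRUE once the DL DID callback has been registered with the wifi tunnel
+ */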
+static kal_bool wtunnel_cbk_set = KAL_FALSE;
+
+
+/**
+ * init function for dispatcher module
+ * init the global control context; DL callback registration with the wifi tunnel is deferred to the first bearer act req
+ */
+kal_bool dispatcher_init(void)
+{
+    //wtunnel_reg_dl_cbk(dispatcher_on_dl_did_cbk)
+    //DISPATCHER_REGISTER_DL_DID_CBK(dispatcher_on_dl_did_cbk);
+    //memset(g_dispatcher_cntx, 0, sizeof(g_dispatcher_cntx));
+    dispatcher_init_control_cntx();
+    MD_TRC_DISPATCHER_TR_INIT_ENTRY();
+    return KAL_TRUE;
+}
+
+
+/**
+ * reset function for dispatcher module
+ * reset the global control context & reset globals
+ */
+kal_bool dispatcher_reset(void)
+{
+    //memset(g_dispatcher_cntx, 0, sizeof(g_dispatcher_cntx));
+    dispatcher_init_control_cntx();
+    g_funcptr_ipcore_dl_did_cbk = NULL;
+    dispatcher_ctrl_set_current_protoidx(0);
+    MD_TRC_DISPATCHER_TR_RESET_ENTRY();
+    return KAL_TRUE;
+}
+
+
+/**
+ * ILM handler for the dispatcher module
+ * handles the corresponding control-path ILMs and
+ * updates the bearer and cid binding status in the dispatcher control context
+ */
+void dispatcher_on_ilm(ilm_struct *p_ilm)
+{
+    kal_uint8 curr_protoidx = p_ilm->dest_mod_id - MOD_DISPATCHER;
+    dispatcher_ctrl_set_current_protoidx(curr_protoidx);
+    switch (p_ilm->msg_id) {
+        /*WO->DISPATCHER bearer related*/
+    case MSG_ID_N3EPC_DISPATCHER_BEARER_ACT_REQ:
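+        /* register the DL DID callback with the wifi tunnel on the first bearer activation */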
+        if(KAL_FALSE == wtunnel_cbk_set) {
+            DISPATCHER_REGISTER_DL_DID_CBK(dispatcher_on_dl_did_cbk);
+            wtunnel_cbk_set = KAL_TRUE;
+        }
+        n3epc_dispatcher_bearer_act_req(
+            (n3epc_dispatcher_bearer_act_req_struct*)p_ilm->local_para_ptr);
+        break;
+
+    case MSG_ID_N3EPC_DISPATCHER_BEARER_DEACT_REQ:
+        n3epc_dispatcher_bearer_deact_req(
+            (n3epc_dispatcher_bearer_deact_req_struct*)p_ilm->local_para_ptr);
+        break;
+
+        /*WO->DISPATCHER binding related*/
+    case MSG_ID_N3EPC_DISPATCHER_PDN_BIND_REQ:
+        n3epc_dispatcher_pdn_bind_req(
+            (n3epc_dispatcher_pdn_bind_req_struct*)p_ilm->local_para_ptr,
+            p_ilm->src_mod_id);
+        break;
+
+    case MSG_ID_N3EPC_DISPATCHER_PDN_UNBIND_REQ:
+        n3epc_dispatcher_pdn_unbind_req(
+            (n3epc_dispatcher_pdn_unbind_req_struct*)p_ilm->local_para_ptr,
+            p_ilm->src_mod_id);
+        break;
+
+        /*DISPATCHER->IPCORE binding related*/
+    case MSG_ID_IPCORE_DISPATCHER_PDN_BIND_RSP:
+        ipcore_dispatcher_pdn_bind_rsp(
+            (ipcore_dispatcher_pdn_bind_rsp_struct*)p_ilm->local_para_ptr);
+        break;
+#ifdef ATEST_SYS_DISPATCHER
+    case MSG_ID_N3EPC_DISPATCHER_PDN_BIND_CNF:
+    case MSG_ID_N3EPC_DISPATCHER_PDN_UNBIND_CNF:
+        dispatcher_ut_handling_n3epc_ilm(p_ilm);
+        break;
+#endif
+    default:
+        MD_TRC_DISPATCHER_TR_ILM_WRONG_MSG_ID(p_ilm->msg_id);
+        break;
+    }
+}
+
+
+/**
+ * DL callback registration API of the Dispatcher module
+ * IPCORE registers its DL callback via this API
+ */
+void dispatcher_reg_cbk_dlvr_dl_did(dispatcher_dlvr_dl_did_f pf_dlvr_did)
+{
+    //need to set the ipcore dl callback
+    g_funcptr_ipcore_dl_did_cbk = pf_dlvr_did;
+
+}
+
+
+/******************************************************************
+****************Data path functions******************************
+*****************************************************************/
+
+/**
+ * DL callback from wifi tunnel (wtunnel->dispatcher->ipcore)
+ * 1. validate the bearer id first & convert it to pdn_id
+ * 2. get binding state for pdn_id
+ * 3. forward data to ipcore by pdn_id
+ * 4. drop the DID if anything fails
+ */
+void dispatcher_on_dl_did_cbk(kal_uint8 bearer_id, upcm_did* p_head,
+                              upcm_did* p_tail, kal_uint8 protocol_idx)
+{
+    //dispatch to IPCORE, checking whether the DL callback is registered
+    kal_bool is_bearer_active = KAL_FALSE;
+    MD_TRC_DISPATCHER_DATA_TRACE_RCV_DL_DID(bearer_id, p_head, p_tail, protocol_idx);
+    
+    if (IS_VALID_BEARERID(bearer_id))
+    {
+        is_bearer_active = dispatcher_check_is_bearer_active_by_bearerid(bearer_id, protocol_idx);
+        if (KAL_TRUE == is_bearer_active)
+        {
+            dispatcher_forward_dl_did_to_ipc(bearer_id, p_head, p_tail, protocol_idx);
+            return;
+        }
+    }
+    /*bearer id is invalid or the bearer is inactive, drop the DID here*/
+    //upcm_did_dest_q(p_head, p_tail);
+    MD_TRC_DISPATCHER_TR_DL_DID_DROP_BEARER_STATS(bearer_id, is_bearer_active, p_head, p_tail);
+    DISPATCHER_DROP_DL_DID(bearer_id, p_head, p_tail, protocol_idx);
+}
+
+
+void dispatcher_forward_dl_did_to_ipc(kal_uint8 bearer_id, upcm_did* p_head,
+                              upcm_did* p_tail, kal_uint8 protocol_idx)
+{
+    kal_uint32 pdn_id = 0;
+    DISPATCHER_CONVERT_BEARERID_TO_PDNID(bearer_id, pdn_id);
+    if (g_funcptr_ipcore_dl_did_cbk) {
+        MD_TRC_DISPATCHER_DATA_TRACE_FORWARD_DL_DID(pdn_id, p_head, p_tail, protocol_idx);
+        g_funcptr_ipcore_dl_did_cbk(pdn_id, p_head, p_tail, protocol_idx);
+    } else {
+        MD_TRC_DISPATCHER_TR_DL_DID_DROP_NULL_CBK(p_head, p_tail);
+        DISPATCHER_DROP_DL_DID(bearer_id, p_head, p_tail, protocol_idx);
+    }
+}
+
+
+void dispatcher_drop_dl_did(kal_uint8 bearer_id, upcm_did* p_head,
+                            upcm_did* p_tail, kal_uint8 protocol_idx)
+{
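+    /* release the whole DID chain (head..tail) together with its data buffers */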
+    upcm_did_dest_q_free_buf(p_head, p_tail);
+}
+
+/**
+ * UL GPD flow (ipcore->dispatcher->wtunnel or UPCM)
+ * 1. validate the ebi first: it must be an active wifi bearer
+ * 2. get binding state for ebi
+ * 3. forward data to wtunnel by bearerid
+ * 4. if not forwarded to the wifi tunnel, forward to UPCM
+ */
+void dispatcher_rcv_ul_gpd_by_ebi(kal_uint32 ebi, qbm_gpd* p_head, qbm_gpd* p_tail, kal_uint8 protocol_idx)
+{
+    kal_bool is_bearer_active = KAL_FALSE;
+    dispatcher_bind_state_enum bind_state;
+    /*the ebi must be a valid wifi bearer & bound via the dispatcher*/
+    MD_TRC_DISPATCHER_DATA_TRACE_RCV_UL_GPD_BY_EBI(ebi, p_head, p_tail, protocol_idx);
+    if (IS_VALID_BEARERID(ebi))
+    {
+        is_bearer_active = dispatcher_check_is_bearer_active_by_bearerid(ebi, protocol_idx);
+        bind_state = dispatcher_get_bind_state_by_bearerid(ebi, protocol_idx);
+
+        if ((DISPATCHER_PDN_ST_BIND == bind_state) &&
+            (KAL_TRUE == is_bearer_active))
+        {
+            DISPATCHER_FORWARD_UL_GPD_WTUNNEL(ebi, p_head, p_tail, protocol_idx);
+            return;
+        }
+    }
+
+    DISPATCHER_FORWARD_UL_GPD_UPCM_BY_EBI(ebi, p_head, p_tail, protocol_idx);
+
+}
+
+
+/**
+ * UL GPD flow (ipcore->dispatcher->wtunnel or UPCM)
+ * 1. validate the pdn_id first & convert it to bearer_id
+ * 2. get binding state for pdn_id
+ * 3. forward data to wtunnel by bearerid
+ * 4. drop the GPD if anything fails
+ * 5. if pdn_id not of wifi, forward to UPCM
+ */
+void dispatcher_rcv_ul_gpd_by_pdn(ip_type_e ip_type, kal_uint32 pdn_id,
+                                  qbm_gpd* p_head, qbm_gpd* p_tail,
+                                  kal_uint8 protocol_idx)
+{
+    /*check whether pdn_id belongs to a wifi pdn*/
+    MD_TRC_DISPATCHER_DATA_TRACE_RCV_UL_GPD(pdn_id, p_head, p_tail, protocol_idx);
+    if (IS_PDNID_OF_WIFI(pdn_id))
+    {
+        dispatcher_bind_state_enum bind_state = dispatcher_get_bind_state_by_pdnid(pdn_id, protocol_idx);
+        kal_bool is_bearer_active = dispatcher_check_is_bearer_active_by_pdnid(pdn_id, protocol_idx);
+        if ((DISPATCHER_PDN_ST_BIND == bind_state) &&
+            (KAL_TRUE == is_bearer_active))
+        {
+            //wifi tunnel ul gpd interface without ip_type
+            kal_uint8 bearer_id = 0;
+            DISPATCHER_CONVERT_PDNID_TO_BEARERID(pdn_id, bearer_id);
+            DISPATCHER_FORWARD_UL_GPD_WTUNNEL(bearer_id, p_head, p_tail, protocol_idx);
+        } else {
+            //should drop this here
+            MD_TRC_DISPATCHER_TR_UL_GPD_DROP(pdn_id, is_bearer_active, bind_state, p_head, p_tail);
+            DISPATCHER_DROP_UL_GPD(pdn_id, p_head, p_tail, protocol_idx);
+        }
+    } else {
+        //call UPCM UL GPD interface
+        MD_TRC_DISPATCHER_DATA_TRACE_FORWARD_UL_GPD_UPCM(pdn_id, p_head, p_tail, protocol_idx);
+        DISPATCHER_FORWARD_UL_GPD_UPCM(ip_type, pdn_id, p_head, p_tail, protocol_idx);
+    }
+}
+
+
+void dispatcher_drop_ul_gpd(kal_uint32 pdn_id, qbm_gpd* p_head,
+                            qbm_gpd* p_tail, kal_uint8 protocol_idx)
+{
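+    /* destroy the GPD queue (head..tail) and return it to the QBM pool */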
+    qbmt_dest_q(p_head, p_tail);
+}
+
+
+/**
+ * dispatcher UL META interface (IPCORE->Dispatcher->wtunnel or UPCM)
+ * query the meta Q from LHIF & process each meta index
+ * if a meta entry belongs to a wifi pdn, convert it to a GPD & keep accumulating GPDs of the same PDN
+ * forward the GPD list to wtunnel
+ * forward the LHIF q to UPCM to release the meta entries
+ */
+void dispatcher_rcv_ul_meta_queue(kal_uint16 start_idx, kal_uint16 end_idx,
+                                  LHIF_QUEUE_TYPE queue_type)
+{
+    kal_uint16 read_idx;
+    lhif_meta_tbl_t *meta_tbl;
+    lhif_meta_tbl_t *meta;
+    kal_uint16 tbl_size;
+
+    qbm_gpd *head_gpd_p = NULL;
+    qbm_gpd *tail_gpd_p =  NULL;
+    kal_uint8 previous_bearer_id = 0;
+    kal_uint8 previous_proto_idx = 0;
+
+    read_idx = start_idx;
+    MD_TRC_DISPATCHER_DATA_TRACE_RCV_UL_META(queue_type, start_idx, end_idx);
+    //Query meta table
+    DIPSTACHER_QUERY_META_TBL((kal_uint32 **)&meta_tbl, &tbl_size, queue_type);
+    //check each meta index and process
+    do {
+        meta = &meta_tbl[read_idx];
+        //meta is not ignored & the pdn is a wifi pdn
+
+        if (!meta->ignore && IS_PDNID_OF_WIFI(meta->pdn))
+        {
+            //check bind & bearer status both
+            dispatcher_bind_state_enum bind_state = dispatcher_get_bind_state_by_pdnid(meta->pdn, meta->protocol_idx);
+            kal_bool is_bearer_active = dispatcher_check_is_bearer_active_by_pdnid(meta->pdn, meta->protocol_idx);
+            
+            if ((DISPATCHER_PDN_ST_BIND == bind_state) &&
+                (KAL_TRUE == is_bearer_active))
+            {
+                kal_uint8 curr_bearer_id;
+                kal_uint8 curr_proto_idx;
+                kal_uint8 *ip_packet = NULL;
+                kal_uint32 length = 0;
+                qbm_gpd *curr_gpd_p;
+                kal_uint8 *gpd_data_p;
+                DISPATCHER_CONVERT_PDNID_TO_BEARERID(meta->pdn, curr_bearer_id);
+                curr_proto_idx = meta->protocol_idx;
+                length = meta->length;
+                ip_packet = meta->vrb_addr;
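+                /* allocate a single UL GPD to carry this meta entry's payload */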
+                if (1 != qbmt_alloc_q_no_tail(QBM_TYPE_NET_UL_SHRD,
+                    1,
+                    (void**) &curr_gpd_p,
+                    (void**) &curr_gpd_p))
+                {
+                    //gpd allocation failed
+                    MD_TRC_DISPATCHER_TR_GPD_ALLOC_FAILED(read_idx);
+                    meta->ignore = KAL_TRUE;
+                    DISPATCHER_FREE_META_VRB(meta);
+                    goto drop;
+                }
+                MD_TRC_DISPATCHER_DATA_TRACE_ALLOC_GPD_INFO(read_idx, meta->pdn, curr_gpd_p);
+                QBM_CACHE_INVALID(ip_packet, length);
+                //find the data ptr
+                dispatcher_utils_set_gpd_datalen(curr_gpd_p, length, (void**) &gpd_data_p);
+                kal_mem_cpy(gpd_data_p, ip_packet, length);
+                QBM_CACHE_FLUSH(gpd_data_p, length);
+
+                meta->ignore = KAL_TRUE;
+                //dpcopro_rbuf_release(meta->vrb_addr, meta->length);
+                DISPATCHER_FREE_META_VRB(meta);
+
+                if ((NULL == head_gpd_p) && (NULL == tail_gpd_p))
+                {
+                    head_gpd_p = tail_gpd_p = curr_gpd_p;
+                    previous_bearer_id = curr_bearer_id;
+                    previous_proto_idx = curr_proto_idx;
+                }
+                else
+                {
+                    if ((previous_bearer_id == curr_bearer_id) &&
+                        (previous_proto_idx == curr_proto_idx))
+                    {
+                        //prev & current gpd can be chained together, so update the tail gpd
+                        QBM_DES_SET_NEXT(tail_gpd_p, curr_gpd_p);
+                        tail_gpd_p = curr_gpd_p;
+                        MD_TRC_DISPATCHER_DATA_TRACE_CLUB_GPD_INFO(head_gpd_p, tail_gpd_p);
+                    }
+                    else
+                    {
+                        //previous GPD & curr gpd are of different bearer or proto
+                        //call made to wtunnel for previous gpd head tail
+                        DISPATCHER_FORWARD_UL_GPD_WTUNNEL(previous_bearer_id,
+                            head_gpd_p, tail_gpd_p, previous_proto_idx);
+                        //now update the variable
+                        previous_bearer_id = curr_bearer_id;
+                        previous_proto_idx = curr_proto_idx;
+                        head_gpd_p = tail_gpd_p = curr_gpd_p;
+                    }
+                }
+            } /*PDN state is not bind, but IPCORE has somehow sent this to the Dispatcher*/
+            else
+            {
+                /*PDN is in the wifi PDN range but not bound*/
+                MD_TRC_DISPATCHER_TR_IGR_META_INDEX(read_idx, meta->pdn, bind_state, is_bearer_active);
+                meta->ignore = KAL_TRUE;
+                //dpcopro_rbuf_release(meta->vrb_addr, meta->length);
+                DISPATCHER_FREE_META_VRB(meta);
+            }
+        }
+
+drop:
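+        /* advance to the next meta index, wrapping around at the end of the table */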
+        read_idx ++;
+        if (read_idx == tbl_size) {
+            read_idx = 0;
+        }
+    } while (read_idx != end_idx);
+
+    /*flush any remaining accumulated gpd list*/
+    if(head_gpd_p) {
+        DISPATCHER_FORWARD_UL_GPD_WTUNNEL(previous_bearer_id, head_gpd_p,
+            tail_gpd_p, previous_proto_idx);
+    }
+    /*forward to upcm to release meta & process non wifi pdn meta idxs*/
+    MD_TRC_DISPATCHER_DATA_TRACE_FORWARD_UL_META(queue_type, start_idx, end_idx);
+    DISPATCHER_FORWARD_UL_META_UPCM(start_idx, end_idx, queue_type);
+}
+
+
+kal_bool dispatcher_wtunnel_ul_send(kal_uint8 bearer_id, qbm_gpd *head,
+                                    qbm_gpd *tail, kal_uint8 proto_idx)
+{
+    MD_TRC_DISPATCHER_DATA_TRACE_FORWARD_UL_GPD_WTUNNEL(bearer_id, head, tail, proto_idx);
+    //actual wtunnel cbk here
+    wtunnel_upp_ul_send(bearer_id, head, tail, proto_idx);
+    return KAL_TRUE;
+}
+
+
+
+void dispatcher_utils_set_gpd_datalen(void *p_gpd, kal_uint32 datalen,
+                                      void **p_payload)
+{
+    kal_uint8* p_data = NULL;
+
+    ASSERT(p_gpd && p_payload);
+
+    if (QBM_DES_GET_BDP(p_gpd)) {
+        void* p_bd = NULL;
+
+        /* the GPD points to a BD; fetch the BD and then its data ptr */
+        p_bd = QBM_DES_GET_DATAPTR(p_gpd);
+
+        p_data = (kal_uint8*) QBM_DES_GET_DATAPTR(p_bd);
+
+        /* set p_bd data len */
+        QBM_DES_SET_DATALEN(p_bd, datalen);
+
+        /* set p_bd checksum */
+        qbm_cal_set_checksum(p_bd);
+    } else {
+        p_data = (kal_uint8*) QBM_DES_GET_DATAPTR(p_gpd);
+    }
+
+    /* set p_gpd data len */
+    QBM_DES_SET_DATALEN(p_gpd, datalen);
+
+    /* set p_gpd checksum */
+    qbm_cal_set_checksum(p_gpd);
+
+    *p_payload = (void*)(p_data);
+}
+
+
+/*added for mini dump: reset sensitive data*/
+#if defined(__SENSITIVE_DATA_MOSAIC__)
+void dispatcher_module_clean(void)
+{
+    dispatcher_clean_private_data();
+}
+#endif
diff --git a/mcu/protocol/dispatcher/src/dispatcher_ut.c b/mcu/protocol/dispatcher/src/dispatcher_ut.c
new file mode 100644
index 0000000..14a47a8
--- /dev/null
+++ b/mcu/protocol/dispatcher/src/dispatcher_ut.c
@@ -0,0 +1,1398 @@
+/*****************************************************************************
+*  Copyright Statement:
+*  --------------------
+*  This software is protected by Copyright and the information contained
+*  herein is confidential. The software may not be copied and the information
+*  contained herein may not be used or disclosed except with the written
+*  permission of MediaTek Inc. (C) 2012
+*
+*  BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+*  THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+*  RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
+*  AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+*  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+*  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+*  NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+*  SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+*  SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
+*  THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
+*  NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
+*  SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
+*
+*  BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
+*  LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+*  AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+*  OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
+*  MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+*
+*  THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
+*  WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
+*  LAWS PRINCIPLES.  ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
+*  RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
+*  THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
+*
+*****************************************************************************/
+
+/*******************************************************************************
+ * Filename:
+ * ---------
+ *   dispatcher_ut.c
+ *
+ * Project:
+ * --------
+ *   VMOLY
+ *
+ * Description:
+ * ------------
+ *   DISPATCHER unit test implementation.
+ *
+ * Author:
+ * -------
+ * -------
+ *
+ *******************************************************************************/
+
+#ifdef ATEST_SYS_DISPATCHER
+
+#include "kal_public_api.h"
+#include "sys_test.h"
+#include "n3epc_dispatcher_struct.h"
+#include "ipcore_dispatcher_struct.h"
+#include "dispatcher_struct.h"
+#include "dispatcher_if.h"
+#include "dispatcher_msgid.h"
+
+#define DISPATCHER_UT_CASE(_func, _param) { #_func, _func, _param }
+#define DISPATCHER_UT_TEST_BEARERID 14
+#define DISPATCHER_UT_TEST_INVALID_BEARERID 18 /*greater than 15*/
+#define DISPATCHER_UT_TEST_PDNID 15 //the current meta does not store values above 32, so 15 is used to exercise the current code
+#define DISPATCHER_UT_TEST_CID 100 //any temp cid value
+#define DISPATCHER_UT_TEST_NWID 4 //any temp nw_id value
+#define DISPATCHER_UT_TEST_NON_WIFI_PDNID 20
+
+static kal_uint8 dispatcher_ut_ipv4_dns_packet[] = {
+        0x45, 0x00, 0x00, 0x45, 0x50, 0x9e, 0x00, 0x00, 0x80, 0x11, 0x96, 0x3a, 0xac, 0x16, 0x97, 0x53,
+        0xac, 0x15, 0x64, 0x50, 0xc0, 0x51, 0x00, 0x35, 0x00, 0x31, 0x7d, 0x15, 0x2d, 0x89, 0x01, 0x00,
+        0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x50, 0x43, 0x31, 0x30, 0x30, 0x36, 0x30,
+        0x30, 0x31, 0x34, 0x08, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6b, 0x03, 0x69, 0x6e, 0x63,
+        0x00, 0x00, 0x1c, 0x00, 0x01};
+
+static kal_uint8 dispatcher_ut_ipv6_dhcp_ul_packet[] = {
+        0x60, 0x00, 0x00, 0x00, 0x00, 0x56, 0x11, 0x80, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x02, 0x13, 0xd4, 0xff, 0xfe, 0x80, 0x56, 0x15, 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x02, 0x22, 0x02, 0x23, 0x00, 0x56, 0x98, 0x9b,
+        0x01, 0x00, 0x12, 0x69, 0x00, 0x01, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x06, 0x18, 0xc5, 0xe1, 0xd2,
+        0x00, 0x13, 0xd4, 0x80, 0x56, 0x15, 0x00, 0x03, 0x00, 0x28, 0x00, 0x00, 0x00, 0x01, 0xff, 0xff,
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+        0xff, 0xff, 0x00, 0x08, 0x00, 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x02, 0x00, 0x17};
+
+
+
+typedef struct {
+    kal_uint32  type_idx;
+    kal_uint8   *base_addr;
+    kal_uint8   read_idx;
+    kal_uint8   write_idx;
+    kal_uint8   *buff_addr_base;
+}dispatcher_ut_meta_queue_t;
+
+typedef enum {
+    DISPATCHER_UT_LHIF_META_AP0 = 0,
+    DISPATCHER_UT_META_QUEUE_NUM_MAX,
+}dispatcher_ut_meta_type_e;
+
+#define DISPATCHER_UT_META_TABLE_SIZE      70
+static lhif_meta_tbl_t dispatcher_ut_meta_tbl_s[DISPATCHER_UT_META_TABLE_SIZE];
+
+static dispatcher_ut_meta_queue_t dispatcher_ut_meta_queues[DISPATCHER_UT_META_QUEUE_NUM_MAX];
+
+kal_uint8 g_ul_wtunnel_forwarded_gpd_count;
+kal_uint8 g_dl_ipcore_forward_did_count;
+kal_uint8 g_dl_drop_did_count;
+kal_uint8 g_ul_drop_gpd_count;
+kal_uint8 g_ul_upcm_forward_gpd_count;
+kal_uint8 g_ul_total_ignr_count;
+kal_bool dispatcher_ut_assert_flag = KAL_FALSE;
+typedef struct {
+    kal_uint8 cid;
+    kal_bool result;
+    ps_cause_enum error_code; 
+}dispatcher_bind_cnf_struct;
+
+typedef struct {
+    kal_uint8 cid;
+    kal_bool result;
+    ps_cause_enum error_code; 
+}dispatcher_unbind_cnf_struct;
+
+dispatcher_bind_cnf_struct g_curr_bind_cnf;
+dispatcher_unbind_cnf_struct g_curr_unbind_cnf;
+
+kal_bool dispatcher_ut_test_first(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz) {
+    /*temp test case*/
+    return KAL_TRUE;
+}
+
+void dispatcher_ut_bearer_act_req_only(void)
+{
+
+    n3epc_dispatcher_bearer_act_req_struct req_struct;
+    req_struct.bearer_id = DISPATCHER_UT_TEST_BEARERID;
+    req_struct.linked_bearer_id = DISPATCHER_UT_TEST_BEARERID;
+    req_struct.context_id = DISPATCHER_UT_TEST_CID;
+    n3epc_dispatcher_bearer_act_req(&req_struct);
+
+}
+
+void dispatcher_ut_invalid_bearer_act_req_only(int bearer_id)
+{
+
+    n3epc_dispatcher_bearer_act_req_struct req_struct;
+    req_struct.bearer_id = bearer_id;
+    req_struct.linked_bearer_id = bearer_id;
+    req_struct.context_id = DISPATCHER_UT_TEST_CID;
+    n3epc_dispatcher_bearer_act_req(&req_struct);
+
+}
+
+void dispatcher_ut_bearer_deact_req_only(void)
+{
+    n3epc_dispatcher_bearer_deact_req_struct bearer_deact_req_struct;
+    bearer_deact_req_struct.bearer_id = DISPATCHER_UT_TEST_BEARERID;
+    bearer_deact_req_struct.context_id = DISPATCHER_UT_TEST_CID;
+    n3epc_dispatcher_bearer_deact_req(&bearer_deact_req_struct);
+}
+
+
+void dispatcher_ut_invalid_bearer_deact_req_only(int bearer_id)
+{
+    n3epc_dispatcher_bearer_deact_req_struct bearer_deact_req_struct;
+    bearer_deact_req_struct.bearer_id = bearer_id;
+    bearer_deact_req_struct.context_id = DISPATCHER_UT_TEST_CID;
+    n3epc_dispatcher_bearer_deact_req(&bearer_deact_req_struct);
+}
+
+kal_bool dispatcher_ut_bearer_act_req_test(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    
+    kal_bool is_active = KAL_FALSE;
+    /**********reset dispatcher full context************/
+    dispatcher_reset();
+
+    /*activate bearer with invalid value 0 */
+    is_active = dispatcher_check_is_bearer_active_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    dispatcher_ut_invalid_bearer_act_req_only(0);
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+
+    /*activate bearer with invalid value 18 */
+    dispatcher_ut_invalid_bearer_act_req_only(DISPATCHER_UT_TEST_INVALID_BEARERID);
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+    /*valid bearer activate*/
+    dispatcher_ut_bearer_act_req_only();
+    /*check if bearer is really active*/
+    is_active = dispatcher_check_is_bearer_active_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (is_active != KAL_TRUE) {
+        return KAL_FALSE;
+    }
+
+    /*bearer is active, now try a duplicate bearer req*/
+    dispatcher_ut_bearer_act_req_only();
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+    /*deactivate the activated bearer, kind of reset*/
+    dispatcher_ut_bearer_deact_req_only();
+    return KAL_TRUE;
+}
+
+
+
+kal_bool dispatcher_ut_bearer_deact_req_test(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    kal_bool is_active = KAL_FALSE;
+   
+    dispatcher_reset();
+    /*********reset dispatcher full context************/
+    
+    /*invalid req*/
+    dispatcher_ut_invalid_bearer_deact_req_only(0);
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+    /*deactivate bearer with invalid value 18 */
+    dispatcher_ut_invalid_bearer_deact_req_only(DISPATCHER_UT_TEST_INVALID_BEARERID);
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+    
+    /*activate a valid bearer*/
+    dispatcher_ut_bearer_act_req_only();
+    is_active = dispatcher_check_is_bearer_active_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+
+    dispatcher_ut_bearer_deact_req_only();
+    /*check if bearer is really deactivated*/
+    is_active = dispatcher_check_is_bearer_active_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (is_active != KAL_FALSE) {
+         return KAL_FALSE;
+    }
+    return KAL_TRUE;
+}
+
+void dispatcher_ut_pdn_bind_req_only(void)
+{
+    n3epc_dispatcher_pdn_bind_req_struct pdn_bind_struct;
+    pdn_bind_struct.context_id = DISPATCHER_UT_TEST_CID;
+    pdn_bind_struct.network_interface_id = DISPATCHER_UT_TEST_NWID;
+    n3epc_dispatcher_pdn_bind_req(&pdn_bind_struct, MOD_DISPATCHER);
+}
+
+void dispatcher_ut_pdn_unbind_req_only(void)
+{
+    n3epc_dispatcher_pdn_unbind_req_struct pdn_unbind_struct;
+    pdn_unbind_struct.context_id = DISPATCHER_UT_TEST_CID;
+    n3epc_dispatcher_pdn_unbind_req(&pdn_unbind_struct, MOD_DISPATCHER);
+}
+
+void dispatcher_ut_pdn_bind_ipc_rsp_only(void)
+{
+    /*ipcore bind OK rsp*/
+    ipcore_dispatcher_pdn_bind_rsp_struct ipc_bind_rsp_struct;
+    ipc_bind_rsp_struct.network_interface_id = DISPATCHER_UT_TEST_NWID;
+    ipc_bind_rsp_struct.pdn_id = DISPATCHER_UT_TEST_PDNID;
+    ipc_bind_rsp_struct.result = HIF_IPC_OK;
+    ipc_bind_rsp_struct.back_info.context_id = DISPATCHER_UT_TEST_CID;
+    ipc_bind_rsp_struct.back_info.reply_dest_mod_id = MOD_DISPATCHER;
+    ipcore_dispatcher_pdn_bind_rsp(&ipc_bind_rsp_struct);
+}
+
+void dispatcher_ut_pdn_bind_ipc_error_rsp_only(void)
+{
+    /*ipcore bind error rsp*/
+    ipcore_dispatcher_pdn_bind_rsp_struct ipc_bind_rsp_struct;
+    ipc_bind_rsp_struct.network_interface_id = DISPATCHER_UT_TEST_NWID;
+    ipc_bind_rsp_struct.pdn_id = DISPATCHER_UT_TEST_PDNID;
+    ipc_bind_rsp_struct.result = !(HIF_IPC_OK);
+    ipc_bind_rsp_struct.back_info.context_id = DISPATCHER_UT_TEST_CID;
+    ipc_bind_rsp_struct.back_info.reply_dest_mod_id = MOD_DISPATCHER;
+    ipcore_dispatcher_pdn_bind_rsp(&ipc_bind_rsp_struct);
+}
+
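+/**
+ * g_curr_bind_cnf / g_curr_unbind_cnf are filled by
+ * dispatcher_ut_handling_n3epc_ilm() (bottom of this file) when the
+ * dispatcher's bind/unbind cnf ILM is routed back to the UT; these helpers
+ * compare the latest cnf against the expected result and, for expected
+ * failures, the expected error code.
+ */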
+kal_bool dispatcher_ut_return_compare_bind_cnf(kal_bool result, ps_cause_enum error_code) {
+
+    /*compare the last received bind cnf against the expected result*/
+    if (g_curr_bind_cnf.result != result) {
+        return KAL_FALSE;
+    }
+    if (g_curr_bind_cnf.result == KAL_TRUE) {
+        return KAL_TRUE;
+    }
+    /*for an expected failure, also verify the error code*/
+    if (g_curr_bind_cnf.error_code == error_code) {
+        return KAL_TRUE;
+    }
+    return KAL_FALSE;
+}
+
+kal_bool dispatcher_ut_return_compare_unbind_cnf(kal_bool result, ps_cause_enum error_code) {
+
+    /*compare the last received unbind cnf against the expected result*/
+    if (g_curr_unbind_cnf.result != result) {
+        return KAL_FALSE;
+    }
+    if (g_curr_unbind_cnf.result == KAL_TRUE) {
+        return KAL_TRUE;
+    }
+    /*for an expected failure, also verify the error code*/
+    if (g_curr_unbind_cnf.error_code == error_code) {
+        return KAL_TRUE;
+    }
+    return KAL_FALSE;
+}
+
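+/**
+ * UT case: PDN bind request handling across the bind state machine
+ * (UNBIND -> BINDING -> BIND, plus BINDING_GOING_TO_UNBIND), covering bind
+ * without bearer activation, duplicate bind/act requests and the simulated
+ * IPCORE bind response.
+ */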
+kal_bool dispatcher_ut_pdn_bind_req_test(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    kal_bool is_continue = KAL_FALSE;
+    dispatcher_bind_state_enum state;
+    
+    /**********reset dispatcher full context************/
+    dispatcher_reset();
+
+    //TEST CASE1
+    /*do pdn bind without bearer activation*/
+    dispatcher_ut_pdn_bind_req_only();
+    if (!dispatcher_ut_return_compare_bind_cnf(KAL_FALSE, N3EPC_DISPATCHER_BIND_ERROR_AS_BEARER_NOT_ACTIVATED_BEFORE)) {
+        return KAL_FALSE;
+    }
+    
+    /* TEST CASE2 bearer act+ pdn bind req+ bind rsp from ipc*/
+    kal_mem_set(&g_curr_bind_cnf, 0, sizeof(g_curr_bind_cnf));
+    dispatcher_reset();
+
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    
+    if (state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+
+    /*at binding stage send duplicate bearer act req*/
+    dispatcher_ut_bearer_act_req_only();
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+    /*at dispatcher binding state send another bind req*/
+    dispatcher_ut_pdn_bind_req_only();
+    if (!dispatcher_ut_return_compare_bind_cnf(KAL_FALSE, N3EPC_DISPATCHER_BIND_ERROR_AS_PREV_BIND_IN_PROGRESS)) {
+        return KAL_FALSE;
+    }
+
+    /*simulate ipc bind rsp*/
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BIND) {
+        return KAL_FALSE;
+    }
+    if (!dispatcher_ut_return_compare_bind_cnf(KAL_TRUE, 0)) {
+        return KAL_FALSE;
+    }
+
+    /*at bind stage send duplicate bearer act req*/
+    dispatcher_ut_bearer_act_req_only();
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+
+    /*TEST CASE3 pdn bind already successful, send a duplicate bind req*/
+    kal_mem_set(&g_curr_bind_cnf, 0, sizeof(g_curr_bind_cnf));
+    dispatcher_ut_pdn_bind_req_only();
+    if (!dispatcher_ut_return_compare_bind_cnf(KAL_FALSE, N3EPC_DISPATCHER_BIND_ERROR_AS_PDN_ALREADY_BIND)) {
+        return KAL_FALSE;
+    }
+
+    /*TEST CASE4 */
+    /*for BINDING state: bearer deact req, then bearer act req, check assert flag*/
+    kal_mem_set(&g_curr_bind_cnf, 0, sizeof(g_curr_bind_cnf));
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);   
+    if (state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_bearer_deact_req_only();
+    dispatcher_ut_bearer_act_req_only();
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+
+    /*TEST CASE5 */
+    /*binding going to unbind: bearer deact then bearer act, assert fails as this state does not allow bearer activation here*/
+    kal_mem_set(&g_curr_bind_cnf, 0, sizeof(g_curr_bind_cnf));
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    dispatcher_ut_pdn_unbind_req_only();
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);   
+    if (state != DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND) {
+        return KAL_FALSE;
+    }
+   
+    dispatcher_ut_bearer_deact_req_only();
+    dispatcher_ut_bearer_act_req_only();
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+
+
+    /*TEST CASE6 */
+    /*BIND state: bearer deact then bearer act, assert fails as this state does not allow bearer activation here*/
+    kal_mem_set(&g_curr_bind_cnf, 0, sizeof(g_curr_bind_cnf));
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);   
+    if (state != DISPATCHER_PDN_ST_BIND) {
+        return KAL_FALSE;
+    }
+   
+    dispatcher_ut_bearer_deact_req_only();
+    dispatcher_ut_bearer_act_req_only();
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);   
+    if (state != DISPATCHER_PDN_ST_BIND) {
+        return KAL_FALSE;
+    }
+    /*test pdn bind rsp in an unexpected state (already BIND), the assert flag should become true*/
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    if (dispatcher_ut_assert_flag) {
+        dispatcher_ut_assert_flag = KAL_FALSE;
+    } else {
+        return KAL_FALSE;
+    }
+    return KAL_TRUE;
+
+}
+
+
+
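+/**
+ * UT case: normal PDN unbind handling, including an IPCORE bind failure at
+ * BINDING, unbind while BINDING (BINDING_GOING_TO_UNBIND), duplicate unbind
+ * requests and bearer deactivation in the intermediate states.
+ */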
+kal_bool dispatcher_ut_pdn_unbind_normal_req_test(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    kal_bool is_continue = KAL_FALSE;
+    dispatcher_bind_state_enum state;
+    dispatcher_reset();
+    /*TEST CASE 1*/    
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();/*binding state*/
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    /*at binding state send ipc bind rsp failure--->state will become UNBIND*/
+    dispatcher_ut_pdn_bind_ipc_error_rsp_only();
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_UNBIND) {
+        return KAL_FALSE;
+    }
+    if (!dispatcher_ut_return_compare_bind_cnf(KAL_FALSE, 0)) {
+        return KAL_FALSE;
+    }
+
+
+    /*TEST CASE 2*/
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();/*binding state*/
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_unbind_req_only();/*this will trigger state binding_going_to_unbind*/
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_unbind_req_only();/*another unbind req @ binding_going_to_unbind state*/
+    if (!dispatcher_ut_return_compare_unbind_cnf(KAL_FALSE, N3EPC_DISPATCHER_UNBIND_ERROR_AS_DUPLICATE_UNBIND_REQ)) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_bind_req_only();/*bind req @ binding going to unbind state*/
+    if (!dispatcher_ut_return_compare_bind_cnf(KAL_FALSE, N3EPC_DISPATCHER_BINDING_GOING_TO_UNBIND_ERROR_AS_PREV_BIND_IN_PROGRESS)) {
+        return KAL_FALSE;
+    }
+    /*dispatcher state is binding going to unbind , so send ipc bind rsp failure*/
+    dispatcher_ut_pdn_bind_ipc_error_rsp_only();
+    /*check both bind cnf and unbind cnf; the pdn bind state should become UNBIND*/
+    if (!dispatcher_ut_return_compare_bind_cnf(KAL_FALSE, 0)) {
+        return KAL_FALSE;
+    }
+    if (!dispatcher_ut_return_compare_unbind_cnf(KAL_TRUE, 0)) {
+        return KAL_FALSE;
+    }
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_UNBIND) {
+        return KAL_FALSE;
+    }
+
+    /*send unbind at unbind state*/
+    dispatcher_ut_pdn_unbind_req_only();
+    if (!dispatcher_ut_return_compare_unbind_cnf(KAL_FALSE, N3EPC_DISPATCHER_UNBIND_ERROR_AS_DUPLICATE_UNBIND_REQ)) {
+        return KAL_FALSE;
+    }
+
+    /*TEST CASE 3 ipc rsp success at binding state*/
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();/*binding state*/
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    /*state should be BIND*/
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BIND) {
+        return KAL_FALSE;
+    }
+    if (!dispatcher_ut_return_compare_bind_cnf(KAL_TRUE, 0)) {
+        return KAL_FALSE;
+    }
+
+
+    /*TEST CASE 4 ipc rsp success at binding_going_to_unbind state*/
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();/*binding state*/
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_unbind_req_only();/*unbind req @ binding state, triggers binding_going_to_unbind*/
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    /*check both bind cnf and unbind cnf; the pdn bind state should become UNBIND*/
+    if (!dispatcher_ut_return_compare_bind_cnf(KAL_FALSE, HIF_CAUSE_START)) {
+        return KAL_FALSE;
+    }
+    if (!dispatcher_ut_return_compare_unbind_cnf(KAL_TRUE, 0)) {
+        return KAL_FALSE;
+    }
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_UNBIND) {
+        return KAL_FALSE;
+    }
+
+    /*check some other condition*/
+    /*pdn state is binding or binding going to unbind, then deactivate the bearer*/
+    /*as the bearer can be deactivated at any time, there should be no assert*/
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();/*binding state*/
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_bearer_deact_req_only();
+    if (dispatcher_ut_assert_flag) {
+        return KAL_FALSE;
+    }
+
+
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();/*binding state*/
+    dispatcher_ut_pdn_unbind_req_only();/*binding going to unbind*/
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_bearer_deact_req_only();
+    if (dispatcher_ut_assert_flag) {
+        return KAL_FALSE;
+    }
+
+    return KAL_TRUE;
+}
+
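+/**
+ * UT case: handover-style teardown where the PDN is unbound first and the
+ * bearer is deactivated afterwards.
+ */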
+kal_bool dispatcher_ut_pdn_unbind_handover_req_test(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    /*first do bind*/
+    kal_bool is_continue = KAL_FALSE;
+    dispatcher_bind_state_enum state;
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_BIND) {
+        return KAL_FALSE;
+    }
+    /*for handover case unbind will happen before bearer deact*/
+    dispatcher_ut_pdn_unbind_req_only();
+    state = dispatcher_get_bind_state_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (state != DISPATCHER_PDN_ST_UNBIND) {
+        return KAL_FALSE;
+    }
+
+    is_continue = dispatcher_check_is_bearer_active_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (is_continue != KAL_TRUE) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_bearer_deact_req_only();
+    is_continue = dispatcher_check_is_bearer_active_by_bearerid(DISPATCHER_UT_TEST_BEARERID, 0);
+    if (is_continue != KAL_FALSE) {
+        return KAL_FALSE;
+    }
+    return KAL_TRUE;
+}
+
+
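+/**
+ * Allocate pkt_count UL GPDs from the shared pool and copy a canned IPv4 DNS
+ * or IPv6 DHCP packet into each BD so the UL path has real payloads.
+ */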
+void dispatcher_ut_prepare_ul_gpd_list(ip_type_e ip, kal_uint32 pkt_count, qbm_gpd **head_gpd, qbm_gpd **tail_gpd)
+{
+    qbm_gpd            *curr_gpd;
+    qbm_gpd            *bd;
+    kal_uint8          *packet_buf;
+    kal_uint32          packet_len;
+    kal_uint32          idx;
+    if (pkt_count != qbmt_alloc_q(
+                        QBM_TYPE_NET_UL_SHRD,
+                        pkt_count,
+                        (void **)(head_gpd),
+                        (void **)(tail_gpd))) {
+        return;
+    }
+    
+    curr_gpd = *head_gpd;
+    for (idx = 0; idx < pkt_count; idx++)
+    {
+        bd = QBM_DES_GET_DATAPTR(curr_gpd);
+        if(ip == IP_TYPE_V4) {
+            packet_buf = dispatcher_ut_ipv4_dns_packet;
+            packet_len = sizeof(dispatcher_ut_ipv4_dns_packet);
+        }
+        if(ip == IP_TYPE_V6) {
+            packet_buf = dispatcher_ut_ipv6_dhcp_ul_packet;
+            packet_len = sizeof(dispatcher_ut_ipv6_dhcp_ul_packet);
+        }
+        kal_mem_cpy(QBM_DES_GET_DATAPTR(bd), packet_buf, packet_len);
+        QBM_DES_SET_DATALEN(bd, packet_len);
+        QBM_DES_SET_DATALEN(curr_gpd, packet_len);
+        curr_gpd = (qbm_gpd *)QBM_DES_GET_NEXT(curr_gpd);
+    }
+}
+
+kal_bool dispatcher_ut_prepare_and_recv_gpd_test_case_non_wifipdnId(void) 
+{
+    kal_uint32  ipv4_cnt = 2;
+    qbm_gpd    *head_gpd;
+    qbm_gpd    *tail_gpd;
+    kal_uint8 non_wifi_pdn = DISPATCHER_UT_TEST_NON_WIFI_PDNID;
+    kal_uint8 proto_idx = 0;
+    dispatcher_ut_prepare_ul_gpd_list(1, ipv4_cnt, &head_gpd, &tail_gpd);
+    dispatcher_rcv_ul_gpd_by_pdn((ip_type_e)1, non_wifi_pdn, head_gpd, tail_gpd, proto_idx);
+    if (g_ul_upcm_forward_gpd_count != ipv4_cnt) {
+        return KAL_FALSE;
+    }
+    return KAL_TRUE;
+}
+
+
+kal_bool dispatcher_ut_prepare_and_recv_gpd_test_case()
+{
+    kal_uint32  ipv4_cnt = 2;
+    kal_uint32  ipv6_cnt = 2;
+    qbm_gpd    *head_gpd;
+    qbm_gpd    *tail_gpd;
+    kal_uint8 test_pdn_id = DISPATCHER_UT_TEST_PDNID;
+    kal_uint8 proto_idx = 0;
+
+    dispatcher_bind_state_enum bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, proto_idx);
+    kal_bool is_bearer_active = dispatcher_check_is_bearer_active_by_pdnid(DISPATCHER_UT_TEST_PDNID, proto_idx);   
+
+    if (ipv4_cnt > 0) {
+        dispatcher_ut_prepare_ul_gpd_list(1, ipv4_cnt, &head_gpd, &tail_gpd);
+        dispatcher_rcv_ul_gpd_by_pdn((ip_type_e)1, test_pdn_id, head_gpd, tail_gpd, proto_idx);
+        if((bind_state == DISPATCHER_PDN_ST_BIND) && is_bearer_active) {
+            if(g_ul_wtunnel_forwarded_gpd_count != ipv4_cnt) {
+                return KAL_FALSE;
+            }
+            g_ul_wtunnel_forwarded_gpd_count = 0;
+        } else {
+            if(g_ul_drop_gpd_count != ipv4_cnt) {
+                return KAL_FALSE;
+            }
+            g_ul_drop_gpd_count = 0;
+        }
+
+    }
+    if (ipv6_cnt > 0) {
+        dispatcher_ut_prepare_ul_gpd_list(2, ipv6_cnt, &head_gpd, &tail_gpd);
+        dispatcher_rcv_ul_gpd_by_pdn((ip_type_e)2, test_pdn_id, head_gpd, tail_gpd, proto_idx);
+        if((bind_state == DISPATCHER_PDN_ST_BIND) && is_bearer_active) {
+            if(g_ul_wtunnel_forwarded_gpd_count != ipv6_cnt) {
+                return KAL_FALSE;
+            }
+            g_ul_wtunnel_forwarded_gpd_count = 0;
+        } else {
+            if(g_ul_drop_gpd_count != ipv6_cnt) {
+                return KAL_FALSE;
+            }
+            g_ul_drop_gpd_count = 0;
+        }
+    
+    }
+    return KAL_TRUE;
+    
+}
+
+kal_bool dispatcher_ut_ul_gpd_req_test(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    /*1. bearer act + bind
+      2. prepare UL GPD list
+      3. run the UL GPD test case
+      4. bearer deact & unbind*/
+    dispatcher_reset();
+    /*first do bind*/
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+
+    if (!dispatcher_ut_prepare_and_recv_gpd_test_case()) {
+        return KAL_FALSE;
+    }
+    
+
+    /*bearer deact & unbind in advance for next test case*/
+    dispatcher_ut_bearer_deact_req_only();
+    dispatcher_ut_pdn_unbind_req_only();
+
+
+    //now pdn is unbind & bearer inactive, check that the UL GPD list is dropped
+    if (!dispatcher_ut_prepare_and_recv_gpd_test_case()) {
+        return KAL_FALSE;
+    }
+
+    //a test case to check non wifi pdn id ul gpd req forwarding to UPCM
+    if (!dispatcher_ut_prepare_and_recv_gpd_test_case_non_wifipdnId()) {
+        return KAL_FALSE;
+    }
+
+    return KAL_TRUE;
+}
+
+
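+/**
+ * Allocate a DL DID queue of the requested length via upcm_did_alloc_q() and
+ * walk it once, re-linking each element to its successor to keep the chain
+ * consistent; returns the number of DIDs actually allocated.
+ */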
+kal_uint32 dispatcher_ut_prepare_did_list(kal_uint8 count, upcm_did** p_head, upcm_did** p_tail)
+{
+    kal_bool end_of_list = KAL_FALSE;
+    upcm_did   *did;
+    upcm_did   *prev_did = NULL;
+    upcm_did   *next_did;
+    kal_uint8 alloc_num = upcm_did_alloc_q(count, p_head, p_tail);
+    
+    for (did = *p_head; did && !end_of_list; did = next_did) {
+        next_did = UPCM_DID_GET_NEXT(did);
+        end_of_list = (did == *p_tail);
+
+        if (prev_did) {
+            UPCM_DID_SET_NEXT(prev_did, did);
+        }
+        prev_did = did;
+    }
+
+    return alloc_num;
+}
+
+
+kal_bool dispatcher_ut_dl_did_req_test(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    /*1. bearer act + bind
+      2. prepare DL DID list
+      3. run the DL test case
+      4. bearer deact & unbind*/
+
+    kal_uint32 allocated_count = 0;
+    kal_uint8 proto_idx = 0;
+    upcm_did* p_head;
+    upcm_did* p_tail;
+    kal_uint8 count = 2;
+
+    dispatcher_reset();
+
+    /*TEST CASE 1*/
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    /*DL cbk is not registered so these DIDs will be dropped*/
+    dispatcher_ut_prepare_did_list(count, &p_head, &p_tail);
+    dispatcher_on_dl_did_cbk(DISPATCHER_UT_TEST_BEARERID, p_head, p_tail, proto_idx);
+    if (g_dl_drop_did_count != count) {
+        return KAL_FALSE;
+    }
+    g_dl_drop_did_count = 0;
+
+
+    /*TEST CASE 2*/
+    /*now register DL cbk of ipcore simulation*/
+    count = 4;
+    dispatcher_reg_cbk_dlvr_dl_did(dispatcher_ut_forward_dl_did_to_ipc);
+    dispatcher_ut_prepare_did_list(count, &p_head, &p_tail);
+    dispatcher_on_dl_did_cbk(DISPATCHER_UT_TEST_BEARERID, p_head, p_tail, proto_idx); 
+    if (g_dl_ipcore_forward_did_count != count) {
+        return KAL_FALSE;
+    }
+    g_dl_ipcore_forward_did_count = 0;
+
+    /*TEST CASE 3*/
+    /*bearer deact & unbind in advance for next test case*/
+    dispatcher_ut_bearer_deact_req_only();
+    dispatcher_ut_pdn_unbind_req_only();
+    // pdn not bind / bearer deactivated, so run the drop test again
+    count = 3;
+    allocated_count = dispatcher_ut_prepare_did_list(count, &p_head, &p_tail);
+    dispatcher_on_dl_did_cbk(DISPATCHER_UT_TEST_BEARERID, p_head, p_tail, proto_idx);
+    if (g_dl_drop_did_count != count) {
+        return KAL_FALSE;
+    }
+    g_dl_drop_did_count = 0;
+    return KAL_TRUE;
+
+}
+
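+/**
+ * The following helpers emulate the LHIF meta queue as a simple ring buffer
+ * over dispatcher_ut_meta_tbl_s; init resets the queue bookkeeping and points
+ * the AP0 queue at the static meta table.
+ */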
+void dispatcher_ut_init_meta(void) {
+    /*reset Q property*/
+    kal_mem_set(dispatcher_ut_meta_queues, 0, sizeof(dispatcher_ut_meta_queue_t));
+    /*reset meta tbl values*/
+    kal_mem_set(dispatcher_ut_meta_tbl_s, 0, sizeof(dispatcher_ut_meta_tbl_s));
+    /*init the Q type & Q base address*/
+    dispatcher_ut_meta_queues[DISPATCHER_UT_LHIF_META_AP0].base_addr = (kal_uint8*)dispatcher_ut_meta_tbl_s;
+    dispatcher_ut_meta_queues[DISPATCHER_UT_LHIF_META_AP0].type_idx = DISPATCHER_UT_LHIF_META_AP0;                
+}
+
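+/*
+ * Ring-buffer allocation with one slot always kept free:
+ *   used   = (w >= r) ? (w - r) : (SIZE - r + w)
+ *   remain = SIZE - used - 1
+ * e.g. with SIZE = 8, r = 6, w = 2: used = 4, remain = 3, so a request for
+ * 5 entries is clamped to 3 and w advances from 2 to 5.
+ */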
+kal_uint32 dispatcher_ut_alloc_meta(
+        dispatcher_ut_meta_type_e  q_type,
+        kal_uint32          request_num,
+        kal_uint32         *p_head_idx,
+        kal_uint32         *p_tail_idx) {
+    kal_uint32 alloc_num, used_num, remain_num;
+
+    kal_uint8 write_idx, read_idx;
+
+    write_idx = dispatcher_ut_meta_queues[q_type].write_idx;
+    read_idx = dispatcher_ut_meta_queues[q_type].read_idx;
+
+    used_num = (write_idx >= read_idx)
+                    ? (write_idx - read_idx) :
+                      (DISPATCHER_UT_META_TABLE_SIZE - read_idx + write_idx);
+    remain_num = DISPATCHER_UT_META_TABLE_SIZE - used_num - 1;
+    alloc_num = (request_num > remain_num) ? remain_num : request_num;
+
+    *p_head_idx = write_idx;
+    write_idx += alloc_num;
+    if (write_idx >= DISPATCHER_UT_META_TABLE_SIZE) {
+        write_idx -= DISPATCHER_UT_META_TABLE_SIZE;
+    }
+    *p_tail_idx = write_idx;
+
+    dispatcher_ut_meta_queues[q_type].write_idx = write_idx;
+
+    return alloc_num;
+}
+
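+/**
+ * Fill an allocated meta run with ipv4_cnt IPv4 metas followed by ipv6_cnt
+ * IPv6 metas: the first v4/v6_wifi_pdn_meta_cnt entries of each family get
+ * the WiFi test PDN id (the rest the non-WiFi id) and the first
+ * v4/v6_igr_cnt entries are pre-marked as ignore.
+ */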
+void dispatcher_ut_prepare_ul_meta_list(
+        kal_uint8 v4_wifi_pdn_meta_cnt,
+        kal_uint8 v6_wifi_pdn_meta_cnt,
+        kal_uint32 ipv4_cnt,
+        kal_uint32 ipv6_cnt,
+        kal_uint32 *p_head_idx,
+        kal_uint32 *p_tail_idx,
+        LHIF_QUEUE_TYPE *q_type,
+        kal_uint8 proto_idx,
+        kal_uint8 v4_igr_cnt,
+        kal_uint8 v6_igr_cnt) {
+
+        kal_uint32          total_cnt = ipv4_cnt + ipv6_cnt;
+        
+        kal_uint8 v4_igr = v4_igr_cnt;
+        kal_uint8 v6_igr = v6_igr_cnt;
+        kal_uint8 v4_wifi_meta = v4_wifi_pdn_meta_cnt;
+        kal_uint8 v6_wifi_meta = v6_wifi_pdn_meta_cnt;
+        kal_uint8          *packet_buf;
+        kal_uint32          packet_len;
+        kal_uint8           sequence = 0;
+        kal_uint32          curr_meta_idx;
+        lhif_meta_tbl_t    *curr_meta;
+        kal_uint32          idx;
+        dispatcher_ut_alloc_meta(DISPATCHER_UT_LHIF_META_AP0, total_cnt, p_head_idx, p_tail_idx);
+        *q_type = DISPATCHER_UT_LHIF_META_AP0;
+
+
+        /*now fill each meta as per choice*/
+        /*IPV4 first*/
+        curr_meta_idx = *p_head_idx;
+        for (idx = 0; idx < ipv4_cnt; idx++) {
+             curr_meta = &dispatcher_ut_meta_tbl_s[curr_meta_idx];
+             packet_buf = dispatcher_ut_ipv4_dns_packet;
+             packet_len = sizeof(dispatcher_ut_ipv4_dns_packet);
+             curr_meta->vrb_addr = packet_buf;
+             curr_meta->length = packet_len;
+            curr_meta->psn = sequence;
+            if (v4_wifi_meta > 0) 
+            {
+                curr_meta->pdn = DISPATCHER_UT_TEST_PDNID;
+                v4_wifi_meta --;
+            }
+            else
+            {
+                curr_meta->pdn = DISPATCHER_UT_TEST_NON_WIFI_PDNID;
+            }
+            curr_meta->protocol_idx = proto_idx;
+            if (v4_igr >0)
+            {
+                curr_meta->ignore = KAL_TRUE;
+                v4_igr --;
+            }
+            sequence ++;
+            curr_meta_idx ++;
+            if (curr_meta_idx == DISPATCHER_UT_META_TABLE_SIZE) {
+                curr_meta_idx = 0;
+            }
+            if (curr_meta_idx == *p_tail_idx) {
+                return;
+            }
+        }
+        /*IPV6*/
+        for (idx = 0; idx < ipv6_cnt; idx++) {
+            curr_meta = &dispatcher_ut_meta_tbl_s[curr_meta_idx];
+             packet_buf = dispatcher_ut_ipv6_dhcp_ul_packet;
+             packet_len = sizeof(dispatcher_ut_ipv6_dhcp_ul_packet);
+            curr_meta->vrb_addr = packet_buf;
+            curr_meta->length = packet_len;
+            curr_meta->psn = sequence;
+            if (v6_wifi_meta > 0) 
+            {
+                curr_meta->pdn = DISPATCHER_UT_TEST_PDNID;
+                v6_wifi_meta --;
+            }
+            else
+            {
+                curr_meta->pdn = DISPATCHER_UT_TEST_NON_WIFI_PDNID;
+            }
+            if (v6_igr >0)
+            {
+                curr_meta->ignore = KAL_TRUE;
+                v6_igr --;
+            }
+            curr_meta->protocol_idx = proto_idx;
+            sequence ++;
+
+            curr_meta_idx ++;
+            if (curr_meta_idx == DISPATCHER_UT_META_TABLE_SIZE) {
+                curr_meta_idx = 0;
+            }
+            if (curr_meta_idx == *p_tail_idx) {
+                return;
+            }
+        }
+
+}
+
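+/**
+ * UT case: UL meta queue handling. With the bearer active and the PDN bound,
+ * WiFi-PDN metas that are not pre-ignored are expected to be forwarded to the
+ * WiFi tunnel and, as the asserts below imply, marked as ignore towards UPCM;
+ * after an unbind nothing is forwarded any more.
+ */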
+kal_bool dispatcher_ut_ul_meta_req(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    kal_uint32  ipv4_cnt = 5;
+    kal_uint32  ipv6_cnt = 5;
+    kal_uint32  head_idx;
+    kal_uint32  tail_idx;
+    LHIF_QUEUE_TYPE q_type;
+    kal_uint8 proto_idx = 0;
+    kal_uint8 v4_ignore = 1;
+    kal_uint8 v6_ignore = 1;
+    kal_uint8 ipv4_wifi_pdn_meta = 2;
+    kal_uint8 ipv6_wifi_pdn_meta = 2;
+    kal_uint8 expected_forward_count = 0;
+    kal_uint8 expected_igr_count = 0;
+
+    dispatcher_reset();
+
+    /*first do bind*/
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+
+    dispatcher_ut_init_meta();          	
+    dispatcher_ut_prepare_ul_meta_list(ipv4_wifi_pdn_meta, ipv6_wifi_pdn_meta, ipv4_cnt, ipv6_cnt, &head_idx, &tail_idx, &q_type, proto_idx, v4_ignore, v6_ignore);
+    /*test uplink flow*/
+    dispatcher_rcv_ul_meta_queue(head_idx, tail_idx, q_type);
+    expected_forward_count = ipv4_wifi_pdn_meta - v4_ignore + ipv6_wifi_pdn_meta - v6_ignore;
+    expected_igr_count = v4_ignore + v6_ignore + expected_forward_count;
+    ASSERT(expected_forward_count == g_ul_wtunnel_forwarded_gpd_count);
+    ASSERT(expected_igr_count == g_ul_total_ignr_count);
+    g_ul_total_ignr_count = 0;
+    g_ul_wtunnel_forwarded_gpd_count = 0;
+
+    /*bearer deact & unbind in advance for next test case*/
+    dispatcher_ut_bearer_deact_req_only();
+    dispatcher_ut_pdn_unbind_req_only();
+
+
+    /*first do bind*/
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+
+    dispatcher_ut_init_meta();
+    dispatcher_ut_pdn_unbind_req_only();
+    /*bearer active but state unbind*/
+    dispatcher_ut_prepare_ul_meta_list(ipv4_wifi_pdn_meta, ipv6_wifi_pdn_meta, ipv4_cnt, ipv6_cnt, &head_idx, &tail_idx, &q_type, proto_idx, v4_ignore, v6_ignore);
+    /*test uplink flow*/
+    dispatcher_rcv_ul_meta_queue(head_idx, tail_idx, q_type);
+    expected_forward_count = 0;
+    expected_igr_count = v4_ignore + v6_ignore + (ipv4_wifi_pdn_meta - v4_ignore) + (ipv6_wifi_pdn_meta - v6_ignore);
+    ASSERT(expected_forward_count == g_ul_wtunnel_forwarded_gpd_count);
+    ASSERT(expected_igr_count == g_ul_total_ignr_count);
+    g_ul_total_ignr_count = 0;
+    g_ul_wtunnel_forwarded_gpd_count = 0;
+    /*deactivate bearer*/
+    dispatcher_ut_bearer_deact_req_only();
+    return KAL_TRUE;
+
+}
+
+kal_bool dispatcher_ut_ul_gpd_bearer_act_and_no_binding(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    /*only bearer act, pdn stays unbind
+      prepare gpd list & execute TC
+      expected: the gpd list is dropped*/
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    if (!dispatcher_ut_prepare_and_recv_gpd_test_case()) {
+        return KAL_FALSE;
+    }
+
+    /*bearer deact & unbind in advance for next test case*/
+    dispatcher_ut_bearer_deact_req_only();
+    return KAL_TRUE;
+
+}
+
+kal_bool dispatcher_ut_ul_gpd_bearer_act_bind_then_bearer_deact(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    dispatcher_reset();
+    /*first do bind*/
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    
+    /*deactivate bearer*/
+    dispatcher_ut_bearer_deact_req_only();
+
+    /*bearer deactivated & pdn still bind: on UL GPD recv the gpd list should be dropped*/
+    if (!dispatcher_ut_prepare_and_recv_gpd_test_case()) {
+        return KAL_FALSE;
+    }
+
+    /*reset by unbinding here*/
+    dispatcher_ut_pdn_unbind_req_only();
+    
+    return KAL_TRUE;
+}
+
+kal_bool dispatcher_ut_ul_meta_burst_packet_req(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    kal_uint32  ipv4_cnt = 30;
+    kal_uint32  ipv6_cnt = 30;
+    kal_uint32  head_idx;
+    kal_uint32  tail_idx;
+    LHIF_QUEUE_TYPE q_type;
+    kal_uint8 proto_idx = 0;
+    kal_uint8 v4_ignore = 0;
+    kal_uint8 v6_ignore = 0;
+    kal_uint8 ipv4_wifi_pdn_meta = 30;
+    kal_uint8 ipv6_wifi_pdn_meta = 30;
+    kal_uint8 expected_forward_count = 0;
+    kal_uint8 expected_igr_count = 0;
+    kal_uint8 expected_drop_pkt = 0;
+
+    dispatcher_reset();
+
+    /*first do bind*/
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+
+    dispatcher_ut_init_meta();          	
+    dispatcher_ut_prepare_ul_meta_list(ipv4_wifi_pdn_meta, ipv6_wifi_pdn_meta, ipv4_cnt, ipv6_cnt, &head_idx, &tail_idx, &q_type, proto_idx, v4_ignore, v6_ignore);
+    /*test uplink flow*/
+    dispatcher_rcv_ul_meta_queue(head_idx, tail_idx, q_type);
+    expected_forward_count = ipv4_wifi_pdn_meta - v4_ignore + ipv6_wifi_pdn_meta - v6_ignore;
+    expected_drop_pkt = expected_forward_count - g_ul_wtunnel_forwarded_gpd_count;
+    expected_igr_count = v4_ignore + v6_ignore + expected_forward_count;
+    ASSERT(expected_igr_count == g_ul_total_ignr_count);
+    g_ul_total_ignr_count = 0;
+    g_ul_wtunnel_forwarded_gpd_count = 0;
+
+    /*bearer deact & unbind in advance for next test case*/
+    dispatcher_ut_bearer_deact_req_only();
+    dispatcher_ut_pdn_unbind_req_only();
+
+    return KAL_TRUE;
+}
+
+kal_bool dispatcher_ut_pdn_binding_ipc_rsp_test(void *p_param, kal_char *p_ret_err_str, kal_uint32 *p_ret_err_str_sz)
+{
+    dispatcher_bind_state_enum bind_state;
+    // BINDING ->IPC RSP OK ->BIND
+    dispatcher_reset();
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_BIND) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_unbind_req_only();
+    dispatcher_ut_bearer_deact_req_only();
+
+    // BINDING ->IPC RSP FAIL ->UNBIND
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_bind_ipc_error_rsp_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_UNBIND) {
+        return KAL_FALSE;
+    }
+    //already unbind so just deactivate bearer
+    dispatcher_ut_bearer_deact_req_only();
+
+    // BINDING->BINDING_GOING_TO_UNBIND ->IPC RSP OK ->UNBIND
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_unbind_req_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_bind_ipc_rsp_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_UNBIND) {
+        return KAL_FALSE;
+    }
+    //already unbind so just deactivate bearer
+    dispatcher_ut_bearer_deact_req_only();
+
+
+    // BINDING->BINDING_GOING_TO_UNBIND ->IPC RSP FAIL->UNBIND
+    dispatcher_ut_bearer_act_req_only();
+    dispatcher_ut_pdn_bind_req_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_BINDING) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_unbind_req_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_BINDING_GOING_TO_UNBIND) {
+        return KAL_FALSE;
+    }
+    dispatcher_ut_pdn_bind_ipc_error_rsp_only();
+    bind_state = dispatcher_get_bind_state_by_pdnid(DISPATCHER_UT_TEST_PDNID, 0);
+    if (bind_state != DISPATCHER_PDN_ST_UNBIND) {
+        return KAL_FALSE;
+    }
+    //already unbind so just deactivate bearer
+    dispatcher_ut_bearer_deact_req_only();
+
+    return KAL_TRUE;
+}
+
+kal_bool dispatcher_ut_query_meta_table(kal_uint32 **base_addr, kal_uint16 *size, LHIF_QUEUE_TYPE queue_type) {
+    *base_addr = (kal_uint32 *)dispatcher_ut_meta_tbl_s;
+    *size = DISPATCHER_UT_META_TABLE_SIZE;
+
+    return KAL_TRUE;
+}
+
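+/**
+ * Register the DISPATCHER UT cases with the system-test framework; the cases
+ * are executed in table order.
+ */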
+kal_bool dispatcher_ut_st_create(void) {
+    static ST_TCASE_T dispatcher_ut_cases_s[] = {
+        DISPATCHER_UT_CASE(dispatcher_ut_test_first, NULL),
+        /*bearer activation only*/
+        DISPATCHER_UT_CASE(dispatcher_ut_bearer_act_req_test, NULL),
+        /*deactivate the same bearer*/
+        DISPATCHER_UT_CASE(dispatcher_ut_bearer_deact_req_test, NULL),
+        /*bearer act + bind*/
+        DISPATCHER_UT_CASE(dispatcher_ut_pdn_bind_req_test, NULL),
+        DISPATCHER_UT_CASE(dispatcher_ut_pdn_unbind_normal_req_test, NULL),
+        DISPATCHER_UT_CASE(dispatcher_ut_pdn_unbind_handover_req_test, NULL),
+        DISPATCHER_UT_CASE(dispatcher_ut_pdn_binding_ipc_rsp_test, NULL),
+        DISPATCHER_UT_CASE(dispatcher_ut_ul_gpd_req_test, NULL),
+        DISPATCHER_UT_CASE(dispatcher_ut_dl_did_req_test, NULL),
+        DISPATCHER_UT_CASE(dispatcher_ut_ul_meta_req, NULL),
+        DISPATCHER_UT_CASE(dispatcher_ut_ul_gpd_bearer_act_and_no_binding, NULL),
+        DISPATCHER_UT_CASE(dispatcher_ut_ul_gpd_bearer_act_bind_then_bearer_deact, NULL),
+        DISPATCHER_UT_CASE(dispatcher_ut_ul_meta_burst_packet_req, NULL),
+
+    };
+    return st_reg_test("DISPATCHER", &(dispatcher_ut_cases_s[0]), (sizeof(dispatcher_ut_cases_s)/sizeof(ST_TCASE_T)));
+}
+
+kal_bool dispatcher_ut_register_dl_cbk(dispatcher_dlvr_dl_did_f1 pf_dlvr_did) {
+    /*do nothing*/
+    return KAL_TRUE;
+}
+
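+/*
+ * The callbacks below are UT stand-ins for IPCORE / UPCM / WiFi-tunnel
+ * delivery: each one counts what it receives (releasing GPD/DID queues where
+ * applicable) and records the count in the global counters checked by the
+ * test cases above.
+ */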
+void dispatcher_ut_forward_dl_did_to_ipc(kal_uint32 pdn_id, upcm_did* p_head,
+                              upcm_did* p_tail, kal_uint8 protocol_idx)
+{
+    upcm_did           *did;
+    kal_bool end_of_list = KAL_FALSE;
+    upcm_did           *next_did;
+    kal_uint8 count = 0;
+   
+    for (did = p_head; did && !end_of_list; did = next_did) {
+        next_did = UPCM_DID_GET_NEXT(did);
+        count++;
+        end_of_list = (did == p_tail);
+    }
+    upcm_did_dest_q(p_head, p_tail);
+    g_dl_ipcore_forward_did_count = count;
+
+}
+
+void dispatcher_ut_drop_dl_did(kal_uint8 bearer_id, upcm_did* p_head,
+                              upcm_did* p_tail, kal_uint8 protocol_idx)
+{
+    /*dispatcher UT is dropping DID due to invalid bearer ID or bearer not active or no cbk set*/
+    
+    upcm_did           *did;
+    kal_bool end_of_list = KAL_FALSE;
+    upcm_did           *next_did;
+    kal_uint8 count = 0;
+   
+    for (did = p_head; did && !end_of_list; did = next_did) {
+        next_did = UPCM_DID_GET_NEXT(did);
+        count++;
+        end_of_list = (did == p_tail);
+    }
+    upcm_did_dest_q(p_head, p_tail);
+    g_dl_drop_did_count = count;
+}
+
+void dispatcher_ut_forward_upcm_ul_meta(kal_uint16 start_idx, kal_uint16 end_idx, LHIF_QUEUE_TYPE q_type)
+{
+    /*dispatcher has processed the queue; now check whether the meta processing matches expectations*/
+    kal_uint16 read_idx;
+    kal_uint8 igr_count = 0;
+    kal_uint16 wifi_pdn_meta_cnt = 0;
+    kal_uint16 non_wifi_pdn_meta_cnt = 0;
+    lhif_meta_tbl_t *meta_tbl;
+    lhif_meta_tbl_t *meta;
+    kal_uint16 tbl_size;
+    dispatcher_ut_query_meta_table((kal_uint32 **)&meta_tbl, &tbl_size, q_type);
+    read_idx = start_idx;
+
+    do {
+        meta = &meta_tbl[read_idx];
+        //meta is not ignore & pdn state is bind
+
+        if (meta->ignore) {
+            igr_count ++;
+        }
+
+        read_idx ++;
+        if(read_idx == tbl_size) {
+            read_idx = 0;
+        }
+    }while(read_idx != end_idx);
+    g_ul_total_ignr_count = igr_count;
+
+}
+
+void dispatcher_ut_forward_upcm_ul_gpd(ip_type_e ip_type, 
+                                kal_uint32 pdn_id, 
+                                qbm_gpd* p_head, 
+                                qbm_gpd* p_tail, 
+                                kal_uint8 protocol_idx)
+{
+    /*dispatcher is going to forward the gpd queue to upcm as PDN ID is not of WIFI*/
+
+    kal_uint8 count = qbmt_dest_q(p_head, p_tail);
+    if (IS_PDNID_OF_WIFI(pdn_id)) {
+        ASSERT(0);
+    }
+    g_ul_upcm_forward_gpd_count = count;
+
+}
+
+
+void dispatcher_ut_forward_upcm_ul_gpd_by_ebi(kal_uint32 ebi, qbm_gpd* p_head, qbm_gpd* p_tail, kal_uint8 protocol_idx)
+{
+    /*dispatcher is going to forward the gpd queue to upcm as PDN ID is not of WIFI*/
+
+    kal_uint8 count = qbmt_dest_q(p_head, p_tail);
+    
+    g_ul_upcm_forward_gpd_count = count;
+
+}
+
+void dispatcher_ut_forward_wtunnel_ul_gpd(kal_uint8 bearer_id, 
+                                qbm_gpd* p_head, 
+                                qbm_gpd* p_tail, 
+                                kal_uint8 protocol_idx)
+{
+    /*dispatcher is going to forward the gpd queue to wifi tunnel as PDN ID is of WIFI & bearer active & bind*/
+    kal_uint8 pdn_id = 0;
+    dispatcher_bind_state_enum bind_state;
+    kal_bool is_bearer_active;
+    kal_uint32 forwarded_gpd_count = qbmt_dest_q(p_head, p_tail);
+    
+    DISPATCHER_CONVERT_BEARERID_TO_PDNID(bearer_id, pdn_id);
+    bind_state = dispatcher_get_bind_state_by_pdnid(pdn_id, protocol_idx);
+    is_bearer_active = dispatcher_check_is_bearer_active_by_pdnid(pdn_id, protocol_idx);    
+    if(!((bind_state == DISPATCHER_PDN_ST_BIND) && (is_bearer_active== KAL_TRUE))) {
+        ASSERT(0);
+    }
+    g_ul_wtunnel_forwarded_gpd_count += forwarded_gpd_count;
+    
+}
+
+void dispatcher_ut_drop_ul_gpd(kal_uint32 pdn_id,
+                                  qbm_gpd* p_head, qbm_gpd* p_tail,
+                                  kal_uint8 protocol_idx)
+{
+    /*dispatcher is going to drop the gpd queue because the bearer is not active or the pdn is unbind*/
+    kal_uint8 count = qbmt_dest_q(p_head, p_tail);
+    dispatcher_bind_state_enum bind_state = dispatcher_get_bind_state_by_pdnid(pdn_id, protocol_idx);
+    kal_bool is_bearer_active = dispatcher_check_is_bearer_active_by_pdnid(pdn_id, protocol_idx);
+    if (!((bind_state !=  DISPATCHER_PDN_ST_BIND) || (is_bearer_active == KAL_FALSE)))
+    {
+        ASSERT(0);
+    }
+    g_ul_drop_gpd_count = count;
+
+}
+
+void dispatcher_ut_pdn_bind_cnf_handling(n3epc_dispatcher_pdn_bind_cnf_struct* local_para) {
+    g_curr_bind_cnf.cid = local_para->context_id;
+    g_curr_bind_cnf.result = local_para->is_successful;
+    g_curr_bind_cnf.error_code = local_para->error_result;
+}
+
+
+void dispatcher_ut_pdn_unbind_cnf_handling(n3epc_dispatcher_pdn_unbind_cnf_struct* local_para) {
+    g_curr_unbind_cnf.cid = local_para->context_id;
+    g_curr_unbind_cnf.result = local_para->is_successful;
+    g_curr_unbind_cnf.error_code = local_para->error_result;
+}
+
+void dispatcher_ut_handling_n3epc_ilm(ilm_struct *p_ilm) {
+     switch (p_ilm->msg_id) {
+        /*WO<-DISPATCHER bind cnf*/
+        case MSG_ID_N3EPC_DISPATCHER_PDN_BIND_CNF:
+            dispatcher_ut_pdn_bind_cnf_handling((n3epc_dispatcher_pdn_bind_cnf_struct*)p_ilm->local_para_ptr);
+            break;
+        /*WO<-DISPATCHER unbind cnf*/
+        case MSG_ID_N3EPC_DISPATCHER_PDN_UNBIND_CNF:
+            dispatcher_ut_pdn_unbind_cnf_handling((n3epc_dispatcher_pdn_unbind_cnf_struct*)p_ilm->local_para_ptr);
+            break;
+
+        default:
+            break;
+     }
+}
+
+#endif /* ATEST_SYS_DISPATCHER */