blob: f73326054126c6b9ae693b63e6b309fca19157cb [file] [log] [blame]
/*****************************************************************************
* Copyright Statement:
* --------------------
* This software is protected by Copyright and the information contained
* herein is confidential. The software may not be copied and the information
* contained herein may not be used or disclosed except with the written
* permission of MediaTek Inc. (C) 2005
*
* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
*
* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
*
* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
*
*****************************************************************************/
/*
* Include
*/
#include <string.h>
#include "kal_general_types.h"
#include "kal_internal_api.h"
#include "kal_public_defs.h"
#include "kal_public_api.h"
#include "kal_trace.h"
#include "syscomp_config.h"
#include "task_config.h"
#include "sysconf_statistics.h" /* stack_statistics_struct */
#include "custom_em.h"
/* Factory mode, should not send ADC calibration data to BMT */
#ifdef __MULTI_BOOT__
#include "multiboot_config.h"
#include "intrCtrl.h" /* INT_BootMode */
#endif /* __MULTI_BOOT__ */
#ifdef __CCCIFS_SUPPORT__
#include "ccci.h"
#endif
#if defined(__MTK_TARGET__)
#include "SST_secure.h"
#endif
#include "fs_type.h" /* FS_HANDLE */
#include "fs_func.h" /* FS_Delete */
#include "fs_errcode.h" /* FS_NO_ERROR */
#include "ex_item.h" /* EX_LOG_T */
#include "tst_msgid.h"
#include "sbp_public_utility.h"
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
#include "dcl_gpt.h"
#endif
#include "nvram_cache_interface.h"
#ifdef __NV_CHKSUM_ENHANCE__
#include "nvram_chksum_algorithm.h"
#endif
#if defined(__HIF_CCCI_SUPPORT__)
#include "ccci_if.h"
#endif
#include "us_timer.h"
#include "ex_public.h"
#if defined(__MTK_TARGET__)
#include "ostd_public.h"
#endif
/*******************************************************
* External Function
*******************************************************/
extern kal_int32 nvram_recover_data_item(nvram_ltable_entry_struct *ldi);
/*extern nvram_errno_enum nvram_read_data_item_multiple(
nvram_ltable_entry_struct *ldi,
kal_uint32 rec_index,
kal_uint16 rec_amount,
kal_uint8 *buffer,
kal_uint32 buffer_size);
*/
extern nvram_drv_status_enum nvram_drv_fat_write_multRec(nvram_ltable_entry_struct *ldi,
kal_char *nvramname,
nvram_folder_enum nvram_folder,
kal_uint32 file_offset,
kal_uint16 rec_index,
kal_uint16 rec_amount,
kal_uint32 rec_size,
const kal_uint8 *buffer,
kal_bool initialize);
extern kal_int32 nvram_get_defval_chksum_index(nvram_lid_enum LID);
extern nvram_folder_enum nvram_query_folder_index_ex(nvram_category_enum category, kal_bool first_copy);
extern void nvram_util_make_lid_filename(nvram_ltable_entry_struct *ldi, NVRAM_FILE_NAME nvramname, kal_bool first_copy);
kal_uint8 const * nvram_get_lid_default_value_to_write(
nvram_ltable_entry_struct *ldi,
kal_uint16 rec_index,
kal_uint8 *buffer,
kal_uint32 buffer_size);
extern module_type stack_get_active_module_id( void );
/*******************************************************
* Define
*******************************************************/
/*******************************************************
* Typedef
*******************************************************/
/*******************************************************
* Global Variable
*******************************************************/
kal_bool g_nvram_cache_ready = KAL_FALSE;
kal_bool g_nvram_cache_SHM_support = KAL_FALSE;
kal_mutexid g_nvram_cache_mutex = NULL;
static kal_int32 nvram_cache_last_err;
static kal_uint32 nvram_cache_last_line;
extern kal_bool bResetNvramData;
kal_uint8* g_nvcache_base_address = NULL;
kal_uint32 g_nvcache_memory_size = 0;
#if ((!defined(__MTK_TARGET__)) && (!defined(__UE_SIMULATOR__)))
ltable_type assgn_ltable = {0};
nvram_ltable_entry_struct *assgn_logical_data_item_table = NULL;
extern nvram_ltable_entry_struct _nvram_ltable_start;
extern nvram_ltable_entry_struct the_nvram_ltable_end;
#endif
extern nvram_ee_info_type* nvram_ee_info;
extern kal_char nvram_trace_dump_temp_buffer[];
extern kal_char nvram_trace_dump_buffer[];
extern kal_mutexid g_nvram_dump_trace_mutex;
extern kal_wchar nvram_trace_filename[];
extern FS_HANDLE nvram_trace_file_hdl;
extern kal_uint32 nvram_trace_dump_buffer_offset;
/*******************************************************
* Local Function
*******************************************************/
/*******************************************************
* Local Variable
*******************************************************/
/*****************************************************************************
* FUNCTION
* get_lid_cache_index_item
* DESCRIPTION
* binary-search the cache table for the entry matching a LID
* PARAMETERS
* LID [IN] logical data item ID to look up
* cache_ldi [OUT] receives a pointer to the matching cache table entry (may be NULL if caller only needs existence)
* RETURNS
* KAL_TRUE if found, KAL_FALSE otherwise
*****************************************************************************/
kal_bool get_lid_cache_index_item(nvram_lid_enum LID, nvram_lid_cache_table_struct** cache_ldi)
{
/*----------------------------------------------------------------*/
/* Local Variables */
/*----------------------------------------------------------------*/
kal_bool ret_val = KAL_FALSE;
/* Bug fix: the indices were kal_uint32. With an unsigned type,
 * an empty table (cache_lid_num == 0) made `high' wrap to
 * 0xFFFFFFFF, and probing a LID smaller than cache_info_table[0]
 * made `high = mid - 1' wrap as well -- both cases caused
 * out-of-bounds reads of cache_info_table. Signed indices make
 * the standard binary-search termination (low > high) safe. */
kal_int32 low = 0;
kal_int32 mid;
kal_int32 high = (kal_int32)cache_info_header.cache_lid_num - 1;
while (low <= high) {
mid = low + ((high - low)/2);
if(cache_info_table[mid].LID < LID) {
low = mid + 1;
}else if(cache_info_table[mid].LID > LID) {
high = mid - 1;
}else {
/* Found: report the entry only if the caller asked for it. */
if (cache_ldi) {
*cache_ldi = &cache_info_table[mid];
}
ret_val = KAL_TRUE;
break;
}
}
return ret_val;
}
/*****************************************************************************
* FUNCTION
* get_lid_cache_base_address
* DESCRIPTION
* get LID cache region base address
* PARAMETERS
* ldi [IN]
* RETURNS
* address
*****************************************************************************/
/* Resolve the absolute cache address of a LID's region.
 * On success writes the address into *cache_offset and returns
 * NVRAM_IO_ERRNO_OK; if the LID is not present in the cache table,
 * dumps diagnostics, asserts, and returns NVRAM_IO_ERRNO_INVALID_LID. */
nvram_errno_enum get_lid_cache_base_address(nvram_ltable_entry_struct* ldi, kal_uint32* cache_offset)
{
    nvram_lid_cache_table_struct *entry = NULL;
    kal_bool found = get_lid_cache_index_item(ldi->LID , &entry);

    if (!found) {
        kal_prompt_trace(MOD_NVRAM, "[%s]search lid failed from cache table index: 0x%x\n\r", __FUNCTION__, ldi->LID);
        kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
        kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error][%s]search failed from cache table\r\n", __FUNCTION__);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"LID 0x%04X category:0x%08X, attr:0x%08X\r\n", ldi->LID, ldi->category, ldi->attr);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
        NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)found, NVRAM_ERROR_LOC_NVCACHE_ERRNO_INVALID_LID_1, ldi->LID);
        return NVRAM_IO_ERRNO_INVALID_LID;
    }

    /* LID region = shared-memory base + cache table offset + per-LID offset. */
    *cache_offset = (kal_uint32)(g_nvcache_base_address
                                 + cache_info_header.cache_table_offset
                                 + entry->cache_offset);
    return NVRAM_IO_ERRNO_OK;
}
/*****************************************************************************
* FUNCTION
* get_lid_record_cache_offset
* DESCRIPTION
* get LID record cache region address
* PARAMETERS
* ldi [IN]
* rec_index [IN]
* section_size [IN]
* RETURNS
* record cache address
*****************************************************************************/
/* Resolve the absolute cache address of one record of a LID.
 * rec_index 0 addresses the LID header itself; record N (1-based)
 * sits after the header at (N-1) * section_size. Returns
 * NVRAM_IO_ERRNO_OK on success, NVRAM_IO_ERRNO_INVALID_LID when the
 * LID is missing from the cache table (after dumping diagnostics). */
nvram_errno_enum get_lid_record_cache_offset(nvram_ltable_entry_struct* ldi, kal_uint16 rec_index, kal_uint32 section_size, kal_uint32* cache_offset)
{
    nvram_lid_cache_table_struct *entry = NULL;
    kal_bool found = get_lid_cache_index_item(ldi->LID , &entry);
    kal_uint32 lid_base;

    if (!found) {
        kal_prompt_trace(MOD_NVRAM, "[%s]search lid failed from cache table index: 0x%x\n\r", __FUNCTION__, ldi->LID);
        kal_prompt_trace(MOD_NVRAM, "category:0x%x,attr:0x%x\n\r", ldi->category, ldi->attr);
        kal_prompt_trace(MOD_NVRAM, "fileprefix:%s,fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error][%s]search lid failed from cache table\r\n", __FUNCTION__);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"LID 0x%04X,category:0x%08X,attr:0x%08X\r\n", ldi->LID, ldi->category, ldi->attr);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"fileprefix:%s,fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
        NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)found, NVRAM_ERROR_LOC_NVCACHE_ERRNO_INVALID_LID_2, ldi->LID);
        return NVRAM_IO_ERRNO_INVALID_LID;
    }

    /* Common prefix: offset of this LID's region inside the cache. */
    lid_base = cache_info_header.cache_table_offset + entry->cache_offset;

    if (rec_index == 0) {
        *cache_offset = (kal_uint32)(g_nvcache_base_address + lid_base);
    } else {
        *cache_offset = (kal_uint32)(g_nvcache_base_address + lid_base
                                     + NVRAM_LDI_HEADER_SIZE
                                     + (rec_index - 1) * section_size);
    }
    return NVRAM_IO_ERRNO_OK;
}
/*****************************************************************************
* FUNCTION
* nvram_write_data_to_cache
* DESCRIPTION
* write data to cache
* PARAMETERS
* ldi [IN]
* src_buffer [IN]
* size [IN]
* cache_offset [IN]
* RETURNS
* success or fail
*****************************************************************************/
/* Write `size' bytes from src_buffer into the cache at cache_offset.
 * The destination must lie entirely within the LID's cached file image;
 * a cross-border write dumps diagnostics, asserts, and returns an error.
 * Returns NVRAM_IO_ERRNO_OK on success, NVRAM_IO_ERRNO_INVALID_LID when
 * the LID is not in the cache table, NVRAM_IO_ERRNO_INVALID_SIZE when
 * the write would cross the region border. */
nvram_errno_enum nvram_write_data_to_cache(nvram_ltable_entry_struct* ldi, void* src_buffer, kal_uint32 size, kal_uint32 cache_offset)
{
    nvram_lid_cache_table_struct *cache_ldi = NULL;
    kal_uint8* dest_buffer = NULL;
    kal_bool result = KAL_FALSE;
    kal_uint32 cache_temp_addr = 0;

    if (!(result = get_lid_cache_index_item(ldi->LID , &cache_ldi))) {
        kal_prompt_trace(MOD_NVRAM, "[%s]search lid failed from cache table index: 0x%x\n\r", __FUNCTION__, ldi->LID);
        kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
        kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error][%s]search lid failed from cache table\r\n", __FUNCTION__);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"LID 0x%04X,category:0x%08X, attr:0x%08X\r\n", ldi->LID, ldi->category, ldi->attr);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
        NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)result, NVRAM_ERROR_LOC_NVCACHE_ERRNO_INVALID_LID_3, ldi->LID);
        return NVRAM_IO_ERRNO_INVALID_LID;
    }

    nvram_util_take_mutex(g_nvram_cache_mutex);
    /* End of this LID's cached file image (exclusive upper bound). */
    cache_temp_addr = (kal_uint32)(g_nvcache_base_address + (cache_info_header.cache_table_offset + cache_ldi->cache_offset + cache_ldi->file_length));
    if(cache_temp_addr < (cache_offset + size))
    {
        nvram_util_give_mutex(g_nvram_cache_mutex);
        kal_prompt_trace(MOD_NVRAM, "NVCACHE WRITE SHM CROSS BORDE:0x%x < 0x%x\n\r", cache_temp_addr, (cache_offset + size));
        NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)cache_temp_addr, NVRAM_ERROR_LOC_NVCACHE_WRITE_SHM_CROSS_BORDER, ldi->LID);
        /* Bug fix: previously execution fell through here when
         * NVRAM_EXT_ASSERT is compiled out, performing the
         * out-of-bounds copy without the mutex and then releasing
         * the mutex a second time. Fail the write instead. */
        return NVRAM_IO_ERRNO_INVALID_SIZE;
    }
    dest_buffer = (kal_uint8*)cache_offset;
    kal_mem_cpy((void *)dest_buffer, src_buffer, size);
    nvram_util_give_mutex(g_nvram_cache_mutex);
    return NVRAM_IO_ERRNO_OK;
}
/*****************************************************************************
* FUNCTION
* nvram_read_data_from_cache
* DESCRIPTION
* read data from cache
* PARAMETERS
* ldi [IN]
* nvram_param [IN]
* RETURNS
* success or fail
*****************************************************************************/
/* Read nvram_param->Length bytes at nvram_param->Offset of the LID's
 * cached file image into nvram_param->DataPtr, and fill in the
 * bytes-read / file-size / ret fields of nvram_param. The requested
 * range must lie within the cached file image. Returns
 * NVRAM_IO_ERRNO_OK on success, NVRAM_IO_ERRNO_INVALID_LID when the
 * LID is not in the cache table, NVRAM_IO_ERRNO_INVALID_SIZE when the
 * read would cross the region border. */
nvram_errno_enum nvram_read_data_from_cache(nvram_ltable_entry_struct* ldi, NVRAM_FS_PARAM_CMPT_T *nvram_param)
{
    nvram_lid_cache_table_struct *cache_ldi = NULL;
    kal_bool result = KAL_FALSE;
    kal_uint32 cache_temp_addr = 0;

    if (!(result = get_lid_cache_index_item(ldi->LID , &cache_ldi))) {
        kal_prompt_trace(MOD_NVRAM, "[%s]search lid failed from cache table index: 0x%x\n\r", __FUNCTION__, ldi->LID);
        kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
        kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error][%s]search lid failed from cache table index\r\n", __FUNCTION__);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"LID 0x%04X category:0x%08X, attr:0x%08X\r\n", ldi->LID, ldi->category, ldi->attr);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
        NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)result, NVRAM_ERROR_LOC_NVCACHE_ERRNO_INVALID_LID_4, ldi->LID);
        return NVRAM_IO_ERRNO_INVALID_LID;
    }

    nvram_util_take_mutex(g_nvram_cache_mutex);
    /* Start of this LID's cached file image. */
    cache_temp_addr = (kal_uint32)(g_nvcache_base_address + (cache_info_header.cache_table_offset + cache_ldi->cache_offset));
    if((cache_temp_addr + cache_ldi->file_length) < (cache_temp_addr + nvram_param->Offset + nvram_param->Length))
    {
        nvram_util_give_mutex(g_nvram_cache_mutex);
        kal_prompt_trace(MOD_NVRAM, "NVCACHE READ SHM CROSS BORDE:0x%x < 0x%x\n\r", (cache_temp_addr + cache_ldi->file_length), (cache_temp_addr + nvram_param->Offset + nvram_param->Length));
        NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)cache_temp_addr, NVRAM_ERROR_LOC_NVCACHE_READ_SHM_CROSS_BORDER, ldi->LID);
        /* Bug fix: previously execution fell through here when
         * NVRAM_EXT_ASSERT is compiled out, performing an
         * out-of-bounds copy without the mutex and then releasing
         * the mutex a second time. Fail the read instead. */
        return NVRAM_IO_ERRNO_INVALID_SIZE;
    }
    kal_mem_cpy((void *)nvram_param->DataPtr, (void *)(g_nvcache_base_address + ((cache_info_header.cache_table_offset + cache_ldi->cache_offset) + nvram_param->Offset)), nvram_param->Length);
    *(nvram_param->Read) = nvram_param->Length;
    *(nvram_param->FileSize) = cache_ldi->file_length;
    nvram_param->ret[0] = nvram_param->opid_map;
    nvram_param->ret[1] = 0;
    nvram_util_give_mutex(g_nvram_cache_mutex);
    return NVRAM_IO_ERRNO_OK;
}
/*****************************************************************************
* FUNCTION
* nvram_read_header_from_cache
* DESCRIPTION
* read a LID header from the cache into a caller buffer
* PARAMETERS
* ldi [IN]
* buffer [OUT] destination buffer
* buffer_size [IN] number of bytes to copy
* cache_offset [IN] absolute cache address to read from
* RETURNS
* success or fail
*****************************************************************************/
/* Copy buffer_size bytes of a LID header out of the cache, from the
 * absolute cache address cache_offset, under the cache mutex.
 * Returns KAL_TRUE on success, KAL_FALSE when the LID is not present
 * in the cache table. */
kal_bool nvram_read_header_from_cache(nvram_ltable_entry_struct* ldi, void* buffer, kal_uint32 buffer_size, kal_uint32 cache_offset)
{
    nvram_lid_cache_table_struct *cache_ldi = NULL;
    kal_uint8* src_buffer = NULL;
    kal_bool result = KAL_FALSE;

    if (!(result = get_lid_cache_index_item(ldi->LID , &cache_ldi))) {
        kal_prompt_trace(MOD_NVRAM, "[%s]search lid failed from cache table index: 0x%x\n\r", __FUNCTION__, ldi->LID);
        kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
        kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error][%s]search lid failed from cache table\r\n", __FUNCTION__);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"LID 0x%04X,category:0x%08X, attr:0x%08X\r\n", ldi->LID, ldi->category, ldi->attr);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
        NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)result, NVRAM_ERROR_LOC_NVCACHE_ERRNO_INVALID_LID_5, ldi->LID);
        /* Bug fix: this function returns kal_bool, but the failure path
         * returned NVRAM_IO_ERRNO_INVALID_LID (an nvram_errno_enum
         * value), so callers testing against KAL_FALSE could mistake
         * failure for success. Return KAL_FALSE explicitly. */
        return KAL_FALSE;
    }
    nvram_util_take_mutex(g_nvram_cache_mutex);
    src_buffer = (kal_uint8*)cache_offset;
    kal_mem_cpy((void *)buffer, (void *)src_buffer, buffer_size);
    nvram_util_give_mutex(g_nvram_cache_mutex);
    return KAL_TRUE;
}
/* Compute a 16-bit rolling checksum over `size' bytes of `buf'.
 * The accumulator is seeded from the first two bytes of the buffer,
 * then for every byte: the accumulator is rotated left once per set
 * bit of the byte, rotated left by 4, and the byte is added into the
 * accumulator's low half.
 * NOTE(review): the inline asm passes `chksum' as an input-only
 * operand ("r" with no "=r" output), so whether the rotate result is
 * observed by the C code depends on the register allocator -- confirm
 * against the target toolchain before relying on the exact value.
 * NOTE(review): seeding from *(kal_uint16*)buf assumes buf is at
 * least 2 bytes and 2-byte aligned -- TODO confirm callers. */
DECLARE_MIPS32
static kal_uint16 nvram_cache_data_header_checksum(kal_uint8 *buf, kal_uint32 size)
{
/*----------------------------------------------------------------*/
/* Local Variables */
/*----------------------------------------------------------------*/
kal_uint32 i;
kal_uint16 chksum = *(kal_uint16*)buf; /* seed from first two bytes */
kal_uint8 *byte_chksum = (kal_uint8*)&chksum; /* low byte of the accumulator */
kal_uint8 value;
/*----------------------------------------------------------------*/
/* Code Body */
/*----------------------------------------------------------------*/
for (i = 0; i < size; i++)
{
value = *(buf + i);
/* One left-rotate of the accumulator per set bit in this byte. */
while(value) {
if(value & 0x1) {
#if defined(__MTK_TARGET__)
__asm__ __volatile__
(
"rol %0, %0, 1\r\n"
::"r"(chksum)
);
#else
__asm {ROL [chksum],1};
#endif
}
value >>= 1;
}
/* Rotate by 4 between bytes to spread positional influence. */
#if defined(__MTK_TARGET__)
__asm__ __volatile__
(
"rol %0, %0, 4\r\n"
::"r"(chksum)
);
#else
__asm {ROL [chksum],4};
#endif
*byte_chksum += *(buf + i);
}
return chksum;
}
/* Compute the file offset at which a LID's appendix header begins:
 * the fixed LDI header plus all records, where each record carries
 * its data plus its per-record checksum (padded to a 16-byte boundary
 * for confidential / chip-bound LIDs). */
kal_uint32 nvram_cache_appendix_header_offset(nvram_ltable_entry_struct *ldi)
{
    kal_uint32 per_record;
    kal_uint32 chksum_len = 0;
    nvram_lid_chksum_info chk_info = {0};

    nvram_get_lid_chksum_algo_info(ldi, &chk_info, KAL_FALSE, KAL_FALSE);
    chksum_len = chk_info.algo_info.chksum_algo_length;

    per_record = ldi->size + chksum_len;
    if ((ldi->attr & NVRAM_ATTR_CONFIDENTIAL)
#ifdef __NVRAM_BIND_TO_CHIP_CIPHER__
        || (ldi->attr & NVRAM_ATTR_MSP)
#endif
       ) {
        /* pad each record up to a 16-byte boundary */
        per_record += NVRAM_MSP_ALIGNMENT_REMAINDER(per_record);
    }

    return NVRAM_LDI_HEADER_SIZE + (per_record * ldi->total_records);
}
/*****************************************************************************
* FUNCTION
* nvram_cache_drv_fat_prepare_data
* DESCRIPTION
* copy data into buffer, expanding the 0x00 / 0xFF default-fill sentinels
* PARAMETERS
* buffer: [OUT] buffer
* source: [IN] source data, or NVRAM_EF_ZERO_DEFAULT / NVRAM_EF_FF_DEFAULT
* offset: [IN] offset in data
* buffer_size: [IN] size of buffer
* RETURNS
* void
*****************************************************************************/
void nvram_cache_drv_fat_prepare_data(kal_uint8 *buffer, const kal_uint8 *source, kal_uint32 offset, kal_uint32 buffer_size)
{
    /* The two sentinel "sources" request a constant fill instead of a copy. */
    if (source == NVRAM_EF_ZERO_DEFAULT)
    {
        nvram_memset(buffer, 0x00, buffer_size);
        return;
    }
    if (source == NVRAM_EF_FF_DEFAULT)
    {
        nvram_memset(buffer, 0xFF, buffer_size);
        return;
    }
    /* Real data: copy buffer_size bytes starting at `offset'. */
    kal_mem_cpy(buffer, source + offset, buffer_size);
}
/* Fill in a LID's OTA header from its logical table entry and seal it
 * with a header checksum. Also refreshes ldi->append_offset for
 * integrated-checksum LIDs. Always returns KAL_TRUE. */
kal_bool nvram_cache_prepare_ota_header(nvram_ldi_ota_header *ldi_ota_header, nvram_ltable_entry_struct *ldi)
{
#if defined(__NVRAM_DEFVAL_CHANGE_RESET__)
    kal_int32 defval_idx;
#endif
    /* 4-byte magic ("LDI" plus NUL) marks the start of the header. */
    memcpy(ldi_ota_header->header, "LDI", 4);
    ldi_ota_header->LID           = ldi->LID;
    ldi_ota_header->ldi_attr      = ldi->attr;
    ldi_ota_header->ldi_category  = ldi->category;
    ldi_ota_header->record_size   = ldi->size;
    ldi_ota_header->total_records = ldi->total_records;

    /* check appendix header offset: integrated-checksum LIDs keep a
     * checksum appendix after the data area. */
    if (ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) {
        ldi->append_offset = nvram_cache_appendix_header_offset(ldi);
    }

#if defined(__NVRAM_DEFVAL_CHANGE_RESET__)
    defval_idx = nvram_get_defval_chksum_index(ldi->LID);
    if (defval_idx != -1)
    {
        /* High part of the default-value checksum lives in the OTA header. */
        kal_mem_cpy((char *)(ldi_ota_header->defval_chkrst_h), (const char*)(lid_default_value_chksum[defval_idx].chksum), RST_CHKSUM_SIZE-6);
    }
#endif
    /* Checksum covers the whole header except the checksum field itself. */
    ldi_ota_header->checksum = nvram_cache_data_header_checksum((kal_uint8*)ldi_ota_header, (NVRAM_LDI_OTA_HEADER_SIZE - sizeof(ldi_ota_header->checksum)));
    return KAL_TRUE;
}
/* Update a LID's debug header: records the last writing task and
 * time, increments the write counter, and refreshes the optional
 * structure / default-value change-reset checksums. Always returns
 * KAL_TRUE. */
kal_bool nvram_cache_prepare_debug_header(nvram_ldi_debug_header *ldi_debug_header, nvram_ltable_entry_struct *ldi)
{
#if defined(__NVRAM_STRUCTURE_CHANGE_RESET__) || defined(__NVRAM_DEFVAL_CHANGE_RESET__)
    kal_int32 idx;
#endif
    /* Who wrote last, when, and how many times in total. */
    ldi_debug_header->last_write_taskID = kal_get_current_task_index();
    ldi_debug_header->last_write_time   = kal_get_systicks();
    ldi_debug_header->write_times      += 1;

#if defined(__NVRAM_STRUCTURE_CHANGE_RESET__)
    idx = nvram_get_structure_chksum_index(ldi->LID);
    if (idx != -1)
    {
        kal_mem_cpy((char *)(ldi_debug_header->struct_chkrst), (const char*)(lid_structure_chksum[idx].chksum), RST_CHKSUM_SIZE);
    }
#endif
#if defined(__NVRAM_DEFVAL_CHANGE_RESET__)
    idx = nvram_get_defval_chksum_index(ldi->LID);
    if (idx != -1)
    {
        /* Low 6 bytes of the default-value checksum live in the debug header. */
        kal_mem_cpy((char *)(ldi_debug_header->defval_chkrst_l), (const char*)(&(lid_default_value_chksum[idx].chksum[RST_CHKSUM_SIZE-6])), 6);
    }
#endif
    return KAL_TRUE;
}
/* Build the full LDI data header into ldi_hd_buffer: the OTA section
 * first, immediately followed by the debug section. Always returns
 * KAL_TRUE. */
kal_bool nvram_cache_prepare_data_header(nvram_ltable_entry_struct *ldi, kal_uint8 *ldi_hd_buffer)
{
    nvram_ldi_ota_header   *ota = (nvram_ldi_ota_header *)ldi_hd_buffer;
    nvram_ldi_debug_header *dbg = (nvram_ldi_debug_header *)(ldi_hd_buffer + NVRAM_LDI_OTA_HEADER_SIZE);

    nvram_cache_prepare_ota_header(ota, ldi);
    nvram_cache_prepare_debug_header(dbg, ldi);
    return KAL_TRUE;
}
/* Build one appendix header ("APDX" magic) of the given type and seal
 * it with a checksum over the whole header. For a checksum appendix,
 * returns the 16-byte-aligned offset where the next appendix section
 * starts (after total_records per-record checksums); for the END
 * terminator, returns 0. */
kal_uint32 nvram_cache_prepare_appendix_header(nvram_appendix_type_enum type, nvram_ldi_appendix_header *ldi_append_header, nvram_ltable_entry_struct *ldi, kal_uint32 data_offset)
{
    kal_uint32 chksum_len = 0;
    nvram_lid_chksum_info chk_info = {0};

    /* Start from a zeroed header stamped with the appendix magic. */
    memset(ldi_append_header, 0, NVRAM_LDI_APPENDIX_HEADER_SIZE);
    memcpy(ldi_append_header->header, "APDX", 4);

    nvram_get_lid_chksum_algo_info(ldi, &chk_info, KAL_FALSE, KAL_FALSE);
    chksum_len = chk_info.algo_info.chksum_algo_length;

    if (type != NVRAM_APPEND_TYPE_CHKSUM) {
        /* Terminator entry: nothing follows it. */
        ldi_append_header->type = NVRAM_APPEND_TYPE_END;
        data_offset = 0;
    } else {
        ldi_append_header->type = NVRAM_APPEND_TYPE_CHKSUM;
        ldi_append_header->data_offset = data_offset;
        /* caculate next data start: skip the per-record checksums,
         * then round up to a 16-byte boundary. */
        data_offset += ldi->total_records * chksum_len;
        data_offset = NVRAM_MSP_ALIGNMENT_CEILING(data_offset);
    }

    ldi_append_header->checksum = nvram_cache_data_header_checksum((kal_uint8*)ldi_append_header, NVRAM_LDI_APPENDIX_HEADER_SIZE);
    return data_offset;
}
/*****************************************************************************
* FUNCTION
* nvram_cache_reset_header
* DESCRIPTION
* reset one LID header to default value
* PARAMETERS
* ldi [IN]
* rec_index [IN]
* rec_amount [IN]
* RETURNS
* success or fail
*****************************************************************************/
/* Rewrite the selected header section(s) of a LID's file(s) on disk.
 * Writes the LDI header (OTA and/or debug section, per `section') at
 * the start of the file, and for LDI_HEADER_ALL_SECTION on
 * integrated-checksum LIDs also rewrites the checksum appendix header.
 * Multiple-copy LIDs (and IMPORTANT_L4 categories) get the same
 * treatment on the backup copy in a second loop pass.
 * Returns KAL_TRUE on success, KAL_FALSE on any FS failure (asserting
 * first if the LID is marked fault-assert). */
kal_bool nvram_cache_reset_header(nvram_ltable_entry_struct *ldi, nvram_header_section_enum section)
{
NVRAM_FILE_NAME nvramname;
kal_wchar filename[NVRAM_MAX_PATH_LEN];
nvram_folder_enum nvram_folder;
FS_HANDLE file_handle = FS_INVALID_FILE_HANDLE;
kal_int32 result = FS_NO_ERROR;
kal_int32 backup_file_num = 1; /* number of file copies still to update */
kal_bool mulpiple = KAL_FALSE; /* KAL_TRUE when a backup copy exists */
kal_bool ret_val = KAL_TRUE;
nvram_ldi_header nv_header;
nvram_ldi_appendix_header nv_appendix_header;
nvram_ldi_ota_header *ldi_ota_header = NULL;
nvram_ldi_debug_header *ldi_debug_header = NULL;
nvram_ldi_appendix_header *ldi_append_header = &nv_appendix_header;
kal_uint32 ldi_hd_buffer_size = 0;
kal_uint32 ldi_hd_offset = 0; /* header write position; stays 0 here (file start) */
kal_uint32 ldi_checksum_hd_offset = 0;
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
SGPT_CTRL_START_T start;
#endif
kal_mem_set(filename, 0x0, NVRAM_MAX_PATH_LEN * sizeof(kal_wchar));
kal_mem_set(&nv_header, 0x0, sizeof(nv_header));
kal_mem_set(&nv_appendix_header, 0x0, sizeof(nv_appendix_header));
/* The LDI header is the OTA section followed by the debug section. */
ldi_ota_header = &(nv_header.nv_ota_header);
ldi_debug_header = &(nv_header.nv_dbg_header);
ldi_hd_buffer_size += NVRAM_LDI_HEADER_SIZE;
if(NVRAM_IS_ATTR_MULTIPLE(ldi->attr) || NVRAM_IS_CATEGORY_IMPORTANT_L4(ldi->category))
{
/* This LID keeps a backup copy: run the loop twice. */
mulpiple = KAL_TRUE;
backup_file_num = 2;
}
/* First pass targets the primary ("A") copy. */
nvram_util_make_lid_filename(ldi, nvramname, KAL_TRUE);
nvram_folder = nvram_query_folder_index(ldi->category);
nvram_query_file_name(nvram_folder, nvramname, filename);
/* NVRAM GPT timeout assert start timer */
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
start.u2Tick= NVRAM_WRITE_GPT_TIMEOUT;
start.pfCallback=nvram_gpt_timeout_callback;
start.vPara=NULL;
#endif
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
DclSGPT_Control(nvram_gpt_handle, SGPT_CMD_START, (DCL_CTRL_DATA_T*)&start); //start timer
#endif
do {
NVRAM_FS_START_EX(FS_OP_OPEN, filename);
file_handle = FS_Open(filename, FS_READ_WRITE | FS_OPEN_NO_DIR | FS_CREATE);
NVRAM_FS_END(FS_OP_OPEN,file_handle);
if (file_handle < FS_NO_ERROR) {
kal_prompt_trace(MOD_NVRAM, "NVRAM write header open fail:0x%x\n\r", file_handle);
kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
kal_prompt_trace(MOD_NVRAM, "section:%d\n\r", section);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"NVRAM write header open fail:%d\r\n", file_handle);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"LID 0x%04X,category:0x%08X, attr:0x%08X\r\n", ldi->LID, ldi->category, ldi->attr);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"fileprefix:%s,fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"section:%d\r\n", section);
if(NVRAM_IS_ATTR_FAULT_ASSERT(ldi->attr)) {
NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)file_handle, NVRAM_ERROR_LOC_NVCACHE_OPEN_NV_FOLDER_FAIL_3, ldi->LID);
}
ret_val = KAL_FALSE;
goto CH_RESRET_FINAL;
}
/* Seek only when a non-zero header offset was requested (always 0 here). */
if(ldi_hd_offset && (file_handle > FS_NO_ERROR)) {
NVRAM_FS_START(FS_OP_SEEK);
result = FS_Seek(file_handle, ldi_hd_offset, FS_FILE_BEGIN);
NVRAM_FS_END(FS_OP_SEEK,result);
}
/* Rebuild only the section(s) the caller asked for. */
if(section & LDI_HEADER_DBG_SECTION) {
nvram_cache_prepare_debug_header(ldi_debug_header,ldi);
}
if(section & LDI_HEADER_OTA_SECTION) {
nvram_cache_prepare_ota_header(ldi_ota_header,ldi);
}
/* NOTE(review): FS_Write overwrites ldi_hd_buffer_size with the
 * byte count actually written; the second loop pass reuses that
 * value as the requested length -- confirm this is intended. */
NVRAM_FS_START(FS_OP_WRITE);
result = FS_Write(file_handle, (void *)(&nv_header), ldi_hd_buffer_size, &ldi_hd_buffer_size);
NVRAM_FS_END(FS_OP_WRITE,result);
if (result < FS_NO_ERROR) {
kal_prompt_trace(MOD_NVRAM, "NVRAM write header write fail:0x%x\n\r", result);
kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
kal_prompt_trace(MOD_NVRAM, "section:%d\n\r", section);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"NVRAM write header write fail:%d\r\n", file_handle);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"LID 0x%04X,category:0x%08X, attr:0x%08X\r\n", ldi->LID, ldi->category, ldi->attr);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"section:%d\r\n", section);
if(NVRAM_IS_ATTR_FAULT_ASSERT(ldi->attr)) {
NVRAM_FS_START(FS_OP_CLOSE);
result = FS_Close(file_handle);
NVRAM_FS_END(FS_OP_CLOSE,result);
NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)file_handle, NVRAM_ERROR_LOC_NVCACHE_OPEN_NV_FOLDER_FAIL_2, ldi->LID, result);
}
ret_val = KAL_FALSE;
goto CH_RESRET_FINAL;
}
//write appendix header
ldi_checksum_hd_offset = nvram_cache_appendix_header_offset(ldi);
if((section == LDI_HEADER_ALL_SECTION) && (ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE)) {
/* NOTE(review): this calls nvram_prepare_appendix_header (no
 * "cache" in the name) rather than the local
 * nvram_cache_prepare_appendix_header -- presumably an extern
 * counterpart; verify it is the intended function. */
nvram_prepare_appendix_header(NVRAM_APPEND_TYPE_CHKSUM, ldi_append_header, ldi, NVRAM_LDI_APPENDIX_HEADER_SIZE);
NVRAM_FS_START(FS_OP_SEEK);
result = FS_Seek(file_handle, ldi_checksum_hd_offset, FS_FILE_BEGIN);
NVRAM_FS_END(FS_OP_SEEK,result);
NVRAM_FS_START(FS_OP_WRITE);
result = FS_Write(file_handle, ldi_append_header, NVRAM_LDI_APPENDIX_HEADER_SIZE, &ldi_hd_buffer_size);
NVRAM_FS_END(FS_OP_WRITE,result);
if (result < FS_NO_ERROR) {
kal_prompt_trace(MOD_NVRAM, "NVRAM appendix header write fail:0x%x\n\r", result);
kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"NVRAM write header write fail:%d\r\n", file_handle);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"LID 0x%04X,category:0x%08X, attr:0x%08X\r\n", ldi->LID, ldi->category, ldi->attr);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"section:%d\r\n", section);
if(NVRAM_IS_ATTR_FAULT_ASSERT(ldi->attr)) {
NVRAM_FS_START(FS_OP_CLOSE);
result = FS_Close(file_handle);
NVRAM_FS_END(FS_OP_CLOSE,result);
NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)file_handle, NVRAM_LOC_WRITE_FILE_FAIL_7, ldi->LID, result);
}
ret_val = KAL_FALSE;
goto CH_RESRET_FINAL;
}
}
/* Common exit for this pass: close the file if it was opened. */
CH_RESRET_FINAL:
if(file_handle > FS_NO_ERROR) {
NVRAM_FS_START(FS_OP_CLOSE);
result = FS_Close(file_handle);
NVRAM_FS_END(FS_OP_CLOSE,result);
}
backup_file_num --;
#if defined(__NVRAM_WRITE_PROTECT_ENABLE__)
if (NVRAM_IS_CATEGORY_IMPORTANT_L4(ldi->category)) {
// Don't write protect2
break;
}
#endif
if(!mulpiple)
{
break;
}
/* Second pass: retarget the backup ("B") copy. */
nvram_folder = nvram_query_folder_index_ex(ldi->category,KAL_FALSE);
nvram_util_make_lid_filename(ldi, nvramname, KAL_FALSE);
nvram_query_file_name(nvram_folder, nvramname, filename);
}while(backup_file_num > 0);
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
DclSGPT_Control(nvram_gpt_handle, SGPT_CMD_STOP, (DCL_CTRL_DATA_T*)NULL); //stop timer
#endif
return ret_val;
}
/*****************************************************************************
* FUNCTION
* nvram_cache_drv_fat_backup
* DESCRIPTION
* To make a backup.
* PARAMETERS
* prefix [IN] file prefix
* verno [IN] file verno
* a_to_b [IN] direction
* RETURNS
* error code
*****************************************************************************/
kal_int32 nvram_cache_drv_fat_backup(nvram_ltable_entry_struct *ldi, kal_bool a_to_b)
{
/*----------------------------------------------------------------*/
/* Local Variables */
/*----------------------------------------------------------------*/
kal_wchar src_path[NVRAM_MAX_PATH_LEN], dest_path[NVRAM_MAX_PATH_LEN];
NVRAM_FILE_NAME nvramname;
kal_int32 result = FS_NO_ERROR;
/*----------------------------------------------------------------*/
/* Code Body */
/*----------------------------------------------------------------*/
/* Copy one LID file between its primary ("A") copy and its backup:
 * a_to_b == KAL_TRUE copies A -> backup, KAL_FALSE restores
 * backup -> A. When __NVRAM_BACKUP_DISK_FAT__ is enabled and the LID
 * is marked BACKUP_FAT, the backup lives in the NVRAM_NVD_BAK folder
 * under the A-style name; otherwise it is the "B" copy in the
 * secondary folder. Returns the FS_Move result (FS error code). */
if (a_to_b)
{
nvram_util_make_lid_filename(ldi, nvramname, KAL_TRUE); // A
nvram_query_file_name(nvram_query_folder_index_ex(ldi->category, KAL_TRUE), nvramname, src_path);
#ifdef __NVRAM_BACKUP_DISK_FAT__
if (NVRAM_IS_ATTR_BACKUP_FAT(ldi->attr))
{
/* FAT-disk backup: same (A) filename, dedicated backup folder. */
nvram_util_make_lid_filename(ldi, nvramname, KAL_TRUE);
nvram_query_file_name(NVRAM_NVD_BAK, nvramname, dest_path);
}
else
#endif
{
nvram_util_make_lid_filename(ldi, nvramname, KAL_FALSE); // B
nvram_query_file_name(nvram_query_folder_index_ex(ldi->category, KAL_FALSE), nvramname, dest_path);
}
}
else
{
#ifdef __NVRAM_BACKUP_DISK_FAT__
if (NVRAM_IS_ATTR_BACKUP_FAT(ldi->attr))
{
/* Restore source is the FAT-disk backup copy. */
nvram_util_make_lid_filename(ldi, nvramname, KAL_TRUE);
nvram_query_file_name(NVRAM_NVD_BAK, nvramname, src_path);
}
else
#endif
{
nvram_util_make_lid_filename(ldi, nvramname, KAL_FALSE); // B
nvram_query_file_name(nvram_query_folder_index_ex(ldi->category, KAL_FALSE), nvramname, src_path);
}
nvram_util_make_lid_filename(ldi, nvramname, KAL_TRUE); // A
nvram_query_file_name(nvram_query_folder_index_ex(ldi->category, KAL_TRUE), nvramname, dest_path);
}
/* Remove any stale destination first, then copy (FS_MOVE_COPY keeps
 * the source file intact). */
NVRAM_FS_START_EX(FS_OP_DELETE, dest_path);
result = FS_Delete(dest_path);
NVRAM_FS_END(FS_OP_DELETE,result);
NVRAM_FS_START_EX(FS_OP_MOVE, dest_path);
result = FS_Move(src_path, dest_path, FS_MOVE_COPY, NULL, NULL, 0);
NVRAM_FS_END(FS_OP_MOVE,result);
return result;
}
/*****************************************************************************
* FUNCTION
* nvram_cache_reset_data_item
* DESCRIPTION
* This is the nvram_cache_reset_data_item() function of the NVRAM module.
* PARAMETERS
* ldi [IN] MUST be 1 ~ (total_LID - 1)
* rec_index [IN] MUST be 1 ~ total_records
* buffer [IN] MUST be even-bytes aligned: ie, ((ldi->size + 1) / 2) * 2. Note that the content of `buffer' could be changed due to encryption!!
* initialize [IN]
* RETURNS
* NVRAM_IO_ERRNO_INVALID_LID
* NVRAM_IO_ERRNO_INVALID_RECORD
* NVRAM_IO_ERRNO_INVALID_SIZE
* NVRAM_IO_ERRNO_CHK if all copies are failed to write
* NVRAM_IO_ERRNO_OK at least one valid copy is written.
*****************************************************************************/
nvram_errno_enum nvram_cache_reset_data_item(kal_char *nvramname,
nvram_folder_enum nvram_folder,
kal_uint32 file_offset,
kal_uint16 rec_index,
kal_uint16 rec_amount,
kal_uint32 rec_size,
const kal_uint8 *buffer,
nvram_ltable_entry_struct *ldi,
kal_bool initialize)
{
kal_uint32 openOption = FS_READ_WRITE | FS_OPEN_NO_DIR;
kal_wchar filename[NVRAM_MAX_PATH_LEN];
FS_HANDLE hFile = 0;
kal_uint32 len = 0;
kal_int32 result = FS_NO_ERROR;
nvram_ldi_ota_header ota_header;
kal_int32 ret = FS_NO_ERROR;
kal_uint32 nvram_chksum_size = 0;
nvram_lid_chksum_info lid_chksum_info = {0};
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
SGPT_CTRL_START_T start;
#endif
/* NVRAM GPT timeout assert start timer */
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
start.u2Tick= NVRAM_WRITE_GPT_TIMEOUT;
start.pfCallback=nvram_gpt_timeout_callback;
start.vPara=NULL;
#endif
kal_mem_set(filename, 0x0, NVRAM_MAX_PATH_LEN * sizeof(kal_wchar));
nvram_util_take_mutex(g_nvram_fs_mutex);
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
DclSGPT_Control(nvram_gpt_handle, SGPT_CMD_START, (DCL_CTRL_DATA_T*)&start); //start timer
#endif
nvram_get_lid_chksum_algo_info(ldi, &lid_chksum_info, KAL_FALSE, KAL_FALSE);
nvram_chksum_size = lid_chksum_info.algo_info.chksum_algo_length;
do
{
/* translate record id to filename */
nvram_query_file_name(nvram_folder, nvramname, filename);
openOption |= FS_CREATE;
if (initialize != KAL_TRUE) {
#if !defined(_NAND_FLASH_BOOTING_) && !defined(__FS_SYSDRV_ON_NAND__) && !defined(__EMMC_BOOTING__)
openOption |= FS_PROTECTION_MODE; /* boot from NAND and single bank NOR don't support this */
#endif
}
#if defined(__CCCIFS_SUPPORT__) && defined(__MTK_TARGET__)
if (ldi->attr & NVRAM_ATTR_COMMITTED) {
openOption |= FS_COMMITTED;
}
#endif
NVRAM_FS_START_EX(FS_OP_OPEN,filename);
hFile = FS_Open((const kal_wchar*)filename, openOption);
NVRAM_FS_END(FS_OP_OPEN,hFile);
if (hFile == FS_FILE_NOT_FOUND) {
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Open %s fail at %d,FS_FILE_NOT_FOUND\r\n",__FUNCTION__,nvramname,__LINE__);
result = NVRAM_DRV_EMPTY_RECORD;
nvram_cache_last_line = __LINE__;
break;
}
else if (hFile <= FS_NO_ERROR) {
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Open %s fail\r\n",__FUNCTION__,nvramname);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]Fail at %d,hFile=%d\r\n",__LINE__,hFile);
result = hFile;
nvram_cache_last_line = __LINE__;
break;
}
/* allocate the per-record checksum collection buffer; it is filled by
 * nvram_drv_fat_write_section() and flushed to the appendix area below */
if(ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) {
ldi->append_buffer = get_ctrl_buffer(rec_amount * nvram_chksum_size);
}
if (file_offset) {
NVRAM_FS_START(FS_OP_SEEK);
result = FS_Seek(hFile, file_offset, FS_FILE_BEGIN);
NVRAM_FS_END(FS_OP_SEEK,result);
if(FS_NO_ERROR > result){
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Seek fail at %d,result=%d\r\n",__FUNCTION__,__LINE__,result);
/* FIX: keep the FS_Seek error code in `result'. The original code did
 * `result = hFile;' here, overwriting the negative error with the
 * (positive) file handle, so the failure was silently reported as OK. */
nvram_cache_last_line = __LINE__;
break;
}
}
result = nvram_drv_fat_write_section(hFile, buffer, rec_index, rec_amount, rec_size, ldi, file_offset);
}while(0);
/* write-back the appendix (per-record checksum) header info.
 * FIX: only do this when the file was actually opened; probing a bad
 * handle used to latch ldi->append_offset = -1 permanently after a
 * transient FS_Open failure, disabling the checksum appendix forever. */
if((ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) && (hFile > FS_NO_ERROR)) {
if(ldi->append_offset == 0) {
/* lazily resolve the appendix offset from the OTA header once */
if(KAL_TRUE == nvram_read_ota_header(hFile, &ota_header, NVRAM_LDI_OTA_HEADER_SIZE) &&
(ota_header.ldi_attr & NVRAM_ATTR_CHKSUM_INTEGRATE) )
{
ldi->append_offset = nvram_appendix_header_offset(ldi);
}
else {
ldi->append_offset = -1; /* file has no appendix area */
}
}
if(ldi->append_offset > 0) {
NVRAM_FS_START(FS_OP_SEEK);
ret = FS_Seek(hFile, (ldi->append_offset + NVRAM_LDI_APPENDIX_HEADER_SIZE + ((rec_index - 1) * nvram_chksum_size)), FS_FILE_BEGIN);
NVRAM_FS_END(FS_OP_SEEK,ret);
NVRAM_FS_START(FS_OP_WRITE);
ret = FS_Write(hFile, ldi->append_buffer, (rec_amount * nvram_chksum_size), &len);
NVRAM_FS_END(FS_OP_WRITE,ret);
}
}
/* release the checksum buffer unconditionally (NULL when open failed) */
if(ldi->append_buffer) {
free_ctrl_buffer(ldi->append_buffer);
ldi->append_buffer = NULL;
}
if (hFile > FS_NO_ERROR) {
NVRAM_FS_START(FS_OP_CLOSE);
ret = FS_Close(hFile);
NVRAM_FS_END(FS_OP_CLOSE,ret);
}
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
DclSGPT_Control(nvram_gpt_handle, SGPT_CMD_STOP, (DCL_CTRL_DATA_T*)NULL); //stop timer
#endif
nvram_util_give_mutex(g_nvram_fs_mutex);
if (result < FS_NO_ERROR) {
nvram_cache_last_err = result;
MD_TRC_FUNC_NVRAM_DRV_FAT_THROW_EXCEPTION(nvram_cache_last_err, nvram_cache_last_line);
}
/* FIX: propagate the driver/FS status (mirrors
 * nvram_cache_drv_fat_write_multRec). The original unconditionally
 * returned NVRAM_IO_ERRNO_OK, so callers comparing the returned
 * drv_status against NVRAM_DRV_OK could never detect a write failure
 * and the A/B copy recovery path was unreachable. */
return (nvram_errno_enum)result;
}
/*****************************************************************************
* FUNCTION
*  nvram_cache_write_data_item
* DESCRIPTION
*  Write one record (or initialize all records) of a LID to copy A and,
*  for multiple/backup/important-L4 LIDs, to copy B; a single broken copy
*  is repaired from the good one afterwards.
* PARAMETERS
*  ldi     [IN]  logical data item table entry; LID MUST be 1 ~ (total_LID - 1)
*  index   [IN]  record index (1 ~ total_records) when is_init == KAL_FALSE;
*                number of records to initialize when is_init == KAL_TRUE
*  data    [IN]  record data; content could be changed due to encryption
*  is_init [IN]  KAL_TRUE for initialization/reset write
* RETURNS
*  NVRAM_IO_ERRNO_CHK if all copies are failed to write
*  NVRAM_IO_ERRNO_OK at least one valid copy is written.
*****************************************************************************/
/* Writes copy A first, then (for dual-copy LIDs) copy B, and cross-repairs
 * the failed copy from the good one if exactly one write fails. */
nvram_errno_enum nvram_cache_write_data_item(nvram_ltable_entry_struct *ldi, kal_uint32 index, kal_uint8 *data, kal_bool is_init)
{
/*----------------------------------------------------------------*/
/* Local Variables */
/*----------------------------------------------------------------*/
nvram_errno_enum status = NVRAM_IO_ERRNO_OK;
nvram_drv_status_enum drv_status[2] = {NVRAM_DRV_OK, NVRAM_DRV_OK};
kal_uint32 record_ID =0;
NVRAM_FILE_NAME nvramname;
kal_int32 recovery_status = NVRAM_DRV_OK;
kal_uint32 file_offset;
nvram_folder_enum folder_index;
kal_bool multiple = KAL_FALSE;
/*----------------------------------------------------------------*/
/* Code Body */
/*----------------------------------------------------------------*/
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s ====>\r\n",__FUNCTION__);
/* a LID keeps two copies (A/B) when it is marked multiple, has a FAT
 * backup, or belongs to the important-L4 category */
if (NVRAM_IS_ATTR_MULTIPLE(ldi->attr) || NVRAM_IS_ATTR_BACKUP_FAT(ldi->attr) || NVRAM_IS_CATEGORY_IMPORTANT_L4(ldi->category))
{
multiple = KAL_TRUE;
}
folder_index = nvram_query_folder_index(ldi->category);
file_offset = NVRAM_LDI_HEADER_SIZE; /* records start right after the LDI header */
nvram_util_make_lid_filename(ldi, nvramname, KAL_TRUE); /* copy A filename first */
/* pass 0 writes copy A; pass 1 (dual-copy LIDs only) writes copy B */
for (record_ID = 0; record_ID < 2; record_ID++)
{
#if defined(__MTK_TARGET__) && defined(__NVRAM_IMPORTANT_PARTITIONS__) && defined(__CCCIFS_SUPPORT__)
if (record_ID == 1)
{
#ifdef __NVRAM_WRITE_PROTECT_ENABLE__
extern kal_bool smu_is_write_protect2(nvram_lid_enum file_idx);
/* important-L4 LIDs live on the write-protected partition: instead of
 * writing copy B directly, ask the AP side to perform the backup */
if (NVRAM_IS_CATEGORY_IMPORTANT_L4(ldi->category))
{
// Aussme protect2 is always locked, acctually it only unlocked at first boot in factory
if (drv_status[0] == NVRAM_DRV_OK)
{
if (smu_is_write_protect2(ldi->LID)) {
// only trigger backup for specific LIDs when they are written legally (shoudn't be frequently)
// trigger backup, this will write sync pattern in AP side
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(ldi->LID, 0xFFFF, 0x0001,__LINE__);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"LID 0x%04X write sync pattern to AP\r\n",ldi->LID);
ccci_send_message(CCMSG_ID_SYSMSGSVC_MD_UNPROTECT_PART_REQ, 0xABC);
} else {
// Don't trigger backup.
// think about this scenario:
// some LID will update at known time (every md bootup time)
// this will leave sync pattern in AP if we trigger backup,
// then hacker can deleted all files on protect1 before reboot the phone
// the SML data on protect2 will lost after phone reboot ...
}
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return NVRAM_IO_ERRNO_OK;
}
else
{
// don't trigger backup due to protect1 write failed
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return NVRAM_IO_ERRNO_CHK;
}
}
#endif
/* copy B lives in the secondary folder */
folder_index = nvram_query_folder_index_ex(ldi->category, KAL_FALSE);
}
#endif
// if LID == SYS_LID, file can be empty & index is the index of record needs to write (amount is 1)
// if is_init == TRUE, file can be empty & index is the # of record needs to write (index always 1)
// if is_init == FALSE, file should not be empty & index is the index of record needs to write (amount always 1)
if(is_init && NVRAM_EF_SYS_LID != ldi->LID)
{
/* init: write `index' records starting at record 1 */
drv_status[record_ID] = nvram_cache_reset_data_item(
nvramname,
folder_index,
file_offset,
1,
index,
ldi->size,
data,
ldi,
is_init);
}
else
{
/* normal write: write exactly 1 record at record `index' */
drv_status[record_ID] = nvram_cache_reset_data_item(
nvramname,
folder_index,
file_offset,
index,
1,
ldi->size,
data,
ldi,
is_init);
}
/* Try to reset data if it is not a initial case */
if (drv_status[record_ID] != NVRAM_DRV_OK)
{
MD_TRC_IO_WRITE_DATA_ITEM_MULTIPLE(ldi->LID, drv_status, nvram_drv_fat_get_last_err(), __LINE__);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"drv_status[%d] =%d\r\n",record_ID,drv_status[record_ID]);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
status = NVRAM_IO_ERRNO_CHK;
}
if (!multiple)
{
break; /* single-copy LID: no copy B pass */
}
nvram_util_make_lid_filename(ldi, nvramname, KAL_FALSE); /* switch to copy B filename */
}
/* exactly one copy failed: repair it from the good copy */
if (status != NVRAM_IO_ERRNO_OK && multiple == KAL_TRUE)
{
if (drv_status[0] != NVRAM_DRV_OK && drv_status[1] == NVRAM_DRV_OK)
{
recovery_status = nvram_cache_drv_fat_backup(ldi, KAL_FALSE); /* A <-- B */
}
else if (drv_status[0] == NVRAM_DRV_OK && drv_status[1] != NVRAM_DRV_OK)
{
recovery_status = nvram_cache_drv_fat_backup(ldi, KAL_TRUE); /* A --> B */
}
else
{
// Both A&B write fail
recovery_status = drv_status[0];
}
if (recovery_status == NVRAM_DRV_OK)
{
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return NVRAM_IO_ERRNO_OK;
}
}
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return status;
}
/*****************************************************************************
* FUNCTION
*  nvram_cache_drv_fat_write_multiple
* DESCRIPTION
*  Write record(s) of a LID to FAT through an already-opened file handle,
*  appending the per-record checksum and, where required, 16-byte
*  MSP/confidential alignment padding and encryption.
* PARAMETERS
*  hFile      [IN]  open file handle, positioned at the first record area
*  buffer     [IN]  source data (may be NVRAM_EF_ZERO_DEFAULT / NVRAM_EF_FF_DEFAULT)
*  rec_index  [IN]  1-based index of the first record to write
*  rec_amount [IN]  number of records to write
*  rec_size   [IN]  size of one record in bytes
*  ldi        [IN]  logical data item table entry
* RETURNS
*  NVRAM_DRV_OK on success, otherwise the (negative) FS error code
*****************************************************************************/
/* Serializes rec_amount records (data + checksum [+ alignment padding]) into a
 * working buffer and flushes them to `hFile' in as few FS_Write calls as
 * possible; confidential/MSP records are encrypted in place before writing. */
kal_int32 nvram_cache_drv_fat_write_multiple(
FS_HANDLE hFile,
const kal_uint8 *buffer,
kal_uint16 rec_index,
kal_uint16 rec_amount,
kal_uint32 rec_size,
nvram_ltable_entry_struct *ldi)
{
/*----------------------------------------------------------------*/
/* Local Variables */
/*----------------------------------------------------------------*/
kal_uint32 len = 0, remainLen = 0;
kal_uint8 *chksum = NULL;
kal_uint32 max_rec_amount;
kal_uint32 section_size;
kal_uint32 working_buffer_size;
kal_uint8 *working_buffer = NULL;
kal_int32 result = NVRAM_DRV_OK;
kal_uint32 i;
kal_uint32 rec_in_block;
kal_uint32 nvram_chksum_size = 0;
nvram_lid_chksum_info lid_chksum_info = {0};
/*----------------------------------------------------------------*/
/* Code Body */
/*----------------------------------------------------------------*/
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s ====>\r\n",__FUNCTION__);
nvram_get_lid_chksum_algo_info(ldi, &lid_chksum_info, KAL_FALSE, KAL_FALSE);
nvram_chksum_size = lid_chksum_info.algo_info.chksum_algo_length;
/* encrypted records must be 16-byte aligned; remainLen is the padding
 * appended to (record + checksum) to reach that alignment */
#ifdef __NVRAM_BIND_TO_CHIP_CIPHER__
if ((ldi->attr & NVRAM_ATTR_MSP)||(ldi->attr & NVRAM_ATTR_CONFIDENTIAL))
{
/* 16 byte alignment */
remainLen = NVRAM_MSP_ALIGNMENT_REMAINDER(rec_size + nvram_chksum_size);
}
#else
if (ldi->attr & NVRAM_ATTR_CONFIDENTIAL)
{
/* 16 byte alignment */
remainLen = NVRAM_MSP_ALIGNMENT_REMAINDER(rec_size + nvram_chksum_size);
}
#endif
//16bytes alignment, limitation: msp data will be oversize
section_size = rec_size + nvram_chksum_size + remainLen;
working_buffer_size = section_size * rec_amount;
/* skip over the records before rec_index (file is already positioned at
 * the start of the record area) */
if (rec_index > 1)
{
NVRAM_FS_START(FS_OP_SEEK);
result = FS_Seek(hFile, (rec_index - 1) * section_size, FS_FILE_CURRENT);
NVRAM_FS_END(FS_OP_SEEK,result);
if (FS_NO_ERROR > result)
{
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Seek fail at %d,result=%d\r\n",__FUNCTION__,__LINE__,result);
nvram_cache_last_line = __LINE__;
goto final;
}
}
/* cap the working buffer; larger jobs are flushed block by block below.
 * NOTE(review): the fast path below writes section_size * rec_amount in one
 * FS_Write from this buffer — callers must guarantee
 * (rec_size + checksum) * rec_amount <= MAX_NVRAM_RECORD_SIZE there. */
if (working_buffer_size > MAX_NVRAM_RECORD_SIZE)
{
working_buffer_size = MAX_NVRAM_RECORD_SIZE;
}
working_buffer = (kal_uint8*) get_ctrl_buffer(working_buffer_size);
max_rec_amount = working_buffer_size/section_size;
chksum = (kal_uint8*) get_ctrl_buffer(nvram_chksum_size);
/* fast path: plain (non-encrypted) records with real caller data — no
 * padding/encryption needed (remainLen is 0 here), write all at once */
if (!(ldi->attr & NVRAM_ATTR_MSP) && !(ldi->attr & NVRAM_ATTR_CONFIDENTIAL) &&
buffer != NVRAM_EF_ZERO_DEFAULT && buffer != NVRAM_EF_FF_DEFAULT)
{
for(i = 0; i < rec_amount; i++)
{
if (ldi->attr & NVRAM_ATTR_MULTI_DEFAULT)
{
//rec_index start from 1
/* each record has its own default: pick the i-th source record */
kal_mem_set(chksum, 0, nvram_chksum_size);
nvram_util_caculate_checksum(ldi, buffer + (i+rec_index-1)*rec_size, rec_size,chksum);
kal_mem_cpy(working_buffer + i*(rec_size + nvram_chksum_size), buffer + (i+rec_index-1)*rec_size, rec_size);
kal_mem_cpy(working_buffer + i*(rec_size+ nvram_chksum_size)+rec_size, chksum, nvram_chksum_size);
}
else
{
/* all records identical: checksum computed once, data replicated */
if (i==0)
{
kal_mem_set(chksum, 0, nvram_chksum_size);
nvram_util_caculate_checksum(ldi, buffer, rec_size,chksum);
}
kal_mem_cpy(working_buffer + i*(rec_size+ nvram_chksum_size), buffer, rec_size);
kal_mem_cpy(working_buffer + i*(rec_size+ nvram_chksum_size)+rec_size, chksum, nvram_chksum_size);
}
//record integrated checksum
if((ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) && ldi->append_buffer) {
kal_mem_cpy((void *)(ldi->append_buffer + (i * nvram_chksum_size)), (void *)chksum, nvram_chksum_size);
}
/* single flush of the whole batch after the last record is staged */
if (i == (rec_amount - 1))
{
NVRAM_FS_START(FS_OP_WRITE);
result = FS_Write(hFile, working_buffer, section_size * rec_amount, &len);
NVRAM_FS_END(FS_OP_WRITE,result);
if (FS_NO_ERROR > result)
{
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Write fail at %d,result=%d\r\n",__FUNCTION__,__LINE__,result);
nvram_cache_last_line = __LINE__;
goto final;
}
}
}
goto final;
}
/* slow path: default-pattern data and/or encrypted records, staged in
 * blocks of max_rec_amount records */
rec_in_block = 0;
max_rec_amount = working_buffer_size / section_size;
for (i = 0; i < rec_amount; i ++)
{
nvram_cache_drv_fat_prepare_data(working_buffer + rec_in_block * section_size, buffer, (i+rec_index-1) * rec_size, rec_size);
kal_mem_set(chksum, 0, nvram_chksum_size);
nvram_util_caculate_checksum(ldi, working_buffer + rec_in_block * section_size, rec_size, chksum);
kal_mem_cpy(working_buffer + rec_in_block * section_size + rec_size, chksum, nvram_chksum_size);
//record integrated checksum
if((ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) && ldi->append_buffer) {
kal_mem_cpy((void *)(ldi->append_buffer + (i * nvram_chksum_size)), (void *)chksum, nvram_chksum_size);
}
/* zero the alignment padding so encrypted sections are deterministic */
if (remainLen)
{
kal_mem_set(working_buffer + rec_in_block * section_size + rec_size + nvram_chksum_size , 0x00, remainLen);
}
if (ldi->attr & NVRAM_ATTR_CONFIDENTIAL)
{
//custom_nvram_encrypt(nvram_ptr->secret_key, working_buffer + rec_in_block * section_size, rec_size, rec_size);
nvram_AES_encrypt(working_buffer + rec_in_block * section_size, section_size);
}
#ifdef __NVRAM_BIND_TO_CHIP_CIPHER__
if (ldi->attr & NVRAM_ATTR_MSP)
{
/* this solution is only for work arround */
#if (defined(__SMART_PHONE_MODEM__) || defined(__CCCIFS_SUPPORT__))
/* encrypt-then-verify workaround: encrypt, decrypt into a scratch
 * buffer and compare with the plaintext; retry until they match */
kal_uint8 *working_buffer2 = (kal_uint8*) get_ctrl_buffer(section_size);
kal_uint8 *working_buffer3 = (kal_uint8*) get_ctrl_buffer(section_size);
/* NOTE(review): on allocation failure this only traces and falls
 * through — working_buffer/working_buffer2 may be NULL at the memcpy
 * below (NULL dereference) and `chksum' is never freed on this path;
 * confirm whether get_ctrl_buffer can actually return NULL here. */
if (working_buffer2 == NULL)
{
if (working_buffer)
{
free_ctrl_buffer(working_buffer);
working_buffer = NULL;
}
kal_prompt_trace(MOD_NVRAM, "%s Can not get the memory from control buffer @line %d\n\r",__FUNCTION__,__LINE__);
}
if (working_buffer3 == NULL)
{
if (working_buffer)
{
free_ctrl_buffer(working_buffer);
working_buffer = NULL;
}
free_ctrl_buffer(working_buffer2);
working_buffer2 = NULL;
kal_prompt_trace(MOD_NVRAM, "%s Can not get the memory from control buffer @line %d\n\r",__FUNCTION__,__LINE__);
}
//copy the original data from working_buffer to working_buffer2
memcpy(working_buffer2, working_buffer + rec_in_block * section_size, section_size);
do
{
nvram_trace_to_file(__LINE__, 999, 0, 0, 0, 0);
nvram_trace_to_file(nvram_ptr->secret_key[0], nvram_ptr->secret_key[1], nvram_ptr->secret_key[2], nvram_ptr->secret_key[3], 0, 0);
nvram_trace_to_file(working_buffer[rec_in_block*section_size], working_buffer[rec_in_block*section_size + 1], working_buffer[rec_in_block*section_size + 2], working_buffer[rec_in_block*section_size + 3], 0, 0);
//encrypt working_buffer
SST_Secure_Algo(NVRAM_MSP_ENCRYPT, (kal_uint32)working_buffer + rec_in_block * section_size, section_size, nvram_ptr->secret_key, working_buffer + rec_in_block * section_size);
nvram_trace_to_file(working_buffer[rec_in_block*section_size], working_buffer[rec_in_block*section_size + 1], working_buffer[rec_in_block*section_size + 2], working_buffer[rec_in_block*section_size + 3], 0, 0);
//copy the encrypted data from working_buffer to working_buffer3
memcpy(working_buffer3, working_buffer + rec_in_block * section_size, section_size);
//decrypt the working_buffer3
SST_Secure_Algo(NVRAM_MSP_DECRYPT, (kal_uint32)working_buffer3, section_size, nvram_ptr->secret_key, working_buffer3);
//compare the data between the working_buffer2 & working_buffer3
if (memcmp(working_buffer2, working_buffer3, section_size) == 0)
{
//encrypt PASS
break;
}
else
{
//encrypt FAIL, try again, WTF
memcpy(working_buffer + rec_in_block * section_size, working_buffer2, section_size);
}
}while(1);
free_ctrl_buffer(working_buffer2);
free_ctrl_buffer(working_buffer3);
working_buffer2 = NULL;
working_buffer3 = NULL;
#else
SST_Secure_Algo(NVRAM_MSP_ENCRYPT, (kal_uint32)working_buffer + rec_in_block * section_size, section_size, nvram_ptr->secret_key, working_buffer + rec_in_block * section_size);
#endif
}
#endif
/* if this is not multi default, no need to prepare data anymore */
if (!(ldi->attr & NVRAM_ATTR_MULTI_DEFAULT))
{
break;
}
rec_in_block ++;
/* flush when the staging block is full or this was the last record */
if (rec_in_block == max_rec_amount || i == rec_amount - 1)
{
NVRAM_FS_START(FS_OP_WRITE);
result = FS_Write(hFile, working_buffer, section_size * rec_in_block, &len);
NVRAM_FS_END(FS_OP_WRITE,result);
if (FS_NO_ERROR > result)
{
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Write fail at %d,result=%d\r\n",__FUNCTION__,__LINE__,result);
nvram_cache_last_line = __LINE__;
goto final;
}
rec_in_block = 0;
}
}
/* special handling for not multi default */
/* a single prepared section is written rec_amount times */
if (!(ldi->attr & NVRAM_ATTR_MULTI_DEFAULT))
{
for (i = 0; i < rec_amount; i++)
{
//record integrated checksum
if((ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) && ldi->append_buffer) {
kal_mem_cpy((void *)(ldi->append_buffer + (i * nvram_chksum_size)), (void *)(working_buffer+rec_size), nvram_chksum_size);
}
NVRAM_FS_START(FS_OP_WRITE);
result = FS_Write(hFile, working_buffer, section_size, &len);
NVRAM_FS_END(FS_OP_WRITE,result);
if (FS_NO_ERROR > result)
{
nvram_cache_last_line = __LINE__;
goto final;
}
}
}
/* common cleanup for all exits */
final:
if (working_buffer)
{
free_ctrl_buffer(working_buffer);
working_buffer = NULL;
}
if (chksum)
{
free_ctrl_buffer(chksum);
chksum = NULL;
}
if (FS_NO_ERROR > result)
{
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s fail at %d,result=%d\r\n",__FUNCTION__,__LINE__,result);
nvram_cache_last_err = result;
MD_TRC_FUNC_NVRAM_DRV_FAT_THROW_EXCEPTION(nvram_cache_last_err, nvram_cache_last_line);
return result;
}
return NVRAM_DRV_OK;
}
/*****************************************************************************
* FUNCTION
*  nvram_cache_drv_fat_write_multRec
* DESCRIPTION
*  Write record(s) of a LID to FAT; now it is only used at the initiation
*  stage. Opens the file, resets the LDI header, delegates the record
*  writes to nvram_cache_drv_fat_write_multiple() and writes back the
*  checksum appendix for NVRAM_ATTR_CHKSUM_INTEGRATE LIDs.
* PARAMETERS
*  ldi          [IN]  logical data item table entry
*  nvramname    [IN]  file name of the LID
*  nvram_folder [IN]  folder index the file resides in
*  file_offset  [IN]  not referenced in the body; the seek uses NVRAM_LDI_HEADER_SIZE
*  rec_index    [IN]  1-based index of the first record to write
*  rec_amount   [IN]  number of records to write
*  rec_size     [IN]  size of one record in bytes
*  buffer       [IN]  source data
*  initialize   [IN]  true for reset, false for normal write
* RETURNS
*  NVRAM_DRV_OK on success, otherwise an NVRAM_DRV_* / FS error code
*****************************************************************************/
/* Open-write-close wrapper around nvram_cache_drv_fat_write_multiple(),
 * serialized by g_nvram_fs_mutex and guarded by the optional GPT timeout. */
nvram_drv_status_enum nvram_cache_drv_fat_write_multRec(nvram_ltable_entry_struct *ldi,
kal_char *nvramname,
nvram_folder_enum nvram_folder,
kal_uint32 file_offset,
kal_uint16 rec_index,
kal_uint16 rec_amount,
kal_uint32 rec_size,
const kal_uint8 *buffer,
kal_bool initialize)
{
/*----------------------------------------------------------------*/
/* Local Variables */
/*----------------------------------------------------------------*/
kal_wchar filename[NVRAM_MAX_PATH_LEN];
FS_HANDLE hFile = 0;
kal_int32 result = FS_NO_ERROR;
kal_uint32 openOption = FS_READ_WRITE | FS_OPEN_NO_DIR;
kal_uint32 len;
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
SGPT_CTRL_START_T start;
#endif
nvram_ldi_ota_header ota_header;
kal_int32 ret = FS_NO_ERROR;
kal_uint32 nvram_chksum_size = 0;
nvram_lid_chksum_info lid_chksum_info = {0};
/*----------------------------------------------------------------*/
/* Code Body */
/*----------------------------------------------------------------*/
/* NVRAM GPT timeout assert start timer */
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
start.u2Tick= NVRAM_WRITE_GPT_TIMEOUT;
start.pfCallback=nvram_gpt_timeout_callback;
start.vPara=NULL;
#endif
kal_mem_set(filename, 0x0, NVRAM_MAX_PATH_LEN * sizeof(kal_wchar));
nvram_util_take_mutex(g_nvram_fs_mutex);
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
DclSGPT_Control(nvram_gpt_handle, SGPT_CMD_START, (DCL_CTRL_DATA_T*)&start); //start timer
#endif
nvram_get_lid_chksum_algo_info(ldi, &lid_chksum_info, KAL_FALSE, KAL_FALSE);
nvram_chksum_size = lid_chksum_info.algo_info.chksum_algo_length;
do
{
/* both indices are 1-based; 0 means a caller bug */
if (rec_index < 1 || rec_amount < 1)
{
nvram_cache_last_line = __LINE__;
result = NVRAM_DRV_INVALID_RECORD_ID;
goto final;
}
/* translate record id to filename */
nvram_query_file_name(nvram_folder, nvramname, filename);
/* set the attribute to empty before write data
sometime the files may be read only if the nvram lock is turn on
ex: software update when nvram lock is turned on
But it is not a good solution here, we should unlock it in io layer */
openOption |= FS_CREATE;
#if (defined(__SMART_PHONE_MODEM__) || defined(__CCCIFS_SUPPORT__)) && defined(__MTK_TARGET__)
if (ldi->attr & NVRAM_ATTR_COMMITTED)
{
openOption |= FS_COMMITTED;
}
#endif
/* allocate the per-record checksum collection buffer; filled by the
 * write path and flushed to the appendix area below */
if(ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE)
{
ldi->append_buffer = get_ctrl_buffer(rec_amount * nvram_chksum_size);
}
nvram_cache_reset_header(ldi, LDI_HEADER_ALL_SECTION);
NVRAM_FS_START_EX(FS_OP_OPEN,filename);
hFile = FS_Open((const kal_wchar*)filename, openOption);
NVRAM_FS_END(FS_OP_OPEN,hFile);
if (hFile == FS_FILE_NOT_FOUND)
{
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Open %s fail at %d,FS_FILE_NOT_FOUND\r\n",__FUNCTION__,nvramname,__LINE__);
nvram_cache_last_line = __LINE__;
result = NVRAM_DRV_EMPTY_RECORD;
goto final;
}
else if (hFile <= FS_NO_ERROR)
{
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Open %s fail\r\n",__FUNCTION__,nvramname);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]Fail at %d,hFile=%d\r\n",__LINE__,hFile);
nvram_cache_last_line = __LINE__;
result = hFile;
goto final;
}
/* skip the LDI header; records start right after it */
NVRAM_FS_START(FS_OP_SEEK);
result = FS_Seek(hFile, NVRAM_LDI_HEADER_SIZE, FS_FILE_BEGIN);
NVRAM_FS_END(FS_OP_SEEK,result);
if(FS_NO_ERROR > result){
nvram_cache_last_line = __LINE__;
goto final;
}
result = nvram_cache_drv_fat_write_multiple(hFile, buffer, rec_index, rec_amount, rec_size, ldi);
}while(0);
/* write-back the appendix (per-record checksum) header info */
if(ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) {
//Write appendix info
if(ldi->append_offset == 0) {
/* lazily resolve the appendix offset from the OTA header once.
 * NOTE(review): ota_header is cleared with NVRAM_LDI_APPENDIX_HEADER_SIZE
 * but read with NVRAM_LDI_OTA_HEADER_SIZE — confirm the two constants
 * do not exceed sizeof(ota_header). */
kal_mem_set(&ota_header, 0, NVRAM_LDI_APPENDIX_HEADER_SIZE);
if(KAL_TRUE == nvram_read_ota_header(hFile, &ota_header, NVRAM_LDI_OTA_HEADER_SIZE) &&
(ota_header.ldi_attr & NVRAM_ATTR_CHKSUM_INTEGRATE) )
{
ldi->append_offset = nvram_cache_appendix_header_offset(ldi);
}
else {
ldi->append_offset = -1; /* file has no appendix area */
}
}
if(ldi->append_offset > 0) {
NVRAM_FS_START(FS_OP_SEEK);
ret = FS_Seek(hFile, (ldi->append_offset + NVRAM_LDI_APPENDIX_HEADER_SIZE + ((rec_index - 1) * nvram_chksum_size)), FS_FILE_BEGIN);
NVRAM_FS_END(FS_OP_SEEK,ret);
NVRAM_FS_START(FS_OP_WRITE);
ret = FS_Write(hFile, ldi->append_buffer, (rec_amount * nvram_chksum_size), &len);
NVRAM_FS_END(FS_OP_WRITE,ret);
}
}
/* common cleanup: buffer, handle, timer, mutex — in that order */
final:
if(ldi->append_buffer) {
free_ctrl_buffer(ldi->append_buffer);
ldi->append_buffer = NULL;
}
if (hFile > FS_NO_ERROR)
{
NVRAM_FS_START(FS_OP_CLOSE);
ret = FS_Close(hFile);
NVRAM_FS_END(FS_OP_CLOSE,ret);
}
#if defined(__NVRAM_ACCESS_TIMEOUT_ASSERT__)
DclSGPT_Control(nvram_gpt_handle, SGPT_CMD_STOP, (DCL_CTRL_DATA_T*)NULL); //stop timer
#endif
nvram_util_give_mutex(g_nvram_fs_mutex);
/* Set the attribute back to original attribute */
if (result < FS_NO_ERROR)
{
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s fail at %d,result=%d\r\n",__FUNCTION__,__LINE__,result);
nvram_cache_last_err = result;
MD_TRC_FUNC_NVRAM_DRV_FAT_THROW_EXCEPTION(nvram_cache_last_err, nvram_cache_last_line);
}
return result;
}
#if defined(__MTK_TARGET__) && defined(__NVRAM_WRITE_WITH_FILE_SIZE__) && !defined(__NVRAM_WRITE_PROTECT_ENABLE__)
/*****************************************************************************
* FUNCTION
*  nvram_cache_write_fs_data_item_multiple
* DESCRIPTION
*  This is nvram_cache_write_fs_data_item_multiple() function of the NVRAM
*  module. Writes rec_mount record(s) to copy A and, for dual-copy LIDs,
*  copy B, then repairs a single broken copy from the good one.
* PARAMETERS
*  ldi       [IN]  logical data item table entry
*  index     [IN]  1-based index of the first record to write
*  rec_mount [IN]  number of records to write
*  buffer    [IN]  source data
*  is_init   [IN]  true for reset/init write
* RETURNS
*  NVRAM_IO_ERRNO_CHK if all copies are failed to write
*  NVRAM_IO_ERRNO_OK at least one valid copy is written.
*****************************************************************************/
/* Dual-copy (A/B) multi-record writer built on
 * nvram_cache_drv_fat_write_multRec(), with cross-copy recovery. */
static nvram_errno_enum nvram_cache_write_fs_data_item_multiple
(nvram_ltable_entry_struct *ldi,
kal_uint16 index,
kal_uint16 rec_mount,
const kal_uint8 *buffer,
kal_bool is_init)
{
/*----------------------------------------------------------------*/
/* Local Variables */
/*----------------------------------------------------------------*/
nvram_errno_enum status = NVRAM_IO_ERRNO_OK;
nvram_drv_status_enum drv_status[2] = {NVRAM_DRV_OK, NVRAM_DRV_OK};
kal_uint32 record_ID;
NVRAM_FILE_NAME nvramname;
kal_uint32 file_offset;
nvram_folder_enum folder_index;
kal_bool multiple = KAL_FALSE;
/*----------------------------------------------------------------*/
/* Code Body */
/*----------------------------------------------------------------*/
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s ====>\r\n",__FUNCTION__);
/* a LID keeps two copies (A/B) when it is marked multiple, has a FAT
 * backup, or belongs to the important-L4 category */
if (NVRAM_IS_ATTR_MULTIPLE(ldi->attr) || NVRAM_IS_ATTR_BACKUP_FAT(ldi->attr) || NVRAM_IS_CATEGORY_IMPORTANT_L4(ldi->category))
{
multiple = KAL_TRUE;
}
folder_index = nvram_query_folder_index(ldi->category);
file_offset = NVRAM_LDI_HEADER_SIZE; /* records start right after the LDI header */
nvram_util_make_lid_filename(ldi, nvramname, KAL_TRUE); /* copy A filename first */
/* pass 0 writes copy A; pass 1 (dual-copy LIDs only) writes copy B */
for (record_ID = 0; record_ID < 2; record_ID++)
{
#if defined(__MTK_TARGET__) && defined(__NVRAM_IMPORTANT_PARTITIONS__) && (defined(__SMART_PHONE_MODEM__) || defined(__CCCIFS_SUPPORT__))
if (record_ID == 1)
{
#ifdef __NVRAM_WRITE_PROTECT_ENABLE__
extern kal_bool smu_is_write_protect2(nvram_lid_enum file_idx);
/* important-L4 LIDs live on the write-protected partition: instead of
 * writing copy B directly, ask the AP side to perform the backup */
if (NVRAM_IS_CATEGORY_IMPORTANT_L4(ldi->category))
{
// Aussme protect2 is always locked, acctually it only unlocked at first boot in factory
if (drv_status[0] == NVRAM_DRV_OK)
{
if (smu_is_write_protect2(ldi->LID)) {
// only trigger backup for specific LIDs when they are written legally (shoudn't be frequently)
// trigger backup, this will write sync pattern in AP side
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(ldi->LID, 0xFFFF, 0x0001,__LINE__);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"LID:0x%04X write sync pattern to AP\r\n",ldi->LID);
ccci_send_message(CCMSG_ID_SYSMSGSVC_MD_UNPROTECT_PART_REQ, 0xABC);
} else {
// Don't trigger backup.
// think about this scenario:
// some LID will update at known time (every md bootup time)
// this will leave sync pattern in AP if we trigger backup,
// then hacker can deleted all files on protect1 before reboot the phone
// the SML data on protect2 will lost after phone reboot ...
}
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return NVRAM_IO_ERRNO_OK;
}
else
{
// don't trigger backup due to protect1 write failed
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return NVRAM_IO_ERRNO_CHK;
}
}
#endif
/* copy B lives in the secondary folder */
folder_index = nvram_query_folder_index_ex(ldi->category, KAL_FALSE);
}
#endif
drv_status[record_ID] = nvram_cache_drv_fat_write_multRec(
ldi,
nvramname,
folder_index,
file_offset,
index,
rec_mount,
ldi->size,
buffer,
is_init);
/* Try to reset data if it is not a initial case */
if (drv_status[record_ID] != NVRAM_DRV_OK)
{
MD_TRC_IO_WRITE_DATA_ITEM_MULTIPLE(ldi->LID, drv_status, nvram_drv_fat_get_last_err(), __LINE__);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP," drv_status[%d] =%d\r\n",record_ID,drv_status[record_ID]);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
status = NVRAM_IO_ERRNO_CHK;
}
if (!multiple)
{
break; /* single-copy LID: no copy B pass */
}
nvram_util_make_lid_filename(ldi, nvramname, KAL_FALSE); /* switch to copy B filename */
}
/* exactly one copy failed: repair it from the good copy */
if (status != NVRAM_IO_ERRNO_OK && multiple == KAL_TRUE)
{
kal_int32 recovery_status;
if (drv_status[0] != NVRAM_DRV_OK && drv_status[1] == NVRAM_DRV_OK)
{
recovery_status = nvram_drv_fat_backup(ldi, KAL_FALSE); /* A <-- B */
}
else if (drv_status[0] == NVRAM_DRV_OK && drv_status[1] != NVRAM_DRV_OK)
{
recovery_status = nvram_drv_fat_backup(ldi, KAL_TRUE); /* A --> B */
}
else
{
// Both A&B write fail
recovery_status = drv_status[0];
}
if (recovery_status == NVRAM_DRV_OK)
{
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return NVRAM_IO_ERRNO_OK;
}
}
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return status;
}
/*****************************************************************************
* FUNCTION
*  nvram_cache_write_default_data_item_mutilpe
* DESCRIPTION
*  Write default data with LID size (not a single record). Supports writing
*  1..amount records in one shot if (lid->size + checksum) * amount <
*  MAX_NVRAM_RECORD_SIZE.
* PARAMETERS
*  ldi        [IN] the data item to be written
*  rec_index  [IN] the start record index that will be written
*  rec_amount [IN] the amount of the records that will be written
* RETURNS
*  NVRAM_IO_ERRNO_OK on success, otherwise an NVRAM_IO_ERRNO_* code
*****************************************************************************/
static nvram_errno_enum nvram_cache_write_default_data_item_mutilpe(nvram_ltable_entry_struct *ldi,
kal_uint16 rec_index,
kal_uint16 rec_amount)
{
/*----------------------------------------------------------------*/
/* Local Variables */
/*----------------------------------------------------------------*/
nvram_errno_enum status = NVRAM_IO_ERRNO_OK;
/*----------------------------------------------------------------*/
/* Code Body */
/*----------------------------------------------------------------*/
MD_TRC_IO_WRITE_DATA_ITEM_START(ldi->LID, rec_index, rec_amount);
MD_TRC_INFO_NVRAM_DATA_ITEM(ldi->LID, ldi->size);
/******************************************************
* Device Broken
******************************************************/
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s ====>\r\n",__FUNCTION__);
if (nvram_ptr->dev_broken)
{
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(ldi->LID, NVRAM_IO_ERRNO_DRV_BROKEN, __LINE__, ldi->LID);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s NVRAM_IO_ERRNO_DRV_BROKEN\r\n",__FUNCTION__);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return status;
}
/* prepare data */
if( ldi->default_value == NVRAM_EF_FF_DEFAULT ||
ldi->default_value == NVRAM_EF_ZERO_DEFAULT)
{
kal_uint8 const *default_value = ldi->default_value;
status = nvram_cache_write_fs_data_item_multiple(ldi,rec_index,rec_amount,default_value,KAL_TRUE);
}
else if(ldi->default_value == NULL)
{
kal_uint8 const *default_value = NVRAM_EF_FF_DEFAULT;
status = nvram_cache_write_fs_data_item_multiple(ldi,rec_index,rec_amount,default_value,KAL_TRUE);
}
else
{
kal_uint8 *default_value_buffer = NULL;
kal_uint32 i;
kal_uint8 *default_value_p;
kal_uint8 *def_buff;
default_value_buffer = (kal_uint8*) get_ctrl_buffer(ldi->size * rec_amount + NVRAM_BUFFER_TAIL_SIZE);
kal_mem_cpy((default_value_buffer + (ldi->size * rec_amount)), NVRAM_BUFFER_TAIL_MARK, NVRAM_BUFFER_TAIL_SIZE);
if((ldi->category & NVRAM_CATEGORY_FUNC_DEFAULT) && (ldi->attr & NVRAM_ATTR_MULTI_DEFAULT))
{
nvram_get_lid_default_value_to_write(ldi, 0, default_value_buffer, (ldi->size * rec_amount));
}
else
{
def_buff = (kal_uint8*) get_ctrl_buffer(ldi->size + NVRAM_BUFFER_TAIL_SIZE);
kal_mem_cpy((def_buff + ldi->size), NVRAM_BUFFER_TAIL_MARK, NVRAM_BUFFER_TAIL_SIZE);
for (i = 0; i < rec_amount; i++)
{
default_value_p = (kal_uint8*) nvram_get_lid_default_value_to_write(ldi, i, NULL, 0);
if (default_value_p == NULL)
{
nvram_get_lid_default_value_to_write(ldi, i, def_buff, ldi->size);
default_value_p = def_buff;
}
kal_mem_cpy(default_value_buffer + i*(ldi->size), default_value_p, ldi->size);
default_value_p = NULL;
}
if(kal_mem_cmp((def_buff + ldi->size), NVRAM_BUFFER_TAIL_MARK, NVRAM_BUFFER_TAIL_SIZE) != 0)
{
// Return default value is out of buffer.
MD_TRC_FUNC_NVRAM_RESET_DATA_ITEMS(ldi->LID, (ldi->size * rec_amount), __LINE__, 0,0);
NVRAM_EXT_ASSERT(KAL_FALSE, (ldi->size * ldi->total_records), NVRAM_LOC_SPACE_NOT_ENOUGH_4, ldi->LID, free_ctrl_buffer(def_buff);free_ctrl_buffer(default_value_buffer));
}
free_ctrl_buffer(def_buff);
}
if(kal_mem_cmp((default_value_buffer + (ldi->size * rec_amount)), NVRAM_BUFFER_TAIL_MARK, NVRAM_BUFFER_TAIL_SIZE) != 0)
{
// Return default value is out of buffer.
MD_TRC_FUNC_NVRAM_RESET_DATA_ITEMS(ldi->LID, (ldi->size * rec_amount), __LINE__, 0,0);
NVRAM_EXT_ASSERT(KAL_FALSE, (ldi->size * ldi->total_records), NVRAM_LOC_SPACE_NOT_ENOUGH_3, ldi->LID, free_ctrl_buffer(default_value_buffer));
}
status = nvram_cache_write_fs_data_item_multiple(ldi,rec_index,rec_amount,default_value_buffer,KAL_TRUE);
free_ctrl_buffer(default_value_buffer);
}
if (status != NVRAM_IO_ERRNO_OK)
{
/* IMEI and SML */
if ((NVRAM_IS_CATEGORY_IMPORTANT(ldi->category)
#if (defined(__SMART_PHONE_MODEM__) || defined(__CCCIFS_SUPPORT__))
#if defined(__MTK_TARGET__) && defined(__NVRAM_IMPORTANT_PARTITIONS__)
|| NVRAM_IS_CATEGORY_IMPORTANT_L4(ldi->category))
#else
)
#endif
#else
)
#endif
)
{
kal_prompt_trace(MOD_NVRAM, "NVRAM ASSERT ERROR NVRAM_ERROR_LOC_NVCACHE_WRITE_IMPORTANT_DATA_FAIL_1:%d\n\r", DISPLAY_ERROR(status));
kal_prompt_trace(MOD_NVRAM, "LID:0x%x, total_records:%d, record_size:%d\n\r", ldi->LID, ldi->total_records, ldi->size);
kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "[Error]NVRAM ASSERT ERROR NVRAM_ERROR_LOC_NVCACHE_WRITE_IMPORTANT_DATA_FAIL_1:%d\r\n", DISPLAY_ERROR(status));
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "LID:0x%04X, total_records:%d, record_size:%d\r\n", ldi->LID, ldi->total_records, ldi->size);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "category:0x%08X, attr:0x%08X\r\n", ldi->category, ldi->attr);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
NVRAM_EXT_ASSERT (KAL_FALSE, DISPLAY_ERROR(status),NVRAM_ERROR_LOC_NVCACHE_WRITE_IMPORTANT_DATA_FAIL_1 , ldi->LID);
return status;
}
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(ldi->LID, status, __LINE__, KAL_TRUE);
}
if (status != NVRAM_IO_ERRNO_OK)
{
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(ldi->LID, status, __LINE__, KAL_TRUE);
kal_prompt_trace(MOD_NVRAM, "NVRAM ASSERT ERROR NVRAM_ERROR_LOC_NVCACGE_WRITE_AND_RESET_FAIL_1:%d\n\r", DISPLAY_ERROR(status));
kal_prompt_trace(MOD_NVRAM, "LID:0x%x, total_records:%d, record_size:%d\n\r", ldi->LID, ldi->total_records, ldi->size);
kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "[Error]NVRAM_ERROR_LOC_NVCACGE_WRITE_AND_RESET_FAIL_1:%d\r\n", DISPLAY_ERROR(status));
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "LID:0x%04X, total_records:%d, record_size:%d\r\n", ldi->LID, ldi->total_records, ldi->size);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "category:0x%08X, attr:0x%08X\r\n", ldi->category, ldi->attr);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
NVRAM_EXT_ASSERT(KAL_FALSE, DISPLAY_ERROR(status), NVRAM_ERROR_LOC_NVCACGE_WRITE_AND_RESET_FAIL_1, ldi->LID);
}
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return status;
}
#endif
/*****************************************************************************
* FUNCTION
* nvram_cache_reset_one_data_item
* DESCRIPTION
* reset one LID to default value
* PARAMETERS
* ldi [IN]
* rec_index [IN]
* rec_amount [IN]
* RETURNS
* success or fail
*****************************************************************************/
/* Reset one LID to its default value(s).
 * ldi        [IN] logical-data-item table entry to reset
 * rec_index  [IN] first record to reset (records are 1-based)
 * rec_amount [IN] last record / amount of records (used as the loop end bound)
 * Returns NVRAM_IO_ERRNO_OK on success, otherwise the first write error. */
nvram_errno_enum nvram_cache_reset_one_data_item(nvram_ltable_entry_struct* ldi, kal_uint16 rec_index, kal_uint16 rec_amount)
{
kal_uint8 *default_value = NULL;
kal_uint32 i;
kal_uint32 start = rec_index;
/* NOTE(review): 'end' is taken from rec_amount and used as an inclusive
 * upper bound below (i <= end), i.e. rec_amount is treated as the last
 * record index rather than a count — confirm against callers. */
kal_uint32 end = rec_amount;
kal_uint8 *default_value_buffer = NULL;
#if defined(__MTK_TARGET__) && defined(__NVRAM_WRITE_WITH_FILE_SIZE__) && !defined(__NVRAM_WRITE_PROTECT_ENABLE__)
kal_uint32 remainLen = 0;
#endif
kal_uint32 working_buffer_size = 0;
nvram_errno_enum result = NVRAM_IO_ERRNO_OK;
kal_uint32 nvram_chksum_size = 0;
nvram_lid_chksum_info lid_chksum_info = {0};
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s ====>\r\n",__FUNCTION__);
/* Per-LID checksum algorithm decides how many extra bytes each record carries. */
nvram_get_lid_chksum_algo_info(ldi, &lid_chksum_info, KAL_FALSE, KAL_FALSE);
nvram_chksum_size = lid_chksum_info.algo_info.chksum_algo_length;
#if defined(__MTK_TARGET__) && defined(__NVRAM_WRITE_WITH_FILE_SIZE__) && !defined(__NVRAM_WRITE_PROTECT_ENABLE__)
/* Fast path: during a full NVRAM reset of a normal, small LID, write all
 * default records in one multiple-record operation instead of per record.
 * The do/while(0) is only a structured way to bail out via 'break'. */
do
{
if (KAL_FALSE == bResetNvramData
|| KAL_FALSE == kal_query_systemInit() //initiation
|| (ldi->size + nvram_chksum_size) > MAX_NVRAM_RECORD_SIZE //large record
|| ldi->LID == NVRAM_EF_SYS_LID
#ifdef __NVRAM_OTP__
|| NVRAM_IS_CATEGORY_OTP(ldi->category)
#endif
#ifdef __NVRAM_CUSTOM_DISK__
|| NVRAM_IS_CATEGORY_CUSTOM_DISK(ldi->category)
#endif
#ifdef __NVRAM_CRYPT_TEST__
|| ldi->LID == NVRAM_EF_NVRAM_MSP_TEST_LID
#endif
)
{
break;
}
// only support reset 1..total_records
#ifdef __NVRAM_BIND_TO_CHIP_CIPHER__
if (ldi->attr & NVRAM_ATTR_MSP)
{
/* 4 byte alignment */
remainLen = NVRAM_MSP_ALIGNMENT_REMAINDER(ldi->size + nvram_chksum_size);
}
#endif
working_buffer_size = (ldi->size + nvram_chksum_size + remainLen) * ldi->total_records;
if (working_buffer_size <= MAX_NVRAM_RECORD_SIZE)
{
result = nvram_cache_write_default_data_item_mutilpe(ldi,1,ldi->total_records);
goto final;
}
}while(0);
#endif
/* Slow path: reset record by record through a bounded working buffer. */
working_buffer_size = MAX_NVRAM_RECORD_SIZE;
default_value_buffer = (kal_uint8*) get_ctrl_buffer(working_buffer_size);
//nvram_debug_write_dump(ldi, 0x701, rec_index, rec_amount, NULL, ldi->size, NVRAM_IO_ERRNO_CHK);
nvram_util_take_mutex(g_nvram_fs_mutex);
nvram_cache_reset_header(ldi, LDI_HEADER_ALL_SECTION);
nvram_util_give_mutex(g_nvram_fs_mutex);
for (i = start; i <= end; i++)
{
/* when ldi size too big, we cannot operator it on buffer,
use original default value pointer directly */
default_value = (kal_uint8*) nvram_get_default_value_to_write(ldi, i, NULL, 0);
/* the default_value is not assigned in table , ex: L1 LID */
if (default_value == NULL)
{
if((i == 1 && end == ldi->total_records) && (NVRAM_EF_SYS_LID != ldi->LID))
{
nvram_get_default_value_to_write(ldi, i, default_value_buffer, working_buffer_size);
}
else
{
/* Pre-fill with the zero-default pattern before asking for a
 * per-record default, so unwritten bytes have a defined value. */
nvram_memset(default_value_buffer, (kal_uint8) NVRAM_EF_ZERO_DEFAULT_VALUE, working_buffer_size);
nvram_get_default_value_to_write(ldi, i, default_value_buffer, ldi->size);
}
default_value = default_value_buffer;
}
/* initial case, we reset it directly to enhance performance*/
if(NVRAM_EF_SYS_LID == ldi->LID)
{
result = nvram_cache_write_data_item(ldi, i, default_value, KAL_TRUE);
if (result != NVRAM_IO_ERRNO_OK)
{
break;
}
}
else
{
/* NOTE(review): non-SYS LIDs issue a single write with record index
 * ldi->total_records and then leave the loop — presumably that index
 * signals "all records" to nvram_cache_write_data_item; confirm. */
result = nvram_cache_write_data_item(ldi, ldi->total_records, default_value, KAL_TRUE);
break;
}
}
#if defined(__MTK_TARGET__) && defined(__NVRAM_WRITE_WITH_FILE_SIZE__) && !defined(__NVRAM_WRITE_PROTECT_ENABLE__)
final:
#endif
if(default_value_buffer)
{
free_ctrl_buffer(default_value_buffer);
default_value_buffer = NULL;
}
if(result == NVRAM_IO_ERRNO_OK) {
/* Record 0 is the header section, hence total_records + 1 valid bits. */
unmask_valid_bit_by_ltable_entry(ldi, 0, (ldi->total_records + 1));
}
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
return result;
}
/*****************************************************************************
* FUNCTION
*  update_cache_header
* DESCRIPTION
*  copy one LDI header section from src_buffer into the NVRAM read cache
* PARAMETERS
*  ldi                  [IN]  logical data item whose cache entry is updated
*  src_buffer           [IN]  header bytes to copy into the cache
*  ldi_hd_offset        [IN]  byte offset of the header section inside the cache entry
*  ldi_hd_buffer_size   [IN]  number of bytes to copy
* RETURNS
*  success or fail
*****************************************************************************/
/* Copy one LDI header section into the NVRAM read cache.
 * Looks up the LID's cache entry, then copies ldi_hd_buffer_size bytes from
 * src_buffer to cache base + table offset + entry offset + ldi_hd_offset,
 * serialized by g_nvram_cache_mutex.
 * Returns NVRAM_IO_ERRNO_OK, or NVRAM_IO_ERRNO_INVALID_LID when the LID has
 * no cache entry (after asserting). */
nvram_errno_enum update_cache_header(nvram_ltable_entry_struct* ldi, void* src_buffer, kal_uint32 ldi_hd_offset, kal_uint32 ldi_hd_buffer_size)
{
    nvram_lid_cache_table_struct *cache_entry = NULL;
    kal_bool found = get_lid_cache_index_item(ldi->LID , &cache_entry);

    if (found == KAL_FALSE)
    {
        /* LID missing from the cache index: dump diagnostics, assert, bail out. */
        kal_prompt_trace(MOD_NVRAM, "[%s]search lid failed from cache table index: 0x%x\n\r", __FUNCTION__, ldi->LID);
        kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
        kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error][%s]search lid failed from cache table\r\n", __FUNCTION__);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"LID 0x%04X category:0x%08X, attr:0x%08X\r\n",ldi->LID,ldi->category, ldi->attr);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s <====\r\n",__FUNCTION__);
        NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)found, NVRAM_ERROR_LOC_NVCACHE_ERRNO_INVALID_LID_6, ldi->LID);
        return NVRAM_IO_ERRNO_INVALID_LID;
    }

    /* Write the header bytes into the cache under the cache mutex. */
    nvram_util_take_mutex(g_nvram_cache_mutex);
    kal_mem_cpy((void *)(g_nvcache_base_address + ((cache_info_header.cache_table_offset + cache_entry->cache_offset) + ldi_hd_offset)), src_buffer, ldi_hd_buffer_size);
    nvram_util_give_mutex(g_nvram_cache_mutex);
    return NVRAM_IO_ERRNO_OK;
}
/*****************************************************************************
* FUNCTION
*  update_cache_data
* DESCRIPTION
*  synchronize record data (or integrated checksum bytes) between the
*  caller's buffer and the NVRAM cache, driven by per-record dirty/valid bits
* PARAMETERS
*  ldi             [IN]  logical data item being accessed
*  rec_index       [IN]  first record index
*  rec_amount      [IN]  number of records to process
*  nvram_param     [IN]  FS parameter block (DataPtr / Offset / Read)
*  is_only_chksum  [IN]  KAL_TRUE to sync only the integrated checksum bytes
* RETURNS
*  success or fail
*****************************************************************************/
/* Synchronize records between the caller's buffer (nvram_param->DataPtr) and
 * the NVRAM cache. Direction is decided per record:
 *   - dirty in cache            -> cache is newer: copy cache -> caller buffer
 *   - not valid in cache        -> cache is stale: copy caller buffer -> cache
 * When is_only_chksum is set and the LID uses an integrated checksum section,
 * only the per-record checksum bytes are moved instead of whole records.
 * Returns NVRAM_IO_ERRNO_OK, or NVRAM_IO_ERRNO_INVALID_LID when the LID has
 * no cache entry. */
nvram_errno_enum update_cache_data(nvram_ltable_entry_struct* ldi, kal_uint16 rec_index, kal_uint16 rec_amount, NVRAM_FS_PARAM_CMPT_T* nvram_param, kal_bool is_only_chksum)
{
nvram_lid_cache_table_struct *cache_ldi = NULL;
kal_bool result = KAL_FALSE;
kal_uint8* cache_data_boundary = NULL;
kal_uint32 remainLen = 0;
kal_uint32 section_size;
kal_uint16 temp_rec_index = rec_index;
kal_uint16 i, j;
kal_uint8* temp_dataPtr = (kal_uint8 *)(nvram_param->DataPtr);
kal_bool total_valid_bit = KAL_TRUE;
kal_uint32 nvram_chksum_size = 0;
nvram_lid_chksum_info lid_chksum_info = {0};
if (!(result = get_lid_cache_index_item(ldi->LID , &cache_ldi))) {
/* LID missing from the cache index: dump diagnostics, assert, bail out. */
kal_prompt_trace(MOD_NVRAM, "[%s]search lid failed from cache table index: 0x%x\n\r", __FUNCTION__, ldi->LID);
kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", ldi->category, ldi->attr);
kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", ldi->fileprefix, ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error][%s]search lid failed from cache table\n\r", __FUNCTION__);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"LID 0x%04X,category:0x%08X,attr:0x%08X\r\n",ldi->LID,ldi->category, ldi->attr);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"fileprefix:%s, fileverno:%s\r\n", ldi->fileprefix, ldi->fileverno);
NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)result, NVRAM_ERROR_LOC_NVCACHE_ERRNO_INVALID_LID_7, ldi->LID);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s <====\r\n",__FUNCTION__);
return NVRAM_IO_ERRNO_INVALID_LID;
}
/* Per-record payload = data + checksum + cipher-alignment padding. */
nvram_get_lid_chksum_algo_info(ldi, &lid_chksum_info, KAL_FALSE, KAL_FALSE);
nvram_chksum_size = lid_chksum_info.algo_info.chksum_algo_length;
#ifdef __NVRAM_BIND_TO_CHIP_CIPHER__
if ((ldi->attr & NVRAM_ATTR_MSP)||(ldi->attr & NVRAM_ATTR_CONFIDENTIAL))
{
/* 16 byte alignment */
remainLen = NVRAM_MSP_ALIGNMENT_REMAINDER(ldi->size + nvram_chksum_size);
}
#else
if (ldi->attr & NVRAM_ATTR_CONFIDENTIAL)
{
/* 16 byte alignment */
remainLen = NVRAM_MSP_ALIGNMENT_REMAINDER(ldi->size + nvram_chksum_size);
}
#endif
section_size = ldi->size+ nvram_chksum_size + remainLen;
/* Sanity check: the requested span must not run past the end of the cache
 * area (table offset + table size). The assert fires but does not return. */
nvram_util_take_mutex(g_nvram_cache_mutex);
cache_data_boundary = g_nvcache_base_address + cache_info_header.cache_table_offset + cache_info_header.cache_table_size;
if((g_nvcache_base_address + cache_info_header.cache_table_offset + cache_ldi->cache_offset + nvram_param->Offset + (*(nvram_param->Read))) > cache_data_boundary)
{
NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)(g_nvcache_base_address + (cache_info_header.cache_table_offset + cache_ldi->cache_offset) + nvram_param->Offset + (*(nvram_param->Read))), NVRAM_ERROR_LOC_NVCACHE_ERRNO_BIT_BOUNDARY_9, cache_ldi->LID);
}
nvram_util_give_mutex(g_nvram_cache_mutex);
for(i = 0; i < rec_amount; i++)
{
if(is_only_chksum)
{
if(ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE)
{
/* A checksum of all zero bytes is treated as "no checksum stored yet". */
total_valid_bit = KAL_FALSE;
nvram_util_take_mutex(g_nvram_cache_mutex);
for(j = 0; j < nvram_chksum_size; j++)
{
if(*((kal_uint8*)(g_nvcache_base_address + ((cache_info_header.cache_table_offset + cache_ldi->cache_offset) + (nvram_param->Offset + (i* nvram_chksum_size)+ j)))) != 0)
{
total_valid_bit = KAL_TRUE;
break;
}
}
nvram_util_give_mutex(g_nvram_cache_mutex);
if (check_dirty_bit_by_cache_table(cache_ldi, temp_rec_index + i) && total_valid_bit)
{
/* Cache copy is newer and non-zero: read checksum from cache. */
nvram_util_take_mutex(g_nvram_cache_mutex);
kal_mem_cpy((void *)(temp_dataPtr + (i* nvram_chksum_size)),
(void *)(g_nvcache_base_address + ((cache_info_header.cache_table_offset + cache_ldi->cache_offset) + (nvram_param->Offset + (i* nvram_chksum_size)))), nvram_chksum_size);
nvram_util_give_mutex(g_nvram_cache_mutex);
}else if(!check_valid_bit_by_cache_table(cache_ldi, temp_rec_index + i) &&(total_valid_bit == KAL_FALSE))
{
/* Cache copy is not populated: seed it from the caller's buffer. */
nvram_util_take_mutex(g_nvram_cache_mutex);
kal_mem_cpy((void *)(g_nvcache_base_address + ((cache_info_header.cache_table_offset + cache_ldi->cache_offset) + (nvram_param->Offset + (i* nvram_chksum_size)))),
(void *)(temp_dataPtr + (i* nvram_chksum_size)), nvram_chksum_size);
nvram_util_give_mutex(g_nvram_cache_mutex);
}
}else
{
/* LID has no integrated checksum section: write checksum bytes into
 * the record's own slot (section_size stride) unconditionally. */
nvram_util_take_mutex(g_nvram_cache_mutex);
kal_mem_cpy((void *)(g_nvcache_base_address + ((cache_info_header.cache_table_offset + cache_ldi->cache_offset) + (nvram_param->Offset + (i* section_size)))),
(void *)(temp_dataPtr + (i* nvram_chksum_size)), nvram_chksum_size);
nvram_util_give_mutex(g_nvram_cache_mutex);
}
}else
{
if (check_dirty_bit_by_cache_table(cache_ldi, temp_rec_index + i))
{
/* Cache is newer: copy the whole record out to the caller. */
nvram_util_take_mutex(g_nvram_cache_mutex);
kal_mem_cpy((void *)(temp_dataPtr + (i* section_size)), (void *)(g_nvcache_base_address + ((cache_info_header.cache_table_offset + cache_ldi->cache_offset) + (nvram_param->Offset + (i*section_size )))), section_size);
nvram_util_give_mutex(g_nvram_cache_mutex);
}else if(!check_valid_bit_by_cache_table(cache_ldi, temp_rec_index + i))
{
/* Cache entry empty: populate it and mark the record valid. */
nvram_util_take_mutex(g_nvram_cache_mutex);
kal_mem_cpy((void *)(g_nvcache_base_address + ((cache_info_header.cache_table_offset + cache_ldi->cache_offset) + (nvram_param->Offset + (i* section_size)))), (void *)(temp_dataPtr + (i* section_size)), section_size);
nvram_util_give_mutex(g_nvram_cache_mutex);
mask_valid_bit_by_cache_table(cache_ldi, temp_rec_index + i, 1);
}
}
}
return NVRAM_IO_ERRNO_OK;
}
/*****************************************************************************
* FUNCTION
*  get_cache_data
* DESCRIPTION
*  copy a span of bytes out of the NVRAM cache under the cache mutex
* PARAMETERS
*  cache_address        [IN]  absolute source address inside the cache
*  dest_buffer          [OUT] destination buffer to fill
*  working_buffer_size  [IN]  number of bytes to copy
* RETURNS
*  success or fail
*****************************************************************************/
/* Copy working_buffer_size bytes from cache_address into dest_buffer,
 * serialized by g_nvram_cache_mutex.
 * Always returns NVRAM_IO_ERRNO_OK. */
nvram_errno_enum get_cache_data(kal_uint32 cache_address, void *dest_buffer, kal_uint32 working_buffer_size)
{
    kal_uint8* src_buffer = (kal_uint8*)cache_address;
    nvram_util_take_mutex(g_nvram_cache_mutex);
    /* The former kal_mem_set(dest_buffer, 0, working_buffer_size) was removed:
     * the copy below overwrites exactly the same working_buffer_size bytes,
     * so the zero-fill was redundant work done while holding the mutex. */
    kal_mem_cpy(dest_buffer, (void *)src_buffer, working_buffer_size);
    nvram_util_give_mutex(g_nvram_cache_mutex);
    return NVRAM_IO_ERRNO_OK;
}
/*****************************************************************************
* FUNCTION
*  nvram_flush_cache_data_to_file
* DESCRIPTION
*  flush all dirty records of one queued LID from the NVRAM cache to its
*  backing file, and keep the backup copy in sync for "multiple" LIDs
* PARAMETERS
*  cache_queue_ldi  [IN]  dequeued cache-write item (LID, record range,
*                         file open option)
* RETURNS
*  success or fail
*****************************************************************************/
/* Flush every dirty record of one queued LID from the cache to its file.
 *
 * Dirty records are coalesced into contiguous runs [start_record..end_record]
 * and written in MD_CCCI_LIMIT_SIZE chunks; record 0 stands for the LDI
 * header. For LIDs with NVRAM_ATTR_CHKSUM_INTEGRATE the appended checksum
 * section is flushed the same way. "Multiple" LIDs (MULTIPLE / BACKUP_FAT /
 * IMPORTANT_L4) keep a second copy in sync: on success the primary file is
 * FS_Move-copied to the backup; on failure of the primary, the loop retries
 * on the backup copy and copies it back on success.
 *
 * cache_queue_ldi [IN] dequeued cache-write item (LID, record range, open option)
 * Returns NVRAM_IO_ERRNO_OK on success, NVRAM_ERRNO_FAIL on FS error,
 * NVRAM_IO_ERRNO_INVALID_LID when the LID has no cache entry; under
 * __NVRAM_WRITE_PROTECT_ENABLE__ the protect2 path may return early with
 * NVRAM_IO_ERRNO_OK / NVRAM_IO_ERRNO_CHK. */
nvram_errno_enum nvram_flush_cache_data_to_file(nvram_cache_write_item *cache_queue_ldi)
{
    /*----------------------------------------------------------------*/
    /* Local Variables                                                */
    /*----------------------------------------------------------------*/
    kal_uint32 len = 0;
    kal_uint32 section_size = 0;
    kal_uint32 working_buffer_size = 0;
    kal_uint8 *working_buffer = NULL;
    kal_int32 result = NVRAM_DRV_OK;
    nvram_drv_status_enum drv_status[2] = {NVRAM_DRV_OK, NVRAM_DRV_OK};
    kal_bool ret_val = KAL_FALSE;
    kal_uint32 i, j;
    kal_uint32 rec_size;
    kal_uint16 rec_amount;
    kal_uint32 openOption = FS_READ_WRITE | FS_OPEN_NO_DIR;
    nvram_lid_cache_table_struct *cache_ldi = NULL;
    kal_uint32 start_record = 0;
    kal_uint32 end_record = 0;
    kal_int32 flush_length = 0;
    kal_int32 flush_append_length = 0;
    kal_uint32 file_offset = 0;
    kal_uint32 file_append_offset = 0;
    kal_uint32 temp_offset = 0;
    kal_uint32 remainLen = 0;
    kal_bool multiple = KAL_FALSE;
    kal_uint32 multiple_ID = 0;
    FS_HANDLE hFile = 0;
    kal_wchar filename[NVRAM_MAX_PATH_LEN];
    kal_wchar src_path[NVRAM_MAX_PATH_LEN];
    kal_wchar dest_path[NVRAM_MAX_PATH_LEN];
    NVRAM_FILE_NAME nvramname;
    nvram_folder_enum nvram_folder;
    kal_uint32 file_sz = 0;
    kal_int32 ret = NVRAM_DRV_OK;
    kal_uint32 nvram_chksum_size = 0;
    nvram_lid_chksum_info lid_chksum_info = {0};
    NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s ====>\r\n",__FUNCTION__);
    NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"LID 0x%04X cache_queue_ldi->rec_index=%d cache_queue_ldi->rec_amount=%d\r\n",cache_queue_ldi->ldi->LID,cache_queue_ldi->rec_index,cache_queue_ldi->rec_amount);
    rec_size = cache_queue_ldi->ldi->size;
    rec_amount = cache_queue_ldi->ldi->total_records;
    openOption = cache_queue_ldi->openoption;
    kal_mem_set(filename, 0x0, NVRAM_MAX_PATH_LEN * sizeof(kal_wchar));
    working_buffer = (kal_uint8*) get_ctrl_buffer(MD_CCCI_LIMIT_SIZE);
    kal_mem_set(working_buffer, 0x0, MD_CCCI_LIMIT_SIZE);
    /* Per-record payload in file = data + checksum + cipher-alignment padding. */
    nvram_get_lid_chksum_algo_info(cache_queue_ldi->ldi, &lid_chksum_info, KAL_FALSE, KAL_FALSE);
    nvram_chksum_size = lid_chksum_info.algo_info.chksum_algo_length;
#ifdef __NVRAM_BIND_TO_CHIP_CIPHER__
    if ((cache_queue_ldi->ldi->attr & NVRAM_ATTR_MSP)||(cache_queue_ldi->ldi->attr & NVRAM_ATTR_CONFIDENTIAL))
    {
        /* 16 byte alignment */
        remainLen = NVRAM_MSP_ALIGNMENT_REMAINDER(rec_size + nvram_chksum_size);
    }
#else
    if (cache_queue_ldi->ldi->attr & NVRAM_ATTR_CONFIDENTIAL)
    {
        /* 16 byte alignment */
        remainLen = NVRAM_MSP_ALIGNMENT_REMAINDER(rec_size + nvram_chksum_size);
    }
#endif
    section_size = rec_size + nvram_chksum_size + remainLen;
    if (!(ret_val = get_lid_cache_index_item(cache_queue_ldi->ldi->LID , &cache_ldi))) {
        /* LID missing from the cache index: clean up, dump diagnostics, assert. */
        if (working_buffer)
        {
            free_ctrl_buffer(working_buffer);
            working_buffer = NULL;
        }
        kal_prompt_trace(MOD_NVRAM, "[%s]search lid failed from cache table index: 0x%x\n\r", __FUNCTION__, cache_queue_ldi->ldi->LID);
        kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", cache_queue_ldi->ldi->category, cache_queue_ldi->ldi->attr);
        kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", cache_queue_ldi->ldi->fileprefix, cache_queue_ldi->ldi->fileverno);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s search LID 0x%04X failed from cache table index:\r\n",__FUNCTION__,cache_queue_ldi->ldi->LID);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"category:0x%08X, attr:0x%08X \r\n", cache_queue_ldi->ldi->category, cache_queue_ldi->ldi->attr);
        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"fileprefix:%s, fileverno:%s\n\r", cache_queue_ldi->ldi->fileprefix, cache_queue_ldi->ldi->fileverno);
        NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)result, NVRAM_ERROR_LOC_NVCACHE_ERRNO_INVALID_LID_8, cache_queue_ldi->ldi->LID);
        result = NVRAM_IO_ERRNO_INVALID_LID;
        nvram_cache_last_line = __LINE__;
        goto final;
    }
    if (NVRAM_IS_ATTR_MULTIPLE(cache_queue_ldi->ldi->attr) || NVRAM_IS_ATTR_BACKUP_FAT(cache_queue_ldi->ldi->attr) || NVRAM_IS_CATEGORY_IMPORTANT_L4(cache_queue_ldi->ldi->category))
    {
        multiple = KAL_TRUE;
    }
    nvram_util_take_mutex(g_nvram_fs_mutex);
    nvram_folder = nvram_query_folder_index(cache_queue_ldi->ldi->category);
    /* Iteration 0 writes the primary copy; iteration 1 only runs when the
     * primary failed for a "multiple" LID (retry on the backup copy). */
    for (multiple_ID = 0; multiple_ID < 2; multiple_ID++)
    {
#if defined(__MTK_TARGET__) && defined(__NVRAM_IMPORTANT_PARTITIONS__) && defined(__CCCIFS_SUPPORT__)
        if (multiple_ID == 1)
        {
#ifdef __NVRAM_WRITE_PROTECT_ENABLE__
            extern kal_bool smu_is_write_protect2(nvram_lid_enum file_idx);
            if (NVRAM_IS_CATEGORY_IMPORTANT_L4(cache_queue_ldi->ldi->category))
            {
                // Assume protect2 is always locked, actually it only unlocked at first boot in factory
                if (drv_status[0] == NVRAM_DRV_OK)
                {
                    if (smu_is_write_protect2(cache_queue_ldi->ldi->LID)) {
                        // only trigger backup for specific LIDs when they are written legally (shouldn't be frequently)
                        // trigger backup, this will write sync pattern in AP side
                        MD_TRC_IO_WRITE_DATA_ITEM_RESULT(cache_queue_ldi->ldi->LID, 0xFFFF, 0x0001,__LINE__);
                        NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"LID:0x%04X write sync pattern to AP\r\n",cache_queue_ldi->ldi->LID);
                        ccci_send_message(CCMSG_ID_SYSMSGSVC_MD_UNPROTECT_PART_REQ, 0xABC);
                    } else {
                        // Don't trigger backup.
                        // think about this scenario:
                        // some LID will update at known time (every md bootup time)
                        // this will leave sync pattern in AP if we trigger backup,
                        // then hacker can deleted all files on protect1 before reboot the phone
                        // the SML data on protect2 will lost after phone reboot ...
                    }
                    NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
                    /* BUGFIX: this early exit previously returned while still
                     * holding g_nvram_fs_mutex (taken before this loop) and
                     * leaked working_buffer, deadlocking every later FS access.
                     * Release both before returning. */
                    nvram_util_give_mutex(g_nvram_fs_mutex);
                    free_ctrl_buffer(working_buffer);
                    working_buffer = NULL;
                    return NVRAM_IO_ERRNO_OK;
                }
                else
                {
                    // don't trigger backup due to protect1 write failed
                    /* BUGFIX: release the FS mutex and working buffer on this
                     * early exit as well (see note above). */
                    nvram_util_give_mutex(g_nvram_fs_mutex);
                    free_ctrl_buffer(working_buffer);
                    working_buffer = NULL;
                    return NVRAM_IO_ERRNO_CHK;
                }
            }
#endif
            nvram_folder = nvram_query_folder_index_ex(cache_queue_ldi->ldi->category, KAL_FALSE);
        }
#endif
        if (multiple_ID == 0)
        {
            nvram_util_make_lid_filename(cache_queue_ldi->ldi, nvramname, KAL_TRUE);
        }else
        {
            nvram_util_make_lid_filename(cache_queue_ldi->ldi, nvramname, KAL_FALSE);
        }
        nvram_query_file_name(nvram_folder, nvramname, filename);
        NVRAM_FS_START_EX(FS_OP_OPEN,filename);
        hFile = FS_Open((const kal_wchar*)filename, openOption);
        NVRAM_FS_END(FS_OP_OPEN,hFile);
        if (hFile == FS_FILE_NOT_FOUND) {
            drv_status[multiple_ID] = hFile;
            nvram_cache_last_line = __LINE__;
            goto FS_OP_ERROR;
        } else if (hFile <= FS_NO_ERROR) {
            drv_status[multiple_ID] = hFile;
            nvram_cache_last_line = __LINE__;
            goto FS_OP_ERROR;
        }
        NVRAM_FS_START(FS_OP_GETFILESIZE);
        result = FS_GetFileSize(hFile, &file_sz);
        NVRAM_FS_END(FS_OP_GETFILESIZE,result);
        /* A file shorter than the LDI header is corrupt unless this LID is
         * mid-reset (is_reset), in which case the file is being rebuilt. */
        if((cache_ldi->is_reset != 1) && (file_sz < NVRAM_LDI_HEADER_SIZE))
        {
            NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s->FS_GetFileSize fail at %d,result=%d file_sz =%d\r\n",__FUNCTION__,__LINE__,result,file_sz);
            drv_status[multiple_ID] = FS_FILE_NOT_FOUND;
            nvram_cache_last_line = __LINE__;
            goto FS_OP_ERROR;
        }
        /* Walk records 0..rec_amount (0 = LDI header) and flush each
         * contiguous run of dirty records in one pass. */
        for (i = 0; i <= rec_amount; i ++) {
            if (check_dirty_bit_by_cache_table(cache_ldi, i)) {
                start_record = i;
                if(i == 0) {
                    flush_length += NVRAM_LDI_HEADER_SIZE;
                    if (cache_queue_ldi->ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) {
                        flush_append_length += NVRAM_LDI_APPENDIX_HEADER_SIZE;
                    }
                }else {
                    flush_length += section_size;
                    if (cache_queue_ldi->ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) {
                        flush_append_length += nvram_chksum_size;
                    }
                }
                /* Extend the run while following records are also dirty. */
                if(i < rec_amount)
                {
                    for (j = i+1; j <= rec_amount; j++)
                    {
                        if (check_dirty_bit_by_cache_table(cache_ldi, j)) {
                            flush_length += section_size;
                            if (cache_queue_ldi->ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) {
                                flush_append_length += nvram_chksum_size;
                            }
                            if(j == rec_amount) {
                                end_record = j;
                                i = j;
                                break;
                            }
                        }else {
                            end_record = j-1;
                            i = j-1;
                            break;
                        }
                    }
                }
                else
                {
                    end_record = i;
                }
                /* File layout: header | records | appendix header | checksums. */
                if (start_record== 0) {
                    file_offset = 0;
                    if (cache_queue_ldi->ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) {
                        file_append_offset = NVRAM_LDI_HEADER_SIZE + (section_size * rec_amount) ;
                    }
                }else{
                    file_offset = NVRAM_LDI_HEADER_SIZE + (start_record-1) * section_size;
                    if (cache_queue_ldi->ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE) {
                        file_append_offset = NVRAM_LDI_HEADER_SIZE + (section_size * rec_amount) + NVRAM_LDI_APPENDIX_HEADER_SIZE +((start_record-1) * nvram_chksum_size);
                    }
                }
                if (file_offset) {
                    NVRAM_FS_START(FS_OP_SEEK);
                    drv_status[multiple_ID] = FS_Seek(hFile, file_offset, FS_FILE_BEGIN);
                    NVRAM_FS_END(FS_OP_SEEK,drv_status[multiple_ID]);
                    if (FS_NO_ERROR > drv_status[multiple_ID]){
                        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Seek fail at %d,result=%d\r\n",__FUNCTION__,__LINE__,drv_status[multiple_ID]);
                        nvram_cache_last_line = __LINE__;
                        goto FS_OP_ERROR;
                    }
                }
                /* The cache mirrors the file layout, so the file offset doubles
                 * as the offset into the cache entry. */
                temp_offset = (kal_uint32)((g_nvcache_base_address + (cache_info_header.cache_table_offset + cache_ldi->cache_offset)) + file_offset);
                len = 0;
                NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"LID 0x%04X start_record=%d end_record=%d\r\n",cache_queue_ldi->ldi->LID,start_record,end_record);
                while(flush_length > 0) {
                    if(flush_length > MD_CCCI_LIMIT_SIZE) {
                        working_buffer_size = MD_CCCI_LIMIT_SIZE;
                    }else{
                        working_buffer_size = flush_length;
                    }
                    get_cache_data(temp_offset, working_buffer, working_buffer_size);
                    NVRAM_FS_START(FS_OP_WRITE);
                    drv_status[multiple_ID] = FS_Write(hFile, working_buffer, working_buffer_size, &len);
                    NVRAM_FS_END(FS_OP_WRITE,drv_status[multiple_ID]);
                    if (FS_NO_ERROR > drv_status[multiple_ID]) {
                        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Write fail at %d,drv_status[%d]=%d\r\n",__FUNCTION__,__LINE__,multiple_ID,drv_status[multiple_ID]);
                        nvram_cache_last_line = __LINE__;
                        goto FS_OP_ERROR;
                    }
                    temp_offset = temp_offset + len;
                    flush_length = flush_length - len;
                }
                /* Flush the integrated checksum section for the same run. */
                if (cache_queue_ldi->ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE)
                {
                    working_buffer_size = flush_append_length;
                    NVRAM_FS_START(FS_OP_SEEK);
                    drv_status[multiple_ID] = FS_Seek(hFile, file_append_offset, FS_FILE_BEGIN);
                    NVRAM_FS_END(FS_OP_SEEK,drv_status[multiple_ID]);
                    /* NOTE(review): seek failures are ignored when
                     * file_append_offset == 0 — presumably intentional; confirm. */
                    if (file_append_offset && FS_NO_ERROR > drv_status[multiple_ID]) {
                        /* BUGFIX: log the failing drv_status, not the stale
                         * FS_GetFileSize 'result'. */
                        NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Seek fail at %d,result=%d\r\n",__FUNCTION__,__LINE__,drv_status[multiple_ID]);
                        nvram_cache_last_line = __LINE__;
                        goto FS_OP_ERROR;
                    }
                    temp_offset = (kal_uint32)((g_nvcache_base_address + (cache_info_header.cache_table_offset + cache_ldi->cache_offset)) + file_append_offset);
                    len = 0;
                    while(flush_append_length > 0) {
                        if(flush_append_length > MD_CCCI_LIMIT_SIZE) {
                            working_buffer_size = MD_CCCI_LIMIT_SIZE;
                        }else{
                            working_buffer_size = flush_append_length;
                        }
                        get_cache_data(temp_offset, working_buffer, working_buffer_size);
                        NVRAM_FS_START(FS_OP_WRITE);
                        drv_status[multiple_ID] = FS_Write(hFile, working_buffer, working_buffer_size, &len);
                        NVRAM_FS_END(FS_OP_WRITE,drv_status[multiple_ID]);
                        if (FS_NO_ERROR > drv_status[multiple_ID]) {
                            /* BUGFIX: log the failing drv_status, not the stale
                             * FS_GetFileSize 'result'. */
                            NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s->FS_Write fail at %d,result=%d\r\n",__FUNCTION__,__LINE__,drv_status[multiple_ID]);
                            nvram_cache_last_line = __LINE__;
                            goto FS_OP_ERROR;
                        }
                        temp_offset = temp_offset + len;
                        flush_append_length = flush_append_length - len;
                    }
                }
                /* Run written: clear dirty bits and mark the records valid. */
                unmask_dirty_bit_by_cache_table(cache_ldi, start_record, (end_record - start_record)+1);
                mask_valid_bit_by_cache_table(cache_ldi, start_record, (end_record - start_record)+1);
            }
        }
FS_OP_ERROR:
        /* Per-iteration epilogue: close the file, then decide whether to
         * retry on the backup (multiple, iteration 0 failed), give up, or
         * mirror the freshly written copy to the other side via FS_Move. */
        if(hFile > FS_NO_ERROR)
        {
            NVRAM_FS_START(FS_OP_CLOSE);
            ret = FS_Close(hFile);
            NVRAM_FS_END(FS_OP_CLOSE,ret);
        }
        if(drv_status[multiple_ID] < FS_NO_ERROR)
        {
            if(!multiple)
            {
                result = drv_status[multiple_ID];
                goto final;
            }else
            {
                if(multiple_ID == 1)
                {
                    result = drv_status[multiple_ID];
                    goto final;
                }
                /* else: primary failed for a multiple LID — fall through and
                 * retry the flush on the backup copy (multiple_ID == 1). */
            }
        }else
        {
            if(multiple)
            {
                kal_mem_set(src_path, 0x0, NVRAM_MAX_PATH_LEN * sizeof(kal_wchar));
                kal_mem_set(dest_path, 0x0, NVRAM_MAX_PATH_LEN * sizeof(kal_wchar));
                if(multiple_ID == 0)
                {
                    nvram_util_make_lid_filename(cache_queue_ldi->ldi, nvramname, KAL_TRUE); // A
                    nvram_query_file_name(nvram_query_folder_index_ex(cache_queue_ldi->ldi->category, KAL_TRUE), nvramname, src_path);
                    nvram_util_make_lid_filename(cache_queue_ldi->ldi, nvramname, KAL_FALSE); // B
                    nvram_query_file_name(nvram_query_folder_index_ex(cache_queue_ldi->ldi->category, KAL_FALSE), nvramname, dest_path);
                    NVRAM_FS_START_EX(FS_OP_MOVE, dest_path);
                    ret = FS_Move(src_path, dest_path, FS_MOVE_COPY, NULL, NULL, 0); //A to B
                    NVRAM_FS_END(FS_OP_MOVE,ret);
                }else
                {
                    nvram_util_make_lid_filename(cache_queue_ldi->ldi, nvramname, KAL_FALSE); // B
                    nvram_query_file_name(nvram_query_folder_index_ex(cache_queue_ldi->ldi->category, KAL_FALSE), nvramname, src_path);
                    nvram_util_make_lid_filename(cache_queue_ldi->ldi, nvramname, KAL_TRUE); // A
                    nvram_query_file_name(nvram_query_folder_index_ex(cache_queue_ldi->ldi->category, KAL_TRUE), nvramname, dest_path);
                    NVRAM_FS_START_EX(FS_OP_MOVE, dest_path);
                    ret = FS_Move(src_path, dest_path, FS_MOVE_COPY, NULL, NULL, 0); //B to A
                    NVRAM_FS_END(FS_OP_MOVE,ret);
                }
                result = drv_status[multiple_ID];
                goto final;
            }else
            {
                result = drv_status[multiple_ID];
                goto final;
            }
        }
    }
final:
    nvram_util_give_mutex(g_nvram_fs_mutex);
    if (working_buffer)
    {
        free_ctrl_buffer(working_buffer);
    }
    if (FS_NO_ERROR > result)
    {
        nvram_cache_last_err = result;
        MD_TRC_FUNC_NVRAM_DRV_FAT_THROW_EXCEPTION(nvram_cache_last_err, nvram_cache_last_line);
        NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"Failed at %d,result=%d\r\n",__LINE__,result);
        NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
        return NVRAM_ERRNO_FAIL;
    }
    if(cache_ldi->is_reset == 1)
    {
        cache_ldi->is_reset = 0;
    }
    NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s <====\r\n",__FUNCTION__);
    return NVRAM_IO_ERRNO_OK;
}
#ifdef __NVRAM_UT_TEST__
/* Return KAL_TRUE when no record (1..total_records) of the LID carries a
 * dirty bit, KAL_FALSE as soon as one dirty record is found. */
static kal_bool check_cache_lid_all_record_is_undirty_bit(nvram_ltable_entry_struct* ldi)
{
    /* BUGFIX: the counter was kal_uint8; for LIDs with total_records >= 255
     * it wrapped around before exceeding the bound, so the loop never
     * terminated. Use kal_uint32 like the rest of this file's loop indices. */
    kal_uint32 i;
    for(i = 1; i <= ldi->total_records; i++)
    {
        if(check_dirty_bit_by_ltable_entry(ldi,i)!= KAL_FALSE)
        {
            return KAL_FALSE;
        }
    }
    return KAL_TRUE;
}
#endif
/*****************************************************************************
* FUNCTION
*  nvram_flush_cache_handler
* DESCRIPTION
*  drain the cache write queue and flush each dequeued LID to its file,
*  with recovery handling when a flush fails
* PARAMETERS
*  void
* RETURNS
*  success or fail
*****************************************************************************/
kal_bool nvram_flush_cache_handler(void)
{
/*----------------------------------------------------------------*/
/* Local Variables */
/*----------------------------------------------------------------*/
nvram_cache_write_item cache_queue_ldi;
nvram_errno_enum status = NVRAM_IO_ERRNO_OK;
nvram_ltable_entry_struct tmp_ldi;
FS_HANDLE hFile = 0;
kal_uint8 *filename = NULL;
mcf_ota_result_e mcf_ota_ret = MCF_OTA_R_SUCCESS;
kal_int32 result = FS_NO_ERROR;
#if defined(__MTK_TARGET__)
kal_uint32 temp_queue_length = 0;
ostd_ap_core_status_enum temp_ostd_status = OSTD_AP_CORE_UNKNOWN;
#endif
kal_mem_set(&cache_queue_ldi, 0x0, sizeof(nvram_cache_write_item));
do {
#if defined(__MTK_TARGET__)
if((temp_queue_length = nvram_cache_queue_usage_rates()) < CACHE_QUEUE_BOUNDARY_SIZE)
{
if((temp_ostd_status = OSTD_return_AP_status()) == OSTD_AP_CORE_SUSPEND)
{
kal_prompt_trace(MOD_NVRAM, "queue_length:%d, ostd_status:%d\n\r", temp_queue_length, temp_ostd_status);
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(temp_queue_length, temp_ostd_status, __LINE__);
break;
}
}
kal_prompt_trace(MOD_NVRAM, "queue_length:%d, ostd_status:%d\n\r", temp_queue_length, temp_ostd_status);
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(temp_queue_length, temp_ostd_status, __LINE__);
#endif
if (!nvram_cache_dequeue(&cache_queue_ldi)) {
break;
}
#ifdef __NVRAM_UT_TEST__
if(check_cache_lid_all_record_is_undirty_bit(cache_queue_ldi.ldi))
{
continue;
}
if (NVRAM_FLUSH_CACHE_CHECK(cache_queue_ldi.ldi->LID))
{
kal_prompt_trace(MOD_NVRAM, "%s @%d NVCACHE flush bypass whilte list LID=%d\n\r",__FUNCTION__,__LINE__ , cache_queue_ldi.ldi->LID);
NVRAM_EXT_ASSERT(KAL_FALSE, __LINE__, NVRAM_LOC_INVALID_LID_5, cache_queue_ldi.ldi->LID);
}
#endif
status = nvram_flush_cache_data_to_file(&cache_queue_ldi);
/*
If write fail and the LID is located in NVRAM folder's subfolder,
first check the root folder is OK or not.
*/
if(status != NVRAM_IO_ERRNO_OK)
{
if(NVRAM_IS_CATEGORY_INTERNAL(cache_queue_ldi.ldi->category) || NVRAM_IS_CATEGORY_CALIBRAT(cache_queue_ldi.ldi->category) ||
NVRAM_IS_CATEGORY_IMPORTANT(cache_queue_ldi.ldi->category) || (NVRAM_NVD_DATA == nvram_query_folder_index(cache_queue_ldi.ldi->category)))
{
filename = (kal_uint8 *) get_ctrl_buffer(NVRAM_MAX_PATH_LEN * sizeof(kal_wchar));
NVRAM_FS_MAKE_ROOT_PATH((kal_wchar*)filename);
nvram_util_take_mutex(g_nvram_fs_mutex);
NVRAM_FS_START_EX(FS_OP_OPEN,filename);
hFile = FS_Open((const kal_wchar*)filename, FS_READ_ONLY | FS_OPEN_DIR);
NVRAM_FS_END(FS_OP_OPEN,hFile);
free_ctrl_buffer(filename);
filename = NULL;
if(hFile >= FS_NO_ERROR) /* Open NVRAM root folder Success */
{
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"%s->FS_Open Success at %d,hFile=%d\r\n",__FUNCTION__,__LINE__,hFile);
NVRAM_FS_START(FS_OP_CLOSE);
result = FS_Close(hFile);
NVRAM_FS_END(FS_OP_CLOSE,result);
}
else if (hFile == FS_FILE_NOT_FOUND || hFile == FS_PATH_NOT_FOUND)
{
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s NVRAM main folder lost\r\n",__FUNCTION__);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP,"[Error]%s NVRAM_ERROR_LOC_NVCACHE_OPEN_NV_FOLDER_FAIL_1\r\n",__FUNCTION__);
NVRAM_EXT_ASSERT(KAL_FALSE, (kal_uint32)hFile, NVRAM_ERROR_LOC_NVCACHE_OPEN_NV_FOLDER_FAIL_1, 0);
}
else
{
kal_prompt_trace(MOD_NVRAM, "Open NVRAM root folder result:%d\n\r", (kal_uint32)hFile);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"Open NVRAM root folder result:%d\r\n",(kal_uint32)hFile);
}
nvram_util_give_mutex(g_nvram_fs_mutex);
}
}
memcpy(&tmp_ldi,cache_queue_ldi.ldi,sizeof(nvram_ltable_entry_struct));
if (status != NVRAM_IO_ERRNO_OK)
{
#if (defined(__CCCIFS_SUPPORT__) && defined(__MTK_TARGET__)) || defined(__NVRAM_BIN_REGION_SIMULATION__)
if (NVRAM_IS_CATEGORY_IN_BIN_REGION(cache_queue_ldi.ldi->category))
{
// try restore from bin region
if (nvram_recover_data_item(cache_queue_ldi.ldi) == NVRAM_IO_ERRNO_OK)
{
if(nvram_ptr->state == NVRAM_STATE_READY)
{
mcf_ota_ret = mcf_do_ota_by_lid(cache_queue_ldi.ldi->LID,1, cache_queue_ldi.ldi->total_records,&tmp_ldi);
kal_prompt_trace(MOD_NVRAM, "%s @%d mcr_ota_ret=%d\n\r",__FUNCTION__,__LINE__ , mcf_ota_ret);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"mcf_do_ota_by_lid return %d\r\n",mcf_ota_ret);
}
status = nvram_flush_cache_data_to_file(&cache_queue_ldi);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"nvram_flush_cache_data_to_file return %d\r\n",status);
if (status == NVRAM_IO_ERRNO_OK)
{
goto end;
}
}
}
#endif
/* IMEI and SML */
if (NVRAM_IS_ATTR_FAULT_ASSERT(cache_queue_ldi.ldi->attr))
{
kal_prompt_trace(MOD_NVRAM, "NVRAM ASSERT ERROR NVRAM_ERROR_LOC_NVCACHE_WRITE_IMPORTANT_DATA_FAIL_2:%d\n\r", DISPLAY_ERROR(status));
kal_prompt_trace(MOD_NVRAM, "LID:0x%x, total_records:%d, record_size:%d\n\r", cache_queue_ldi.ldi->LID, cache_queue_ldi.ldi->total_records, cache_queue_ldi.ldi->size);
kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", cache_queue_ldi.ldi->category, cache_queue_ldi.ldi->attr);
kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", cache_queue_ldi.ldi->fileprefix, cache_queue_ldi.ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "[Error]NVRAM_ERROR_LOC_NVCACHE_WRITE_IMPORTANT_DATA_FAIL_2:%d\r\n", DISPLAY_ERROR(status));
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "LID:0x%04X, total_records:%d, record_size:%d\r\n", cache_queue_ldi.ldi->LID, cache_queue_ldi.ldi->total_records, cache_queue_ldi.ldi->size);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "category:0x%08X, attr:0x%08X\r\n", cache_queue_ldi.ldi->category, cache_queue_ldi.ldi->attr);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "fileprefix:%s, fileverno:%s\r\n", cache_queue_ldi.ldi->fileprefix, cache_queue_ldi.ldi->fileverno);
NVRAM_EXT_ASSERT (KAL_FALSE, DISPLAY_ERROR(status),NVRAM_ERROR_LOC_NVCACHE_WRITE_IMPORTANT_DATA_FAIL_2 , cache_queue_ldi.ldi->LID);
return status;
}
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(cache_queue_ldi.ldi->LID, status, __LINE__);
if (KAL_TRUE
#ifdef __NVRAM_CUSTOM_DISK__
&& !NVRAM_IS_CATEGORY_CUSTOM_DISK(cache_queue_ldi.ldi->category)
#endif
#ifdef __NVRAM_OTP__
&& !NVRAM_IS_CATEGORY_OTP(cache_queue_ldi.ldi->category)
#endif
)
{
/* Try to reset data if it is not a initial case */
if ((status = nvram_cache_reset_one_data_item(cache_queue_ldi.ldi, 1, cache_queue_ldi.ldi->total_records)) == NVRAM_IO_ERRNO_OK)
{
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(cache_queue_ldi.ldi->LID, status, __LINE__);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s->nvram_cache_reset_one_data_item successfully\r\n",__FUNCTION__);
if(nvram_ptr->state == NVRAM_STATE_READY)
{
mcf_ota_ret = mcf_do_ota_by_lid(cache_queue_ldi.ldi->LID,1, cache_queue_ldi.ldi->total_records,&tmp_ldi);
kal_prompt_trace(MOD_NVRAM, "%s @%d mcr_ota_ret=%d\n\r",__FUNCTION__,__LINE__ , mcf_ota_ret);
NVRAM_DEBUG_DUMP(NVRAM_WARNING_DUMP,"%s->mcf_do_ota_by_lid return %d\r\n",__FUNCTION__,mcf_ota_ret);
}
status = nvram_flush_cache_data_to_file(&cache_queue_ldi);
NVRAM_DEBUG_DUMP(NVRAM_INFO_DUMP,"nvram_flush_cache_data_to_file after reset status:%d\r\n",status);
}
//nvram_trace(TRACE_FUNC, IO_WRITE_DATA_ITEM_RESULT, ldi->LID, status, __LINE__, is_init);
}
}
if (status != NVRAM_IO_ERRNO_OK)
{
MD_TRC_IO_WRITE_DATA_ITEM_RESULT(cache_queue_ldi.ldi->LID, status, __LINE__);
kal_prompt_trace(MOD_NVRAM, "NVRAM ASSERT ERROR NVRAM_ERROR_LOC_NVCACGE_WRITE_AND_RESET_FAIL_2:%d\n\r", DISPLAY_ERROR(status));
kal_prompt_trace(MOD_NVRAM, "LID:0x%x, total_records:%d, record_size:%d\n\r", cache_queue_ldi.ldi->LID, cache_queue_ldi.ldi->total_records, cache_queue_ldi.ldi->size);
kal_prompt_trace(MOD_NVRAM, "category:0x%x, attr:0x%x\n\r", cache_queue_ldi.ldi->category, cache_queue_ldi.ldi->attr);
kal_prompt_trace(MOD_NVRAM, "fileprefix:%s, fileverno:%s\n\r", cache_queue_ldi.ldi->fileprefix, cache_queue_ldi.ldi->fileverno);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "[Error]NVRAM_ERROR_LOC_NVCACGE_WRITE_AND_RESET_FAIL_2:%d\r\n", DISPLAY_ERROR(status));
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "LID:0x%04X, total_records:%d, record_size:%d\r\n", cache_queue_ldi.ldi->LID, cache_queue_ldi.ldi->total_records, cache_queue_ldi.ldi->size);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "category:0x%08X, attr:0x%08X\r\n", cache_queue_ldi.ldi->category, cache_queue_ldi.ldi->attr);
NVRAM_DEBUG_DUMP(NVRAM_CRITICAL_DUMP, "fileprefix:%s, fileverno:%s\r\n", cache_queue_ldi.ldi->fileprefix, cache_queue_ldi.ldi->fileverno);
NVRAM_EXT_ASSERT(KAL_FALSE, DISPLAY_ERROR(status), NVRAM_ERROR_LOC_NVCACGE_WRITE_AND_RESET_FAIL_2, cache_queue_ldi.ldi->LID);
}
#if (defined(__CCCIFS_SUPPORT__) && defined(__MTK_TARGET__)) || defined(__NVRAM_BIN_REGION_SIMULATION__)
end:
#endif
#if defined(__NVRAM_CREATE_FILE_ON_WRITE__)
if (status == NVRAM_IO_ERRNO_OK)
{
nvram_util_post_write_lid(cache_queue_ldi.ldi);
}
#endif
} while(1);
return KAL_TRUE;
}
/*****************************************************************************
* FUNCTION
*  mark_nvram_cache_ready
* DESCRIPTION
*  Mark the NVRAM cache as ready by raising the global ready flag.
* PARAMETERS
*  void
* RETURNS
*  always KAL_TRUE
*****************************************************************************/
kal_bool mark_nvram_cache_ready(void)
{
    g_nvram_cache_ready = KAL_TRUE;
    return KAL_TRUE;
}
/*****************************************************************************
* FUNCTION
*  unmark_nvram_cache_ready
* DESCRIPTION
*  Mark the NVRAM cache as not ready by clearing the global ready flag.
* PARAMETERS
*  void
* RETURNS
*  always KAL_TRUE
*****************************************************************************/
kal_bool unmark_nvram_cache_ready(void)
{
    g_nvram_cache_ready = KAL_FALSE;
    return KAL_TRUE;
}
/*****************************************************************************
* FUNCTION
*  check_nvram_cache_ready
* DESCRIPTION
*  Query whether the NVRAM cache may be used right now: the cache must have
*  been marked ready, system init must be finished, and neither factory-boot
*  mode nor the FT module may be active.
* PARAMETERS
*  void
* RETURNS
*  KAL_TRUE if the cache is usable, otherwise KAL_FALSE
*****************************************************************************/
kal_bool check_nvram_cache_ready(void)
{
    /* All conditions must hold; bail out on the first failing one,
       preserving the original short-circuit evaluation order. */
    if (!g_nvram_cache_ready)
    {
        return KAL_FALSE;
    }
    if (kal_query_systemInit() != KAL_FALSE)
    {
        return KAL_FALSE;
    }
    if (kal_query_boot_mode() == FACTORY_BOOT)
    {
        return KAL_FALSE;
    }
    if (stack_get_active_module_id() == MOD_FT)
    {
        return KAL_FALSE;
    }
    return KAL_TRUE;
}
/*****************************************************************************
* FUNCTION
*  check_nvram_cache_initialized
* DESCRIPTION
*  Query whether the NVRAM cache has been initialized: the ready flag must
*  be set and neither factory-boot mode nor the FT module may be active.
*  (Unlike check_nvram_cache_ready, system-init state is not considered.)
* PARAMETERS
*  void
* RETURNS
*  KAL_TRUE if initialized, otherwise KAL_FALSE
*****************************************************************************/
kal_bool check_nvram_cache_initialized(void)
{
    /* Guard-clause form; same evaluation order as the && chain it replaces. */
    if (!g_nvram_cache_ready)
    {
        return KAL_FALSE;
    }
    if (kal_query_boot_mode() == FACTORY_BOOT)
    {
        return KAL_FALSE;
    }
    if (stack_get_active_module_id() == MOD_FT)
    {
        return KAL_FALSE;
    }
    return KAL_TRUE;
}
#if ((!defined(__MTK_TARGET__)) && (!defined(__UE_SIMULATOR__)))
static void nvcache_assgn_ltable(void)
{
kal_uint32 i, offset = 0;
nvram_ltable_entry_struct *ltable_entry = NULL;
nvram_ltable_entry_struct *the_start = &_nvram_ltable_start;
kal_uint32 start_addr = (kal_uint32)the_start, end_addr = (kal_uint32)&the_nvram_ltable_end;
#if defined(_MSC_VER) && !defined(L1_SIM)
//skip session gap
for (offset = 0; offset < 0x1000; offset += 32) // __declspec(align(32))
{
ltable_entry = (nvram_ltable_entry_struct*)((kal_uint32)&the_start[0] + offset);
if(ltable_entry->fileprefix[0]) {
break;
}
}
#endif
for (i = 0;(kal_uint32)ltable_entry <= end_addr; i++)
{
ltable_entry = (nvram_ltable_entry_struct*)((kal_uint32)&the_start[i] + offset);
if (assgn_logical_data_item_table == NULL && ltable_entry->fileprefix[0])
{
assgn_logical_data_item_table = ltable_entry;
assgn_ltable.table = &assgn_logical_data_item_table;
}
if (ltable_entry->fileprefix[0])
{
//printf("%03d: ltable[%d]:%x filename = %s\n", nvram_ptr->ltable.total_LID, ltable_entry->LID, ltable_entry, ltable_entry->fileprefix);
assgn_ltable.total_LID++;
}
else
{
ltable_entry = (nvram_ltable_entry_struct*)((kal_uint32)&the_start[i] + offset + 32);
if (ltable_entry->fileprefix[0])
{
//printf("%03d: ltable[%d]:%x filename = %s\n", nvram_ptr->ltable.total_LID, ltable_entry->LID, ltable_entry, ltable_entry->fileprefix);
assgn_ltable.total_LID++;
}
}
}
assgn_ltable.area_size = (kal_uint32)ltable_entry - (kal_uint32)assgn_logical_data_item_table;
}
/*****************************************************************************
* FUNCTION
*  nvcache_util_get_data_item
* DESCRIPTION
*  PC/simulation build only: look up an LDI entry by LID within the table
*  discovered by nvcache_assgn_ltable.
* PARAMETERS
*  ldi  [OUT] receives the matching entry, or NULL when not found
*             (may itself be NULL if the caller only wants existence)
*  LID  [IN]  logical data item identifier to search for
* RETURNS
*  KAL_TRUE when the LID is found, otherwise KAL_FALSE
*****************************************************************************/
kal_bool nvcache_util_get_data_item(nvram_ltable_entry_struct **ldi, nvram_lid_enum LID)
{
    /*----------------------------------------------------------------*/
    /* Local Variables                                                */
    /*----------------------------------------------------------------*/
    kal_uint32 idx = 0;
    kal_uint32 matched = 0;
    nvram_ltable_entry_struct *entry = NULL;
    kal_uint32 boundary = (kal_uint32)assgn_logical_data_item_table + assgn_ltable.area_size;

    /* Scan at least one entry; stop once every populated LID has been seen
       or the end of the table area is reached. */
    for (;;)
    {
        entry = &assgn_logical_data_item_table[idx];
        if (entry->fileprefix[0])
        {
            matched++;
            if (entry->LID == LID)
            {
                if (ldi != NULL) {
                    *ldi = entry;
                }
                return KAL_TRUE;
            }
        }
        idx++;
        if (matched >= assgn_ltable.total_LID ||
            (kal_uint32)&assgn_logical_data_item_table[idx] >= boundary)
        {
            break;
        }
    }
    if (ldi != NULL)
        *ldi = NULL;
    return KAL_FALSE;
}
#endif
/*****************************************************************************
* FUNCTION
*  nvram_cache_main
* DESCRIPTION
*  Entry function of the NVRAM cache task. When share-memory support is
*  enabled, loops waiting for cache events and flushes dirty cache data;
*  exits the loop (and the task body) if event retrieval fails.
* PARAMETERS
*  task_entry_ptr [IN] task entry information (unused here)
* RETURNS
*  void
*****************************************************************************/
void nvram_cache_main(task_entry_struct *task_entry_ptr)
{
    kal_uint32 count = 0; // debug-only loop counter (CVD variable workaround); safe to remove
    //kal_uint32 retrieved_events = 0;
    kal_status status;
    if(g_nvram_cache_SHM_support == KAL_TRUE)
    {
        //kal_set_active_module_id();
        while(1) {
            count ++;
            /* Block until a cache event arrives, then flush. */
            if((status = nvram_cache_retrieve_event()) == KAL_SUCCESS) {
                nvram_flush_cache_handler();
            }else {
                /* Event retrieval failed: leave the task loop. */
                //NVRAM_EXT_ASSERT(KAL_FALSE, DISPLAY_ERROR(status),1,1,1);
                break;
            }
        }
    }
}
/*****************************************************************************
* FUNCTION
*  nvram_cache_init
* DESCRIPTION
*  init() function of the NVRAM cache task. On PC builds it first computes
*  the per-LID cache layout (file lengths and 4-byte-aligned offsets), then
*  obtains the cache memory (CCCI share memory on target, malloc on PC),
*  publishes the cache header and index table into it, and finally creates
*  the cache mutex/event/queue and marks the cache ready. Any failure path
*  disables share-memory support and returns with the cache unmarked.
* PARAMETERS
*  void
* RETURNS
*  KAL_TRUE (failures disable the cache and may assert instead)
*****************************************************************************/
kal_bool nvram_cache_init(void)
{
#if defined(__HIF_CCCI_SUPPORT__)
    CCCI_RUNTIME_FEATURE_SUPPORT_T feature_support = {0};
    CCCI_RUNTIME_SHARE_MEMORY_FORMAT_T shm = {0};
#endif
#if (!defined(__MTK_TARGET__)) && (!defined(__UE_SIMULATOR__))
    kal_uint32 temp_offset = 0;                /* running byte offset inside the cache region */
    kal_int32 i;
    nvram_ltable_entry_struct *ldi = NULL;
    kal_uint32 remainLen = 0;                  /* MSP/confidential 16-byte alignment padding */
    kal_uint32 nvram_chksum_size = 0;          /* per-record checksum length in bytes */
#ifdef __NVRAM_UT_TEST__
    extern nvram_checksum_config NVRAM_CHK_CONFIG;
#else
    extern const nvram_checksum_config NVRAM_CHK_CONFIG;
#endif
    /* PC build: discover the logical data item table first. */
    nvcache_assgn_ltable();
    /* Compute every cached LID's on-file length and its offset within the
       cache region; offsets are rounded up to 4-byte alignment. */
    for(i = 0; i < cache_info_header.cache_lid_num; i++)
    {
        remainLen = 0;
        if(nvcache_util_get_data_item(&ldi, cache_info_table[i].LID))
        {
#ifdef __NV_CHKSUM_ENHANCE__
            /* Checksum size depends on which algorithm this LID's attr selects. */
            if(NVRAM_IS_ATTR_CHKSUM_ENHANC_ALGRTHM(ldi->attr))
            {
                if(NVRAM_MD5 == NVRAM_CHK_CONFIG.enhance_algo_type)
                {
                    nvram_chksum_size = MD5_CHKSUM_LENGTH_8;
                }else
                {
                    nvram_chksum_size = NVRAM_CHK_CONFIG.enhance_algo_size;
                }
            }else
            {
                if(NVRAM_MD5 == NVRAM_CHK_CONFIG.default_algo_type)
                {
                    nvram_chksum_size = MD5_CHKSUM_LENGTH_8;
                }else
                {
                    nvram_chksum_size = NVRAM_CHK_CONFIG.default_algo_size;
                }
            }
#else
            nvram_chksum_size = MD5_CHKSUM_LENGTH_8;
#endif
#ifdef __NVRAM_BIND_TO_CHIP_CIPHER__
            if ((ldi->attr & NVRAM_ATTR_MSP)||(ldi->attr & NVRAM_ATTR_CONFIDENTIAL))
            {
                /* 16 byte alignment */
                remainLen = NVRAM_MSP_ALIGNMENT_REMAINDER(ldi->size + nvram_chksum_size);
            }
#else
            if (ldi->attr & NVRAM_ATTR_CONFIDENTIAL)
            {
                /* 16 byte alignment */
                remainLen = NVRAM_MSP_ALIGNMENT_REMAINDER(ldi->size + nvram_chksum_size);
            }
#endif
            if(ldi->attr & NVRAM_ATTR_CHKSUM_INTEGRATE)
            {
                /* Integrated checksum layout: appendix header plus one
                   checksum per record. */
                cache_info_table[i].file_length = nvram_cache_appendix_header_offset(ldi) + NVRAM_LDI_APPENDIX_HEADER_SIZE + (ldi->total_records * nvram_chksum_size);
            }else
            {
                /* Classic layout: LDI header plus padded record bodies. */
                cache_info_table[i].file_length = NVRAM_LDI_HEADER_SIZE + ((ldi->size + nvram_chksum_size + remainLen) * ldi->total_records);
            }
            cache_info_table[i].cache_offset = temp_offset;
            temp_offset = ((cache_info_table[i].file_length + temp_offset + (4 - 1)) & (~(4-1)));
        }
        else
        {
            /* LID not found in the table: keep its preset file_length and
               still reserve space for it instead of failing. */
            cache_info_table[i].cache_offset = temp_offset;
            temp_offset = ((cache_info_table[i].file_length + temp_offset + (4 - 1)) & (~(4-1)));
            //return KAL_FALSE;
        }
    }
    cache_info_header.cache_table_size = temp_offset;
#endif
#if defined(__MTK_TARGET__)
#if defined(__HIF_CCCI_SUPPORT__)
    /* Target build: the cache lives in AP/MD share memory managed by CCCI. */
    feature_support = ccci_runtime_data_query(AP_CCCI_RUNTIME_NVRAM_CACHE_SHARE_MEMORY, &shm, sizeof(CCCI_RUNTIME_SHARE_MEMORY_FORMAT_T));
    if(feature_support.support_mask == CCCI_RUNTIME_FEATURE_MUST_SUPPORT)
    {
        g_nvcache_base_address = (kal_uint8 *)(shm.addr);
        g_nvcache_memory_size = shm.size;
        if ((cache_info_header.cache_table_offset + cache_info_header.cache_table_size) >= shm.size)
        {
            /* Share memory is too small for the computed layout. */
            g_nvram_cache_SHM_support = KAL_FALSE;
            MD_TRC_FUNC_NVRAM_DRV_FAT_THROW_EXCEPTION(shm.addr, shm.size);
            NVRAM_EXT_ASSERT (KAL_FALSE, shm.addr, shm.size, (cache_info_header.cache_table_offset + cache_info_header.cache_table_size));
        }
        kal_mem_set((void *)g_nvcache_base_address,0,shm.size);
        MD_TRC_FUNC_NVRAM_DRV_FAT_THROW_EXCEPTION(shm.addr, shm.size);
    }else
    {
        /* Share memory not granted by AP: run without the cache. */
        g_nvram_cache_SHM_support = KAL_FALSE;
        unmark_nvram_cache_ready();
        NVRAM_EXT_ASSERT (KAL_FALSE, feature_support.support_mask, NVRAM_ERROR_LOC_NVCACHE_ERRNO_SHM_GET_FAILED , __LINE__);
        return KAL_TRUE;
    }
#else
    /* No CCCI on target: cache unsupported. */
    g_nvram_cache_SHM_support = KAL_FALSE;
    unmark_nvram_cache_ready();
    return KAL_TRUE;
#endif
    /* Publish the header and LID index table into the share memory. */
    kal_mem_cpy(g_nvcache_base_address, &cache_info_header,sizeof(nvram_lid_cache_header));
    kal_mem_cpy((g_nvcache_base_address + cache_info_header.table_index_offset), cache_info_table, cache_info_header.table_index_size);
#elif (!defined(__UE_SIMULATOR__))
    /* PC build: emulate the share memory with a heap buffer. */
    g_nvcache_base_address =(kal_uint8 *) malloc(CACHE_SHARE_MEMORY_SIZE);
    if(g_nvcache_base_address == NULL)
    {
        g_nvram_cache_SHM_support = KAL_FALSE;
        unmark_nvram_cache_ready();
        NVRAM_EXT_ASSERT (KAL_FALSE, KAL_FALSE, NVRAM_ERROR_LOC_NVCACHE_ERRNO_SHM_MALLOC_FAILED , __LINE__);
        return KAL_TRUE;
    }
    kal_mem_set((void *)g_nvcache_base_address,0,CACHE_SHARE_MEMORY_SIZE);
    kal_mem_cpy(g_nvcache_base_address, &cache_info_header,sizeof(nvram_lid_cache_header));
    kal_mem_cpy((g_nvcache_base_address + cache_info_header.table_index_offset), cache_info_table, cache_info_header.table_index_size);
#else
    /* UE simulator: cache not supported. */
    g_nvram_cache_SHM_support = KAL_FALSE;
    unmark_nvram_cache_ready();
    return KAL_TRUE;
#endif
    /* Memory is in place: create IPC primitives and mark the cache ready. */
    g_nvram_cache_SHM_support = KAL_TRUE;
    g_nvram_cache_mutex = kal_create_mutex("NV_CACHE");
    nvram_cache_event_init();
    nvram_cache_queue_init();
    mark_nvram_cache_ready();
    return KAL_TRUE;
}
/*****************************************************************************
* FUNCTION
*  nvram_cache_reset
* DESCRIPTION
*  reset() function of the NVRAM cache task. Clears the ready flag; under
*  unit test it also re-initializes the queue and wipes the dirty/valid
*  mapping areas in the cache memory.
* PARAMETERS
*  void
* RETURNS
*  always KAL_TRUE
*****************************************************************************/
kal_bool nvram_cache_reset(void)
{
    /*----------------------------------------------------------------*/
    /* Local Variables                                                */
    /*----------------------------------------------------------------*/
    /*----------------------------------------------------------------*/
    /* Code Body                                                      */
    /*----------------------------------------------------------------*/
    g_nvram_cache_ready = KAL_FALSE;
#if defined (__NVRAM_UT_TEST__)
    nvram_cache_queue_init();
    kal_mem_set(g_nvcache_base_address + cache_info_header.dirty_mapping_offset,0,cache_info_header.dirty_mapping_size);
    /* NOTE(review): the valid mapping is cleared with dirty_mapping_size,
       not a valid_mapping_size -- confirm both regions have the same length. */
    kal_mem_set(g_nvcache_base_address + cache_info_header.valid_mapping_offset,0,cache_info_header.dirty_mapping_size);
#endif
    return KAL_TRUE;
} /* end of nvram_cache_reset function */
/*****************************************************************************
* FUNCTION
*  nvram_cache_create
* DESCRIPTION
*  create() function of the NVRAM cache task: hands the component-task
*  framework the table of entry/init/reset handlers for this task.
* PARAMETERS
*  handle [OUT] receives a pointer to the static handler table
* RETURNS
*  always KAL_TRUE
*****************************************************************************/
kal_bool nvram_cache_create(comptask_handler_struct **handle)
{
    /* Handler table lives in static storage so the returned pointer stays
       valid for the lifetime of the task. */
    static const comptask_handler_struct cache_task_handlers =
    {
        nvram_cache_main,   /* task entry function */
        nvram_cache_init,   /* task initialization function */
        nvram_cache_reset   /* task reset handler */
    };

    *handle = (comptask_handler_struct *)&cache_task_handlers;
    return KAL_TRUE;
}