[Feature]Upload Modem source code
Change-Id: Id4294f30faced84d3e6fd6d5e61e1111bf287a37
diff --git a/mcu/tools/mertos/mer_codegen.py b/mcu/tools/mertos/mer_codegen.py
new file mode 100644
index 0000000..d09ecab
--- /dev/null
+++ b/mcu/tools/mertos/mer_codegen.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# ****************************************************************************
+# Copyright Statement:
+# --------------------
+# This software is protected by Copyright and the information contained
+# herein is confidential. The software may not be copied and the information
+# contained herein may not be used or disclosed except with the written
+# permission of MediaTek Inc. (C) 2018
+#
+# BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+# THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+# RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
+# AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+# NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+# SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+# SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
+# THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
+# NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
+# SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
+#
+# BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
+# LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+# AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+# OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
+# MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+#
+# THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
+# WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
+# LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
+# RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
+# THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
+#
+# ****************************************************************************
+#
+# ****************************************************************************
+#
+# Filename:
+# ---------
+# mer_codegen.py
+#
+# Project:
+# --------
+# MERTOS
+#
+# Description:
+# ------------
+# Generate the code required by MERTOS.
+#
+#
+# Author:
+# -------
+# Ke-Ting Chen (mtk03141)
+#
+# ****************************************************************************
+
+import os
+import sys
+
+import mer_utility
+
+
+def extract_task_info(data_extractor):
+ """
+ Transform syscomp data to internal representation
+ """
+ # typedef struct {
+ # kal_char *comp_name_ptr;
+ # kal_char *comp_qname_ptr;
+ # kal_uint32 comp_priority;
+ # kal_uint32 comp_stack_size;
+ # kal_create_func_ptr comp_create_func;
+ # kal_bool comp_internal_ram_stack;
+ # kal_uint8 comp_ext_qsize;
+ # kal_uint8 comp_int_qsize;
+ # kal_uint8 comp_boot_mode;
+ # kal_affinity_group comp_affinity_attribute;
+ # kal_bool comp_affinity_dynamic;
+ # kal_task_group_id comp_affinity_group_id;
+ # } comptask_info_struct;
+
+ # Get structure size
+ affinity_group_size = data_extractor.unpack_symbol_data('I', 'cfg_affinity_group_size')[0]
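+    # Format characters (Python struct module): I = 4-byte unsigned, H = 2-byte unsigned,
+    # B = 1-byte unsigned, x = one padding byte, matching the C layout above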
+ if affinity_group_size == 1:
+ struct_format_string = 'IIIIIBBBBBBxxI'
+ elif affinity_group_size == 2:
+ struct_format_string = 'IIIIIBBBBHBxI'
+    else:
+        assert 0, 'Unsupported affinity group size: {0}'.format(affinity_group_size)
+ field_names = ['name', 'mail_queue_name', 'priority', 'stack_size', 'create_func',
+ 'is_stack_in_internal_ram', 'mail_queue_size', 'internal_queue_size',
+ 'boot_mode', 'affinity', 'is_dynamic_affinity', 'affinity_group_id']
+
+ infos = data_extractor.unpack_symbol_data_struct_array('sys_comp_config_tbl',
+ struct_format_string, field_names)
+
+    # Get the number of MIPS VPEs (virtual processing elements)
+ vpe_num = data_extractor.unpack_symbol_data('I', 'cfg_sys_vpe_num')[0]
+
+ # Get ILM struct size
+ mail_size = data_extractor.unpack_symbol_data('I', 'cfg_ilm_struct')[0]
+
+ # Tweak the format
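+    # Drop entries whose create_func is the 0xF0F0F0F0 placeholder (assumed to mark unused slots)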
+ infos = [info for info in infos if info['create_func'] != 0xF0F0F0F0]
+ for info in infos:
+ if info['name'] == 0:
+ info['name'] = 'UNKNOWN'
+ else:
+ info['name'] = data_extractor.get_string_data(info['name'])[:-1] # Remove ending 0
+
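+        # Keep only the upper 16 bits of the packed comp_priority value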
+ info['priority'] = info['priority'] >> 16
+ info['mail_size'] = mail_size
+ info['entry_func'] = 'kal_mer_task_entry_wrapper'
+ info['init_func'] = 'mer_kernel_task_default_init'
+ del info['mail_queue_name']
+ del info['create_func']
+ del info['internal_queue_size']
+ del info['boot_mode']
+ del info['is_dynamic_affinity']
+ del info['affinity_group_id']
+
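+        # Round the stack size up to the next multiple of 32 bytes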
+ if info['stack_size'] % 32 != 0:
+ info['stack_size'] += (32 - info['stack_size'] % 32)
+
+        assert info['stack_size'] >= 96, ('Stack size should not be less than 96 bytes: task ' +
+                                          info['name'])
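+
+    # Append one dummy idle-loop task per VPE, pinned to that VPE through its affinity bit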
+ for idx in range(vpe_num):
+ info = dict()
+ info['name'] = 'DUMMY_IDLE_LOOP%d' % idx
+ info['priority'] = 1024+idx
+ info['mail_size'] = 0
+ info['entry_func'] = 'mer_kernel_task_dummy_loop'
+ info['init_func'] = 'mer_kernel_task_default_init'
+ info['affinity'] = 2**idx
+ info['mail_queue_size'] = 0
+ info['stack_size'] = 512
+ infos.append(info)
+
+    assert len(infos) <= 256, 'The total number of tasks exceeds 256.'
+
+ return infos
+
+
+def write_task_config(infos, config_output_path, priority_output_path):
+    # Sort by priority; within the same priority, tasks with a larger affinity mask come first
+ infos.sort(key=lambda x: (x['priority'], -x['affinity']))
+
+    # To reduce the TCB size required by MERTOS, we pack the priorities.
+    # The KAL-priority-to-MERTOS-priority mapping is written to a separate file.
+ original_priorities = [None] * len(infos)
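+    # After the loop below, each task's MERTOS priority is simply its index in the
+    # priority-sorted list; priorities >= 1024 (the dummy idle tasks) record 0 in the mapping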
+ for index in xrange(len(infos)):
+ if infos[index]['priority'] < 1024:
+ original_priorities[index] = infos[index]['priority']
+ else:
+ original_priorities[index] = 0
+
+ infos[index]['priority'] = index
+
+ # Write task config file
+ output_info_list = ['name', 'affinity', 'stack_size', 'mail_queue_size', 'mail_size',
+ 'priority', 'entry_func', 'init_func']
+ write_config(infos, output_info_list, 'MER_CONFIG_TASK', config_output_path)
+
+ # Write priority mapping
+ with open(priority_output_path, 'w') as out_file:
+ out_file.write(', '.join([str(priority) for priority in original_priorities]))
+
+
+def extract_dpc_info(data_extractor):
+ """
+ Transform syscomp data to internal representation
+ """
+ # typedef struct {
+ # kal_hisr index;
+ # kal_uint8 priority;
+ # kal_uint8 options;
+ # kal_uint32 stack_size;
+ # kal_hisr_func_ptr entry_func;
+ # kal_char *hisr_name;
+ # kal_affinity_group affinity_attribute;
+ # kal_bool affinity_dynamic;
+ # } hisr_parameter_s;
+
+ # Get structure size
+ affinity_group_size = data_extractor.unpack_symbol_data('I', 'cfg_affinity_group_size')[0]
+ if affinity_group_size == 1:
+ struct_format_string = 'BBBxIIIBBxx'
+ elif affinity_group_size == 2:
+ struct_format_string = 'BBBxIIIHBx'
+    else:
+        assert 0, 'Unsupported affinity group size: {0}'.format(affinity_group_size)
+ field_names = ['index', 'priority', 'options', 'stack_size', 'entry_func', 'name', 'affinity',
+ 'is_dynamic_affinity']
+ infos = data_extractor.unpack_symbol_data_struct_array('hisr_info',
+ struct_format_string, field_names)
+
+ # Tweak the format
+    for info in infos:
+        if info['name'] == 0:
+            info['name'] = 'UNKNOWN'
+ else:
+ info['name'] = data_extractor.get_string_data(info['name'])[:-1] # Remove ending 0
+
+ assert info['entry_func'], ('Entry function should not be NULL: hisr ' +
+ info['name'])
+ info['entry_func'] = data_extractor.get_symbol_name(info['entry_func'])
+ assert info['entry_func'] != 'SYMBOL_NOT_FOUND'
+
+ del info['options']
+ del info['is_dynamic_affinity']
+
+ if info['stack_size'] % 32 != 0:
+ info['stack_size'] += (32 - info['stack_size'] % 32)
+
+        assert info['stack_size'] >= 96, ('Stack size should not be less than 96 bytes: hisr ' +
+                                          info['name'])
+
+ return infos
+
+
+def write_dpc_config(infos, config_output_path, index_output_path):
+ # Do not sort by index
+ # infos.sort(key=lambda x: x['index'])
+
+    # Build the mapping from KAL HISR index to MERTOS DPC index.
+    # The KAL-index-to-MERTOS-index mapping is written to a separate file.
+ max_index = max(info['index'] for info in infos)
+ kal_to_mertos_indices = [max_index] * (max_index + 1)
+ for index in xrange(len(infos)):
+ kal_to_mertos_indices[infos[index]['index']] = index
+
+ # Write dpc config file
+ output_info_list = ['name', 'affinity', 'priority', 'stack_size', 'entry_func']
+ write_config(infos, output_info_list, 'MER_CONFIG_DPC', config_output_path)
+
+ # Write index mapping
+ with open(index_output_path, 'w') as out_file:
+ out_file.write(', '.join([str(index) for index in kal_to_mertos_indices]))
+
+
+def extract_fixmem_info(data_extractor):
+ """
+ Transform syscomp data to internal representation
+ """
+ # First parse control buffer
+ # typedef struct
+ # {
+ # kal_uint32 buf_size;
+ # kal_uint16 buf_num;
+ # } buffer_size_s;
+ struct_format_string = 'IHxx'
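+    # I = 4-byte buf_size, H = 2-byte buf_num, xx = two padding bytes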
+ field_names = ['size', 'number']
+ infos = data_extractor.unpack_symbol_data_struct_array('cfg_ctrl_buff',
+ struct_format_string, field_names)
+
+ # Get control buffer overhead for each block
+ control_buffer_total_overhead = data_extractor.unpack_symbol_data('I', 'cfg_ctrl_buff_overhead')[0]
+
+ # Get control buffer OS overhead for each block
+ control_buffer_os_overhead = data_extractor.unpack_symbol_data('I', 'cfg_os_buff_overhead')[0]
+
+    # Before the KAL refactoring, cfg_event_buff was controlled by MERTOS directly,
+    # so the exact pool_id of the memory buffer did not need to be read from the header;
+    # it was always evshed_pool_id.
+    # After the refactoring, KAL gathers the porting layer from each OS and allocates/frees
+    # memory through the common-level API (kal_os_allocate_buffer/kal_os_deallocate_buffer).
+
+    # Add the overhead to each control buffer. These buffers are for users,
+    # so apply the total overhead (header/footer).
+ for entry in infos:
+ entry['size'] += control_buffer_total_overhead
+ entry['type'] = 'fixmem'
+
+ # Get event scheduler buffer requirement
+ event_scheduler_infos = data_extractor.unpack_symbol_data_struct_array(
+ 'cfg_event_buff', struct_format_string, field_names)
+
+    # Add the overhead to each event scheduler buffer. These buffers are for the OS,
+    # so apply only the OS overhead.
+ for entry in event_scheduler_infos:
+ entry['size'] += control_buffer_os_overhead
+ entry['type'] = 'event'
+
+ infos.extend(event_scheduler_infos)
+
+ return infos
+
+
+def write_fixmem_config(infos, config_output_path):
+    # Write fixmem config file
+ output_info_list = ['type', 'size', 'number']
+ write_config(infos, output_info_list, 'MER_SERVICE_CONFIG_FIXMEM', config_output_path)
+
+
+def write_mutex_semaphore_config(mutex_number, semaphore_number, config_output_path):
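+    # Mutexes and semaphores are both emitted as MER_SERVICE_CONFIG_SEMAPHORE entries:
+    # mutex_number mutexes (the last one named LAST_MUTEX) followed by
+    # semaphore_number semaphores, each with an initial count of 1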
+ output_info_list = ['name', 'count']
+    infos = [{'name': 'M{0}'.format(index), 'count': 1} for index in xrange(mutex_number - 1)]
+    infos.append({'name': 'LAST_MUTEX', 'count': 1})
+    infos.extend({'name': 'S{0}'.format(index), 'count': 1}
+                 for index in xrange(semaphore_number))
+ write_config(infos, output_info_list, 'MER_SERVICE_CONFIG_SEMAPHORE', config_output_path)
+
+
+def write_enhmutex_config(number, config_output_path):
+ output_info_list = ['name']
+ infos = list({output_info_list[0]: index} for index in xrange(number))
+ write_config(infos, output_info_list, 'MER_SERVICE_CONFIG_SYNC_ENHMUTEX', config_output_path)
+
+
+def write_event_group_config(number, config_output_path):
+ output_info_list = ['name']
+ infos = list({output_info_list[0]: index} for index in xrange(number))
+ write_config(infos, output_info_list, 'MER_SERVICE_CONFIG_EVENT_GROUP', config_output_path)
+
+
+def write_config(infos, column_names, row_name, output_path):
+ def merge_dicts(a, b):
+ c = a.copy()
+ c.update(b)
+ return c
+
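+    # Each column is as wide as its longest value (or its header, whichever is longer)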
+ column_width = {}
+ for name in column_names:
+ column_width[name + '_width'] = max(len(name), max(len(str(info[name])) for info in infos))
+ output_format = ', '.join('{{{0}:{{{0}_width}}}}'.format(name) for name in column_names)
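+    # For column 'name', for example, this yields the spec '{name:{name_width}}',
+    # so every value is padded to its column's width when formatted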
+
+ # Write config file
+ with open(output_path, 'w') as out_file:
+        # Guard header
+ out_file.write('#if !defined(__MER_CONFIG_MUST_REBUILD__)\n')
+ out_file.write(' #error "should not include this file since this module is library release or binary release"\n')
+ out_file.write('#endif\n')
+ # Write info comment line
+ column_name_dict = {name: name for name in column_names}
+ out_file.write('/*' + ' ' * (len(row_name) - 2 + 1) + output_format.format(
+ **merge_dicts(column_name_dict, column_width)) + '*/\n')
+ for info in infos:
+ out_file.write('{0}('.format(row_name) + output_format.format(
+ **merge_dicts(info, column_width)) + ')\n')
+
+
+def main(argv):
+ def get_config_output_path(filename):
+ return os.path.join(task_config_output_folder, filename)
+
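+    # Expected arguments: <readelf_executable> <elf_path> <config_output_folder>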
+ readelf_exec = argv[1]
+ elf_path = argv[2]
+ task_config_output_folder = argv[3]
+
+ data_extractor = mer_utility.ElfDataExtractor(readelf_exec, elf_path)
+ task_infos = extract_task_info(data_extractor)
+ dpc_infos = extract_dpc_info(data_extractor)
+ fixmem_infos = extract_fixmem_info(data_extractor)
+
+ write_task_config(task_infos,
+ get_config_output_path('kal_mertos_config_task.inc'),
+ get_config_output_path('kal_mertos_config_task_priority_mapping.inc'))
+ write_dpc_config(dpc_infos,
+ get_config_output_path('kal_mertos_config_dpc.inc'),
+ get_config_output_path('kal_mertos_config_dpc_index_mapping.inc'))
+ write_fixmem_config(fixmem_infos,
+ get_config_output_path('kal_mertos_config_fixmem.inc'))
+
+
+if __name__ == '__main__':
+ main(sys.argv)