# test case management tool - manual execution from testopia test cases
#
# Copyright (c) 2018, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.
#
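# Invoked through resulttool, for example:
#   resulttool manualexecution "<path to manual test case JSON file>"
# (Manual test case JSON files live in meta/lib/oeqa/manual/.)
#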
import argparse
import json
import os
import sys
import datetime
import re
import copy
from oeqa.core.runner import OETestResultJSONHelper


def load_json_file(f):
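    """Read a JSON file and return the parsed data."""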
    with open(f, "r") as filedata:
        return json.load(filedata)

def write_json_file(f, json_data):
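    """Write json_data to file f, creating parent directories as needed."""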
    os.makedirs(os.path.dirname(f), exist_ok=True)
    with open(f, 'w') as filedata:
        filedata.write(json.dumps(json_data, sort_keys=True, indent=4))

class ManualTestRunner(object):
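    """Interactively run manual test cases and collect their results."""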

    def _get_test_module(self, case_file):
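        """Derive the test module name from the case file name (basename without extension)."""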
        return os.path.basename(case_file).split('.')[0]

    def _get_input(self, config):
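        """Prompt until a value made of lowercase alphanumerics, hyphens and dots is entered."""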
        while True:
            output = input('{} = '.format(config))
            if re.match('^[a-z0-9.-]+$', output):
                break
            print('Only lowercase alphanumerics, hyphens and dots are allowed. Please try again.')
        return output

    def _get_available_config_options(self, config_options, test_module, target_config):
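        """Return the predefined options for target_config in test_module, or None."""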
        avail_config_options = None
        if test_module in config_options:
            avail_config_options = config_options[test_module].get(target_config)
        return avail_config_options

    def _choose_config_option(self, options):
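        """Prompt for an option index until it matches a key in options, then return its value."""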
        while True:
            output = input('Option index number = ')
            if output in options:
                break
            print('Only integer indexes from the available configuration options above are allowed. Please try again.')
        return options[output]

    def _get_config(self, config_options, test_module):
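        """Gather the configuration values for this run: layers, start time, test
        type, test module, plus any extra configurations required by the 'manual'
        store map (chosen from config_options or entered interactively)."""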
        from oeqa.utils.metadata import get_layers
        from oeqa.utils.commands import get_bb_var
        from resulttool.resultutils import store_map

        layers = get_layers(get_bb_var('BBLAYERS'))
        configurations = {}
        configurations['LAYERS'] = layers
        configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        configurations['TEST_TYPE'] = 'manual'
        configurations['TEST_MODULE'] = test_module

        extra_config = set(store_map['manual']) - set(configurations)
        for config in sorted(extra_config):
            avail_config_options = self._get_available_config_options(config_options, test_module, config)
            if avail_config_options:
                print('---------------------------------------------')
                print('These are the available configuration #%s options:' % config)
                print('---------------------------------------------')
                for option, value in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
                    print('%s: %s' % (option, value))
                print('Please select a configuration option by entering its integer index number.')
                value_conf = self._choose_config_option(avail_config_options)
                print('---------------------------------------------\n')
            else:
                print('---------------------------------------------')
                print('This is configuration #%s. Please provide a configuration value (use "None" if not applicable).' % config)
                print('---------------------------------------------')
                value_conf = self._get_input('Configuration Value')
                print('---------------------------------------------\n')
            configurations[config] = value_conf
        return configurations

    def _execute_test_steps(self, case):
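        """Print each step of a test case, then prompt for and record its result."""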
        test_result = {}
        print('------------------------------------------------------------------------')
        print('Executing test case: %s' % case['test']['@alias'])
        print('------------------------------------------------------------------------')
        print('You have a total of %s test steps to be executed.' % len(case['test']['execution']))
        print('------------------------------------------------------------------------\n')
        for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
            print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
            expected_output = case['test']['execution'][step]['expected_results']
            if expected_output:
                print('Expected output: %s' % expected_output)
        while True:
            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
            result_types = {'p': 'PASSED',
                            'f': 'FAILED',
                            'b': 'BLOCKED',
                            's': 'SKIPPED'}
            if done in result_types:
                res = result_types[done]
                if res == 'FAILED':
                    log_input = input('\nPlease enter the error and the description of the log (e.g. log:211 Error Bitbake):\n')
                    test_result.update({case['test']['@alias']: {'status': res, 'log': log_input}})
                else:
                    test_result.update({case['test']['@alias']: {'status': res}})
                break
            print('Invalid input!')
        return test_result

    def _get_write_dir(self):
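        """Return the directory where manual test results are written."""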
        return os.path.join(os.environ['BUILDDIR'], 'tmp/log/manual/')

    def run_test(self, case_file, config_options_file, testcase_config_file):
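        """Execute the test cases in case_file (optionally filtered down to the
        aliases listed in testcase_config_file) and return the configurations,
        result id, write directory and test results needed to store the run."""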
        test_module = self._get_test_module(case_file)
        cases = load_json_file(case_file)
        config_options = {}
        if config_options_file:
            config_options = load_json_file(config_options_file)
        configurations = self._get_config(config_options, test_module)
        result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
        test_results = {}
        if testcase_config_file:
            test_case_config = load_json_file(testcase_config_file)
            test_case_to_execute = test_case_config['testcases']
            for case in copy.deepcopy(cases):
                if case['test']['@alias'] not in test_case_to_execute:
                    cases.remove(case)

        print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
        for c in cases:
            test_result = self._execute_test_steps(c)
            test_results.update(test_result)
        return configurations, result_id, self._get_write_dir(), test_results

    def _get_true_false_input(self, input_message):
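        """Prompt with input_message until a yes/no answer is given; return True for yes."""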
        yes_list = ['Y', 'YES']
        no_list = ['N', 'NO']
        while True:
            more_config_option = input(input_message).upper()
            if more_config_option in yes_list or more_config_option in no_list:
                break
            print('Invalid input!')
        if more_config_option in no_list:
            return False
        return True

    def make_config_option_file(self, logger, case_file, config_options_file):
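        """Interactively build a configuration options file for the test module
        in case_file, extending config_options_file if one is given."""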
        config_options = {}
        if config_options_file:
            config_options = load_json_file(config_options_file)
        new_test_module = self._get_test_module(case_file)
        print('Creating configuration options file for test module: %s' % new_test_module)
        new_config_options = {}

        while True:
            config_name = input('\nPlease provide the test configuration to create:\n').upper()
            new_config_options[config_name] = {}
            while True:
                config_value = self._get_input('Configuration possible option value')
                # Use string keys to match the keys read back from the JSON file.
                config_option_index = str(len(new_config_options[config_name]) + 1)
                new_config_options[config_name][config_option_index] = config_value
                more_config_option = self._get_true_false_input('\nIs there another configuration option value to input: (Y)es/(N)o\n')
                if not more_config_option:
                    break
            more_config = self._get_true_false_input('\nIs there another configuration to create: (Y)es/(N)o\n')
            if not more_config:
                break

        if new_config_options:
            config_options[new_test_module] = new_config_options
        if not config_options_file:
            config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
        write_json_file(config_options_file, config_options)
        logger.info('Configuration options file created at %s' % config_options_file)

def manualexecution(args, logger):
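    """Entry point for the 'resulttool manualexecution' subcommand."""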
    testrunner = ManualTestRunner()
    if args.make_config_options_file:
        testrunner.make_config_option_file(logger, args.file, args.config_options_file)
        return 0
    configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
    resultjsonhelper = OETestResultJSONHelper()
    resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('manualexecution', help='helper script for populating results during manual test execution.',
                                         description='helper script for populating results during manual test execution. You can find manual test case JSON files in meta/lib/oeqa/manual/',
                                         group='manualexecution')
    parser_build.set_defaults(func=manualexecution)
    parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please use "" to encapsulate the file path.')
    parser_build.add_argument('-c', '--config-options-file', default='',
                              help='the config options file to import and use for available configuration option selection, or to extend when making a config options file')
    parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
                              help='make the configuration options file based on provided inputs')
    parser_build.add_argument('-t', '--testcase-config-file', default='',
                              help='the testcase configuration file that enables the user to run a selected set of test cases')