[Feature] Add MT2731_MP2_MR2_SVN388 baseline version
Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/meta/poky/scripts/lib/resulttool/__init__.py b/meta/poky/scripts/lib/resulttool/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/meta/poky/scripts/lib/resulttool/__init__.py
diff --git a/meta/poky/scripts/lib/resulttool/log.py b/meta/poky/scripts/lib/resulttool/log.py
new file mode 100644
index 0000000..4981635
--- /dev/null
+++ b/meta/poky/scripts/lib/resulttool/log.py
@@ -0,0 +1,78 @@
+# resulttool - Show logs
+#
+# Copyright (c) 2019 Garmin International
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import os
+import resulttool.resultutils as resultutils
+
+def show_ptest(result, ptest, logger):
+ if 'ptestresult.sections' in result:
+ if ptest in result['ptestresult.sections'] and 'log' in result['ptestresult.sections'][ptest]:
+ print(result['ptestresult.sections'][ptest]['log'])
+ return 0
+
+ print("ptest '%s' not found" % ptest)
+ return 1
+
+def log(args, logger):
+ results = resultutils.load_resultsdata(args.source)
+
+ ptest_count = sum(1 for _, _, _, r in resultutils.test_run_results(results) if 'ptestresult.sections' in r)
+ if ptest_count > 1 and not args.prepend_run:
+ print("%i ptest sections found. '--prepend-run' is required" % ptest_count)
+ return 1
+
+ for _, run_name, _, r in resultutils.test_run_results(results):
+ if args.dump_ptest:
+ if 'ptestresult.sections' in r:
+ for name, ptest in r['ptestresult.sections'].items():
+ if 'log' in ptest:
+ dest_dir = args.dump_ptest
+ if args.prepend_run:
+ dest_dir = os.path.join(dest_dir, run_name)
+
+ os.makedirs(dest_dir, exist_ok=True)
+
+ dest = os.path.join(dest_dir, '%s.log' % name)
+ print(dest)
+ with open(dest, 'w') as f:
+ f.write(ptest['log'])
+
+ if args.raw:
+ if 'ptestresult.rawlogs' in r:
+ print(r['ptestresult.rawlogs']['log'])
+ else:
+ print('Raw logs not found')
+ return 1
+
+ for ptest in args.ptest:
+            if show_ptest(r, ptest, logger):
+ return 1
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser = subparsers.add_parser('log', help='show logs',
+ description='show the logs from test results',
+ group='analysis')
+ parser.set_defaults(func=log)
+ parser.add_argument('source',
+ help='the results file/directory/URL to import')
+ parser.add_argument('--ptest', action='append', default=[],
+ help='show logs for a ptest')
+ parser.add_argument('--dump-ptest', metavar='DIR',
+ help='Dump all ptest log files to the specified directory.')
+ parser.add_argument('--prepend-run', action='store_true',
+ help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
+ Required if more than one test run is present in the result file''')
+ parser.add_argument('--raw', action='store_true',
+ help='show raw logs')
+
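+# A hedged usage sketch, not taken from the upstream docs: assuming the
+# standard "resulttool" wrapper script dispatches this subcommand, typical
+# invocations might look like the following (the ptest name and paths are
+# illustrative only):
+#
+#   resulttool log testresults.json --raw
+#   resulttool log testresults.json --ptest glibc
+#   resulttool log results-dir/ --dump-ptest ./ptest-logs --prepend-run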
diff --git a/meta/poky/scripts/lib/resulttool/manualexecution.py b/meta/poky/scripts/lib/resulttool/manualexecution.py
new file mode 100755
index 0000000..dc368f3
--- /dev/null
+++ b/meta/poky/scripts/lib/resulttool/manualexecution.py
@@ -0,0 +1,212 @@
+# test case management tool - manual execution from testopia test cases
+#
+# Copyright (c) 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import argparse
+import json
+import os
+import sys
+import datetime
+import re
+import copy
+from oeqa.core.runner import OETestResultJSONHelper
+
+
+def load_json_file(f):
+ with open(f, "r") as filedata:
+ return json.load(filedata)
+
+def write_json_file(f, json_data):
+ os.makedirs(os.path.dirname(f), exist_ok=True)
+ with open(f, 'w') as filedata:
+ filedata.write(json.dumps(json_data, sort_keys=True, indent=4))
+
+class ManualTestRunner(object):
+
+ def _get_test_module(self, case_file):
+ return os.path.basename(case_file).split('.')[0]
+
+ def _get_input(self, config):
+ while True:
+ output = input('{} = '.format(config))
+ if re.match('^[a-z0-9-.]+$', output):
+ break
+            print('Only lowercase alphanumeric characters, hyphens and dots are allowed. Please try again.')
+ return output
+
+ def _get_available_config_options(self, config_options, test_module, target_config):
+ avail_config_options = None
+ if test_module in config_options:
+ avail_config_options = config_options[test_module].get(target_config)
+ return avail_config_options
+
+ def _choose_config_option(self, options):
+ while True:
+ output = input('{} = '.format('Option index number'))
+ if output in options:
+ break
+            print('Only integer indexes from the available configuration options above are allowed. Please try again.')
+ return options[output]
+
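+    # A hedged sketch of the config_options structure assumed by
+    # _get_available_config_options() above and _get_config() below (loaded
+    # from the JSON file passed via --config-options-file). The module name
+    # and values are illustrative only:
+    #
+    #   {
+    #       "build-appliance": {
+    #           "MACHINE": {"1": "qemux86", "2": "qemux86-64"}
+    #       }
+    #   }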
+ def _get_config(self, config_options, test_module):
+ from oeqa.utils.metadata import get_layers
+ from oeqa.utils.commands import get_bb_var
+ from resulttool.resultutils import store_map
+
+ layers = get_layers(get_bb_var('BBLAYERS'))
+ configurations = {}
+ configurations['LAYERS'] = layers
+ configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ configurations['TEST_TYPE'] = 'manual'
+ configurations['TEST_MODULE'] = test_module
+
+ extra_config = set(store_map['manual']) - set(configurations)
+ for config in sorted(extra_config):
+ avail_config_options = self._get_available_config_options(config_options, test_module, config)
+ if avail_config_options:
+ print('---------------------------------------------')
+                print('These are the available options for configuration #%s:' % config)
+ print('---------------------------------------------')
+ for option, _ in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
+ print('%s: %s' % (option, avail_config_options[option]))
+                print('Please select a configuration option by entering its integer index number.')
+ value_conf = self._choose_config_option(avail_config_options)
+ print('---------------------------------------------\n')
+ else:
+ print('---------------------------------------------')
+                print('This is configuration #%s. Please provide a configuration value (use "None" if not applicable).' % config)
+ print('---------------------------------------------')
+ value_conf = self._get_input('Configuration Value')
+ print('---------------------------------------------\n')
+ configurations[config] = value_conf
+ return configurations
+
+ def _execute_test_steps(self, case):
+ test_result = {}
+ print('------------------------------------------------------------------------')
+ print('Executing test case: %s' % case['test']['@alias'])
+ print('------------------------------------------------------------------------')
+        print('You have a total of %s test steps to be executed.' % len(case['test']['execution']))
+ print('------------------------------------------------------------------------\n')
+ for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
+ print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
+ expected_output = case['test']['execution'][step]['expected_results']
+ if expected_output:
+ print('Expected output: %s' % expected_output)
+ while True:
+ done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
+ result_types = {'p':'PASSED',
+ 'f':'FAILED',
+ 'b':'BLOCKED',
+ 's':'SKIPPED'}
+                if done in result_types:
+                    res = result_types[done]
+                    if res == 'FAILED':
+                        log_input = input('\nPlease enter the error and a description for the log: (Ex: log:211 Error Bitbake)\n')
+                        test_result.update({case['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
+                    else:
+                        test_result.update({case['test']['@alias']: {'status': '%s' % res}})
+                    break
+ print('Invalid input!')
+ return test_result
+
+ def _get_write_dir(self):
+ return os.environ['BUILDDIR'] + '/tmp/log/manual/'
+
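+    # A hedged sketch of the testcase configuration file consumed by run_test()
+    # below (passed via --testcase-config-file); only the "testcases" key is
+    # read and the alias shown is illustrative:
+    #
+    #   {
+    #       "testcases": [
+    #           "manual.bsp-hw.some_test_case_alias"
+    #       ]
+    #   }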
+ def run_test(self, case_file, config_options_file, testcase_config_file):
+ test_module = self._get_test_module(case_file)
+ cases = load_json_file(case_file)
+ config_options = {}
+ if config_options_file:
+ config_options = load_json_file(config_options_file)
+ configurations = self._get_config(config_options, test_module)
+ result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
+ test_results = {}
+ if testcase_config_file:
+ test_case_config = load_json_file(testcase_config_file)
+ test_case_to_execute = test_case_config['testcases']
+            for case in copy.deepcopy(cases):
+ if case['test']['@alias'] not in test_case_to_execute:
+ cases.remove(case)
+
+ print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
+ for c in cases:
+ test_result = self._execute_test_steps(c)
+ test_results.update(test_result)
+ return configurations, result_id, self._get_write_dir(), test_results
+
+ def _get_true_false_input(self, input_message):
+ yes_list = ['Y', 'YES']
+ no_list = ['N', 'NO']
+ while True:
+ more_config_option = input(input_message).upper()
+ if more_config_option in yes_list or more_config_option in no_list:
+ break
+ print('Invalid input!')
+ if more_config_option in no_list:
+ return False
+ return True
+
+ def make_config_option_file(self, logger, case_file, config_options_file):
+ config_options = {}
+ if config_options_file:
+ config_options = load_json_file(config_options_file)
+ new_test_module = self._get_test_module(case_file)
+ print('Creating configuration options file for test module: %s' % new_test_module)
+ new_config_options = {}
+
+ while True:
+ config_name = input('\nPlease provide test configuration to create:\n').upper()
+ new_config_options[config_name] = {}
+ while True:
+ config_value = self._get_input('Configuration possible option value')
+ config_option_index = len(new_config_options[config_name]) + 1
+ new_config_options[config_name][config_option_index] = config_value
+                more_config_option = self._get_true_false_input('\nIs there another configuration option value to input: (Y)es/(N)o\n')
+ if not more_config_option:
+ break
+            more_config = self._get_true_false_input('\nIs there another configuration to create: (Y)es/(N)o\n')
+ if not more_config:
+ break
+
+ if new_config_options:
+ config_options[new_test_module] = new_config_options
+ if not config_options_file:
+ config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
+ write_json_file(config_options_file, config_options)
+ logger.info('Configuration option file created at %s' % config_options_file)
+
+def manualexecution(args, logger):
+ testrunner = ManualTestRunner()
+ if args.make_config_options_file:
+ testrunner.make_config_option_file(logger, args.file, args.config_options_file)
+ return 0
+ configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
+ resultjsonhelper = OETestResultJSONHelper()
+ resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
+ return 0
+
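+# A hedged usage sketch; the file paths are illustrative and assume the
+# standard "resulttool" wrapper script:
+#
+#   resulttool manualexecution "meta/lib/oeqa/manual/bsp-hw.json"
+#   resulttool manualexecution "meta/lib/oeqa/manual/bsp-hw.json" -c config_options.json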
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+    parser_build = subparsers.add_parser('manualexecution', help='helper script for populating results during manual test execution.',
+                                         description='helper script for populating results during manual test execution. Manual test case JSON files can be found in meta/lib/oeqa/manual/.',
+                                         group='manualexecution')
+ parser_build.set_defaults(func=manualexecution)
+    parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please use "" to enclose the file path.')
+ parser_build.add_argument('-c', '--config-options-file', default='',
+                              help='the config options file to import, used to present the available configuration option selections or as the base when creating a config options file')
+ parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
+ help='make the configuration options file based on provided inputs')
+ parser_build.add_argument('-t', '--testcase-config-file', default='',
+                              help='the testcase configuration file used to run only a selected set of test cases')
\ No newline at end of file
diff --git a/meta/poky/scripts/lib/resulttool/merge.py b/meta/poky/scripts/lib/resulttool/merge.py
new file mode 100644
index 0000000..7159463
--- /dev/null
+++ b/meta/poky/scripts/lib/resulttool/merge.py
@@ -0,0 +1,42 @@
+# resulttool - merge multiple testresults.json files into a file or directory
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import os
+import json
+import resulttool.resultutils as resultutils
+
+def merge(args, logger):
+ if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
+ resultutils.save_resultsdata(results, args.target_results)
+ else:
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
+ if os.path.exists(args.target_results):
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
+ resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('merge', help='merge test result files/directories/URLs',
+ description='merge the results from multiple files/directories/URLs into the target file or directory',
+ group='setup')
+ parser_build.set_defaults(func=merge)
+ parser_build.add_argument('base_results',
+ help='the results file/directory/URL to import')
+ parser_build.add_argument('target_results',
+ help='the target file or directory to merge the base_results with')
+
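+# A hedged usage sketch; file and directory names are illustrative and assume
+# the standard "resulttool" wrapper script:
+#
+#   # merge a results file into an existing testresults.json
+#   resulttool merge extra/testresults.json testresults.json
+#   # merge a results file into a results directory laid out per store_map
+#   resulttool merge extra/testresults.json results-dir/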
diff --git a/meta/poky/scripts/lib/resulttool/regression.py b/meta/poky/scripts/lib/resulttool/regression.py
new file mode 100644
index 0000000..fa90ab1
--- /dev/null
+++ b/meta/poky/scripts/lib/resulttool/regression.py
@@ -0,0 +1,192 @@
+# resulttool - regression analysis
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import resulttool.resultutils as resultutils
+import json
+
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+def compare_result(logger, base_name, target_name, base_result, target_result):
+ base_result = base_result.get('result')
+ target_result = target_result.get('result')
+ result = {}
+ if base_result and target_result:
+ for k in base_result:
+ base_testcase = base_result[k]
+ base_status = base_testcase.get('status')
+ if base_status:
+ target_testcase = target_result.get(k, {})
+ target_status = target_testcase.get('status')
+ if base_status != target_status:
+ result[k] = {'base': base_status, 'target': target_status}
+ else:
+                logger.error('Failed to retrieve base test case status: %s' % k)
+ if result:
+ resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
+ for k in sorted(result):
+ resultstring += ' %s: %s -> %s\n' % (k, result[k]['base'], result[k]['target'])
+ else:
+ resultstring = "Match: %s\n %s" % (base_name, target_name)
+ return result, resultstring
+
+def get_results(logger, source):
+ return resultutils.load_resultsdata(source, configmap=resultutils.regression_map)
+
+def regression(args, logger):
+ base_results = get_results(logger, args.base_result)
+ target_results = get_results(logger, args.target_result)
+
+ regression_common(args, logger, base_results, target_results)
+
+def regression_common(args, logger, base_results, target_results):
+ if args.base_result_id:
+ base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
+ if args.target_result_id:
+ target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+
+ matches = []
+ regressions = []
+ notfound = []
+
+ for a in base_results:
+ if a in target_results:
+ base = list(base_results[a].keys())
+ target = list(target_results[a].keys())
+ # We may have multiple base/targets which are for different configurations. Start by
+ # removing any pairs which match
+ for c in base.copy():
+ for b in target.copy():
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if not res:
+ matches.append(resstr)
+ base.remove(c)
+ target.remove(b)
+ break
+            # Only regressions should remain now; we may not be able to match multiple pairs directly
+ for c in base:
+ for b in target:
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if res:
+ regressions.append(resstr)
+ else:
+ notfound.append("%s not found in target" % a)
+ print("\n".join(sorted(matches)))
+ print("\n".join(sorted(regressions)))
+ print("\n".join(sorted(notfound)))
+
+ return 0
+
+def regression_git(args, logger):
+ base_results = {}
+ target_results = {}
+
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(args.repo)
+
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
+
+ if args.branch2:
+ revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
+ if not len(revs2):
+ logger.error("No revisions found to compare against")
+ return 1
+ if not len(revs):
+ logger.error("No revision to report on found")
+ return 1
+ else:
+ if len(revs) < 2:
+ logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
+ return 1
+
+ # Pick revisions
+ if args.commit:
+ if args.commit_number:
+ logger.warning("Ignoring --commit-number as --commit was specified")
+ index1 = gitarchive.rev_find(revs, 'commit', args.commit)
+ elif args.commit_number:
+ index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
+ else:
+ index1 = len(revs) - 1
+
+ if args.branch2:
+ revs2.append(revs[index1])
+ index1 = len(revs2) - 1
+ revs = revs2
+
+ if args.commit2:
+ if args.commit_number2:
+ logger.warning("Ignoring --commit-number2 as --commit2 was specified")
+ index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
+ elif args.commit_number2:
+ index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
+ else:
+ if index1 > 0:
+ index2 = index1 - 1
+            # Find the closest matching commit number for comparison
+            # In future we could check that the commit is a common ancestor and
+            # continue back if not, but this is good enough for now
+ while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
+ index2 = index2 - 1
+ else:
+ logger.error("Unable to determine the other commit, use "
+ "--commit2 or --commit-number2 to specify it")
+ return 1
+
+ logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))
+
+ base_results = resultutils.git_get_result(repo, revs[index1][2])
+ target_results = resultutils.git_get_result(repo, revs[index2][2])
+
+ regression_common(args, logger, base_results, target_results)
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+
+ parser_build = subparsers.add_parser('regression', help='regression file/directory analysis',
+ description='regression analysis comparing the base set of results to the target results',
+ group='analysis')
+ parser_build.set_defaults(func=regression)
+ parser_build.add_argument('base_result',
+ help='base result file/directory/URL for the comparison')
+ parser_build.add_argument('target_result',
+ help='target result file/directory/URL to compare with')
+ parser_build.add_argument('-b', '--base-result-id', default='',
+ help='(optional) filter the base results to this result ID')
+ parser_build.add_argument('-t', '--target-result-id', default='',
+ help='(optional) filter the target results to this result ID')
+
+ parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
+ description='regression analysis comparing base result set to target '
+ 'result set',
+ group='analysis')
+ parser_build.set_defaults(func=regression_git)
+ parser_build.add_argument('repo',
+ help='the git repository containing the data')
+ parser_build.add_argument('-b', '--base-result-id', default='',
+                              help='(optional) filter the base results to this result ID; by default, regressions are matched by configuration')
+ parser_build.add_argument('-t', '--target-result-id', default='',
+                              help='(optional) filter the target results to this result ID; by default, regressions are matched by configuration')
+
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+    parser_build.add_argument('--branch2', help="Branch to find comparison revisions in")
+ parser_build.add_argument('--commit', help="Revision to search for")
+ parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
+ parser_build.add_argument('--commit2', help="Revision to compare with")
+ parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+
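+# A hedged usage sketch; paths, branch and revisions are illustrative and
+# assume the standard "resulttool" wrapper script:
+#
+#   resulttool regression base-results/ target-results/
+#   resulttool regression-git /path/to/results-repo --branch master --commit <sha>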
diff --git a/meta/poky/scripts/lib/resulttool/report.py b/meta/poky/scripts/lib/resulttool/report.py
new file mode 100644
index 0000000..8ae4272
--- /dev/null
+++ b/meta/poky/scripts/lib/resulttool/report.py
@@ -0,0 +1,150 @@
+# test result tool - report text based test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import os
+import glob
+import json
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+
+class ResultsTextReport(object):
+ def __init__(self):
+ self.ptests = {}
+ self.result_types = {'passed': ['PASSED', 'passed'],
+ 'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
+ 'skipped': ['SKIPPED', 'skipped']}
+
+
+ def handle_ptest_result(self, k, status, result):
+ if k == 'ptestresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ptestresult.sections']:
+ if suite not in self.ptests:
+ self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ptestresult.sections'][suite]:
+ self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+ if 'timeout' in result['ptestresult.sections'][suite]:
+ self.ptests[suite]['duration'] += " T"
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+        # Handle suite names that contain a dot, e.g. 'glib-2.0'
+ if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ptestresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ptests:
+ self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ptests[suite][tk] += 1
+
+ def get_aggregated_test_result(self, logger, testresult):
+ test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
+ result = testresult.get('result', [])
+ for k in result:
+ test_status = result[k].get('status', [])
+ for tk in self.result_types:
+ if test_status in self.result_types[tk]:
+ test_count_report[tk] += 1
+ if test_status in self.result_types['failed']:
+ test_count_report['failed_testcases'].append(k)
+ if k.startswith("ptestresult."):
+ self.handle_ptest_result(k, test_status, result)
+ return test_count_report
+
+ def print_test_report(self, template_file_name, test_count_reports):
+ from jinja2 import Environment, FileSystemLoader
+ script_path = os.path.dirname(os.path.realpath(__file__))
+ file_loader = FileSystemLoader(script_path + '/template')
+ env = Environment(loader=file_loader, trim_blocks=True)
+ template = env.get_template(template_file_name)
+ havefailed = False
+ haveptest = bool(self.ptests)
+ reportvalues = []
+ cols = ['passed', 'failed', 'skipped']
+ maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
+ for line in test_count_reports:
+ total_tested = line['passed'] + line['failed'] + line['skipped']
+ vals = {}
+ vals['result_id'] = line['result_id']
+ vals['testseries'] = line['testseries']
+ vals['sort'] = line['testseries'] + "_" + line['result_id']
+ vals['failed_testcases'] = line['failed_testcases']
+ for k in cols:
+ vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ for k in maxlen:
+ if k in vals and len(vals[k]) > maxlen[k]:
+ maxlen[k] = len(vals[k])
+ reportvalues.append(vals)
+ if line['failed_testcases']:
+ havefailed = True
+ for ptest in self.ptests:
+ if len(ptest) > maxlen['ptest']:
+ maxlen['ptest'] = len(ptest)
+ output = template.render(reportvalues=reportvalues,
+ havefailed=havefailed,
+ haveptest=haveptest,
+ ptests=self.ptests,
+ maxlen=maxlen)
+ print(output)
+
+ def view_test_report(self, logger, source_dir, branch, commit, tag):
+ test_count_reports = []
+ if commit:
+ if tag:
+ logger.warning("Ignoring --tag as --commit was specified")
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(source_dir)
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
+ rev_index = gitarchive.rev_find(revs, 'commit', commit)
+ testresults = resultutils.git_get_result(repo, revs[rev_index][2])
+ elif tag:
+ repo = GitRepo(source_dir)
+ testresults = resultutils.git_get_result(repo, [tag])
+ else:
+ testresults = resultutils.load_resultsdata(source_dir)
+ for testsuite in testresults:
+ for resultid in testresults[testsuite]:
+ result = testresults[testsuite][resultid]
+ test_count_report = self.get_aggregated_test_result(logger, result)
+ test_count_report['testseries'] = result['configuration']['TESTSERIES']
+ test_count_report['result_id'] = resultid
+ test_count_reports.append(test_count_report)
+ self.print_test_report('test_report_full_text.txt', test_count_reports)
+
+def report(args, logger):
+ report = ResultsTextReport()
+ report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag)
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('report', help='summarise test results',
+ description='print a text-based summary of the test results',
+ group='analysis')
+ parser_build.set_defaults(func=report)
+ parser_build.add_argument('source_dir',
+                              help='source file/directory/URL that contains the test result files to summarise')
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+ parser_build.add_argument('--commit', help="Revision to report")
+ parser_build.add_argument('-t', '--tag', default='',
+ help='source_dir is a git repository, report on the tag specified from that repository')
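+
+# A hedged usage sketch; the directory and tag are illustrative and assume the
+# standard "resulttool" wrapper script (the tag follows the
+# branch/commit_count-gcommit/tag_number scheme used by the store subcommand):
+#
+#   resulttool report results-dir/
+#   resulttool report /path/to/results-repo -t master/100-gdeadbeef/0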
diff --git a/meta/poky/scripts/lib/resulttool/resultutils.py b/meta/poky/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000..07dab4c
--- /dev/null
+++ b/meta/poky/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,185 @@
+# resulttool - common library/utility functions
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import os
+import json
+import scriptpath
+import copy
+import urllib.request
+import posixpath
+scriptpath.add_oe_lib_path()
+
+flatten_map = {
+ "oeselftest": [],
+ "runtime": [],
+ "sdk": [],
+ "sdkext": [],
+ "manual": []
+}
+regression_map = {
+ "oeselftest": ['TEST_TYPE', 'MACHINE'],
+ "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
+ "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
+}
+store_map = {
+ "oeselftest": ['TEST_TYPE'],
+ "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
+ "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
+}
+
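+# A hedged illustration of how these maps are used: append_resultsdata() joins
+# the listed configuration values into a result path, so a runtime result with
+# hypothetical values DISTRO=poky, MACHINE=qemux86 and IMAGE_BASENAME=core-image-minimal
+# is filed under "runtime/poky/qemux86/core-image-minimal" with store_map, and
+# under "" (a single flat bucket) with flatten_map.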
+def is_url(p):
+ """
+ Helper for determining if the given path is a URL
+ """
+ return p.startswith('http://') or p.startswith('https://')
+
+#
+# Load the json file and append the results data into the provided results dict
+#
+def append_resultsdata(results, f, configmap=store_map):
+ if type(f) is str:
+ if is_url(f):
+ with urllib.request.urlopen(f) as response:
+ data = json.loads(response.read().decode('utf-8'))
+ url = urllib.parse.urlparse(f)
+ testseries = posixpath.basename(posixpath.dirname(url.path))
+ else:
+ with open(f, "r") as filedata:
+ data = json.load(filedata)
+ testseries = os.path.basename(os.path.dirname(f))
+ else:
+ data = f
+ for res in data:
+ if "configuration" not in data[res] or "result" not in data[res]:
+ raise ValueError("Test results data without configuration or result section?")
+ if "TESTSERIES" not in data[res]["configuration"]:
+ data[res]["configuration"]["TESTSERIES"] = testseries
+ testtype = data[res]["configuration"].get("TEST_TYPE")
+ if testtype not in configmap:
+ raise ValueError("Unknown test type %s" % testtype)
+ configvars = configmap[testtype]
+ testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
+ if testpath not in results:
+ results[testpath] = {}
+ results[testpath][res] = data[res]
+
+#
+# Walk a directory and find/load results data
+# or load directly from a file
+#
+def load_resultsdata(source, configmap=store_map):
+ results = {}
+ if is_url(source) or os.path.isfile(source):
+ append_resultsdata(results, source, configmap)
+ return results
+ for root, dirs, files in os.walk(source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ append_resultsdata(results, f, configmap)
+ return results
+
+def filter_resultsdata(results, resultid):
+ newresults = {}
+ for r in results:
+ for i in results[r]:
+            if i == resultid:
+ newresults[r] = {}
+ newresults[r][i] = results[r][i]
+ return newresults
+
+def strip_ptestresults(results):
+ newresults = copy.deepcopy(results)
+ for res in newresults:
+ if 'result' not in newresults[res]:
+ continue
+ if 'ptestresult.rawlogs' in newresults[res]['result']:
+ del newresults[res]['result']['ptestresult.rawlogs']
+ if 'ptestresult.sections' in newresults[res]['result']:
+ for i in newresults[res]['result']['ptestresult.sections']:
+ if 'log' in newresults[res]['result']['ptestresult.sections'][i]:
+ del newresults[res]['result']['ptestresult.sections'][i]['log']
+ return newresults
+
+def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
+ for res in results:
+ if res:
+ dst = destdir + "/" + res + "/" + fn
+ else:
+ dst = destdir + "/" + fn
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ resultsout = results[res]
+ if not ptestjson:
+ resultsout = strip_ptestresults(results[res])
+ with open(dst, 'w') as f:
+ f.write(json.dumps(resultsout, sort_keys=True, indent=4))
+ for res2 in results[res]:
+ if ptestlogs and 'result' in results[res][res2]:
+ if 'ptestresult.rawlogs' in results[res][res2]['result']:
+ with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
+ f.write(results[res][res2]['result']['ptestresult.rawlogs']['log'])
+ if 'ptestresult.sections' in results[res][res2]['result']:
+ for i in results[res][res2]['result']['ptestresult.sections']:
+ if 'log' in results[res][res2]['result']['ptestresult.sections'][i]:
+ with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
+ f.write(results[res][res2]['result']['ptestresult.sections'][i]['log'])
+
+def git_get_result(repo, tags):
+ git_objs = []
+ for tag in tags:
+ files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
+ git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
+
+ def parse_json_stream(data):
+ """Parse multiple concatenated JSON objects"""
+ objs = []
+ json_d = ""
+ for line in data.splitlines():
+ if line == '}{':
+ json_d += '}'
+ objs.append(json.loads(json_d))
+ json_d = '{'
+ else:
+ json_d += line
+ objs.append(json.loads(json_d))
+ return objs
+
+ # Optimize by reading all data with one git command
+ results = {}
+ for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
+ append_resultsdata(results, obj)
+
+ return results
+
+def test_run_results(results):
+ """
+    Convenience generator that iterates over all test runs that have a
+    result section.
+
+    Yields a tuple of:
+    (result path, test run name, test run (dict), test run "result" (dict))
+    for each test run that has a "result" section
+ """
+ for path in results:
+ for run_name, test_run in results[path].items():
+            if 'result' not in test_run:
+ continue
+ yield path, run_name, test_run, test_run['result']
+
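+# A hedged usage sketch of the helpers above; the directory name is illustrative:
+#
+#   import resulttool.resultutils as resultutils
+#   results = resultutils.load_resultsdata("results-dir/")
+#   for path, run_name, run, result in resultutils.test_run_results(results):
+#       print(path, run_name, len(result))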
diff --git a/meta/poky/scripts/lib/resulttool/store.py b/meta/poky/scripts/lib/resulttool/store.py
new file mode 100644
index 0000000..acdfbd9
--- /dev/null
+++ b/meta/poky/scripts/lib/resulttool/store.py
@@ -0,0 +1,102 @@
+# resulttool - store test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import tempfile
+import os
+import subprocess
+import json
+import shutil
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+scriptpath.add_oe_lib_path()
+import resulttool.resultutils as resultutils
+import oeqa.utils.gitarchive as gitarchive
+
+
+def store(args, logger):
+ tempdir = tempfile.mkdtemp(prefix='testresults.')
+ try:
+ results = {}
+ logger.info('Reading files from %s' % args.source)
+ if resultutils.is_url(args.source) or os.path.isfile(args.source):
+ resultutils.append_resultsdata(results, args.source)
+ else:
+ for root, dirs, files in os.walk(args.source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ resultutils.append_resultsdata(results, f)
+ elif args.all:
+ dst = f.replace(args.source, tempdir + "/")
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ shutil.copyfile(f, dst)
+
+ revisions = {}
+
+ if not results and not args.all:
+ if args.allow_empty:
+ logger.info("No results found to store")
+ return 0
+ logger.error("No results found to store")
+ return 1
+
+        # Group the results by the branch/commit/commit_count they were generated from
+ for suite in results:
+ for result in results[suite]:
+ config = results[suite][result]['configuration']['LAYERS']['meta']
+ revision = (config['commit'], config['branch'], str(config['commit_count']))
+ if revision not in revisions:
+ revisions[revision] = {}
+ if suite not in revisions[revision]:
+ revisions[revision][suite] = {}
+ revisions[revision][suite][result] = results[suite][result]
+
+ logger.info("Found %d revisions to store" % len(revisions))
+
+ for r in revisions:
+ results = revisions[r]
+ keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
+ subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
+ resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
+
+ logger.info('Storing test result into git repository %s' % args.git_dir)
+
+ gitarchive.gitarchive(tempdir, args.git_dir, False, False,
+ "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
+ False, "{branch}/{commit_count}-g{commit}/{tag_number}",
+ 'Test run #{tag_number} of {branch}:{commit}', '',
+ [], [], False, keywords, logger)
+
+ finally:
+ subprocess.check_call(["rm", "-rf", tempdir])
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('store', help='store test results into a git repository',
+ description='takes a results file or directory of results files and stores '
+ 'them into the destination git repository, splitting out the results '
+ 'files as configured',
+ group='setup')
+ parser_build.set_defaults(func=store)
+ parser_build.add_argument('source',
+                              help='source file/directory/URL that contains the test result files to be stored')
+ parser_build.add_argument('git_dir',
+ help='the location of the git repository to store the results in')
+ parser_build.add_argument('-a', '--all', action='store_true',
+ help='include all files, not just testresults.json files')
+ parser_build.add_argument('-e', '--allow-empty', action='store_true',
+ help='don\'t error if no results to store are found')
+
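+# A hedged usage sketch; paths are illustrative and assume the standard
+# "resulttool" wrapper script:
+#
+#   resulttool store results-dir/ /path/to/results-repo
+#   resulttool store results-dir/ /path/to/results-repo --all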
diff --git a/meta/poky/scripts/lib/resulttool/template/test_report_full_text.txt b/meta/poky/scripts/lib/resulttool/template/test_report_full_text.txt
new file mode 100644
index 0000000..590f35c
--- /dev/null
+++ b/meta/poky/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -0,0 +1,44 @@
+==============================================================================================================
+Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% if haveptest %}
+==============================================================================================================
+PTest Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ptest in ptests |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% else %}
+There was no ptest data
+{% endif %}
+
+==============================================================================================================
+Failed test cases (sorted by testseries, ID)
+==============================================================================================================
+{% if havefailed %}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{% if report.failed_testcases %}
+testseries | result_id : {{ report.testseries }} | {{ report.result_id }}
+{% for testcase in report.failed_testcases %}
+ {{ testcase }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+{% else %}
+There were no test failures
+{% endif %}