summaryrefslogtreecommitdiffstats
path: root/meta
diff options
context:
space:
mode:
authorRichard Purdie <richard.purdie@linuxfoundation.org>2019-02-16 18:13:00 +0000
committerRichard Purdie <richard.purdie@linuxfoundation.org>2019-03-26 15:38:22 +0000
commit10b6fbfb8a3a9c0ba2f323658bc3387051a77ca9 (patch)
tree7cebb6b4664189ad76d067ac12c3024eddfaa00c /meta
parent12385bc69b1e4f7e61638e201783f6a56136ba29 (diff)
downloadpoky-10b6fbfb8a3a9c0ba2f323658bc3387051a77ca9.tar.gz
poky-10b6fbfb8a3a9c0ba2f323658bc3387051a77ca9.tar.bz2
poky-10b6fbfb8a3a9c0ba2f323658bc3387051a77ca9.zip
resulttool: Improvements to allow integration to the autobuilder
This is a combined patch of the various tweaks and improvements I made to resulttool: * Avoid subprocess.run() as it's a python 3.6 feature and we have autobuilder workers with 3.5. * Avoid python keywords as variable names * Simplify dict accesses using .get() * Rename resultsutils -> resultutils to match the resultstool -> resulttool rename * Formalised the handling of "file_name" to "TESTSERIES" which the code will now add into the json configuration data if its not present, based on the directory name. * When we don't have failed test cases, print something saying so instead of an empty table * Tweak the table headers in the report to be more readable (reference "Test Series" instead of file_id and ID instead of results_id) * Improve/simplify the max string length handling * Merge the counts and percentage data into one table in the report since printing two reports of the same data confuses the user * Removed the confusing header in the regression report * Show matches, then regressions, then unmatched runs in the regression report, also remove chatty unneeded output * Try harder to "pair" up matching configurations to reduce noise in the regressions report * Abstracted the "mapping" table concept used for pairing in the regression code to general code in resultutils * Created multiple mappings for results analysis, results storage and 'flattening' results data in a merge * Simplify the merge command to take a source and a destination, letting the destination be a directory or a file, removing the need for an output directory parameter * Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression mappings * Have the store command place the testresults files in a layout from the mapping, making commits into the git repo for results storage more useful for simple comparison purposes * Set the oe-git-archive tag format appropriately for oeqa results storage (and simplify the commit messages closer to their defaults) * Fix oe-git-archive to use the 
commit/branch data from the results file * Cleaned up the command option help to match other changes * Follow the model of git branch/tag processing used by oe-build-perf-report and use that to read the data using git show to avoid branch change * Add ptest summary to the report command * Update the tests to match the above changes (From OE-Core rev: b4513e75f746a0989b09ee53cb85e489d41e5783) Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta')
-rw-r--r--meta/lib/oeqa/selftest/cases/resulttooltests.py106
1 files changed, 48 insertions, 58 deletions
diff --git a/meta/lib/oeqa/selftest/cases/resulttooltests.py b/meta/lib/oeqa/selftest/cases/resulttooltests.py
index 7bf1ec60c1..0a089c0b7f 100644
--- a/meta/lib/oeqa/selftest/cases/resulttooltests.py
+++ b/meta/lib/oeqa/selftest/cases/resulttooltests.py
@@ -4,13 +4,46 @@ basepath = os.path.abspath(os.path.dirname(__file__) + '/../../../../../')
lib_path = basepath + '/scripts/lib'
sys.path = sys.path + [lib_path]
from resulttool.report import ResultsTextReport
-from resulttool.regression import ResultsRegressionSelector, ResultsRegression
-from resulttool.merge import ResultsMerge
-from resulttool.store import ResultsGitStore
-from resulttool.resultsutils import checkout_git_dir
+from resulttool import regression as regression
+from resulttool import resultutils as resultutils
from oeqa.selftest.case import OESelftestTestCase
class ResultToolTests(OESelftestTestCase):
+ base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86"},
+ 'result': {}},
+ 'base_result2': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86-64"},
+ 'result': {}}}
+ target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86"},
+ 'result': {}},
+ 'target_result2': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86"},
+ 'result': {}},
+ 'target_result3': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86-64"},
+ 'result': {}}}
def test_report_can_aggregate_test_result(self):
result_data = {'result': {'test1': {'status': 'PASSED'},
@@ -25,23 +58,12 @@ class ResultToolTests(OESelftestTestCase):
self.assertTrue(result_report['skipped'] == 1, msg="Skipped count not correct:%s" % result_report['skipped'])
def test_regression_can_get_regression_base_target_pair(self):
- base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7"}},
- 'base_result2': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7",
- "MACHINE": "qemux86-64"}}}
- target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7"}},
- 'target_result2': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7",
- "MACHINE": "qemux86"}},
- 'target_result3': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7",
- "MACHINE": "qemux86-64"}}}
- regression = ResultsRegressionSelector()
- pair = regression.get_regression_base_target_pair(self.logger, base_results_data, target_results_data)
- self.assertTrue('target_result1' in pair['base_result1'], msg="Pair not correct:%s" % pair['base_result1'])
- self.assertTrue('target_result3' in pair['base_result2'], msg="Pair not correct:%s" % pair['base_result2'])
+
+ results = {}
+ resultutils.append_resultsdata(results, ResultToolTests.base_results_data)
+ resultutils.append_resultsdata(results, ResultToolTests.target_results_data)
+ self.assertTrue('target_result1' in results['runtime/mydistro/qemux86/image'], msg="Pair not correct:%s" % results)
+ self.assertTrue('target_result3' in results['runtime/mydistro/qemux86-64/image'], msg="Pair not correct:%s" % results)
def test_regrresion_can_get_regression_result(self):
base_result_data = {'result': {'test1': {'status': 'PASSED'},
@@ -54,8 +76,7 @@ class ResultToolTests(OESelftestTestCase):
'test3': {'status': 'PASSED'},
'test4': {'status': 'ERROR'},
'test5': {'status': 'SKIPPED'}}}
- regression = ResultsRegression()
- result = regression.get_regression_result(self.logger, base_result_data, target_result_data)
+ result, text = regression.compare_result(self.logger, "BaseTestRunName", "TargetTestRunName", base_result_data, target_result_data)
self.assertTrue(result['test2']['base'] == 'PASSED',
msg="regression not correct:%s" % result['test2']['base'])
self.assertTrue(result['test2']['target'] == 'FAILED',
@@ -66,39 +87,8 @@ class ResultToolTests(OESelftestTestCase):
msg="regression not correct:%s" % result['test3']['target'])
def test_merge_can_merged_results(self):
- base_results_data = {'base_result1': {},
- 'base_result2': {}}
- target_results_data = {'target_result1': {},
- 'target_result2': {},
- 'target_result3': {}}
-
- merge = ResultsMerge()
- results = merge.merge_results(base_results_data, target_results_data)
- self.assertTrue(len(results.keys()) == 5, msg="merge not correct:%s" % len(results.keys()))
-
- def test_store_can_store_to_new_git_repository(self):
- basepath = os.path.abspath(os.path.dirname(__file__) + '/../../')
- source_dir = basepath + '/files/testresults'
- git_branch = 'qa-cycle-2.7'
- store = ResultsGitStore()
- output_dir = store.store_to_new(self.logger, source_dir, git_branch)
- self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to new git repository failed:%s" %
- output_dir)
- store._remove_temporary_workspace_dir(output_dir)
+ results = {}
+ resultutils.append_resultsdata(results, ResultToolTests.base_results_data, configmap=resultutils.flatten_map)
+ resultutils.append_resultsdata(results, ResultToolTests.target_results_data, configmap=resultutils.flatten_map)
+ self.assertEqual(len(results[''].keys()), 5, msg="Flattened results not correct %s" % str(results))
- def test_store_can_store_to_existing(self):
- basepath = os.path.abspath(os.path.dirname(__file__) + '/../../')
- source_dir = basepath + '/files/testresults'
- git_branch = 'qa-cycle-2.6'
- store = ResultsGitStore()
- output_dir = store.store_to_new(self.logger, source_dir, git_branch)
- self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to new git repository failed:%s" %
- output_dir)
- git_branch = 'qa-cycle-2.7'
- output_dir = store.store_to_existing_with_new_branch(self.logger, source_dir, output_dir, git_branch)
- self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to existing git repository failed:%s" %
- output_dir)
- output_dir = store.store_to_existing(self.logger, source_dir, output_dir, git_branch)
- self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to existing git repository failed:%s" %
- output_dir)
- store._remove_temporary_workspace_dir(output_dir)