Diffstat (limited to 'meta/lib/oeqa/core/runner.py')
 meta/lib/oeqa/core/runner.py | 45 ++++++++++++++++++++++++++++++++++++-----
 1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py
index f656e1a9c5..a86a706bd9 100644
--- a/meta/lib/oeqa/core/runner.py
+++ b/meta/lib/oeqa/core/runner.py
@@ -44,6 +44,7 @@ class OETestResult(_TestResult):
         self.endtime = {}
         self.progressinfo = {}
         self.extraresults = {}
+        self.shownmsg = []

         # Inject into tc so that TestDepends decorator can see results
         tc.results = self
@@ -74,6 +75,7 @@ class OETestResult(_TestResult):
             for (scase, msg) in getattr(self, t):
                 if test.id() == scase.id():
                     self.tc.logger.info(str(msg))
+                    self.shownmsg.append(test.id())
                     break

     def logSummary(self, component, context_msg=''):
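
Together with the shownmsg list added in the first hunk, this implements immediate failure reporting without duplication: stopTest() prints the message as soon as the test finishes and records the test id, so that logDetails() (further down) only prints messages that were never shown. A minimal sketch of the pattern, using made-up names rather than the OEQA API:

seen_ids = []

def report_live(test_id, msg):
    # runs as each test finishes, like stopTest() above
    print(msg)
    seen_ids.append(test_id)

def report_final(failures):
    # failures maps test id -> message, like the failures/errors lists
    for test_id, msg in failures.items():
        if test_id not in seen_ids:
            # e.g. a setUpClass failure that never reached report_live
            print(msg)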
@@ -169,7 +171,6 @@ class OETestResult(_TestResult):
     def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
             dump_streams=False):
-        self.tc.logger.info("RESULTS:")

         result = self.extraresults
         logs = {}
@@ -182,8 +183,10 @@ class OETestResult(_TestResult):
             (status, log) = self._getTestResultDetails(case)

             t = ""
+            duration = 0
             if case.id() in self.starttime and case.id() in self.endtime:
-                t = " (" + "{0:.2f}".format(self.endtime[case.id()] - self.starttime[case.id()]) + "s)"
+                duration = self.endtime[case.id()] - self.starttime[case.id()]
+                t = " (" + "{0:.2f}".format(duration) + "s)"

             if status not in logs:
                 logs[status] = []
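
Keeping duration as a number, instead of only the formatted "(1.23s)" suffix, lets the next hunk store it in the JSON report. For context, a sketch of the timing bookkeeping this relies on; in runner.py the starttime/endtime dicts are filled in startTest()/stopTest(), which this diff does not show, so treat the method bodies here as an assumption:

import time

class TimingSketch:
    def __init__(self):
        self.starttime = {}  # test id -> wall-clock start
        self.endtime = {}    # test id -> wall-clock end

    def startTest(self, test_id):
        self.starttime[test_id] = time.time()

    def stopTest(self, test_id):
        self.endtime[test_id] = time.time()

    def duration(self, test_id):
        # mirrors the guard above: 0 when either timestamp is missing
        if test_id in self.starttime and test_id in self.endtime:
            return self.endtime[test_id] - self.starttime[test_id]
        return 0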
@@ -191,12 +194,33 @@ class OETestResult(_TestResult):
             report = {'status': status}
             if log:
                 report['log'] = log
+                # Class setup failures wouldn't enter stopTest so would never display
+                if case.id() not in self.shownmsg:
+                    self.tc.logger.info("Failure (%s) for %s:\n" % (status, case.id()) + log)
+
+            if duration:
+                report['duration'] = duration
+
+            alltags = []
+            # pull tags from the case class
+            if hasattr(case, "__oeqa_testtags"):
+                alltags.extend(getattr(case, "__oeqa_testtags"))
+            # pull tags from the method itself
+            test_name = case._testMethodName
+            if hasattr(case, test_name):
+                method = getattr(case, test_name)
+                if hasattr(method, "__oeqa_testtags"):
+                    alltags.extend(getattr(method, "__oeqa_testtags"))
+            if alltags:
+                report['oetags'] = alltags
+
             if dump_streams and case.id() in self.logged_output:
                 (stdout, stderr) = self.logged_output[case.id()]
                 report['stdout'] = stdout
                 report['stderr'] = stderr
             result[case.id()] = report

+        self.tc.logger.info("RESULTS:")
         for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
             if i not in logs:
                 continue
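
The 'oetags' entry is assembled from the __oeqa_testtags attribute, which may sit on the test class, on the test method, or on both. In OEQA that attribute is set by the OETestTag decorator; the version below is a simplified stand-in, written to show how a tag ends up where this code looks for it:

import unittest

def OETestTag(*tags):
    # simplified stand-in for oeqa.core.decorator.OETestTag: record the
    # tags on the decorated class or method under "__oeqa_testtags"
    def decorator(item):
        existing = list(getattr(item, "__oeqa_testtags", []))
        setattr(item, "__oeqa_testtags", existing + list(tags))
        return item
    return decorator

@OETestTag("machine")
class ExampleTest(unittest.TestCase):
    @OETestTag("runqemu")
    def test_boot(self):
        pass

For test_boot, the collection loop above would yield report['oetags'] == ['machine', 'runqemu']: one tag from the class, one from the method itself.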
@@ -211,6 +235,10 @@ class OETestResult(_TestResult):
         # Override as we unexpected successes aren't failures for us
         return (len(self.failures) == len(self.errors) == 0)

+    def hasAnyFailingTest(self):
+        # Account for expected failures
+        return not self.wasSuccessful() or len(self.expectedFailures)
+
 class OEListTestsResult(object):
     def wasSuccessful(self):
         return True
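
Since wasSuccessful() is overridden so that neither unexpected successes nor expected failures fail a run, hasAnyFailingTest() exists for callers that still need to know whether anything failed at all; note it returns a truthy count rather than a strict bool. A hypothetical caller:

def archive_failure_logs():
    # hypothetical helper: stash logs for post-mortem analysis
    pass

def finish_run(result):
    # exit status stays green for expected failures...
    rc = 0 if result.wasSuccessful() else 1
    # ...but log collection keys off any failure, expected or not
    if result.hasAnyFailingTest():
        archive_failure_logs()
    return rc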
@@ -319,10 +347,17 @@ class OETestResultJSONHelper(object):
             the_file.write(file_content)

     def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
-        bb.utils.mkdirhier(write_dir)
-        lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
+        try:
+            import bb
+            has_bb = True
+            bb.utils.mkdirhier(write_dir)
+            lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
+        except ImportError:
+            has_bb = False
+            os.makedirs(write_dir, exist_ok=True)
         test_results = self._get_existing_testresults_if_available(write_dir)
         test_results[result_id] = {'configuration': configuration, 'result': test_result}
         json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
         self._write_file(write_dir, self.testresult_filename, json_testresults)
-        bb.utils.unlockfile(lf)
+        if has_bb:
+            bb.utils.unlockfile(lf)
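
The try/except makes the JSON helper usable outside a bitbake environment: when bb is importable, the read-modify-write of the results file happens under a lock file; without it, the directory is created with os.makedirs() and no lock is taken, so the fallback path assumes a single writer. A hypothetical standalone invocation (paths and ids invented; the per-test report shape matches what logDetails() builds above):

from oeqa.core.runner import OETestResultJSONHelper

helper = OETestResultJSONHelper()
helper.dump_testresult_file(
    write_dir='/tmp/oeqa-results',
    configuration={'TEST_TYPE': 'manual'},
    result_id='example_run_1',
    test_result={
        'example.ExampleTest.test_boot': {
            'status': 'PASSED',
            'duration': 1.23,
            'oetags': ['machine', 'runqemu'],
        },
    })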