Chromium Code Reviews

Unified Diff: build/android/pylib/base/test_result.py

Issue 11901003: [Android] Reland 'Keep track of unknown test results at the TestRunner layer.' (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years, 11 months ago
Index: build/android/pylib/base/test_result.py
diff --git a/build/android/pylib/base/test_result.py b/build/android/pylib/base/test_result.py
index ee2421e4672fa19ae55a2abaad95ae77fb77009c..dd8bfac5585767142a89bf97d691c39cc68fb296 100644
--- a/build/android/pylib/base/test_result.py
+++ b/build/android/pylib/base/test_result.py
@@ -55,22 +55,15 @@ class TestResults(object):
     self.crashed = []
     self.unknown = []
     self.timed_out = []
-    self.overall_timed_out = False
-    self.overall_fail = False
     self.device_exception = None

   @staticmethod
-  def FromRun(ok=None, failed=None, crashed=None, timed_out=None,
-              overall_timed_out=False, overall_fail=False,
-              device_exception=None):
+  def FromRun(ok=None, failed=None, crashed=None, timed_out=None):
     ret = TestResults()
     ret.ok = ok or []
     ret.failed = failed or []
     ret.crashed = crashed or []
     ret.timed_out = timed_out or []
-    ret.overall_timed_out = overall_timed_out
-    ret.overall_fail = overall_fail
-    ret.device_exception = device_exception
     return ret

   @staticmethod
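A minimal usage sketch (not part of this patch) of the trimmed FromRun(): the per-run flags are gone, so callers pass only the per-test buckets. The import path and the BaseTestResult(name, log) constructor are assumed from the surrounding pylib code.

  # Sketch only: import path and BaseTestResult signature are assumptions.
  from pylib.base.test_result import BaseTestResult, TestResults

  ok = [BaseTestResult('FooTest.testPasses', '')]
  failed = [BaseTestResult('FooTest.testFails', 'assertion failed')]

  # overall_timed_out, overall_fail and device_exception can no longer be
  # injected here; failures are carried entirely by the result buckets.
  results = TestResults.FromRun(ok=ok, failed=failed)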
@@ -83,10 +76,6 @@ class TestResults(object):
       ret.crashed += t.crashed
       ret.unknown += t.unknown
       ret.timed_out += t.timed_out
-      if t.overall_timed_out:
-        ret.overall_timed_out = True
-      if t.overall_fail:
-        ret.overall_fail = True
     return ret

   @staticmethod
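The hunk above is the body of the helper that merges per-shard results (its name is not visible in this hunk; FromTestResults is assumed from context). A hedged sketch of what merging looks like after this change:

  # Sketch only: shard_results is a hypothetical list of per-shard TestResults.
  merged = TestResults.FromTestResults(shard_results)
  # The overall_timed_out/overall_fail flags are no longer propagated; a bad
  # shard is visible purely through its failed/crashed/unknown/timed_out lists.
  assert len(merged.GetAllBroken()) == sum(
      len(r.GetAllBroken()) for r in shard_results)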
@@ -131,6 +120,10 @@ class TestResults(object):
     """Returns the all broken tests."""
     return self.failed + self.crashed + self.unknown + self.timed_out

+  def GetAll(self):
+    """Returns the all tests."""
+    return self.ok + self.GetAllBroken()
+
   def _LogToFile(self, test_type, test_suite, build_type):
     """Log results to local files which can be used for aggregation later."""
     # TODO(frankf): Report tests that failed to run here too.
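A hedged sketch of the relationship the new GetAll() accessor establishes (the result objects here are hypothetical placeholders):

  # Sketch only: GetAll() is every recorded result, passing or not.
  results = TestResults.FromRun(ok=[passing_result], failed=[failing_result])
  assert len(results.GetAll()) == len(results.ok) + len(results.GetAllBroken())
  num_ran = len(results.GetAll())        # used by LogFull() below
  step_is_red = bool(results.GetAllBroken())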
@@ -201,7 +194,7 @@ class TestResults(object):
       logging.error(e)

   def LogFull(self, test_type, test_package, annotation=None,
-              build_type='Debug', all_tests=None, flakiness_server=None):
+              build_type='Debug', flakiness_server=None):
     """Log the tests results for the test suite.

     The results will be logged three different ways:
@@ -217,9 +210,6 @@ class TestResults(object):
       annotation: If instrumenation test type, this is a list of annotations
                   (e.g. ['Smoke', 'SmallTest']).
       build_type: Release/Debug
-      all_tests: A list of all tests that were supposed to run.
-                 This is used to determine which tests have failed to run.
-                 If None, we assume all tests ran.
       flakiness_server: If provider, upload the results to flakiness dashboard
                         with this URL.
     """
@@ -242,31 +232,20 @@ class TestResults(object):
       logging.critical('Passed')

     # Summarize in the test output.
-    logging.critical('*' * 80)
-    summary = ['Summary:\n']
-    if all_tests:
-      summary += ['TESTS_TO_RUN=%d\n' % len(all_tests)]
-    num_tests_ran = (len(self.ok) + len(self.failed) +
-                     len(self.crashed) + len(self.unknown) +
-                     len(self.timed_out))
+    num_tests_ran = len(self.GetAll())
     tests_passed = [t.name for t in self.ok]
     tests_failed = [t.name for t in self.failed]
     tests_crashed = [t.name for t in self.crashed]
-    tests_unknown = [t.name for t in self.unknown]
     tests_timed_out = [t.name for t in self.timed_out]
+    tests_unknown = [t.name for t in self.unknown]
+    logging.critical('*' * 80)
+    summary = ['Summary:\n']
     summary += ['RAN=%d\n' % (num_tests_ran),
                 'PASSED=%d\n' % len(tests_passed),
                 'FAILED=%d %s\n' % (len(tests_failed), tests_failed),
                 'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed),
                 'TIMEDOUT=%d %s\n' % (len(tests_timed_out), tests_timed_out),
                 'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)]
-    if all_tests and num_tests_ran != len(all_tests):
-      # Add the list of tests we failed to run.
-      tests_failed_to_run = list(set(all_tests) - set(tests_passed) -
-                                 set(tests_failed) - set(tests_crashed) -
-                                 set(tests_unknown) - set(tests_timed_out))
-      summary += ['FAILED_TO_RUN=%d %s\n' % (len(tests_failed_to_run),
-                                             tests_failed_to_run)]
     summary_string = ''.join(summary)
     logging.critical(summary_string)
     logging.critical('*' * 80)
@@ -285,8 +264,7 @@ class TestResults(object):

   def PrintAnnotation(self):
     """Print buildbot annotations for test results."""
-    if (self.failed or self.crashed or self.overall_fail or
-        self.overall_timed_out):
+    if self.GetAllBroken():
       buildbot_report.PrintError()
     else:
       print 'Step success!'  # No annotation needed
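The practical effect of the reland is that a test which never ran ('unknown') now turns the buildbot step red on its own, without the removed overall_fail flag. A minimal sketch, assuming BaseTestResult(name, log) from this module:

  # Sketch only: an unrun test lands in 'unknown', which GetAllBroken()
  # includes, so PrintAnnotation() emits the buildbot error annotation.
  results = TestResults()
  results.unknown = [BaseTestResult('FooTest.testNeverRan', '')]
  results.PrintAnnotation()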