Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
index fcf9be2ce07aec0f69ca45271ff92c2e6a051c86..03ba145b36fd1da5a530f54fedc411a24c5febab 100644
--- a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
+++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
@@ -35,7 +35,6 @@ import time
 from webkitpy.layout_tests.models import test_expectations
 from webkitpy.layout_tests.models import test_failures
-
 _log = logging.getLogger(__name__)
 OK_EXIT_STATUS = 0
@@ -55,19 +54,18 @@ NO_TESTS_EXIT_STATUS = 253
 NO_DEVICES_EXIT_STATUS = 254
 UNEXPECTED_ERROR_EXIT_STATUS = 255
-ERROR_CODES = (
-    INTERRUPTED_EXIT_STATUS,
-    EARLY_EXIT_STATUS,
-    SYS_DEPS_EXIT_STATUS,
-    NO_TESTS_EXIT_STATUS,
-    NO_DEVICES_EXIT_STATUS,
-    UNEXPECTED_ERROR_EXIT_STATUS,
-)
+ERROR_CODES = (INTERRUPTED_EXIT_STATUS,
+               EARLY_EXIT_STATUS,
+               SYS_DEPS_EXIT_STATUS,
+               NO_TESTS_EXIT_STATUS,
+               NO_DEVICES_EXIT_STATUS,
+               UNEXPECTED_ERROR_EXIT_STATUS, )
 # In order to avoid colliding with the above codes, we put a ceiling on
 # the value returned by num_regressions
 MAX_FAILURES_EXIT_STATUS = 101
+
 class TestRunException(Exception):
     def __init__(self, code, msg):
         self.code = code
@@ -135,9 +133,13 @@ class TestRunResults(object):
 class RunDetails(object):
-    def __init__(self, exit_code, summarized_full_results=None,
-                 summarized_failing_results=None, initial_results=None,
-                 all_retry_results=None, enabled_pixel_tests_in_retry=False):
+    def __init__(self,
+                 exit_code,
+                 summarized_full_results=None,
+                 summarized_failing_results=None,
+                 initial_results=None,
+                 all_retry_results=None,
+                 enabled_pixel_tests_in_retry=False):
         self.exit_code = exit_code
         self.summarized_full_results = summarized_full_results
         self.summarized_failing_results = summarized_failing_results
@@ -166,8 +168,11 @@ def _interpret_test_failures(failures):
     return test_dict
-def summarize_results(port_obj, expectations, initial_results,
-                      all_retry_results, enabled_pixel_tests_in_retry,
+def summarize_results(port_obj,
+                      expectations,
+                      initial_results,
+                      all_retry_results,
+                      enabled_pixel_tests_in_retry,
                       only_include_failing=False):
     """Returns a dictionary containing a summary of the test runs, with the following fields:
         'version': a version indicator
@@ -218,8 +223,7 @@ def summarize_results(port_obj, expectations, initial_results,
             num_passes += 1
             if not result.has_stderr and only_include_failing:
                 continue
-        elif (result.type != test_expectations.SKIP and
-              test_name in initial_results.unexpected_results_by_name):
+        elif (result.type != test_expectations.SKIP and test_name in initial_results.unexpected_results_by_name):
             # Loop through retry results to collate results and determine
             # whether this is a regression, unexpected pass, or flaky test.
             is_flaky = False
@@ -276,9 +280,8 @@ def summarize_results(port_obj, expectations, initial_results,
         test_dict['actual'] = " ".join(actual)
         def is_expected(actual_result):
-            return expectations.matches_an_expected_result(test_name, actual_result,
-                                                           port_obj.get_option('pixel_tests') or result.reftest_type,
-                                                           port_obj.get_option('enable_sanitizer'))
+            return expectations.matches_an_expected_result(test_name, actual_result, port_obj.get_option('pixel_tests') or
+                                                           result.reftest_type, port_obj.get_option('enable_sanitizer'))
         # To avoid bloating the output results json too much, only add an entry for whether the failure is unexpected.
         if not any(is_expected(actual_result) for actual_result in actual_types):
@@ -321,7 +324,8 @@ def summarize_results(port_obj, expectations, initial_results,
     results['num_flaky'] = num_flaky
     # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
     results['num_regressions'] = num_regressions
-    results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
+    results[
+        'interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
     results['layout_tests_dir'] = port_obj.layout_tests_dir()
     results['has_wdiff'] = port_obj.wdiff_available()
     results['has_pretty_patch'] = port_obj.pretty_patch_available()
@@ -340,7 +344,6 @@ def summarize_results(port_obj, expectations, initial_results,
         results['chromium_revision'] = str(scm.commit_position(path))
     else:
         _log.warn('Failed to determine chromium commit position for %s, '
-                  'leaving "chromium_revision" key blank in full_results.json.'
-                  % path)
+                  'leaving "chromium_revision" key blank in full_results.json.' % path)
     return results
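
Note on the exit-status constants touched in the second hunk: the dedicated error codes (253-255 and the other ERROR_CODES members) sit at the top of the 8-bit range, while a normal run reports its regression count directly as the exit status, capped at MAX_FAILURES_EXIT_STATUS = 101 so the two ranges can never collide. A minimal sketch of that capping rule follows; compute_exit_code() is a hypothetical helper for illustration, not the real driver logic elsewhere in the test runner.

# Illustrative sketch only; compute_exit_code() is a hypothetical helper,
# not part of test_run_results.py.
from webkitpy.layout_tests.models import test_run_results


def compute_exit_code(num_regressions, error_code=None):
    """Map a run outcome to a process exit status."""
    if error_code is not None:
        # A dedicated error condition (e.g. NO_TESTS_EXIT_STATUS) wins outright.
        assert error_code in test_run_results.ERROR_CODES
        return error_code
    # Otherwise report the regression count, capped at 101 so the value
    # stays clear of the dedicated error codes (253, 254, 255, ...).
    return min(num_regressions, test_run_results.MAX_FAILURES_EXIT_STATUS)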