Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py |
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py |
new file mode 100644 |
index 0000000000000000000000000000000000000000..a9f1c08f74f31df7d26719b138ee71d0a59c8a64 |
--- /dev/null |
+++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py |
@@ -0,0 +1,172 @@ |
+# Copyright 2016 The Chromium Authors. All rights reserved. |
+# Use of this source code is governed by a BSD-style license that can be |
+# found in the LICENSE file. |
+ |
+import argparse |
+ |
+from webkitpy.layout_tests.port import builders |
+from webkitpy.layout_tests.models.test_expectations import TestExpectations |
+from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory |
+ |
+ |
+def main(host, argv): |
+ parser = argparse.ArgumentParser(epilog=(""" |
+ Scans the TestExpectations file |
+ and uses results from actual builder bots runs to remove tests that are |
+ marked as flaky but don't fail in the specified way. |
+ |
+ E.g. If a test has this expectation: |
+ bug(test) fast/test.html [ Failure Pass ] |
+ |
+    And all the runs on builders have passed, the line will be removed. |
+ Additionally, the runs don't all have to be Passing to remove the line; |
+ as long as the non-Passing results are of a type not specified in the |
+    expectation (e.g. Crash), this line will be removed. |
qyearsley
2016/03/10 19:13:58
Excellent, this is helpful.
I'm not sure, but may
bokan
2016/03/16 20:24:53
Done.
|
+ """), formatter_class=argparse.RawTextHelpFormatter) |
+ args = parser.parse_args(argv) |
+ |
+ port = host.port_factory.get() |
+ bot_test_expectations_factory = BotTestExpectationsFactory() |
+ |
+ remove_flakes_o_matic = RemoveFlakesOMatic(host, |
+ port, |
+ bot_test_expectations_factory) |
+ |
+ test_expectations = remove_flakes_o_matic.get_updated_test_expectations() |
+ |
+ expectations_file = port.path_to_generic_test_expectations_file() |
+ |
+ remove_flakes_o_matic.write_test_expectations(test_expectations, |
+ expectations_file) |
+ |
+ |
+class RemoveFlakesOMatic: |
+ def __init__(self, host, port, bot_test_expectations_factory): |
+ self._host = host |
+ self._port = port |
qyearsley
2016/03/10 19:13:58
Host is the host on which the expectations file wi
bokan
2016/03/16 20:24:53
I'm not 100% clear on what Host and Port are but I
|
+ self._expectations_factory = bot_test_expectations_factory |
+ |
+ def _can_delete_line(self, test_expectation_line): |
+ """Returns whether a given line in the expectations can be removed based |
+        on its results on the bots (i.e. remove if the bots show it's not flaky). |
+ |
+ Args: |
+ test_expectation_line (TestExpectationLine): A line in the test |
+ expectation file to test for possible removal. |
+ |
qyearsley
2016/03/10 19:13:58
Optional docstring recommendations:
- It's faste
bokan
2016/03/16 20:24:53
Thanks, done.
|
+ """ |
+ expectations = test_expectation_line.expectations |
+ if len(expectations) < 2: |
+ return False |
+ |
+ if self._has_unstrippable_expectations(expectations): |
+ return False |
+ |
+ if not self._has_pass_expectation(expectations): |
+ return False |
+ |
+ # The line can be deleted if the only expectation on the line that appears in the actual |
+ # results is the PASS expectation. |
+ for config in test_expectation_line.matching_configurations: |
+ builder_name = builders.builder_name_for_specifiers(config.version, config.build_type) |
+ |
+ if not builder_name: |
+ # TODO(bokan): We probably want to report a problem if we can't |
+ # turn the specifiers into a builder name. |
+ # TODO(bokan): Matching configurations often give us bots that don't have a |
+ # builder in builders.py's exact_matches. Should we ignore those or be conservative |
+ # and assume we need these expectations to make a decision? Also, testing isn't |
+ # well suited to changing the list of builders/configurations so if we want to skip |
+ # here the tests will have to specify results for all builders. |
+ continue |
+ |
+ if builder_name not in self.builder_results_by_path.keys(): |
+ # TODO(bokan): We probably want to report a problem if we can't get results for |
+ # a bot. |
qyearsley
2016/03/10 19:13:58
Sounds reasonable -- question is, should it be con
bokan
2016/03/16 20:24:53
I think not because, for example, if we have an ex
|
+ continue |
+ |
+ results_by_path = self.builder_results_by_path[builder_name] |
+ |
+ # No results means the test was all skipped or all results are passing. |
+ if test_expectation_line.path not in results_by_path.keys(): |
+ continue |
+ |
+ results_for_single_test = results_by_path[test_expectation_line.path] |
+ |
+ if self._met_expectations(test_expectation_line, results_for_single_test) != set(['PASS']): |
+ return False |
+ |
+ return True |
+ |
+ def _has_pass_expectation(self, expectations): |
+ return 'PASS' in expectations |
+ |
+ def _met_expectations(self, test_expectation_line, results_for_single_test): |
qyearsley
2016/03/10 19:13:58
The name of this function currently suggests it mi
bokan
2016/03/16 20:24:53
I don't really like testing private internals but
|
+ # TODO(bokan): Does this not exist in a more central place? |
+ def replace_failing_with_fail(expectation): |
+ if expectation in ('TEXT', 'IMAGE', 'IMAGE+TEXT', 'AUDIO'): |
+ return 'FAIL' |
+ else: |
+ return expectation |
+ |
+ actual_results = {replace_failing_with_fail(r) for r in results_for_single_test} |
+ |
+ return set(test_expectation_line.expectations) & actual_results |
+ |
+ def _has_unstrippable_expectations(self, expectations): |
qyearsley
2016/03/10 19:13:58
expectations here is a list of strings, right?
bokan
2016/03/16 20:24:53
Right, docstring added.
|
+ unstrippable_expectations = ('REBASELINE', 'NEEDSREBASELINE', |
+ 'NEEDSMANUALREBASELINE', 'SLOW') |
+ return any(s in expectations for s in unstrippable_expectations) |
+ |
+ def get_updated_test_expectations(self): |
+ test_expectations = TestExpectations(self._port, include_overrides=False).expectations() |
+ |
+ self.builder_results_by_path = {} |
+ for builder_name in builders.all_builder_names(): |
+ expectations_for_builder = ( |
+ self._expectations_factory.expectations_for_builder(builder_name) |
+ ) |
+ |
+ # TODO(bokan): What to do if we can't get the results for a builder? |
+ if not expectations_for_builder: |
+ continue |
+ |
+ self.builder_results_by_path[builder_name] = ( |
+ expectations_for_builder.all_results_by_path() |
+ ) |
qyearsley
2016/03/10 19:13:58
Does getting the builder results require making ne
bokan
2016/03/16 20:24:53
Not sure since that all happens elsewhere but my b
|
+ |
+ expectations_to_remove = [] |
+ |
+ for expectation in test_expectations: |
+ if self._can_delete_line(expectation): |
+ expectations_to_remove.append(expectation) |
+ |
+ for expectation in expectations_to_remove: |
+ index = test_expectations.index(expectation) |
+ test_expectations.remove(expectation) |
+ |
+ # Remove associated comments and whitespace if we've removed the last expectation under |
+ # a comment block. Only remove a comment block if it's not separated from the tests by |
+ # whitespace. |
+ if index == len(test_expectations) or test_expectations[index].is_whitespace_or_comment(): |
+ removed_whitespace = False |
+ while index and test_expectations[index - 1].is_whitespace(): |
+ index = index - 1 |
+ test_expectations.pop(index) |
+ removed_whitespace = True |
+ |
+ if not removed_whitespace: |
+ while index and test_expectations[index - 1].is_comment(): |
+ index = index - 1 |
+ test_expectations.pop(index) |
+ |
+ while index and test_expectations[index - 1].is_whitespace(): |
+ index = index - 1 |
+ test_expectations.pop(index) |
+ |
+ return test_expectations |
+ |
+ def write_test_expectations(self, test_expectations, test_expectations_file): |
+ self._host.filesystem.write_text_file( |
+ test_expectations_file, |
+ TestExpectations.list_to_string(test_expectations, reconstitute_only_these=[])) |