| Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py |
| diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..775ea2f7dcdb5f000aeab790236c35731252219e |
| --- /dev/null |
| +++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py |
| @@ -0,0 +1,217 @@ |
| +# Copyright 2016 The Chromium Authors. All rights reserved. |
| +# Use of this source code is governed by a BSD-style license that can be |
| +# found in the LICENSE file. |
| + |
| +import argparse |
| + |
| +from webkitpy.layout_tests.port import builders |
| +from webkitpy.layout_tests.models.test_expectations import TestExpectations |
| +from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory |
| + |
| + |
| +def main(host, argv): |
| + parser = argparse.ArgumentParser(epilog=(""" |
| + Scans the TestExpectations file |
| + and uses results from actual builder bots runs to remove tests that are |
| + marked as flaky but don't fail in the specified way. |
| + |
| + E.g. If a test has this expectation: |
| + bug(test) fast/test.html [ Failure Pass ] |
| + |
| + And all the runs on builders have passed, the line will be removed. |
| + |
| + Additionally, the runs don't all have to be Passing to remove the line; |
| + as long as the non-Passing results are of a type not specified in the |
| + expectation, this line will be removed. For example, if this is the |
| + expectation: |
| + |
| + bug(test) fast/test.html [ Crash Pass ] |
| + |
| + But the results on the builders show only Passes and Timeouts, the line |
| + will be removed since there are no Crash results. |
| + """), formatter_class=argparse.RawTextHelpFormatter) |
|
Dirk Pranke
2016/03/29 22:39:01
I would actually move this string up to the top-le
bokan
2016/04/05 12:28:57
Done.
|
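A sketch of the change Dirk suggests here: hoist the help text into a module-level docstring and hand it to argparse, so the same text serves as both module documentation and --help output (placement and exact wording are the author's call; this reuses the epilog text above):

    """Removes stale flakiness expectations from TestExpectations.

    Scans the TestExpectations file and uses results from actual builder bot
    runs to remove tests that are marked as flaky but don't fail in the
    specified way.
    """

    import argparse


    def main(host, argv):
        parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.RawTextHelpFormatter)
        parser.parse_args(argv)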
| + args = parser.parse_args(argv) |
| + |
| + port = host.port_factory.get() |
| + bot_test_expectations_factory = BotTestExpectationsFactory() |
| + |
| + remove_flakes_o_matic = RemoveFlakesOMatic(host, |
| + port, |
| + bot_test_expectations_factory) |
| + |
| + test_expectations = remove_flakes_o_matic.get_updated_test_expectations() |
| + |
| + if not test_expectations: |
| + # TODO(bokan): How should we report errors? |
| + return None |
| + |
| + expectations_file = port.path_to_generic_test_expectations_file() |
| + |
| + remove_flakes_o_matic.write_test_expectations(test_expectations, |
| + expectations_file) |
|
qyearsley
2016/03/21 21:45:25
Note, the unit test doesn't cover main(), or write
Dirk Pranke
2016/03/29 22:39:01
By passing a MockHost() to main, you can (and shou
bokan
2016/04/05 12:28:56
Added a TODO, will add shortly.
|
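As a sketch of the MockHost-based coverage discussed above (assuming MockHost is importable from webkitpy.common.host_mock; the test name and the setup comment are illustrative, and a real test would still need to seed the mock filesystem and stub out the bot results):

    import unittest

    from webkitpy.common.host_mock import MockHost
    from webkitpy.layout_tests import update_test_expectations


    class MainTest(unittest.TestCase):
        def test_main_writes_expectations_file(self):
            host = MockHost()
            # Assumed setup: a TestExpectations file in the mock filesystem
            # and canned builder results behind the expectations factory.
            update_test_expectations.main(host, [])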
| + |
| + |
| +class RemoveFlakesOMatic: |
|
Dirk Pranke
2016/03/29 22:39:01
derive this class from object, i.e. RemoveFlakesOM
bokan
2016/04/05 12:28:56
Done.
|
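The requested change is just the new-style class declaration; under Python 2 a class is only new-style (consistent super() and descriptor behaviour) when object is named explicitly:

    class RemoveFlakesOMatic(object):
        pass  # methods unchanged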
| + def __init__(self, host, port, bot_test_expectations_factory): |
| + self._host = host |
| + self._port = port |
| + self._expectations_factory = bot_test_expectations_factory |
| + |
| + def _can_delete_line(self, test_expectation_line): |
| + """Returns whether a given line in the expectations can be removed. |
| + |
| + Uses results from builder bots to determine if a given line is stale and |
| + can safely be removed from the TestExpectations file. (i.e. remove if |
| + the bots show that it's not flaky). There are also some rules about when |
| + not to remove lines (e.g. never remove lines with Rebaseline |
| + expectations, don't remove non-flaky expectations, etc.) |
| + |
| + Args: |
| + test_expectation_line (TestExpectationLine): A line in the test |
| + expectation file to test for possible removal. |
| + |
| + Returns: True if the line can be removed, False otherwise. |
| + """ |
| + expectations = test_expectation_line.expectations |
| + if len(expectations) < 2: |
| + return False |
| + |
| + if self._has_unstrippable_expectations(expectations): |
| + return False |
| + |
| + if not self._has_pass_expectation(expectations): |
| + return False |
| + |
| + # The line can be deleted if the only expectation on the line that appears in the actual |
| + # results is the PASS expectation. |
| + for config in test_expectation_line.matching_configurations: |
| + builder_name = builders.builder_name_for_specifiers(config.version, config.build_type) |
| + |
| + if not builder_name: |
| + # TODO(bokan): We probably want to report a problem if we can't |
| + # turn the specifiers into a builder name. |
| + # TODO(bokan): Matching configurations often give us bots that don't have a |
| + # builder in builders.py's exact_matches. Should we ignore those or be conservative |
| + # and assume we need these expectations to make a decision? |
|
Dirk Pranke
2016/03/29 22:39:01
Are you wondering about cases like Debug bots wher
bokan
2016/04/05 12:28:56
Yah, I think it's mostly debug, but not all. The m
|
| + return False |
| + |
| + if builder_name not in self.builder_results_by_path.keys(): |
| + # TODO(bokan): We probably want to report a problem if we can't get results for |
| + # a bot specified in expectations. |
| + return False |
|
qyearsley
2016/03/21 21:45:25
For here and above, I agree that logging an error
bokan
2016/04/05 12:28:56
Added logging messages. Will add tests shortly.
|
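The logging added in response could follow the usual module-level logger pattern; a sketch only, with illustrative message text and helper name:

    import logging

    _log = logging.getLogger(__name__)


    def _log_kept_line(builder_name, config):
        # Illustrative helper mirroring the two early returns in
        # _can_delete_line(): no matching builder, or no results for it.
        if not builder_name:
            _log.warning('No builder found for configuration %s; keeping the line.', config)
        else:
            _log.warning('No results for builder "%s"; keeping the line.', builder_name)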
| + |
| + results_by_path = self.builder_results_by_path[builder_name] |
| + |
| + # No results means the test was all skipped or all results are passing. |
|
qyearsley
2016/03/21 21:45:25
test was all skipped -> tests were all skipped
bokan
2016/04/05 12:28:56
Done.
|
| + if test_expectation_line.path not in results_by_path.keys(): |
| + continue |
|
qyearsley
2016/03/21 21:45:25
This line is not covered by the unit test right no
bokan
2016/04/05 12:28:56
Ack. Added a TODO, will do shortly.
|
| + |
| + results_for_single_test = results_by_path[test_expectation_line.path] |
| + |
| + if self._expectations_that_were_met(test_expectation_line, results_for_single_test) != set(['PASS']): |
| + return False |
| + |
| + return True |
| + |
| + def _has_pass_expectation(self, expectations): |
| + return 'PASS' in expectations |
| + |
| + def _expectations_that_were_met(self, test_expectation_line, results_for_single_test): |
| + """Returns the set of expectations that appear in the given results. |
| + |
| + e.g. If the test expectation is: |
| + bug(test) fast/test.html [Crash Failure Pass] |
| + |
| + And the results are ['TEXT', 'PASS', 'PASS', 'TIMEOUT'] |
| + |
| + This method would return set(['PASS', 'FAIL']) |
| + |
| + Args: |
| + test_expectation_line: A TestExpectationLine object |
| + results_for_single_test: A list of result strings. |
| + e.g. ['IMAGE', 'IMAGE', 'PASS'] |
| + |
| + Returns: |
| + A set containing expectations that occurred in the results. |
| + """ |
| + # TODO(bokan): Does this not exist in a more central place? |
| + def replace_failing_with_fail(expectation): |
| + if expectation in ('TEXT', 'IMAGE', 'IMAGE+TEXT', 'AUDIO'): |
| + return 'FAIL' |
| + else: |
| + return expectation |
| + |
| + actual_results = {replace_failing_with_fail(r) for r in results_for_single_test} |
| + |
| + return set(test_expectation_line.expectations) & actual_results |
| + |
| + def _has_unstrippable_expectations(self, expectations): |
| + """ Returns whether any of the given expectations are considered unstrippable. |
| + |
| + Unstrippable expectations are those which should stop a line from being |
| + removed regardless of builder bot results. |
| + |
| + Args: |
| + expectations: A list of string expectations. |
| + E.g. ['PASS', 'FAIL', 'CRASH'] |
| + |
| + Returns: |
| + True if at least one of the expectations is unstrippable. False |
| + otherwise. |
| + """ |
| + unstrippable_expectations = ('REBASELINE', 'NEEDSREBASELINE', |
| + 'NEEDSMANUALREBASELINE', 'SLOW') |
| + return any(s in expectations for s in unstrippable_expectations) |
| + |
| + def get_updated_test_expectations(self): |
| + test_expectations = TestExpectations(self._port, include_overrides=False).expectations() |
| + |
| + self.builder_results_by_path = {} |
| + for builder_name in builders.all_builder_names(): |
| + expectations_for_builder = ( |
| + self._expectations_factory.expectations_for_builder(builder_name) |
| + ) |
| + |
| + # TODO(bokan): Error message? |
| + if not expectations_for_builder: |
| + return None |
|
qyearsley
2016/03/21 21:45:25
Logging an error here sounds good to me. Also, thi
bokan
2016/04/05 12:28:56
Added log message. TODO and working on test.
|
| + |
| + self.builder_results_by_path[builder_name] = ( |
| + expectations_for_builder.all_results_by_path() |
| + ) |
| + |
| + expectations_to_remove = [] |
| + |
| + for expectation in test_expectations: |
| + if self._can_delete_line(expectation): |
| + expectations_to_remove.append(expectation) |
| + |
| + for expectation in expectations_to_remove: |
| + index = test_expectations.index(expectation) |
| + test_expectations.remove(expectation) |
| + |
| + # Remove associated comments and whitespace if we've removed the last expectation under |
| + # a comment block. Only remove a comment block if it's not separated from the tests by |
| + # whitespace. |
|
qyearsley
2016/03/21 21:45:25
Would it be correct to rephrase "if it's not separ
bokan
2016/04/05 12:28:56
Done. (replaced with "if it's not separated _from_
|
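Roughly, in TestExpectations terms (an illustration, not part of the change), given an input where both expectations turn out to be stale:

    # Started flaking after some revision.
    bug(test) fast/test.html [ Failure Pass ]

    # Notes separated from the expectations by a blank line.

    bug(test) fast/flaky.html [ Failure Pass ]

the first comment is removed along with its expectation because nothing separates them, while the blank-separated comment block is preserved.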
| + if index == len(test_expectations) or test_expectations[index].is_whitespace_or_comment(): |
| + removed_whitespace = False |
| + while index and test_expectations[index - 1].is_whitespace(): |
| + index = index - 1 |
| + test_expectations.pop(index) |
| + removed_whitespace = True |
|
qyearsley
2016/03/21 21:45:25
I think there may be no test case that covers this
bokan
2016/04/05 12:28:56
It should be covered by test_preserve_comments_and
|
| + |
| + if not removed_whitespace: |
| + while index and test_expectations[index - 1].is_comment(): |
| + index = index - 1 |
| + test_expectations.pop(index) |
| + |
| + while index and test_expectations[index - 1].is_whitespace(): |
| + index = index - 1 |
| + test_expectations.pop(index) |
| + |
| + return test_expectations |
| + |
| + def write_test_expectations(self, test_expectations, test_expectations_file): |
| + self._host.filesystem.write_text_file( |
| + test_expectations_file, |
| + TestExpectations.list_to_string(test_expectations, reconstitute_only_these=[])) |
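For reference, the intended end-to-end flow mirrors main() above and could be driven from other tooling roughly like this (a sketch; it assumes a concrete Host from webkitpy.common.host and does no error reporting):

    from webkitpy.common.host import Host
    from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
    from webkitpy.layout_tests.update_test_expectations import RemoveFlakesOMatic

    host = Host()
    port = host.port_factory.get()
    updater = RemoveFlakesOMatic(host, port, BotTestExpectationsFactory())

    updated = updater.get_updated_test_expectations()
    if updated:
        updater.write_test_expectations(
            updated, port.path_to_generic_test_expectations_file())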