| Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py
|
| diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..0fb450abee8a9f72fb010cd7a09bd1880e86e76c
|
| --- /dev/null
|
| +++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/update_test_expectations.py
|
| @@ -0,0 +1,239 @@
|
| +# Copyright 2016 The Chromium Authors. All rights reserved.
|
| +# Use of this source code is governed by a BSD-style license that can be
|
| +# found in the LICENSE file.
|
| +
|
| +"""Updates TestExpectations based on results in builder bots.
|
| +
|
| +Scans the TestExpectations file and uses results from actual builder bot runs
|
| +to remove lines for tests that are marked as flaky but don't fail in the
|
| +specified way.
|
| +
|
| +E.g. If a test has this expectation:
|
| + bug(test) fast/test.html [ Failure Pass ]
|
| +
|
| +and all the runs on the builders have passed, the line will be removed.
|
| +
|
| +Additionally, the runs don't all have to be Passing to remove the line;
|
| +as long as the non-Passing results are of a type not specified in the
|
| +expectation, the line will be removed. For example, if this is the
|
| +expectation:
|
| +
|
| + bug(test) fast/test.html [ Crash Pass ]
|
| +
|
| +but the results on the builders show only Passes and Timeouts, then the line
|
| +will be removed since there are no Crash results.
|
| +
|
| +"""
|
| +
|
| +import argparse
|
| +import logging
|
| +
|
| +from webkitpy.layout_tests.models.test_expectations import TestExpectations
|
| +
|
| +_log = logging.getLogger(__name__)
|
| +
|
| +
|
| +def main(host, bot_test_expectations_factory, argv):
|
| + parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawTextHelpFormatter)
|
| + parser.parse_args(argv)
|
| +
|
| + port = host.port_factory.get()
|
| +
|
| + logging.basicConfig(level=logging.INFO, format="%(message)s")
|
| +
|
| + expectations_file = port.path_to_generic_test_expectations_file()
|
| + if not host.filesystem.isfile(expectations_file):
|
| + _log.warn("Didn't find generic expectations file at: " + expectations_file)
|
| + return None
|
| +
|
| + remove_flakes_o_matic = RemoveFlakesOMatic(host,
|
| + port,
|
| + bot_test_expectations_factory)
|
| +
|
| + test_expectations = remove_flakes_o_matic.get_updated_test_expectations()
|
| +
|
| + remove_flakes_o_matic.write_test_expectations(test_expectations,
|
| + expectations_file)
|
| +
|
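| +# Illustrative usage sketch (an assumption, not something this patch defines):
|
| +# a wrapper script would construct a Host and a factory whose
|
| +# expectations_for_builder() method returns downloaded bot results, then call
|
| +# main(). The import paths and constructor arguments below are assumptions
|
| +# and may differ:
|
| +#
|
| +#     from webkitpy.common.host import Host
|
| +#     from webkitpy.layout_tests.layout_package.bot_test_expectations import (
|
| +#         BotTestExpectationsFactory)
|
| +#
|
| +#     main(Host(), BotTestExpectationsFactory(), argv=[])
|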
| +
|
| +class RemoveFlakesOMatic(object):
|
| + def __init__(self, host, port, bot_test_expectations_factory):
|
| + self._host = host
|
| + self._port = port
|
| + self._expectations_factory = bot_test_expectations_factory
|
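| +        # Maps a builder name to that builder's results keyed by test path;
|
| +        # each value is the list of actual result strings for the test,
|
| +        # e.g. ['IMAGE', 'IMAGE', 'PASS'].
|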
| + self.builder_results_by_path = {}
|
| +
|
| + def _can_delete_line(self, test_expectation_line):
|
| + """Returns whether a given line in the expectations can be removed.
|
| +
|
| + Uses results from builder bots to determine if a given line is stale and
|
| +        can safely be removed from the TestExpectations file (i.e. removed if
|
| +        the bots show that it's not flaky). There are also some rules about when
|
| + not to remove lines (e.g. never remove lines with Rebaseline
|
| + expectations, don't remove non-flaky expectations, etc.)
|
| +
|
| + Args:
|
| + test_expectation_line (TestExpectationLine): A line in the test
|
| + expectation file to test for possible removal.
|
| +
|
| + Returns: True if the line can be removed, False otherwise.
|
| + """
|
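| +        # Worked example (illustrative): given the line
|
| +        #     bug(test) fast/test.html [ Failure Pass ]
|
| +        # the line is kept if any matching builder reports a failure result
|
| +        # for the test, and deleted only if PASS is the sole listed
|
| +        # expectation that actually shows up in the builders' results.
|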
| + expectations = test_expectation_line.expectations
|
| + if len(expectations) < 2:
|
| + return False
|
| +
|
| + if self._has_unstrippable_expectations(expectations):
|
| + return False
|
| +
|
| + if not self._has_pass_expectation(expectations):
|
| + return False
|
| +
|
| + # The line can be deleted if the only expectation on the line that appears in the actual
|
| + # results is the PASS expectation.
|
| + for config in test_expectation_line.matching_configurations:
|
| + builder_name = self._host.builders.builder_name_for_specifiers(config.version, config.build_type)
|
| +
|
| + if not builder_name:
|
| + _log.error('Failed to get builder for config [%s, %s, %s]' % (config.version, config.architecture, config.build_type))
|
| + # TODO(bokan): Matching configurations often give us bots that don't have a
|
| + # builder in builders.py's exact_matches. Should we ignore those or be conservative
|
| + # and assume we need these expectations to make a decision?
|
| + return False
|
| +
|
| +            if builder_name not in self.builder_results_by_path:
|
| + _log.error('Failed to find results for builder "%s"' % builder_name)
|
| + return False
|
| +
|
| + results_by_path = self.builder_results_by_path[builder_name]
|
| +
|
| +            # No results means the test was skipped or all of its results are passing.
|
| +            if test_expectation_line.path not in results_by_path:
|
| + continue
|
| +
|
| + results_for_single_test = results_by_path[test_expectation_line.path]
|
| +
|
| + if self._expectations_that_were_met(test_expectation_line, results_for_single_test) != set(['PASS']):
|
| + return False
|
| +
|
| + return True
|
| +
|
| + def _has_pass_expectation(self, expectations):
|
| + return 'PASS' in expectations
|
| +
|
| + def _expectations_that_were_met(self, test_expectation_line, results_for_single_test):
|
| + """Returns the set of expectations that appear in the given results.
|
| +
|
| +        e.g. If the test expectation is:
|
| +            bug(test) fast/test.html [ Crash Failure Pass ]
|
| +
|
| +        and the results are ['TEXT', 'PASS', 'PASS', 'TIMEOUT'],
|
| +
|
| +        this method returns set(['FAIL', 'PASS']) since 'TEXT' counts as a
|
| +        FAIL and both FAIL and PASS appear in the expectations.
|
| +
|
| + Args:
|
| + test_expectation_line: A TestExpectationLine object
|
| + results_for_single_test: A list of result strings.
|
| + e.g. ['IMAGE', 'IMAGE', 'PASS']
|
| +
|
| + Returns:
|
| +            A set containing expectations that occurred in the results.
|
| + """
|
| + # TODO(bokan): Does this not exist in a more central place?
|
| + def replace_failing_with_fail(expectation):
|
| + if expectation in ('TEXT', 'IMAGE', 'IMAGE+TEXT', 'AUDIO'):
|
| + return 'FAIL'
|
| + else:
|
| + return expectation
|
| +
|
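| +        # Normalize the specific failure types reported by the bots to the
|
| +        # generic FAIL keyword so they can be matched against the
|
| +        # expectation tokens.
|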
| + actual_results = {replace_failing_with_fail(r) for r in results_for_single_test}
|
| +
|
| + return set(test_expectation_line.expectations) & actual_results
|
| +
|
| + def _has_unstrippable_expectations(self, expectations):
|
| +        """Returns whether any of the given expectations are unstrippable.
|
| +
|
| + Unstrippable expectations are those which should stop a line from being
|
| + removed regardless of builder bot results.
|
| +
|
| + Args:
|
| + expectations: A list of string expectations.
|
| +                E.g. ['PASS', 'FAIL', 'CRASH']
|
| +
|
| + Returns:
|
| + True if at least one of the expectations is unstrippable. False
|
| + otherwise.
|
| + """
|
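| +        # These keywords describe how a test is run or rebaselined rather
|
| +        # than a flaky result, so passing bot results don't make such lines
|
| +        # stale.
|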
| + unstrippable_expectations = ('REBASELINE', 'NEEDSREBASELINE',
|
| + 'NEEDSMANUALREBASELINE', 'SLOW',
|
| + 'SKIP')
|
| + return any(s in expectations for s in unstrippable_expectations)
|
| +
|
| + def get_updated_test_expectations(self):
|
| +        """Filters out passing lines from the TestExpectations file.
|
| +
|
| +        Reads the current TestExpectations file and, using results from the
|
| + build bots, removes lines that are passing. That is, removes lines that
|
| + were not needed to keep the bots green.
|
| +
|
| + Returns: A TestExpectations object with the passing lines filtered out.
|
| + """
|
| + test_expectations = TestExpectations(self._port, include_overrides=False).expectations()
|
| +
|
| + self.builder_results_by_path = {}
|
| + for builder_name in self._host.builders.all_builder_names():
|
| + expectations_for_builder = (
|
| + self._expectations_factory.expectations_for_builder(builder_name)
|
| + )
|
| +
|
| + if not expectations_for_builder:
|
| + # This is not fatal since we may not need to check these
|
| + # results. If we do need these results we'll log an error later
|
| + # when trying to check against them.
|
| +                _log.warn('Downloaded results are missing for builder "%s"' % builder_name)
|
| + continue
|
| +
|
| + self.builder_results_by_path[builder_name] = (
|
| + expectations_for_builder.all_results_by_path()
|
| + )
|
| +
|
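| +        # Collect the stale lines first so that the removal below doesn't
|
| +        # mutate test_expectations while it's being iterated over.
|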
| + expectations_to_remove = []
|
| +
|
| + for expectation in test_expectations:
|
| + if self._can_delete_line(expectation):
|
| + expectations_to_remove.append(expectation)
|
| +
|
| + for expectation in expectations_to_remove:
|
| + index = test_expectations.index(expectation)
|
| + test_expectations.remove(expectation)
|
| +
|
| + # Remove associated comments and whitespace if we've removed the last expectation under
|
| + # a comment block. Only remove a comment block if it's not separated from the test
|
| + # expectation line by whitespace.
|
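| +            # For example, after deleting the only expectation under
|
| +            #     # Comment describing the flaky test below.
|
| +            #     bug(test) fast/test.html [ Failure Pass ]
|
| +            # the comment line and any blank lines above it are removed too.
|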
| +            if (index == len(test_expectations)
|
| +                    or test_expectations[index].is_whitespace()
|
| +                    or test_expectations[index].is_comment()):
|
| + removed_whitespace = False
|
| + while index and test_expectations[index - 1].is_whitespace():
|
| + index = index - 1
|
| + test_expectations.pop(index)
|
| + removed_whitespace = True
|
| +
|
| + if not removed_whitespace:
|
| + while index and test_expectations[index - 1].is_comment():
|
| + index = index - 1
|
| + test_expectations.pop(index)
|
| +
|
| + while index and test_expectations[index - 1].is_whitespace():
|
| + index = index - 1
|
| + test_expectations.pop(index)
|
| +
|
| + return test_expectations
|
| +
|
| + def write_test_expectations(self, test_expectations, test_expectations_file):
|
| + """Writes the given TestExpectations object to the filesystem.
|
| +
|
| + Args:
|
| +            test_expectations: The TestExpectations object to write.
|
| + test_expectations_file: The full file path of the Blink
|
| + TestExpectations file. This file will be overwritten.
|
| + """
|
| + self._host.filesystem.write_text_file(
|
| + test_expectations_file,
|
| + TestExpectations.list_to_string(test_expectations, reconstitute_only_these=[]))
|
|
|