Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 0ceb3b326136938fac843c926908399843eb64a7..de7a89d161a682996d6b9ad3b6591b27a9c5e0eb 100644
--- a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
@@ -60,7 +59,6 @@ BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
-
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
@@ -83,8 +81,7 @@ class Manager(object):
self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
- self.VIRTUAL_HTTP_SUBDIR = port.TEST_PATH_SEPARATOR.join([
- 'virtual', 'stable', 'http'])
+ self.VIRTUAL_HTTP_SUBDIR = port.TEST_PATH_SEPARATOR.join(['virtual', 'stable', 'http'])
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
self.ARCHIVED_RESULTS_LIMIT = 25
self._http_server_started = False
@@ -96,15 +93,10 @@ class Manager(object):
self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def _collect_tests(self, args):
- return self._finder.find_tests(args, test_list=self._options.test_list,
- fastest_percentile=self._options.fastest)
+ return self._finder.find_tests(args, test_list=self._options.test_list, fastest_percentile=self._options.fastest)
def _is_http_test(self, test):
- return (
- test.startswith(self.HTTP_SUBDIR) or
- self._is_websocket_test(test) or
- self.VIRTUAL_HTTP_SUBDIR in test
- )
+ return (test.startswith(self.HTTP_SUBDIR) or self._is_websocket_test(test) or self.VIRTUAL_HTTP_SUBDIR in test)
def _is_inspector_test(self, test):
return self.INSPECTOR_SUBDIR in test
@@ -136,7 +128,7 @@ class Manager(object):
random.shuffle(tests_to_run)
elif self._options.order == 'random-seeded':
rnd = random.Random()
- rnd.seed(4) # http://xkcd.com/221/
+ rnd.seed(4)  # http://xkcd.com/221/
rnd.shuffle(tests_to_run)
tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
@@ -146,7 +138,8 @@ class Manager(object):
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
- return TestInput(test_file,
+ return TestInput(
+ test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file),
should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
@@ -170,7 +163,9 @@ class Manager(object):
def _rename_results_folder(self):
try:
- timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
+ timestamp = time.strftime(
+ "%Y-%m-%d-%H-%M-%S",
+ time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html")))))
except (IOError, OSError), e:
# It might be possible that results.html was not generated in previous run, because the test
# run was interrupted even before testing started. In those cases, don't archive the folder.
@@ -272,12 +267,12 @@ class Manager(object):
num_workers = self._port.num_workers(int(self._options.child_processes))
- initial_results = self._run_tests(
- tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
- num_workers)
+ initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
+ num_workers)
# Don't retry failures when interrupted by user or failures limit exception.
- should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
+ should_retry_failures = should_retry_failures and not (initial_results.interrupted or
+ initial_results.keyboard_interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
@@ -289,9 +284,8 @@ class Manager(object):
break
_log.info('')
- _log.info('Retrying %s, attempt %d of %d...' %
- (grammar.pluralize('unexpected failure', len(tests_to_retry)),
- retry_attempt, self._options.num_retries))
+ _log.info('Retrying %s, attempt %d of %d...' % (grammar.pluralize('unexpected failure', len(tests_to_retry)),
+ retry_attempt, self._options.num_retries))
retry_results = self._run_tests(tests_to_retry,
tests_to_skip=set(),
@@ -317,12 +311,14 @@ class Manager(object):
self._look_for_new_crash_logs(retry_attempt_results, start_time)
_log.debug("summarizing results")
- summarized_full_results = test_run_results.summarize_results(
- self._port, self._expectations, initial_results, all_retry_results,
- enabled_pixel_tests_in_retry)
- summarized_failing_results = test_run_results.summarize_results(
- self._port, self._expectations, initial_results, all_retry_results,
- enabled_pixel_tests_in_retry, only_include_failing=True)
+ summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results,
+ all_retry_results, enabled_pixel_tests_in_retry)
+ summarized_failing_results = test_run_results.summarize_results(self._port,
+ self._expectations,
+ initial_results,
+ all_retry_results,
+ enabled_pixel_tests_in_retry,
+ only_include_failing=True)
exit_code = summarized_failing_results['num_regressions']
if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
@@ -334,8 +330,8 @@ class Manager(object):
self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
if self._options.write_full_results_to:
- self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
- self._options.write_full_results_to)
+ self._filesystem.copyfile(
+ self._filesystem.join(self._results_directory, "full_results.json"), self._options.write_full_results_to)
self._upload_json_files()
@@ -346,26 +342,24 @@ class Manager(object):
else:
if initial_results.interrupted:
exit_code = test_run_results.EARLY_EXIT_STATUS
- if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
+ if self._options.show_results and (exit_code or
+ (self._options.full_results_html and initial_results.total_failures)):
self._port.show_results_html_file(results_path)
self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
self._check_for_stale_w3c_dir()
- return test_run_results.RunDetails(
- exit_code, summarized_full_results, summarized_failing_results,
- initial_results, all_retry_results, enabled_pixel_tests_in_retry)
+ return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results,
+ all_retry_results, enabled_pixel_tests_in_retry)
- def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
- num_workers, retry_attempt=0):
+ def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retry_attempt=0):
test_inputs = []
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
- return self._runner.run_tests(self._expectations, test_inputs,
- tests_to_skip, num_workers, retry_attempt)
+ return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retry_attempt)
def _start_servers(self, tests_to_run):
if self._port.is_wpt_enabled() and any(self._port.is_wpt_test(test) for test in tests_to_run):
@@ -373,7 +367,8 @@ class Manager(object):
self._port.start_wptserve()
self._wptserve_started = True
- if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test)) for test in tests_to_run):
+ if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test))
+ for test in tests_to_run):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
@@ -476,7 +471,8 @@ class Manager(object):
def _tests_to_retry(self, run_results):
# TODO(ojan): This should also check that result.type != test_expectations.MISSING since retrying missing expectations is silly.
# But that's a bit tricky since we only consider the last retry attempt for the count of unexpected regressions.
- return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
+ return [result.test_name
+ for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results, running_all_tests):
_log.debug("Writing JSON files in %s." % self._results_directory)
@@ -514,11 +510,11 @@ class Manager(object):
return
_log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
- attrs = [("builder", self._options.builder_name),
- ("testtype", self._options.step_name),
+ attrs = [("builder", self._options.builder_name), ("testtype", self._options.step_name),
("master", self._options.master_name)]
- files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
+ files = [(file, self._filesystem.join(self._results_directory, file))
+ for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
url = "http://%s/testfile/upload" % self._options.test_results_server
# Set uploading timeout in case appengine server is having problems.
@@ -551,7 +547,8 @@ class Manager(object):
stats = {}
for result in initial_results.results_by_name.values():
if result.type != test_expectations.SKIP:
- stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
+ stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(
+ result.test_run_time * 1000), int(result.total_run_time * 1000))}
stats_trie = {}
for name, value in stats.iteritems():
json_results_generator.add_path_to_trie(name, value, stats_trie)