Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 7088b19f2411462651b629f3ead8c66c1b35e5ec..d7243538bfbca80bbe910ea2894ba34d84cb3729 100644
--- a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -96,6 +96,127 @@ class Manager(object):
         self._finder = LayoutTestFinder(self._port, self._options)
         self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
 
+    def run(self, args):
+        """Run the tests and return a RunDetails object with the results."""
+        start_time = time.time()
+        self._printer.write_update("Collecting tests ...")
+        running_all_tests = False
+        try:
+            paths, test_names, running_all_tests = self._collect_tests(args)
+        except IOError:
+            # This is raised if --test-list doesn't exist
+            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
+
+        self._printer.write_update("Parsing expectations ...")
+        self._expectations = test_expectations.TestExpectations(self._port, test_names)
+
+        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
+        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
+
+        # Check to make sure we're not skipping every test.
+        if not tests_to_run:
+            _log.critical('No tests to run.')
+            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
+
+        exit_code = self._set_up_run(tests_to_run)
+        if exit_code:
+            return test_run_results.RunDetails(exit_code=exit_code)
+
+        # Don't retry failures if an explicit list of tests was passed in.
+        if self._options.retry_failures is None:
+            should_retry_failures = len(paths) < len(test_names)
+        else:
+            should_retry_failures = self._options.retry_failures
+
+        enabled_pixel_tests_in_retry = False
+        try:
+            self._start_servers(tests_to_run)
+
+            num_workers = self._port.num_workers(int(self._options.child_processes))
+
+            initial_results = self._run_tests(
+                tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
+                num_workers)
+
+            # Don't retry failures when interrupted by user or failures limit exception.
+            should_retry_failures = should_retry_failures and not (
+                initial_results.interrupted or initial_results.keyboard_interrupted)
+
+            tests_to_retry = self._tests_to_retry(initial_results)
+            all_retry_results = []
+            if should_retry_failures and tests_to_retry:
+                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
+
+                for retry_attempt in xrange(1, self._options.num_retries + 1):
+                    if not tests_to_retry:
+                        break
+
+                    _log.info('')
+                    _log.info('Retrying %s, attempt %d of %d...' %
+                              (grammar.pluralize('unexpected failure', len(tests_to_retry)),
+                               retry_attempt, self._options.num_retries))
+
+                    retry_results = self._run_tests(tests_to_retry,
+                                                    tests_to_skip=set(),
+                                                    repeat_each=1,
+                                                    iterations=1,
+                                                    num_workers=num_workers,
+                                                    retry_attempt=retry_attempt)
+                    all_retry_results.append(retry_results)
+
+                    tests_to_retry = self._tests_to_retry(retry_results)
+
+                if enabled_pixel_tests_in_retry:
+                    self._options.pixel_tests = False
+        finally:
+            self._stop_servers()
+            self._clean_up_run()
+
+        # Some crash logs can take a long time to be written out so look
+        # for new logs after the test run finishes.
+        self._printer.write_update("looking for new crash logs")
+        self._look_for_new_crash_logs(initial_results, start_time)
+        for retry_attempt_results in all_retry_results:
+            self._look_for_new_crash_logs(retry_attempt_results, start_time)
+
+        _log.debug("summarizing results")
+        summarized_full_results = test_run_results.summarize_results(
+            self._port, self._expectations, initial_results, all_retry_results,
+            enabled_pixel_tests_in_retry)
+        summarized_failing_results = test_run_results.summarize_results(
+            self._port, self._expectations, initial_results, all_retry_results,
+            enabled_pixel_tests_in_retry, only_include_failing=True)
+
+        exit_code = summarized_failing_results['num_regressions']
+        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
+            _log.warning('num regressions (%d) exceeds max exit status (%d)' %
+                         (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS))
+            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
+
+        if not self._options.dry_run:
+            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
+
+            if self._options.write_full_results_to:
+                self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
+                                          self._options.write_full_results_to)
+
+            self._upload_json_files()
+
+            results_path = self._filesystem.join(self._results_directory, "results.html")
+            self._copy_results_html_file(results_path)
+            if initial_results.keyboard_interrupted:
+                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
+            else:
+                if initial_results.interrupted:
+                    exit_code = test_run_results.EARLY_EXIT_STATUS
+                if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
+                    self._port.show_results_html_file(results_path)
+                self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
+
+        return test_run_results.RunDetails(
+            exit_code, summarized_full_results, summarized_failing_results,
+            initial_results, all_retry_results, enabled_pixel_tests_in_retry)
+
     def _collect_tests(self, args):
         return self._finder.find_tests(args, test_list=self._options.test_list,
                                        fastest_percentile=self._options.fastest)
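
Editor's note on the retry gating in the moved run() above: retries default to on only when the paths given on the command line expanded into a larger set of test names, and they are suppressed entirely once the run was interrupted. The following is a minimal, self-contained sketch of that rule; the function name and plain arguments are hypothetical stand-ins for the options and results objects, not webkitpy API.

    # Hedged sketch of run()'s retry gating; plain values stand in for
    # self._options and the initial TestRunResults object.
    def should_retry_failures(retry_failures_option, num_paths, num_tests,
                              interrupted, keyboard_interrupted):
        if retry_failures_option is None:
            # Default: retry only when the explicit paths expanded into more
            # tests, i.e. the user did not hand-pick exactly the tests that ran.
            retry = num_paths < num_tests
        else:
            retry = retry_failures_option
        # Never retry after the failure limit tripped or the user hit Ctrl-C.
        return retry and not (interrupted or keyboard_interrupted)

    # Two paths expanded to 50 tests and the run finished normally: retry.
    assert should_retry_failures(None, 2, 50, False, False)
    # The user listed exactly three tests: no retry by default.
    assert not should_retry_failures(None, 3, 3, False, False)
    # A keyboard interrupt always wins.
    assert not should_retry_failures(True, 2, 50, False, True)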
@@ -168,7 +289,7 @@ class Manager(object):
     def _test_is_slow(self, test_file):
         return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)
 
-    def needs_servers(self, test_names):
+    def _needs_servers(self, test_names):
         return any(self._test_requires_lock(test_name) for test_name in test_names)
 
     def _rename_results_folder(self):
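
The needs_servers to _needs_servers change above is only a visibility rename; the body still relies on any() over a generator, which stops at the first test that needs a lock. A small illustration follows, with a hypothetical predicate in place of _test_requires_lock.

    # Hedged illustration of the short-circuiting any(...) used by
    # _needs_servers; requires_lock is a hypothetical predicate, not the
    # webkitpy implementation.
    def needs_servers(test_names, requires_lock):
        # any() returns True at the first match and never inspects later names.
        return any(requires_lock(name) for name in test_names)

    def requires_http_lock(name):
        return name.startswith('http/')

    print(needs_servers(['fast/dom/a.html', 'http/tests/b.html'], requires_http_lock))  # True
    print(needs_servers(['fast/dom/a.html'], requires_http_lock))                       # False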
@@ -206,7 +327,7 @@ class Manager(object):
     def _set_up_run(self, test_names):
         self._printer.write_update("Checking build ...")
         if self._options.build:
-            exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
+            exit_code = self._port.check_build(self._needs_servers(test_names), self._printer)
             if exit_code:
                 _log.error("Build check failed")
                 return exit_code
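
For context on the hunk above: _set_up_run() returns the first non-zero exit code it sees, and run() (earlier in this patch) wraps that code in a RunDetails and bails out before any test is started. A condensed sketch of that propagation, using simplified stand-ins rather than the real webkitpy classes:

    # Hedged sketch of the early-return exit-code propagation; RunDetails and
    # the check callables are simplified stand-ins for the webkitpy types.
    OK_EXIT_STATUS = 0

    class RunDetails(object):
        def __init__(self, exit_code):
            self.exit_code = exit_code

    def set_up_run(checks):
        for check in checks:        # each check returns 0 on success
            exit_code = check()
            if exit_code:
                return exit_code    # the first failure stops setup
        return OK_EXIT_STATUS

    def run(checks):
        exit_code = set_up_run(checks)
        if exit_code:
            return RunDetails(exit_code)  # no tests are run after a failed check
        return RunDetails(OK_EXIT_STATUS)

    print(run([lambda: 0, lambda: 3]).exit_code)  # 3: the second check failed
    print(run([lambda: 0, lambda: 0]).exit_code)  # 0: setup succeeded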
@@ -220,7 +341,7 @@ class Manager(object):
         # Check that the system dependencies (themes, fonts, ...) are correct.
         if not self._options.nocheck_sys_deps:
             self._printer.write_update("Checking system dependencies ...")
-            exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
+            exit_code = self._port.check_sys_deps(self._needs_servers(test_names))
             if exit_code:
                 self._port.stop_helper()
                 return exit_code
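
One detail worth noting in the hunk above: if the system-dependency check fails, the helper process started earlier in _set_up_run is stopped before the exit code is returned, so a failed check does not leak a running helper. A rough sketch of that "undo partial setup" step follows; FakePort is a hypothetical stand-in, and the exact point where the helper is started is not shown in this hunk.

    # Hedged sketch of undoing partial setup when a later check fails; FakePort
    # is a stand-in, not the webkitpy Port API.
    class FakePort(object):
        def __init__(self):
            self.helper_running = False

        def start_helper(self):
            self.helper_running = True

        def stop_helper(self):
            self.helper_running = False

        def check_sys_deps(self, needs_servers):
            return 1  # pretend a required font or theme is missing

    def set_up(port, needs_servers):
        port.start_helper()
        exit_code = port.check_sys_deps(needs_servers)
        if exit_code:
            port.stop_helper()  # clean up what was already started
            return exit_code
        return 0

    port = FakePort()
    print(set_up(port, needs_servers=False))  # 1
    print(port.helper_running)                # False: the helper was stopped again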
@@ -238,127 +359,6 @@ class Manager(object):
         self._port.setup_test_run()
         return test_run_results.OK_EXIT_STATUS
 
-    def run(self, args):
-        """Run the tests and return a RunDetails object with the results."""
-        start_time = time.time()
-        self._printer.write_update("Collecting tests ...")
-        running_all_tests = False
-        try:
-            paths, test_names, running_all_tests = self._collect_tests(args)
-        except IOError:
-            # This is raised if --test-list doesn't exist
-            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
-
-        self._printer.write_update("Parsing expectations ...")
-        self._expectations = test_expectations.TestExpectations(self._port, test_names)
-
-        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
-        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
-
-        # Check to make sure we're not skipping every test.
-        if not tests_to_run:
-            _log.critical('No tests to run.')
-            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
-
-        exit_code = self._set_up_run(tests_to_run)
-        if exit_code:
-            return test_run_results.RunDetails(exit_code=exit_code)
-
-        # Don't retry failures if an explicit list of tests was passed in.
-        if self._options.retry_failures is None:
-            should_retry_failures = len(paths) < len(test_names)
-        else:
-            should_retry_failures = self._options.retry_failures
-
-        enabled_pixel_tests_in_retry = False
-        try:
-            self._start_servers(tests_to_run)
-
-            num_workers = self._port.num_workers(int(self._options.child_processes))
-
-            initial_results = self._run_tests(
-                tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
-                num_workers)
-
-            # Don't retry failures when interrupted by user or failures limit exception.
-            should_retry_failures = should_retry_failures and not (
-                initial_results.interrupted or initial_results.keyboard_interrupted)
-
-            tests_to_retry = self._tests_to_retry(initial_results)
-            all_retry_results = []
-            if should_retry_failures and tests_to_retry:
-                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
-
-                for retry_attempt in xrange(1, self._options.num_retries + 1):
-                    if not tests_to_retry:
-                        break
-
-                    _log.info('')
-                    _log.info('Retrying %s, attempt %d of %d...' %
-                              (grammar.pluralize('unexpected failure', len(tests_to_retry)),
-                               retry_attempt, self._options.num_retries))
-
-                    retry_results = self._run_tests(tests_to_retry,
-                                                    tests_to_skip=set(),
-                                                    repeat_each=1,
-                                                    iterations=1,
-                                                    num_workers=num_workers,
-                                                    retry_attempt=retry_attempt)
-                    all_retry_results.append(retry_results)
-
-                    tests_to_retry = self._tests_to_retry(retry_results)
-
-                if enabled_pixel_tests_in_retry:
-                    self._options.pixel_tests = False
-        finally:
-            self._stop_servers()
-            self._clean_up_run()
-
-        # Some crash logs can take a long time to be written out so look
-        # for new logs after the test run finishes.
-        self._printer.write_update("looking for new crash logs")
-        self._look_for_new_crash_logs(initial_results, start_time)
-        for retry_attempt_results in all_retry_results:
-            self._look_for_new_crash_logs(retry_attempt_results, start_time)
-
-        _log.debug("summarizing results")
-        summarized_full_results = test_run_results.summarize_results(
-            self._port, self._expectations, initial_results, all_retry_results,
-            enabled_pixel_tests_in_retry)
-        summarized_failing_results = test_run_results.summarize_results(
-            self._port, self._expectations, initial_results, all_retry_results,
-            enabled_pixel_tests_in_retry, only_include_failing=True)
-
-        exit_code = summarized_failing_results['num_regressions']
-        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
-            _log.warning('num regressions (%d) exceeds max exit status (%d)' %
-                         (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS))
-            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
-
-        if not self._options.dry_run:
-            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
-
-            if self._options.write_full_results_to:
-                self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
-                                          self._options.write_full_results_to)
-
-            self._upload_json_files()
-
-            results_path = self._filesystem.join(self._results_directory, "results.html")
-            self._copy_results_html_file(results_path)
-            if initial_results.keyboard_interrupted:
-                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
-            else:
-                if initial_results.interrupted:
-                    exit_code = test_run_results.EARLY_EXIT_STATUS
-                if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
-                    self._port.show_results_html_file(results_path)
-                self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
-
-        return test_run_results.RunDetails(
-            exit_code, summarized_full_results, summarized_failing_results,
-            initial_results, all_retry_results, enabled_pixel_tests_in_retry)
-
     def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
                    num_workers, retry_attempt=0):
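
Pulling the pieces of run() together, the process exit code is derived in three steps: start from the number of unexpected regressions, clamp it to MAX_FAILURES_EXIT_STATUS, then let the interrupt statuses override it. The sketch below condenses that policy; the numeric values are illustrative placeholders, not the constants defined in test_run_results, and the real code applies the interrupt overrides inside the non-dry-run branch.

    # Hedged, condensed sketch of run()'s exit-code policy; the constant values
    # below are placeholders, not webkitpy's real numbers.
    MAX_FAILURES_EXIT_STATUS = 101
    INTERRUPTED_EXIT_STATUS = 130
    EARLY_EXIT_STATUS = 251

    def exit_code_for(num_regressions, interrupted=False, keyboard_interrupted=False):
        exit_code = min(num_regressions, MAX_FAILURES_EXIT_STATUS)  # clamp
        if keyboard_interrupted:
            return INTERRUPTED_EXIT_STATUS  # Ctrl-C takes precedence
        if interrupted:
            return EARLY_EXIT_STATUS        # the failure limit ended the run early
        return exit_code

    print(exit_code_for(3))                             # 3
    print(exit_code_for(5000))                          # clamped to 101
    print(exit_code_for(3, keyboard_interrupted=True))  # 130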