Index: Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index b5d4cce27c8d5b33c1ca1a3a9811b38f2f6aa9d0..da5c518c543256a70a63424fcd447f12f24c72b5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -54,13 +54,13 @@ from webkitpy.layout_tests.models.test_input import TestInput
 _log = logging.getLogger(__name__)
 
 # Builder base URL where we have the archived test results.
-BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
+BUILDER_BASE_URL = 'http://build.chromium.org/buildbot/layout_test_results/'
 
 TestExpectations = test_expectations.TestExpectations
 
 
-
 class Manager(object):
+
     """A class for managing running a series of tests on a series of layout
     test files."""
 
@@ -120,7 +120,7 @@ class Manager(object):
             random.shuffle(tests_to_run)
         elif self._options.order == 'random-seeded':
             rnd = random.Random()
-            rnd.seed(4) # http://xkcd.com/221/
+            rnd.seed(4)  # http://xkcd.com/221/
             rnd.shuffle(tests_to_run)
 
         tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
@@ -131,9 +131,9 @@
 
     def _test_input_for_file(self, test_file):
         return TestInput(test_file,
-            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
-            self._test_requires_lock(test_file),
-            should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
+                         self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
+                         self._test_requires_lock(test_file),
+                         should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
 
     def _test_requires_lock(self, test_file):
         """Return True if the test needs to be locked when
@@ -154,16 +154,22 @@ class Manager(object):
 
     def _rename_results_folder(self):
         try:
-            timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
-        except OSError, e:
+            timestamp = time.strftime(
+                '%Y-%m-%d-%H-%M-%S',
+                time.localtime(
+                    self._filesystem.mtime(
+                        self._filesystem.join(
+                            self._results_directory,
+                            'results.html'))))
+        except OSError as e:
             # It might be possible that results.html was not generated in previous run, because the test
             # run was interrupted even before testing started. In those cases, don't archive the folder.
             # Simply override the current folder contents with new results.
             import errno
             if e.errno == errno.EEXIST:
-                _log.warning("No results.html file found in previous run, skipping it.")
+                _log.warning('No results.html file found in previous run, skipping it.')
                 return None
-        archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
+        archived_name = ''.join((self._filesystem.basename(self._results_directory), '_', timestamp))
         archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
         self._filesystem.move(self._results_directory, archived_path)
 
@@ -176,27 +182,27 @@ class Manager(object):
             if self._filesystem.isdir(file_path):
                 results_directories.append(file_path)
         results_directories.sort(key=lambda x: self._filesystem.mtime(x))
-        self._printer.write_update("Clobbering old archived results in %s" % results_directory_path)
+        self._printer.write_update('Clobbering old archived results in %s' % results_directory_path)
         for dir in results_directories[:-self.ARCHIVED_RESULTS_LIMIT]:
             self._filesystem.rmtree(dir)
 
     def _set_up_run(self, test_names):
-        self._printer.write_update("Checking build ...")
+        self._printer.write_update('Checking build ...')
         if self._options.build:
             exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
             if exit_code:
-                _log.error("Build check failed")
+                _log.error('Build check failed')
                 return exit_code
 
         # This must be started before we check the system dependencies,
         # since the helper may do things to make the setup correct.
         if self._options.pixel_tests:
-            self._printer.write_update("Starting pixel test helper ...")
+            self._printer.write_update('Starting pixel test helper ...')
             self._port.start_helper()
 
         # Check that the system dependencies (themes, fonts, ...) are correct.
         if not self._options.nocheck_sys_deps:
-            self._printer.write_update("Checking system dependencies ...")
+            self._printer.write_update('Checking system dependencies ...')
             exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
             if exit_code:
                 self._port.stop_helper()
@@ -204,7 +210,7 @@ class Manager(object):
 
         if self._options.enable_versioned_results and self._filesystem.exists(self._results_directory):
             if self._options.clobber_old_results:
-                _log.warning("Flag --enable_versioned_results overrides --clobber-old-results.")
+                _log.warning('Flag --enable_versioned_results overrides --clobber-old-results.')
             self._clobber_old_archived_results()
             # Rename the existing results folder for archiving.
             self._rename_results_folder()
@@ -220,14 +226,14 @@
     def run(self, args):
         """Run the tests and return a RunDetails object with the results."""
         start_time = time.time()
-        self._printer.write_update("Collecting tests ...")
+        self._printer.write_update('Collecting tests ...')
         try:
             paths, test_names = self._collect_tests(args)
         except IOError:
             # This is raised if --test-list doesn't exist
             return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
 
-        self._printer.write_update("Parsing expectations ...")
+        self._printer.write_update('Parsing expectations ...')
         self._expectations = test_expectations.TestExpectations(self._port, test_names)
 
         tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
@@ -253,20 +259,21 @@
             self._start_servers(tests_to_run)
 
             initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
-                self._port.num_workers(int(self._options.child_processes)), retrying=False)
+                                              self._port.num_workers(int(self._options.child_processes)), retrying=False)
 
             # Don't retry failures when interrupted by user or failures limit exception.
-            should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
+            should_retry_failures = should_retry_failures and not (
+                initial_results.interrupted or initial_results.keyboard_interrupted)
 
             tests_to_retry = self._tests_to_retry(initial_results)
             if should_retry_failures and tests_to_retry:
                 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
 
                 _log.info('')
-                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
+                _log.info('Retrying %d unexpected failure(s) ...' % len(tests_to_retry))
                 _log.info('')
                 retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
-                    num_workers=1, retrying=True)
+                                                num_workers=1, retrying=True)
 
                 if enabled_pixel_tests_in_retry:
                     self._options.pixel_tests = False
@@ -278,14 +285,25 @@
 
         # Some crash logs can take a long time to be written out so look
         # for new logs after the test run finishes.
-        self._printer.write_update("looking for new crash logs")
+        self._printer.write_update('looking for new crash logs')
         self._look_for_new_crash_logs(initial_results, start_time)
         if retry_results:
             self._look_for_new_crash_logs(retry_results, start_time)
 
-        _log.debug("summarizing results")
-        summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
-        summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True)
+        _log.debug('summarizing results')
+        summarized_full_results = test_run_results.summarize_results(
+            self._port,
+            self._expectations,
+            initial_results,
+            retry_results,
+            enabled_pixel_tests_in_retry)
+        summarized_failing_results = test_run_results.summarize_results(
+            self._port,
+            self._expectations,
+            initial_results,
+            retry_results,
+            enabled_pixel_tests_in_retry,
+            only_include_failing=True)
 
         exit_code = summarized_failing_results['num_regressions']
         if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
@@ -297,22 +315,24 @@
         self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
 
         if self._options.write_full_results_to:
-            self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
+            self._filesystem.copyfile(self._filesystem.join(self._results_directory, 'full_results.json'),
                                       self._options.write_full_results_to)
 
         self._upload_json_files()
 
-        results_path = self._filesystem.join(self._results_directory, "results.html")
+        results_path = self._filesystem.join(self._results_directory, 'results.html')
         self._copy_results_html_file(results_path)
         if initial_results.keyboard_interrupted:
             exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
         else:
             if initial_results.interrupted:
                 exit_code = test_run_results.EARLY_EXIT_STATUS
-            if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
+            if self._options.show_results and (
+                    exit_code or (self._options.full_results_html and initial_results.total_failures)):
                 self._port.show_results_html_file(results_path)
             self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
-        return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
+        return test_run_results.RunDetails(
+            exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
 
     def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
 
@@ -345,20 +365,20 @@
             self._port.stop_websocket_server()
 
     def _clean_up_run(self):
-        _log.debug("Flushing stdout")
+        _log.debug('Flushing stdout')
         sys.stdout.flush()
-        _log.debug("Flushing stderr")
+        _log.debug('Flushing stderr')
         sys.stderr.flush()
-        _log.debug("Stopping helper")
+        _log.debug('Stopping helper')
         self._port.stop_helper()
-        _log.debug("Cleaning up port")
+        _log.debug('Cleaning up port')
         self._port.clean_up_test_run()
 
     def _force_pixel_tests_if_needed(self):
         if self._options.pixel_tests:
             return False
 
-        _log.debug("Restarting helper")
+        _log.debug('Restarting helper')
         self._port.stop_helper()
         self._options.pixel_tests = True
         self._port.start_helper()
@@ -398,7 +418,7 @@
         # Just clobber the actual test results directories since the other
         # files in the results directory are explicitly used for cross-run
         # tracking.
-        self._printer.write_update("Clobbering old results in %s" %
+        self._printer.write_update('Clobbering old results in %s' %
                                    self._results_directory)
         layout_tests_dir = self._port.layout_tests_dir()
         possible_dirs = self._port.test_dirs()
@@ -410,45 +430,54 @@
             self._port.clobber_old_port_specific_results()
 
     def _tests_to_retry(self, run_results):
-        return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
+        return [result.test_name for result in run_results.unexpected_results_by_name.values(
+        ) if result.type != test_expectations.PASS]
 
     def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results):
-        _log.debug("Writing JSON files in %s." % self._results_directory)
+        _log.debug('Writing JSON files in %s.' % self._results_directory)
 
         # FIXME: Upload stats.json to the server and delete times_ms.
         times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
-        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
+        times_json_path = self._filesystem.join(self._results_directory, 'times_ms.json')
         json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
 
         stats_trie = self._stats_trie(initial_results)
-        stats_path = self._filesystem.join(self._results_directory, "stats.json")
+        stats_path = self._filesystem.join(self._results_directory, 'stats.json')
         self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
 
-        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
+        full_results_path = self._filesystem.join(self._results_directory, 'full_results.json')
         json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
 
-        full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
-        # We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
-        json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
+        full_results_path = self._filesystem.join(self._results_directory, 'failing_results.json')
+        # We write failing_results.json out as jsonp because we need to load it
+        # from a file url for results.html and Chromium doesn't allow that.
+        json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback='ADD_RESULTS')
 
-        _log.debug("Finished writing JSON files.")
+        _log.debug('Finished writing JSON files.')
 
     def _upload_json_files(self):
         if not self._options.test_results_server:
             return
 
         if not self._options.master_name:
-            _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
+            _log.error('--test-results-server was set, but --master-name was not. Not uploading JSON files.')
             return
 
-        _log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
-        attrs = [("builder", self._options.builder_name),
-                 ("testtype", "layout-tests"),
-                 ("master", self._options.master_name)]
-
-        files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
-
-        url = "http://%s/testfile/upload" % self._options.test_results_server
+        _log.debug('Uploading JSON files for builder: %s', self._options.builder_name)
+        attrs = [('builder', self._options.builder_name),
+                 ('testtype', 'layout-tests'),
+                 ('master', self._options.master_name)]
+
+        files = [
+            (file,
+             self._filesystem.join(
+                 self._results_directory,
+                 file)) for file in [
+                'failing_results.json',
+                'full_results.json',
+                'times_ms.json']]
+
+        url = 'http://%s/testfile/upload' % self._options.test_results_server
         # Set uploading timeout in case appengine server is having problems.
         # 120 seconds are more than enough to upload test results.
         uploader = FileUploader(url, 120)
@@ -456,13 +485,13 @@
             response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
             if response:
                 if response.code == 200:
-                    _log.debug("JSON uploaded.")
+                    _log.debug('JSON uploaded.')
                 else:
                     _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
             else:
-                _log.error("JSON upload failed; no response returned")
-        except Exception, err:
-            _log.error("Upload failed: %s" % err)
+                _log.error('JSON upload failed; no response returned')
+        except Exception as err:
+            _log.error('Upload failed: %s' % err)
 
     def _copy_results_html_file(self, destination_path):
         base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
@@ -479,7 +508,11 @@
         stats = {}
         for result in initial_results.results_by_name.values():
             if result.type != test_expectations.SKIP:
-                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
+                stats[result.test_name] = {'results': (_worker_number(result.worker_name),
+                                                       result.test_number,
+                                                       result.pid,
+                                                       int(result.test_run_time * 1000),
+                                                       int(result.total_run_time * 1000))}
         stats_trie = {}
         for name, value in stats.iteritems():
             json_results_generator.add_path_to_trie(name, value, stats_trie)
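
Reviewer notes on two of the changes above.

First, `except OSError, e:` -> `except OSError as e:` (and likewise `except Exception, err:`) is the one syntax-level change in this otherwise mechanical reformat: the comma spelling is Python 2 only, while the `as` spelling parses under both Python 2.6+ and Python 3. A minimal, self-contained sketch of the same pattern (the function below is illustrative, not a webkitpy API):

import errno
import os


def mtime_or_none(path):
    """Return path's mtime, or None if the file does not exist."""
    try:
        return os.path.getmtime(path)
    # Old spelling: 'except OSError, e:' -- a SyntaxError under Python 3.
    except OSError as e:
        if e.errno == errno.ENOENT:
            return None
        raise


if __name__ == '__main__':
    print(mtime_or_none('results.html'))  # None unless the file exists

Second, the comment rewrapped in `_write_json_files` explains why failing_results.json is emitted as JSONP rather than plain JSON: results.html is opened from a file:// URL, and Chromium blocks a local page from fetching another local file, so the data is wrapped in a callback and pulled in via a <script> tag instead. Assuming `json_results_generator.write_json(..., callback='ADD_RESULTS')` emits the conventional callback wrapper (the helper below is a hypothetical stand-in, not the actual generator), the output would look roughly like:

import json


def write_jsonp(path, payload, callback='ADD_RESULTS'):
    """Write payload as "ADD_RESULTS({...});" so a page loaded from a
    file:// URL can include it with a <script> tag instead of XHR."""
    with open(path, 'w') as fh:
        fh.write('%s(%s);' % (callback, json.dumps(payload)))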