Chromium Code Reviews

Unified Diff: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py

Issue 1783073002: Run auto-formatter on files in webkitpy/layout_tests/. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Ran yapf -i --style '{based_on_style: pep8, column_limit: 132}' then did manual fix-up. Created 4 years, 9 months ago.
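For reference, the same style can be captured in a .style.yapf file at the repository root, so later runs pick it up without the inline --style flag (a minimal sketch of standard yapf configuration; this file is not part of the CL):

    [style]
    based_on_style = pep8
    column_limit = 132

With that file in place, plain `yapf -i <file>` applies the same settings automatically.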
Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 0ceb3b326136938fac843c926908399843eb64a7..33d80f0d9db380c97abd1f48350f86fa96662a45 100644
--- a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -26,7 +26,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
@@ -60,7 +59,6 @@ BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
-
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
@@ -83,8 +81,7 @@ class Manager(object):
self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
- self.VIRTUAL_HTTP_SUBDIR = port.TEST_PATH_SEPARATOR.join([
- 'virtual', 'stable', 'http'])
+ self.VIRTUAL_HTTP_SUBDIR = port.TEST_PATH_SEPARATOR.join(['virtual', 'stable', 'http'])
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
self.ARCHIVED_RESULTS_LIMIT = 25
self._http_server_started = False
@@ -96,15 +93,10 @@ class Manager(object):
self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def _collect_tests(self, args):
- return self._finder.find_tests(args, test_list=self._options.test_list,
- fastest_percentile=self._options.fastest)
+ return self._finder.find_tests(args, test_list=self._options.test_list, fastest_percentile=self._options.fastest)
def _is_http_test(self, test):
- return (
- test.startswith(self.HTTP_SUBDIR) or
- self._is_websocket_test(test) or
- self.VIRTUAL_HTTP_SUBDIR in test
- )
+ return (test.startswith(self.HTTP_SUBDIR) or self._is_websocket_test(test) or self.VIRTUAL_HTTP_SUBDIR in test)
def _is_inspector_test(self, test):
return self.INSPECTOR_SUBDIR in test
@@ -136,7 +128,7 @@ class Manager(object):
random.shuffle(tests_to_run)
elif self._options.order == 'random-seeded':
rnd = random.Random()
- rnd.seed(4) # http://xkcd.com/221/
+ rnd.seed(4) # http://xkcd.com/221/
rnd.shuffle(tests_to_run)
tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
@@ -146,7 +138,8 @@ class Manager(object):
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
- return TestInput(test_file,
+ return TestInput(
+ test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file),
should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
Dirk Pranke 2016/03/19 02:19:56 this is an interesting difference; I personally pr
@@ -170,7 +163,9 @@ class Manager(object):
def _rename_results_folder(self):
try:
- timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
+ timestamp = time.strftime(
+ "%Y-%m-%d-%H-%M-%S",
+ time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
Dirk Pranke 2016/03/19 02:19:56 I wonder why yapf broke the line here and autopep8
except (IOError, OSError), e:
# It might be possible that results.html was not generated in previous run, because the test
# run was interrupted even before testing started. In those cases, don't archive the folder.
@@ -272,12 +267,12 @@ class Manager(object):
num_workers = self._port.num_workers(int(self._options.child_processes))
- initial_results = self._run_tests(
- tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
- num_workers)
+ initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
+ num_workers)
Dirk Pranke 2016/03/19 02:19:56 and yet here the formatting is the opposite for wh
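A plausible reason for the difference (an assumption about yapf's penalty-based splitter, not verified against its internals): yapf prefers keeping arguments on the opening line with continuations aligned under the open paren, and falls back to breaking immediately after the paren with a hanging indent only when the aligned column would itself overflow the 132-column limit. Both shapes from this patch, annotated:

    # The continuation aligned under the open paren still fits within
    # column_limit = 132, so yapf keeps the arguments on the first line:
    initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                                      num_workers)

    # Here the deeper starting indent pushes an aligned continuation past
    # the limit, so yapf breaks right after the open paren instead:
    timestamp = time.strftime(
        "%Y-%m-%d-%H-%M-%S",
        time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))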
# Don't retry failures when interrupted by user or failures limit exception.
- should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
+ should_retry_failures = should_retry_failures and not (initial_results.interrupted or
+ initial_results.keyboard_interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
@@ -289,9 +284,8 @@ class Manager(object):
break
_log.info('')
- _log.info('Retrying %s, attempt %d of %d...' %
- (grammar.pluralize('unexpected failure', len(tests_to_retry)),
- retry_attempt, self._options.num_retries))
+ _log.info('Retrying %s, attempt %d of %d...' % (grammar.pluralize('unexpected failure', len(tests_to_retry)),
+ retry_attempt, self._options.num_retries))
retry_results = self._run_tests(tests_to_retry,
tests_to_skip=set(),
@@ -317,12 +311,14 @@ class Manager(object):
self._look_for_new_crash_logs(retry_attempt_results, start_time)
_log.debug("summarizing results")
- summarized_full_results = test_run_results.summarize_results(
- self._port, self._expectations, initial_results, all_retry_results,
- enabled_pixel_tests_in_retry)
- summarized_failing_results = test_run_results.summarize_results(
- self._port, self._expectations, initial_results, all_retry_results,
- enabled_pixel_tests_in_retry, only_include_failing=True)
+ summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results,
+ all_retry_results, enabled_pixel_tests_in_retry)
+ summarized_failing_results = test_run_results.summarize_results(self._port,
+ self._expectations,
+ initial_results,
+ all_retry_results,
+ enabled_pixel_tests_in_retry,
+ only_include_failing=True)
exit_code = summarized_failing_results['num_regressions']
if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
@@ -334,8 +330,8 @@ class Manager(object):
self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
if self._options.write_full_results_to:
- self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
- self._options.write_full_results_to)
+ self._filesystem.copyfile(
+ self._filesystem.join(self._results_directory, "full_results.json"), self._options.write_full_results_to)
self._upload_json_files()
@@ -346,26 +342,24 @@ class Manager(object):
else:
if initial_results.interrupted:
exit_code = test_run_results.EARLY_EXIT_STATUS
- if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
+ if self._options.show_results and (exit_code or
+ (self._options.full_results_html and initial_results.total_failures)):
self._port.show_results_html_file(results_path)
self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
self._check_for_stale_w3c_dir()
- return test_run_results.RunDetails(
- exit_code, summarized_full_results, summarized_failing_results,
- initial_results, all_retry_results, enabled_pixel_tests_in_retry)
+ return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results,
+ all_retry_results, enabled_pixel_tests_in_retry)
- def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
- num_workers, retry_attempt=0):
+ def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retry_attempt=0):
test_inputs = []
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
- return self._runner.run_tests(self._expectations, test_inputs,
- tests_to_skip, num_workers, retry_attempt)
+ return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retry_attempt)
def _start_servers(self, tests_to_run):
if self._port.is_wpt_enabled() and any(self._port.is_wpt_test(test) for test in tests_to_run):
@@ -373,7 +367,8 @@ class Manager(object):
self._port.start_wptserve()
self._wptserve_started = True
- if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test)) for test in tests_to_run):
+ if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test))
+ for test in tests_to_run):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
@@ -476,7 +471,8 @@ class Manager(object):
def _tests_to_retry(self, run_results):
# TODO(ojan): This should also check that result.type != test_expectations.MISSING since retrying missing expectations is silly.
# But that's a bit tricky since we only consider the last retry attempt for the count of unexpected regressions.
- return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
+ return [result.test_name
+ for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results, running_all_tests):
_log.debug("Writing JSON files in %s." % self._results_directory)
@@ -500,7 +496,8 @@ class Manager(object):
json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
- # We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
+ # We write failing_results.json out as jsonp because we need to load it
+ # from a file url for results.html and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
_log.debug("Finished writing JSON files.")
@@ -514,11 +511,11 @@ class Manager(object):
return
_log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
- attrs = [("builder", self._options.builder_name),
- ("testtype", self._options.step_name),
+ attrs = [("builder", self._options.builder_name), ("testtype", self._options.step_name),
("master", self._options.master_name)]
- files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
+ files = [(file, self._filesystem.join(self._results_directory, file))
+ for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
url = "http://%s/testfile/upload" % self._options.test_results_server
# Set uploading timeout in case appengine server is having problems.
@@ -551,7 +548,8 @@ class Manager(object):
stats = {}
for result in initial_results.results_by_name.values():
if result.type != test_expectations.SKIP:
- stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
+ stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid,
+ int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
stats_trie = {}
for name, value in stats.iteritems():
json_results_generator.add_path_to_trie(name, value, stats_trie)
