Chromium Code Reviews

Unified Diff: Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py

Issue 546133003: Reformat webkitpy.layout_tests w/ format-webkitpy. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 3 months ago
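The change below is mechanical: as the diff shows, format-webkitpy normalizes string literals in this file to single quotes and realigns wrapped continuation lines, without changing behavior. A minimal sketch of the kind of rewrite involved, taken from the first hunk (assuming the tool touches only quoting and wrapping here):

    # Before the reformat
    self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
    # After the reformat
    self._print('=> Results: %d/%d tests passed (%.1f%%)' % (passed, total, percent_passed))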
Index: Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
index 82d334af27ad1288da61633ae4cc0586106f1974..78bbf41c5d0697a7b94b2e513c1a47708de75cb3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
@@ -62,22 +62,22 @@ class BuildBotPrinter(object):
if total > 0:
percent_passed = float(passed) * 100 / total
- self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
- self._print("")
- self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed")
+ self._print('=> Results: %d/%d tests passed (%.1f%%)' % (passed, total, percent_passed))
+ self._print('')
+ self._print_run_results_entry(run_results, test_expectations.NOW, 'Tests to be fixed')
- self._print("")
+ self._print('')
# FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
self._print_run_results_entry(run_results, test_expectations.WONTFIX,
- "Tests that will only be fixed if they crash (WONTFIX)")
- self._print("")
+ 'Tests that will only be fixed if they crash (WONTFIX)')
+ self._print('')
def _print_run_results_entry(self, run_results, timeline, heading):
total = len(run_results.tests_by_timeline[timeline])
not_passing = (total -
- len(run_results.tests_by_expectation[test_expectations.PASS] &
- run_results.tests_by_timeline[timeline]))
- self._print("=> %s (%d):" % (heading, not_passing))
+ len(run_results.tests_by_expectation[test_expectations.PASS] &
+ run_results.tests_by_timeline[timeline]))
+ self._print('=> %s (%d):' % (heading, not_passing))
for result in TestExpectations.EXPECTATION_DESCRIPTIONS.keys():
if result in (test_expectations.PASS, test_expectations.SKIP):
@@ -86,7 +86,7 @@ class BuildBotPrinter(object):
desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
if not_passing and len(results):
pct = len(results) * 100.0 / not_passing
- self._print(" %5d %-24s (%4.1f%%)" % (len(results), desc, pct))
+ self._print(' %5d %-24s (%4.1f%%)' % (len(results), desc, pct))
def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
passes = {}
@@ -97,8 +97,8 @@ class BuildBotPrinter(object):
dict.setdefault(key, []).append(value)
def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
- actual = results['actual'].split(" ")
- expected = results['expected'].split(" ")
+ actual = results['actual'].split(' ')
+ expected = results['expected'].split(' ')
if 'is_unexpected' not in results or not results['is_unexpected']:
# Don't print anything for tests that ran as expected.
@@ -122,43 +122,44 @@ class BuildBotPrinter(object):
layouttestresults.for_each_test(summarized_results['tests'], add_result)
if len(passes) or len(flaky) or len(regressions):
- self._print("")
+ self._print('')
if len(passes):
for key, tests in passes.iteritems():
- self._print("%s: (%d)" % (key, len(tests)))
+ self._print('%s: (%d)' % (key, len(tests)))
tests.sort()
for test in tests:
- self._print(" %s" % test)
- self._print("")
- self._print("")
+ self._print(' %s' % test)
+ self._print('')
+ self._print('')
if len(flaky):
descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
for key, tests in flaky.iteritems():
result = TestExpectations.EXPECTATIONS[key.lower()]
- self._print("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
+ self._print('Unexpected flakiness: %s (%d)' % (descriptions[result], len(tests)))
tests.sort()
for test in tests:
result = layouttestresults.result_for_test(summarized_results['tests'], test)
- actual = result['actual'].split(" ")
- expected = result['expected'].split(" ")
+ actual = result['actual'].split(' ')
+ expected = result['expected'].split(' ')
result = TestExpectations.EXPECTATIONS[key.lower()]
# FIXME: clean this up once the old syntax is gone
- new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
- self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
- self._print("")
- self._print("")
+ new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp]
+ for exp in list(set(actual) | set(expected))]
+ self._print(' %s [ %s ]' % (test, ' '.join(new_expectations_list)))
+ self._print('')
+ self._print('')
if len(regressions):
descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
for key, tests in regressions.iteritems():
result = TestExpectations.EXPECTATIONS[key.lower()]
- self._print("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
+ self._print('Regressions: Unexpected %s (%d)' % (descriptions[result], len(tests)))
tests.sort()
for test in tests:
- self._print(" %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
- self._print("")
+ self._print(' %s [ %s ]' % (test, TestExpectationParser._inverted_expectation_tokens[key]))
+ self._print('')
if len(summarized_results['tests']) and self.debug_logging:
- self._print("%s" % ("-" * 78))
+ self._print('%s' % ('-' * 78))
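For reference, the wrapped set arithmetic in _print_run_results_entry is unchanged by this patch apart from indentation and quoting. A minimal sketch of that computation with made-up test names, where plain dicts and strings stand in for the run_results attributes and test_expectations constants:

    # Illustrative only: how not_passing is derived in _print_run_results_entry.
    tests_by_expectation = {'PASS': {'fast/a.html', 'fast/b.html'}}
    tests_by_timeline = {'NOW': {'fast/a.html', 'fast/b.html', 'fast/c.html'}}
    total = len(tests_by_timeline['NOW'])            # 3 tests in the NOW timeline
    not_passing = (total -
                   len(tests_by_expectation['PASS'] &
                       tests_by_timeline['NOW']))    # 3 - 2 = 1 test not passing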
