Chromium Code Reviews

Unified Diff: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py

Issue 2188623002: Change logging statements to not use the "%" operator. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 4 months ago
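
Note on the pattern under review (context only, not part of the CL): Python's logging calls accept the format string and its arguments separately and defer "%" interpolation until a handler actually emits the record, whereas building the message eagerly with the "%" operator pays the formatting cost even when the level is disabled. A minimal sketch of the before/after styles; the results_directory value here is a hypothetical example, not taken from this CL:

    import logging

    _log = logging.getLogger(__name__)
    results_directory = '/tmp/layout-test-results'  # hypothetical example value

    # Eager: the message string is built by "%" before _log.debug even runs,
    # paying the formatting cost even if the DEBUG level is disabled.
    _log.debug("Writing JSON files in %s." % results_directory)

    # Lazy: logging stores the format string and argument on the record and
    # interpolates them only if the record is actually emitted by a handler.
    _log.debug("Writing JSON files in %s.", results_directory)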
 # Copyright (C) 2010 Google Inc. All rights reserved.
 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
 #
 #     * Redistributions of source code must retain the above copyright
 #       notice, this list of conditions and the following disclaimer.
 #     * Redistributions in binary form must reproduce the above
(...skipping 131 matching lines...)
         tests_to_retry = self._tests_to_retry(initial_results)
         all_retry_results = []
         if should_retry_failures and tests_to_retry:
             enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
 
             for retry_attempt in xrange(1, self._options.num_retries + 1):
                 if not tests_to_retry:
                     break
 
                 _log.info('')
-                _log.info('Retrying %s, attempt %d of %d...' %
-                          (grammar.pluralize('unexpected failure', len(tests_to_retry)),
-                           retry_attempt, self._options.num_retries))
+                _log.info('Retrying %s, attempt %d of %d...',
+                          grammar.pluralize('unexpected failure', len(tests_to_retry)),
+                          retry_attempt, self._options.num_retries)
 
                 retry_results = self._run_tests(tests_to_retry,
                                                 tests_to_skip=set(),
                                                 repeat_each=1,
                                                 iterations=1,
                                                 num_workers=num_workers,
                                                 retry_attempt=retry_attempt)
                 all_retry_results.append(retry_results)
 
                 tests_to_retry = self._tests_to_retry(retry_results)
(...skipping 14 matching lines...)
         _log.debug("summarizing results")
         summarized_full_results = test_run_results.summarize_results(
             self._port, self._expectations, initial_results, all_retry_results,
             enabled_pixel_tests_in_retry)
         summarized_failing_results = test_run_results.summarize_results(
             self._port, self._expectations, initial_results, all_retry_results,
             enabled_pixel_tests_in_retry, only_include_failing=True)
 
         exit_code = summarized_failing_results['num_regressions']
         if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
-            _log.warning('num regressions (%d) exceeds max exit status (%d)' %
-                         (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS))
+            _log.warning('num regressions (%d) exceeds max exit status (%d)',
+                         exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)
             exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
 
         if not self._options.dry_run:
             self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
 
             if self._options.write_full_results_to:
                 self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
                                           self._options.write_full_results_to)
 
             self._upload_json_files()
(...skipping 269 matching lines...)
         # Port specific clean-up.
         self._port.clobber_old_port_specific_results()
 
     def _tests_to_retry(self, run_results):
         # TODO(ojan): This should also check that result.type != test_expectations.MISSING since retrying missing expectations is silly.
         # But that's a bit tricky since we only consider the last retry attempt for the count of unexpected regressions.
         return [result.test_name for result in run_results.unexpected_results_by_name.values(
         ) if result.type != test_expectations.PASS]
 
     def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results, running_all_tests):
-        _log.debug("Writing JSON files in %s." % self._results_directory)
+        _log.debug("Writing JSON files in %s.", self._results_directory)
 
         # FIXME: Upload stats.json to the server and delete times_ms.
         times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
         times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
         json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
 
         # Save out the times data so we can use it for --fastest in the future.
         if running_all_tests:
             bot_test_times_path = self._port.bot_test_times_path()
             self._filesystem.maybe_make_directory(self._filesystem.dirname(bot_test_times_path))
(...skipping 35 matching lines...)
         url = "http://%s/testfile/upload" % self._options.test_results_server
         # Set uploading timeout in case appengine server is having problems.
         # 120 seconds are more than enough to upload test results.
         uploader = FileUploader(url, 120)
         try:
             response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
             if response:
                 if response.code == 200:
                     _log.debug("JSON uploaded.")
                 else:
-                    _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
+                    _log.debug("JSON upload failed, %d: '%s'", response.code, response.read())
             else:
                 _log.error("JSON upload failed; no response returned")
         except Exception as err:
-            _log.error("Upload failed: %s" % err)
+            _log.error("Upload failed: %s", err)
 
     def _copy_results_html_file(self, destination_path):
         base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
         results_file = self._filesystem.join(base_dir, 'results.html')
         # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
         # so make sure it exists before we try to copy it.
         if self._filesystem.exists(results_file):
             self._filesystem.copyfile(results_file, destination_path)
 
     def _stats_trie(self, initial_results):
         def _worker_number(worker_name):
             return int(worker_name.split('/')[1]) if worker_name else -1
 
         stats = {}
         for result in initial_results.results_by_name.values():
             if result.type != test_expectations.SKIP:
                 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(
                     result.test_run_time * 1000), int(result.total_run_time * 1000))}
         stats_trie = {}
         for name, value in stats.iteritems():
             json_results_generator.add_path_to_trie(name, value, stats_trie)
         return stats_trie
