Chromium Code Reviews

Unified Diff: Tools/Scripts/webkitpy/layout_tests/controllers/manager.py

Issue 489093002: Enabling archiving of test results by default in run-webkit-tests. (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: Addressing comments | Created 6 years, 4 months ago
 # Copyright (C) 2010 Google Inc. All rights reserved.
 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
 #
 #     * Redistributions of source code must retain the above copyright
 #       notice, this list of conditions and the following disclaimer.
 #     * Redistributions in binary form must reproduce the above
(...skipping 149 matching lines...)
             # run was interrupted even before testing started. In those cases, don't archive the folder.
             # Simply override the current folder contents with new results.
             import errno
             if e.errno == errno.EEXIST:
                 _log.warning("No results.html file found in previous run, skipping it.")
             return None
         archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
         archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
         self._filesystem.move(self._results_directory, archived_path)
 
-    def _clobber_old_archived_results(self):
+    def _limit_archived_results_count(self):
         results_directory_path = self._filesystem.dirname(self._results_directory)
         file_list = self._filesystem.listdir(results_directory_path)
         results_directories = []
         for dir in file_list:
             file_path = self._filesystem.join(results_directory_path, dir)
             if self._filesystem.isdir(file_path):
                 results_directories.append(file_path)
         results_directories.sort(key=lambda x: self._filesystem.mtime(x))
-        self._printer.write_update("Clobbering old archived results in %s" % results_directory_path)
+        self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path)
         for dir in results_directories[:-self.ARCHIVED_RESULTS_LIMIT]:
             self._filesystem.rmtree(dir)
 
     def _set_up_run(self, test_names):
         self._printer.write_update("Checking build ...")
         if self._options.build:
             exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
             if exit_code:
                 _log.error("Build check failed")
                 return exit_code
 
         # This must be started before we check the system dependencies,
         # since the helper may do things to make the setup correct.
         if self._options.pixel_tests:
             self._printer.write_update("Starting pixel test helper ...")
             self._port.start_helper()
 
         # Check that the system dependencies (themes, fonts, ...) are correct.
         if not self._options.nocheck_sys_deps:
             self._printer.write_update("Checking system dependencies ...")
             exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
             if exit_code:
                 self._port.stop_helper()
                 return exit_code
 
-        if self._options.enable_versioned_results and self._filesystem.exists(self._results_directory):
-            if self._options.clobber_old_results:
-                _log.warning("Flag --enable_versioned_results overrides --clobber-old-results.")
-            self._clobber_old_archived_results()
-            # Rename the existing results folder for archiving.
-            self._rename_results_folder()
-        elif self._options.clobber_old_results:
-            self._clobber_old_results()
+        if self._options.clobber_old_results:
+            self._clobber_old_results()
+        elif self._filesystem.exists(self._results_directory):
+            self._limit_archived_results_count()
+            # Rename the existing results folder for archiving.
+            self._rename_results_folder()
 
         # Create the output directory if it doesn't already exist.
         self._port.host.filesystem.maybe_make_directory(self._results_directory)
 
         self._port.setup_test_run()
         return test_run_results.OK_EXIT_STATUS
 
     def run(self, args):
         """Run the tests and return a RunDetails object with the results."""
         start_time = time.time()
(...skipping 165 matching lines...)
                 writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                 writer.copy_sample_file(sample_file)
 
         crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
         if crash_logs:
             for test, crash_log in crash_logs.iteritems():
                 writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                 writer.write_crash_log(crash_log)
 
     def _clobber_old_results(self):
-        # Just clobber the actual test results directories since the other
-        # files in the results directory are explicitly used for cross-run
-        # tracking.
-        self._printer.write_update("Clobbering old results in %s" %
-                                   self._results_directory)
-        layout_tests_dir = self._port.layout_tests_dir()
-        possible_dirs = self._port.test_dirs()
-        for dirname in possible_dirs:
-            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
-                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))
+        results_directory_path = self._filesystem.dirname(self._results_directory)
+        file_list = self._filesystem.listdir(results_directory_path)
+        results_directories = []
+        for dir in file_list:
+            file_path = self._filesystem.join(results_directory_path, dir)
+            if self._filesystem.isdir(file_path):
+                results_directories.append(file_path)
+        self._printer.write_update("Clobbering all archived results in %s" % results_directory_path)
+        for dir in results_directories:
+            self._filesystem.rmtree(dir)
Dirk Pranke 2014/08/20 19:05:10: I think this will delete *any* directory that exis…
patro 2014/08/20 19:30:14: Assuming that the test results folder name results…
(A sketch of the name-prefix filter discussed in this thread appears after the end of the diff.)
-
-        # Port specific clean-up.
-        self._port.clobber_old_port_specific_results()
Dirk Pranke 2014/08/20 19:05:10: You should probably keep this call as well.
patro 2014/08/20 19:30:14: Done.
 
     def _tests_to_retry(self, run_results):
         return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
 
     def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results):
         _log.debug("Writing JSON files in %s." % self._results_directory)
 
         # FIXME: Upload stats.json to the server and delete times_ms.
         times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
         times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
(...skipping 56 matching lines...)
             return int(worker_name.split('/')[1]) if worker_name else -1
 
         stats = {}
         for result in initial_results.results_by_name.values():
             if result.type != test_expectations.SKIP:
                 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
         stats_trie = {}
         for name, value in stats.iteritems():
             json_results_generator.add_path_to_trie(name, value, stats_trie)
         return stats_trie
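
Sketch (not part of the patch): the first comment thread above worries that the new _clobber_old_results() removes *any* sibling directory of the results directory, while the reply assumes archived folders follow the "<results-dir-basename>_<timestamp>" naming produced by _rename_results_folder(). A minimal, hypothetical way to encode that assumption is to filter by that name prefix before deleting; it only uses the webkitpy FileSystem and Port calls already visible in the diff, and keeps the port-specific clean-up the reviewer asked to retain.

    # Hypothetical helper on Manager; not part of the reviewed change.
    def _archived_results_directories(self):
        results_directory_path = self._filesystem.dirname(self._results_directory)
        # _rename_results_folder() names archives "<results-dir-basename>_<timestamp>".
        prefix = self._filesystem.basename(self._results_directory) + "_"
        archived = []
        for name in self._filesystem.listdir(results_directory_path):
            path = self._filesystem.join(results_directory_path, name)
            # Only directories that match the archive naming scheme qualify.
            if self._filesystem.isdir(path) and name.startswith(prefix):
                archived.append(path)
        return archived

    # Hypothetical variant of _clobber_old_results() built on the helper above.
    def _clobber_old_results(self):
        self._printer.write_update("Clobbering old archived results in %s" %
                                   self._filesystem.dirname(self._results_directory))
        for path in self._archived_results_directories():
            self._filesystem.rmtree(path)
        # Port-specific clean-up, kept as suggested in the second comment thread.
        self._port.clobber_old_port_specific_results()

With such a filter, _limit_archived_results_count() could likewise prune only directories that carry the prefix, so unrelated folders sitting next to the results directory would never be touched.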