Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
| 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
| 3 # | 3 # |
| 4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
| 5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
| 6 # met: | 6 # met: |
| 7 # | 7 # |
| 8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
| 9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
| 10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
| (...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 82 self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR | 82 self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR |
| 83 self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR | 83 self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR |
| 84 self.PERF_SUBDIR = 'perf' | 84 self.PERF_SUBDIR = 'perf' |
| 85 self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR | 85 self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR |
| 86 self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' | 86 self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' |
| 87 self.ARCHIVED_RESULTS_LIMIT = 25 | 87 self.ARCHIVED_RESULTS_LIMIT = 25 |
| 88 self._http_server_started = False | 88 self._http_server_started = False |
| 89 self._wptserve_started = False | 89 self._wptserve_started = False |
| 90 self._websockets_server_started = False | 90 self._websockets_server_started = False |
| 91 | 91 |
| 92 self._random_seed = None | |
| 92 self._results_directory = self._port.results_directory() | 93 self._results_directory = self._port.results_directory() |
| 93 self._finder = LayoutTestFinder(self._port, self._options) | 94 self._finder = LayoutTestFinder(self._port, self._options) |
| 94 self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow) | 95 self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow) |
| 95 | 96 |
| 96 def run(self, args): | 97 def run(self, args): |
| 97 """Run the tests and return a RunDetails object with the results.""" | 98 """Run the tests and return a RunDetails object with the results.""" |
| 98 start_time = time.time() | 99 start_time = time.time() |
| 99 self._printer.write_update("Collecting tests ...") | 100 self._printer.write_update("Collecting tests ...") |
| 100 running_all_tests = False | 101 running_all_tests = False |
| 101 try: | 102 try: |
| (...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 185 enabled_pixel_tests_in_retry, only_include_failing=True) | 186 enabled_pixel_tests_in_retry, only_include_failing=True) |
| 186 | 187 |
| 187 exit_code = summarized_failing_results['num_regressions'] | 188 exit_code = summarized_failing_results['num_regressions'] |
| 188 if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS: | 189 if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS: |
| 189 _log.warning('num regressions (%d) exceeds max exit status (%d)', | 190 _log.warning('num regressions (%d) exceeds max exit status (%d)', |
| 190 exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS) | 191 exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS) |
| 191 exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS | 192 exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS |
| 192 | 193 |
| 193 if not self._options.dry_run: | 194 if not self._options.dry_run: |
| 194 self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests) | 195 self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests) |
| 196 if self._random_seed: | |
| 197 self._write_random_seed() | |
| 195 | 198 |
| 196 if self._options.write_full_results_to: | 199 if self._options.write_full_results_to: |
| 197 self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"), | 200 self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"), |
| 198 self._options.write_full_results_to) | 201 self._options.write_full_results_to) |
| 199 | 202 |
| 200 self._upload_json_files() | 203 self._upload_json_files() |
| 201 | 204 |
| 202 results_path = self._filesystem.join(self._results_directory, "results.html") | 205 results_path = self._filesystem.join(self._results_directory, "results.html") |
| 203 self._copy_results_html_file(results_path) | 206 self._copy_results_html_file(results_path) |
| 204 if initial_results.keyboard_interrupted: | 207 if initial_results.keyboard_interrupted: |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 246 tests_to_run = [test for test in test_names if test not in tests_to_skip] | 249 tests_to_run = [test for test in test_names if test not in tests_to_skip] |
| 247 | 250 |
| 248 if not tests_to_run: | 251 if not tests_to_run: |
| 249 return tests_to_run, tests_to_skip | 252 return tests_to_run, tests_to_skip |
| 250 | 253 |
| 251 # Create a sorted list of test files so the subset chunk, | 254 # Create a sorted list of test files so the subset chunk, |
| 252 # if used, contains alphabetically consecutive tests. | 255 # if used, contains alphabetically consecutive tests. |
| 253 if self._options.order == 'natural': | 256 if self._options.order == 'natural': |
| 254 tests_to_run.sort(key=self._port.test_key) | 257 tests_to_run.sort(key=self._port.test_key) |
| 255 elif self._options.order == 'random': | 258 elif self._options.order == 'random': |
| 256 random.shuffle(tests_to_run) | 259 if self._options.seed is not None: |
| 257 elif self._options.order == 'random-seeded': | 260 self._random_seed = self._options.seed |
| 258 rnd = random.Random() | 261 else: |
| 259 rnd.seed(4) # http://xkcd.com/221/ | 262 self._random_seed = int(time.time()) |
|
ojan
2016/09/06 16:53:29
I think we should do a fixed value. Using current
qyearsley
2016/09/06 17:22:48
If the seed is output with the results, it should
jeffcarp
2016/09/06 17:22:51
Is that still true if we log the seed in results.j
ojan
2016/09/06 21:27:11
Think of this from the perspective of a sheriff tr
jeffcarp
2016/09/06 22:57:30
If this would make it hard to tell whether a faili
ojan
2016/09/06 23:07:43
I think this is a good idea. You probably want it
| |
| 260 rnd.shuffle(tests_to_run) | 263 _log.info("Test random order seed: %d", self._random_seed) |
| 264 tests_to_run.sort() | |
| 265 rand = random.Random() | |
| 266 rand.seed(self._random_seed) | |
| 267 rand.shuffle(tests_to_run) | |
| 261 | 268 |
| 262 tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run) | 269 tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run) |
| 263 self._expectations.add_extra_skipped_tests(tests_in_other_chunks) | 270 self._expectations.add_extra_skipped_tests(tests_in_other_chunks) |
| 264 tests_to_skip.update(tests_in_other_chunks) | 271 tests_to_skip.update(tests_in_other_chunks) |
| 265 | 272 |
| 266 return tests_to_run, tests_to_skip | 273 return tests_to_run, tests_to_skip |
| 267 | 274 |
| 275 def _write_random_seed(self): | |
| 276 """Writes the random seed used for randomized test order.""" | |
| 277 assert self._options.order == 'random' and self._random_seed | |
| 278 path = self._filesystem.join(self._results_directory, 'random-seed.txt') | |
| 279 contents = '%d\n' % self._random_seed | |
| 280 _log.info('%s %s', path, contents) | |
| 281 self._filesystem.write_text_file(path, contents) | |
|
qyearsley
2016/09/05 00:15:28
We could also potentially:
- Not write the seed t
Dirk Pranke
2016/09/06 01:19:57
I would include the seed as a field in the results
mithro
2016/09/06 02:02:25
I'm happy with Dirk's suggestions.
The aim is to
qyearsley
2016/09/06 17:22:48
Aye, SGTM
| |
| 282 | |
| 268 def _test_input_for_file(self, test_file): | 283 def _test_input_for_file(self, test_file): |
| 269 return TestInput(test_file, | 284 return TestInput(test_file, |
| 270 self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms, | 285 self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms, |
| 271 self._test_requires_lock(test_file), | 286 self._test_requires_lock(test_file), |
| 272 should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file))) | 287 should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file))) |
| 273 | 288 |
| 274 def _test_requires_lock(self, test_file): | 289 def _test_requires_lock(self, test_file): |
| 275 """Return True if the test needs to be locked when running multiple | 290 """Return True if the test needs to be locked when running multiple |
| 276 instances of this test runner. | 291 instances of this test runner. |
| 277 | 292 |
| (...skipping 275 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 553 | 568 |
| 554 stats = {} | 569 stats = {} |
| 555 for result in initial_results.results_by_name.values(): | 570 for result in initial_results.results_by_name.values(): |
| 556 if result.type != test_expectations.SKIP: | 571 if result.type != test_expectations.SKIP: |
| 557 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int( | 572 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int( |
| 558 result.test_run_time * 1000), int(result.total_run_time * 1000))} | 573 result.test_run_time * 1000), int(result.total_run_time * 1000))} |
| 559 stats_trie = {} | 574 stats_trie = {} |
| 560 for name, value in stats.iteritems(): | 575 for name, value in stats.iteritems(): |
| 561 json_results_generator.add_path_to_trie(name, value, stats_trie) | 576 json_results_generator.add_path_to_trie(name, value, stats_trie) |
| 562 return stats_trie | 577 return stats_trie |
| OLD | NEW |