| OLD | NEW |
| 1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
| 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
| 3 # | 3 # |
| 4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
| 5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
| 6 # met: | 6 # met: |
| 7 # | 7 # |
| 8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
| 9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
| 10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
| (...skipping 78 matching lines...) |
| 89 self._results_directory = self._port.results_directory() | 89 self._results_directory = self._port.results_directory() |
| 90 self._finder = LayoutTestFinder(self._port, self._options) | 90 self._finder = LayoutTestFinder(self._port, self._options) |
| 91 self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow) | 91 self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow) |
| 92 | 92 |
| 93 def run(self, args): | 93 def run(self, args): |
| 94 """Run the tests and return a RunDetails object with the results.""" | 94 """Run the tests and return a RunDetails object with the results.""" |
| 95 start_time = time.time() | 95 start_time = time.time() |
| 96 self._printer.write_update("Collecting tests ...") | 96 self._printer.write_update("Collecting tests ...") |
| 97 running_all_tests = False | 97 running_all_tests = False |
| 98 try: | 98 try: |
| 99 paths, test_names, running_all_tests = self._collect_tests(args) | 99 paths, all_test_names, running_all_tests = self._collect_tests(args) |
| 100 except IOError: | 100 except IOError: |
| 101 # This is raised if --test-list doesn't exist | 101 # This is raised if --test-list doesn't exist |
| 102 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) | 102 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) |
| 103 | 103 |
| 104 # Create a sorted list of test files so the subset chunk, |
| 105 # if used, contains alphabetically consecutive tests. |
| 106 if self._options.order == 'natural': |
| 107 all_test_names.sort(key=self._port.test_key) |
| 108 elif self._options.order == 'random': |
| 109 all_test_names.sort() |
| 110 random.Random(self._options.seed).shuffle(all_test_names) |
| 111 |
| 112 test_names, tests_in_other_chunks = self._finder.split_into_chunks(all_test_names) |
| 113 |
| 104 self._printer.write_update("Parsing expectations ...") | 114 self._printer.write_update("Parsing expectations ...") |
| 105 self._expectations = test_expectations.TestExpectations(self._port, test_names) | 115 self._expectations = test_expectations.TestExpectations(self._port, test_names) |
| 106 | 116 |
| 107 tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names) | 117 tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names) |
| 108 self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations) | 118 |
| 119 self._expectations.remove_tests(tests_in_other_chunks) |
| 120 |
| 121 self._printer.print_found( |
| 122 len(all_test_names), len(test_names), len(tests_to_run), |
| 123 self._options.repeat_each, self._options.iterations) |
| 109 | 124 |
| 110 # Check to make sure we're not skipping every test. | 125 # Check to make sure we're not skipping every test. |
| 111 if not tests_to_run: | 126 if not tests_to_run: |
| 112 _log.critical('No tests to run.') | 127 _log.critical('No tests to run.') |
| 113 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) | 128 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) |
| 114 | 129 |
| 115 exit_code = self._set_up_run(tests_to_run) | 130 exit_code = self._set_up_run(tests_to_run) |
| 116 if exit_code: | 131 if exit_code: |
| 117 return test_run_results.RunDetails(exit_code=exit_code) | 132 return test_run_results.RunDetails(exit_code=exit_code) |
| 118 | 133 |
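Reviewer note on the run() hunk above: the sort/shuffle now operates on the full collected list (all_test_names) before chunk splitting, so a seeded --order=random run selects the same chunks everywhere. A minimal sketch of the ordering behavior, assuming the --order/--seed options shown in the diff; order_tests is a hypothetical helper, and the real natural sort uses key=self._port.test_key:

    import random

    def order_tests(all_test_names, order, seed):
        if order == 'natural':
            # Alphanumeric sort so a subset chunk, if used, covers
            # alphabetically consecutive tests (real code sorts with
            # key=self._port.test_key).
            all_test_names.sort()
        elif order == 'random':
            # Sort first so the same seed always yields the same order,
            # regardless of how the tests were collected.
            all_test_names.sort()
            random.Random(seed).shuffle(all_test_names)
        return all_test_names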
| (...skipping 116 matching lines...) |
| 235 def _http_tests(self, test_names): | 250 def _http_tests(self, test_names): |
| 236 return set(test for test in test_names if self._is_http_test(test)) | 251 return set(test for test in test_names if self._is_http_test(test)) |
| 237 | 252 |
| 238 def _is_perf_test(self, test): | 253 def _is_perf_test(self, test): |
| 239 return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test | 254 return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test |
| 240 | 255 |
| 241 def _prepare_lists(self, paths, test_names): | 256 def _prepare_lists(self, paths, test_names): |
| 242 tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names)) | 257 tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names)) |
| 243 tests_to_run = [test for test in test_names if test not in tests_to_skip] | 258 tests_to_run = [test for test in test_names if test not in tests_to_skip] |
| 244 | 259 |
| 245 if not tests_to_run: | |
| 246 return tests_to_run, tests_to_skip | |
| 247 | |
| 248 # Create a sorted list of test files so the subset chunk, | |
| 249 # if used, contains alphabetically consecutive tests. | |
| 250 if self._options.order == 'natural': | |
| 251 tests_to_run.sort(key=self._port.test_key) | |
| 252 elif self._options.order == 'random': | |
| 253 tests_to_run.sort() | |
| 254 random.Random(self._options.seed).shuffle(tests_to_run) | |
| 255 | |
| 256 tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run) | |
| 257 self._expectations.add_extra_skipped_tests(tests_in_other_chunks) | |
| 258 tests_to_skip.update(tests_in_other_chunks) | |
| 259 | |
| 260 return tests_to_run, tests_to_skip | 260 return tests_to_run, tests_to_skip |
| 261 | 261 |
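Reviewer note on _prepare_lists above: the chunk splitting that old lines 248-258 did here now runs in run() before expectations are parsed, and tests belonging to other chunks are dropped via self._expectations.remove_tests() instead of add_extra_skipped_tests(). A sketch of contiguous chunk slicing, under the assumption that split_into_chunks() behaves this way; the chunk_index/num_chunks parameters are hypothetical illustrations, not LayoutTestFinder's real signature:

    def split_into_chunks(test_names, chunk_index, num_chunks):
        # Ceiling division so every test lands in exactly one chunk.
        chunk_size = (len(test_names) + num_chunks - 1) // num_chunks
        start = chunk_index * chunk_size
        this_chunk = test_names[start:start + chunk_size]
        other_chunks = test_names[:start] + test_names[start + chunk_size:]
        return this_chunk, other_chunks

Because the list was sorted (or deterministically shuffled) first, a given chunk index selects the same tests on every machine.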
| 262 def _test_input_for_file(self, test_file): | 262 def _test_input_for_file(self, test_file): |
| 263 return TestInput(test_file, | 263 return TestInput(test_file, |
| 264 self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms, | 264 self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms, |
| 265 self._test_requires_lock(test_file), | 265 self._test_requires_lock(test_file), |
| 266 should_add_missing_baselines=(self._options.new_test_results and | 266 should_add_missing_baselines=(self._options.new_test_results and |
| 267 not self._test_is_expected_missing(test_file))) | 267 not self._test_is_expected_missing(test_file))) |
| 268 | 268 |
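Reviewer note on _test_input_for_file above: each TestInput carries a per-test timeout picked from two existing options. A trivial sketch of that selection, with the option names taken from the diff and timeout_ms_for as a hypothetical helper:

    def timeout_ms_for(test_file, options, test_is_slow):
        # Slow tests get the larger --slow-time-out-ms budget; everything
        # else uses the default --time-out-ms.
        if test_is_slow(test_file):
            return options.slow_time_out_ms
        return options.time_out_ms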
| 269 def _test_requires_lock(self, test_file): | 269 def _test_requires_lock(self, test_file): |
| (...skipping 268 matching lines...) |
| 538 | 538 |
| 539 stats = {} | 539 stats = {} |
| 540 for result in initial_results.results_by_name.values(): | 540 for result in initial_results.results_by_name.values(): |
| 541 if result.type != test_expectations.SKIP: | 541 if result.type != test_expectations.SKIP: |
| 542 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int( | 542 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int( |
| 543 result.test_run_time * 1000), int(result.total_run_time * 1000))} | 543 result.test_run_time * 1000), int(result.total_run_time * 1000))} |
| 544 stats_trie = {} | 544 stats_trie = {} |
| 545 for name, value in stats.iteritems(): | 545 for name, value in stats.iteritems(): |
| 546 json_results_generator.add_path_to_trie(name, value, stats_trie) | 546 json_results_generator.add_path_to_trie(name, value, stats_trie) |
| 547 return stats_trie | 547 return stats_trie |
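Reviewer note on the stats trie above: add_path_to_trie nests each slash-separated test path into a dict of dicts, with the per-test stats dict stored at the leaf. An illustrative re-implementation, inferred from the call site rather than taken from json_results_generator itself:

    def add_path_to_trie(path, value, trie):
        # Split off the first path component; recurse on the remainder.
        directory, _, rest = path.partition('/')
        if not rest:
            trie[directory] = value  # leaf: the per-test stats dict
            return
        add_path_to_trie(rest, value, trie.setdefault(directory, {}))

For example, adding 'fast/dom/foo.html' produces {'fast': {'dom': {'foo.html': value}}}.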