OLD | NEW |
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 78 matching lines...)
89 self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' | 89 self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' |
90 self.ARCHIVED_RESULTS_LIMIT = 25 | 90 self.ARCHIVED_RESULTS_LIMIT = 25 |
91 self._http_server_started = False | 91 self._http_server_started = False |
92 self._wptserve_started = False | 92 self._wptserve_started = False |
93 self._websockets_server_started = False | 93 self._websockets_server_started = False |
94 | 94 |
95 self._results_directory = self._port.results_directory() | 95 self._results_directory = self._port.results_directory() |
96 self._finder = LayoutTestFinder(self._port, self._options) | 96 self._finder = LayoutTestFinder(self._port, self._options) |
97 self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow) | 97 self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow) |
98 | 98 |
| 99 def run(self, args): |
| 100 """Run the tests and return a RunDetails object with the results.""" |
| 101 start_time = time.time() |
| 102 self._printer.write_update("Collecting tests ...") |
| 103 running_all_tests = False |
| 104 try: |
| 105 paths, test_names, running_all_tests = self._collect_tests(args) |
| 106 except IOError: |
| 107 # This is raised if --test-list doesn't exist |
| 108 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) |
| 109 |
| 110 self._printer.write_update("Parsing expectations ...") |
| 111 self._expectations = test_expectations.TestExpectations(self._port, test_names) |
| 112 |
| 113 tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names) |
| 114 self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations) |
| 115 |
| 116 # Check to make sure we're not skipping every test. |
| 117 if not tests_to_run: |
| 118 _log.critical('No tests to run.') |
| 119 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) |
| 120 |
| 121 exit_code = self._set_up_run(tests_to_run) |
| 122 if exit_code: |
| 123 return test_run_results.RunDetails(exit_code=exit_code) |
| 124 |
| 125 # Don't retry failures if an explicit list of tests was passed in. |
| 126 if self._options.retry_failures is None: |
| 127 should_retry_failures = len(paths) < len(test_names) |
| 128 else: |
| 129 should_retry_failures = self._options.retry_failures |
| 130 |
| 131 enabled_pixel_tests_in_retry = False |
| 132 try: |
| 133 self._start_servers(tests_to_run) |
| 134 |
| 135 num_workers = self._port.num_workers(int(self._options.child_processes)) |
| 136 |
| 137 initial_results = self._run_tests( |
| 138 tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, |
| 139 num_workers) |
| 140 |
| 141 # Don't retry failures when interrupted by user or failures limit exception. |
| 142 should_retry_failures = should_retry_failures and not ( |
| 143 initial_results.interrupted or initial_results.keyboard_interrupted) |
| 144 |
| 145 tests_to_retry = self._tests_to_retry(initial_results) |
| 146 all_retry_results = [] |
| 147 if should_retry_failures and tests_to_retry: |
| 148 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed() |
| 149 |
| 150 for retry_attempt in xrange(1, self._options.num_retries + 1): |
| 151 if not tests_to_retry: |
| 152 break |
| 153 |
| 154 _log.info('') |
| 155 _log.info('Retrying %s, attempt %d of %d...' % |
| 156 (grammar.pluralize('unexpected failure', len(tests_to_retry)), |
| 157 retry_attempt, self._options.num_retries)) |
| 158 |
| 159 retry_results = self._run_tests(tests_to_retry, |
| 160 tests_to_skip=set(), |
| 161 repeat_each=1, |
| 162 iterations=1, |
| 163 num_workers=num_workers, |
| 164 retry_attempt=retry_attempt) |
| 165 all_retry_results.append(retry_results) |
| 166 |
| 167 tests_to_retry = self._tests_to_retry(retry_results) |
| 168 |
| 169 if enabled_pixel_tests_in_retry: |
| 170 self._options.pixel_tests = False |
| 171 finally: |
| 172 self._stop_servers() |
| 173 self._clean_up_run() |
| 174 |
| 175 # Some crash logs can take a long time to be written out so look |
| 176 # for new logs after the test run finishes. |
| 177 self._printer.write_update("looking for new crash logs") |
| 178 self._look_for_new_crash_logs(initial_results, start_time) |
| 179 for retry_attempt_results in all_retry_results: |
| 180 self._look_for_new_crash_logs(retry_attempt_results, start_time) |
| 181 |
| 182 _log.debug("summarizing results") |
| 183 summarized_full_results = test_run_results.summarize_results( |
| 184 self._port, self._expectations, initial_results, all_retry_results, |
| 185 enabled_pixel_tests_in_retry) |
| 186 summarized_failing_results = test_run_results.summarize_results( |
| 187 self._port, self._expectations, initial_results, all_retry_results, |
| 188 enabled_pixel_tests_in_retry, only_include_failing=True) |
| 189 |
| 190 exit_code = summarized_failing_results['num_regressions'] |
| 191 if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS: |
| 192 _log.warning('num regressions (%d) exceeds max exit status (%d)' % |
| 193 (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)) |
| 194 exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS |
| 195 |
| 196 if not self._options.dry_run: |
| 197 self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests) |
| 198 |
| 199 if self._options.write_full_results_to: |
| 200 self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"), |
| 201 self._options.write_full_results_to) |
| 202 |
| 203 self._upload_json_files() |
| 204 |
| 205 results_path = self._filesystem.join(self._results_directory, "results.html") |
| 206 self._copy_results_html_file(results_path) |
| 207 if initial_results.keyboard_interrupted: |
| 208 exit_code = test_run_results.INTERRUPTED_EXIT_STATUS |
| 209 else: |
| 210 if initial_results.interrupted: |
| 211 exit_code = test_run_results.EARLY_EXIT_STATUS |
| 212 if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)): |
| 213 self._port.show_results_html_file(results_path) |
| 214 self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results) |
| 215 |
| 216 return test_run_results.RunDetails( |
| 217 exit_code, summarized_full_results, summarized_failing_results, |
| 218 initial_results, all_retry_results, enabled_pixel_tests_in_retry) |
| 219 |
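The tail of the relocated run() method above maps the results onto a process exit code: the regression count becomes the exit code, capped at MAX_FAILURES_EXIT_STATUS, and interrupt conditions override it. Below is a condensed, standalone sketch of that policy; the constant values are placeholders (the real ones live in test_run_results and are not part of this patch) and the dry-run branch is ignored.

# Sketch only: placeholder values, not the real test_run_results constants.
MAX_FAILURES_EXIT_STATUS = 101   # assumed cap so the count stays a valid exit status
INTERRUPTED_EXIT_STATUS = 130    # assumed code for a keyboard (Ctrl-C) interrupt
EARLY_EXIT_STATUS = 129          # assumed code for aborting at the failure limit

def choose_exit_code(num_regressions, interrupted, keyboard_interrupted):
    # Start from the number of unexpected failures, capped as in run().
    exit_code = min(num_regressions, MAX_FAILURES_EXIT_STATUS)
    if keyboard_interrupted:
        return INTERRUPTED_EXIT_STATUS
    if interrupted:
        return EARLY_EXIT_STATUS
    return exit_code

assert choose_exit_code(3, False, False) == 3
assert choose_exit_code(500, False, False) == MAX_FAILURES_EXIT_STATUS
assert choose_exit_code(0, True, False) == EARLY_EXIT_STATUS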
99 def _collect_tests(self, args): | 220 def _collect_tests(self, args): |
100 return self._finder.find_tests(args, test_list=self._options.test_list, | 221 return self._finder.find_tests(args, test_list=self._options.test_list, |
101 fastest_percentile=self._options.fastest) | 222 fastest_percentile=self._options.fastest) |
102 | 223 |
103 def _is_http_test(self, test): | 224 def _is_http_test(self, test): |
104 return ( | 225 return ( |
105 test.startswith(self.HTTP_SUBDIR) or | 226 test.startswith(self.HTTP_SUBDIR) or |
106 self._is_websocket_test(test) or | 227 self._is_websocket_test(test) or |
107 self.VIRTUAL_HTTP_SUBDIR in test | 228 self.VIRTUAL_HTTP_SUBDIR in test |
108 ) | 229 ) |
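The _is_http_test hunk above decides, purely from the test path, whether a test needs the HTTP/WebSocket servers. A toy standalone version is sketched below; the constant values are assumptions for illustration only, since the real HTTP_SUBDIR, VIRTUAL_HTTP_SUBDIR, and _is_websocket_test definitions sit in the collapsed lines.

# Illustrative only: assumed path fragments, not the real class constants.
HTTP_SUBDIR = 'http/'                         # assumed prefix for plain http tests
WEBSOCKET_SUBDIR = 'websocket/'               # assumed fragment checked by _is_websocket_test
VIRTUAL_HTTP_SUBDIR = 'virtual/stable/http/'  # assumed virtual suite backed by http tests

def is_http_test(test):
    # Mirrors the predicate structure shown in the hunk: prefix match,
    # websocket check, or virtual-suite substring match.
    return (test.startswith(HTTP_SUBDIR) or
            WEBSOCKET_SUBDIR in test or
            VIRTUAL_HTTP_SUBDIR in test)

print(is_http_test('http/tests/xmlhttprequest/simple.html'))   # True
print(is_http_test('fast/dom/Element/id.html'))                # False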
(...skipping 52 matching lines...)
161 """ | 282 """ |
162 return self._is_http_test(test_file) or self._is_perf_test(test_file) | 283 return self._is_http_test(test_file) or self._is_perf_test(test_file) |
163 | 284 |
164 def _test_is_expected_missing(self, test_file): | 285 def _test_is_expected_missing(self, test_file): |
165 expectations = self._expectations.model().get_expectations(test_file) | 286 expectations = self._expectations.model().get_expectations(test_file) |
166 return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations | 287 return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations |
167 | 288 |
168 def _test_is_slow(self, test_file): | 289 def _test_is_slow(self, test_file): |
169 return test_expectations.SLOW in self._expectations.model().get_expectations(test_file) | 290 return test_expectations.SLOW in self._expectations.model().get_expectations(test_file) |
170 | 291 |
171 def needs_servers(self, test_names): | 292 def _needs_servers(self, test_names): |
172 return any(self._test_requires_lock(test_name) for test_name in test_names) | 293 return any(self._test_requires_lock(test_name) for test_name in test_names) |
173 | 294 |
174 def _rename_results_folder(self): | 295 def _rename_results_folder(self): |
175 try: | 296 try: |
176 timestamp = time.strftime( | 297 timestamp = time.strftime( |
177 "%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self.
_filesystem.join(self._results_directory, "results.html")))) | 298 "%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self.
_filesystem.join(self._results_directory, "results.html")))) |
178 except (IOError, OSError), e: | 299 except (IOError, OSError), e: |
179 # It might be possible that results.html was not generated in previous run, because the test | 300 # It might be possible that results.html was not generated in previous run, because the test |
180 # run was interrupted even before testing started. In those cases, don't archive the folder. | 301 # run was interrupted even before testing started. In those cases, don't archive the folder. |
181 # Simply override the current folder contents with new results. | 302 # Simply override the current folder contents with new results. |
(...skipping 17 matching lines...)
199 file_path = self._filesystem.join(results_directory_path, dir) | 320 file_path = self._filesystem.join(results_directory_path, dir) |
200 if self._filesystem.isdir(file_path) and self._results_directory in file_path: | 321 if self._filesystem.isdir(file_path) and self._results_directory in file_path: |
201 results_directories.append(file_path) | 322 results_directories.append(file_path) |
202 results_directories.sort(key=lambda x: self._filesystem.mtime(x)) | 323 results_directories.sort(key=lambda x: self._filesystem.mtime(x)) |
203 self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path) | 324 self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path) |
204 self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT]) | 325 self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT]) |
205 | 326 |
206 def _set_up_run(self, test_names): | 327 def _set_up_run(self, test_names): |
207 self._printer.write_update("Checking build ...") | 328 self._printer.write_update("Checking build ...") |
208 if self._options.build: | 329 if self._options.build: |
209 exit_code = self._port.check_build(self.needs_servers(test_names), self._printer) | 330 exit_code = self._port.check_build(self._needs_servers(test_names), self._printer) |
210 if exit_code: | 331 if exit_code: |
211 _log.error("Build check failed") | 332 _log.error("Build check failed") |
212 return exit_code | 333 return exit_code |
213 | 334 |
214 # This must be started before we check the system dependencies, | 335 # This must be started before we check the system dependencies, |
215 # since the helper may do things to make the setup correct. | 336 # since the helper may do things to make the setup correct. |
216 if self._options.pixel_tests: | 337 if self._options.pixel_tests: |
217 self._printer.write_update("Starting pixel test helper ...") | 338 self._printer.write_update("Starting pixel test helper ...") |
218 self._port.start_helper() | 339 self._port.start_helper() |
219 | 340 |
220 # Check that the system dependencies (themes, fonts, ...) are correct. | 341 # Check that the system dependencies (themes, fonts, ...) are correct. |
221 if not self._options.nocheck_sys_deps: | 342 if not self._options.nocheck_sys_deps: |
222 self._printer.write_update("Checking system dependencies ...") | 343 self._printer.write_update("Checking system dependencies ...") |
223 exit_code = self._port.check_sys_deps(self.needs_servers(test_names)) | 344 exit_code = self._port.check_sys_deps(self._needs_servers(test_names)) |
224 if exit_code: | 345 if exit_code: |
225 self._port.stop_helper() | 346 self._port.stop_helper() |
226 return exit_code | 347 return exit_code |
227 | 348 |
228 if self._options.clobber_old_results: | 349 if self._options.clobber_old_results: |
229 self._clobber_old_results() | 350 self._clobber_old_results() |
230 elif self._filesystem.exists(self._results_directory): | 351 elif self._filesystem.exists(self._results_directory): |
231 self._limit_archived_results_count() | 352 self._limit_archived_results_count() |
232 # Rename the existing results folder for archiving. | 353 # Rename the existing results folder for archiving. |
233 self._rename_results_folder() | 354 self._rename_results_folder() |
234 | 355 |
235 # Create the output directory if it doesn't already exist. | 356 # Create the output directory if it doesn't already exist. |
236 self._port.host.filesystem.maybe_make_directory(self._results_directory) | 357 self._port.host.filesystem.maybe_make_directory(self._results_directory) |
237 | 358 |
238 self._port.setup_test_run() | 359 self._port.setup_test_run() |
239 return test_run_results.OK_EXIT_STATUS | 360 return test_run_results.OK_EXIT_STATUS |
240 | 361 |
241 def run(self, args): | |
242 """Run the tests and return a RunDetails object with the results.""" | |
243 start_time = time.time() | |
244 self._printer.write_update("Collecting tests ...") | |
245 running_all_tests = False | |
246 try: | |
247 paths, test_names, running_all_tests = self._collect_tests(args) | |
248 except IOError: | |
249 # This is raised if --test-list doesn't exist | |
249 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) | |
251 | |
252 self._printer.write_update("Parsing expectations ...") | |
253 self._expectations = test_expectations.TestExpectations(self._port, test_names) | |
254 | |
255 tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names) | |
256 self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations) | |
257 | |
258 # Check to make sure we're not skipping every test. | |
259 if not tests_to_run: | |
260 _log.critical('No tests to run.') | |
261 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) | |
262 | |
263 exit_code = self._set_up_run(tests_to_run) | |
264 if exit_code: | |
265 return test_run_results.RunDetails(exit_code=exit_code) | |
266 | |
267 # Don't retry failures if an explicit list of tests was passed in. | |
268 if self._options.retry_failures is None: | |
269 should_retry_failures = len(paths) < len(test_names) | |
270 else: | |
271 should_retry_failures = self._options.retry_failures | |
272 | |
273 enabled_pixel_tests_in_retry = False | |
274 try: | |
275 self._start_servers(tests_to_run) | |
276 | |
277 num_workers = self._port.num_workers(int(self._options.child_processes)) | |
278 | |
279 initial_results = self._run_tests( | |
280 tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, | |
281 num_workers) | |
282 | |
283 # Don't retry failures when interrupted by user or failures limit exception. | |
284 should_retry_failures = should_retry_failures and not ( | |
285 initial_results.interrupted or initial_results.keyboard_interrupted) | |
286 | |
287 tests_to_retry = self._tests_to_retry(initial_results) | |
288 all_retry_results = [] | |
289 if should_retry_failures and tests_to_retry: | |
290 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed() | |
291 | |
292 for retry_attempt in xrange(1, self._options.num_retries + 1): | |
293 if not tests_to_retry: | |
294 break | |
295 | |
296 _log.info('') | |
297 _log.info('Retrying %s, attempt %d of %d...' % | |
298 (grammar.pluralize('unexpected failure', len(tests_to_retry)), | |
299 retry_attempt, self._options.num_retries)) | |
300 | |
301 retry_results = self._run_tests(tests_to_retry, | |
302 tests_to_skip=set(), | |
303 repeat_each=1, | |
304 iterations=1, | |
305 num_workers=num_workers, | |
306 retry_attempt=retry_attempt) | |
307 all_retry_results.append(retry_results) | |
308 | |
309 tests_to_retry = self._tests_to_retry(retry_results) | |
310 | |
311 if enabled_pixel_tests_in_retry: | |
312 self._options.pixel_tests = False | |
313 finally: | |
314 self._stop_servers() | |
315 self._clean_up_run() | |
316 | |
317 # Some crash logs can take a long time to be written out so look | |
318 # for new logs after the test run finishes. | |
319 self._printer.write_update("looking for new crash logs") | |
320 self._look_for_new_crash_logs(initial_results, start_time) | |
321 for retry_attempt_results in all_retry_results: | |
322 self._look_for_new_crash_logs(retry_attempt_results, start_time) | |
323 | |
324 _log.debug("summarizing results") | |
325 summarized_full_results = test_run_results.summarize_results( | |
326 self._port, self._expectations, initial_results, all_retry_results, | |
327 enabled_pixel_tests_in_retry) | |
328 summarized_failing_results = test_run_results.summarize_results( | |
329 self._port, self._expectations, initial_results, all_retry_results, | |
330 enabled_pixel_tests_in_retry, only_include_failing=True) | |
331 | |
332 exit_code = summarized_failing_results['num_regressions'] | |
333 if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS: | |
334 _log.warning('num regressions (%d) exceeds max exit status (%d)' % | |
335 (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)) | |
336 exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS | |
337 | |
338 if not self._options.dry_run: | |
339 self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests) | |
340 | |
341 if self._options.write_full_results_to: | |
342 self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"), | |
343 self._options.write_full_results_to) | |
344 | |
345 self._upload_json_files() | |
346 | |
347 results_path = self._filesystem.join(self._results_directory, "results.html") | |
348 self._copy_results_html_file(results_path) | |
349 if initial_results.keyboard_interrupted: | |
350 exit_code = test_run_results.INTERRUPTED_EXIT_STATUS | |
351 else: | |
352 if initial_results.interrupted: | |
353 exit_code = test_run_results.EARLY_EXIT_STATUS | |
354 if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)): | |
355 self._port.show_results_html_file(results_path) | |
356 self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results) | |
357 | |
358 return test_run_results.RunDetails( | |
359 exit_code, summarized_full_results, summarized_failing_results, | |
360 initial_results, all_retry_results, enabled_pixel_tests_in_retry) | |
361 | |
362 def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, | 362 def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, |
363 num_workers, retry_attempt=0): | 363 num_workers, retry_attempt=0): |
364 | 364 |
365 test_inputs = [] | 365 test_inputs = [] |
366 for _ in xrange(iterations): | 366 for _ in xrange(iterations): |
367 for test in tests_to_run: | 367 for test in tests_to_run: |
368 for _ in xrange(repeat_each): | 368 for _ in xrange(repeat_each): |
369 test_inputs.append(self._test_input_for_file(test)) | 369 test_inputs.append(self._test_input_for_file(test)) |
370 return self._runner.run_tests(self._expectations, test_inputs, | 370 return self._runner.run_tests(self._expectations, test_inputs, |
371 tests_to_skip, num_workers, retry_attempt) | 371 tests_to_skip, num_workers, retry_attempt) |
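In the _run_tests hunk above, iterations repeats the whole suite while repeat_each repeats each individual test back to back before the list is handed to LayoutTestRunner. A minimal sketch of that expansion order follows, with throwaway test names that are not part of the patch.

# Standalone illustration of the nesting order used to build test_inputs.
def expand(tests, repeat_each, iterations):
    expanded = []
    for _ in range(iterations):           # outer loop: whole-suite repeats
        for test in tests:
            for _ in range(repeat_each):  # inner loop: back-to-back repeats
                expanded.append(test)
    return expanded

print(expand(['a.html', 'b.html'], repeat_each=2, iterations=1))
# ['a.html', 'a.html', 'b.html', 'b.html']
print(expand(['a.html', 'b.html'], repeat_each=1, iterations=2))
# ['a.html', 'b.html', 'a.html', 'b.html']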
(...skipping 178 matching lines...)
550 | 550 |
551 stats = {} | 551 stats = {} |
552 for result in initial_results.results_by_name.values(): | 552 for result in initial_results.results_by_name.values(): |
553 if result.type != test_expectations.SKIP: | 553 if result.type != test_expectations.SKIP: |
554 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int( | 554 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int( |
555 result.test_run_time * 1000), int(result.total_run_time * 1000))} | 555 result.test_run_time * 1000), int(result.total_run_time * 1000))} |
556 stats_trie = {} | 556 stats_trie = {} |
557 for name, value in stats.iteritems(): | 557 for name, value in stats.iteritems(): |
558 json_results_generator.add_path_to_trie(name, value, stats_trie) | 558 json_results_generator.add_path_to_trie(name, value, stats_trie) |
559 return stats_trie | 559 return stats_trie |
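The stats block above stores per-test timing tuples and folds them into a trie keyed by path segments. Below is a self-contained sketch of the resulting shape, assuming json_results_generator.add_path_to_trie nests dicts by splitting the test name on '/'; the helper itself is not shown in this diff, so treat the implementation as an assumption.

# Sketch of the assumed trie layout; not the real json_results_generator code.
def add_path_to_trie(path, value, trie):
    head, _, rest = path.partition('/')
    if not rest:
        trie[head] = value         # leaf: store the stats tuple for the file
        return
    trie.setdefault(head, {})      # intermediate directory node
    add_path_to_trie(rest, value, trie[head])

stats_trie = {}
add_path_to_trie('fast/dom/test.html', {'results': (1, 7, 4242, 12, 15)}, stats_trie)
print(stats_trie)
# {'fast': {'dom': {'test.html': {'results': (1, 7, 4242, 12, 15)}}}}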