OLD | NEW |
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 25 matching lines...)
36 | 36 |
37 The Manager object has a constructor and one main method called run. | 37 The Manager object has a constructor and one main method called run. |
38 """ | 38 """ |
39 | 39 |
40 import json | 40 import json |
41 import logging | 41 import logging |
42 import random | 42 import random |
43 import sys | 43 import sys |
44 import time | 44 import time |
45 | 45 |
| 46 from webkitpy.common import exit_codes |
46 from webkitpy.common.net.file_uploader import FileUploader | 47 from webkitpy.common.net.file_uploader import FileUploader |
47 from webkitpy.common.webkit_finder import WebKitFinder | 48 from webkitpy.common.webkit_finder import WebKitFinder |
48 from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder | 49 from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder |
49 from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner | 50 from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner |
50 from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter | 51 from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter |
51 from webkitpy.layout_tests.layout_package import json_results_generator | 52 from webkitpy.layout_tests.layout_package import json_results_generator |
52 from webkitpy.layout_tests.models import test_expectations | 53 from webkitpy.layout_tests.models import test_expectations |
53 from webkitpy.layout_tests.models import test_failures | 54 from webkitpy.layout_tests.models import test_failures |
54 from webkitpy.layout_tests.models import test_run_results | 55 from webkitpy.layout_tests.models import test_run_results |
55 from webkitpy.layout_tests.models.test_input import TestInput | 56 from webkitpy.layout_tests.models.test_input import TestInput |
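Note: the central change in this diff is moving the exit-status constants from test_run_results to a new webkitpy.common.exit_codes module. A minimal sketch of what that module presumably exposes, inferred only from the constants this diff references; every numeric value except OK_EXIT_STATUS is an illustrative assumption (callers truth-test the return of _set_up_run, so OK must be 0):

    # Hypothetical webkitpy/common/exit_codes.py -- names taken from this diff,
    # values (other than 0) are assumptions for illustration only.
    OK_EXIT_STATUS = 0              # success; callers rely on this being falsy
    MAX_FAILURES_EXIT_STATUS = 101  # assumed cap on the regression-count exit code
    INTERRUPTED_EXIT_STATUS = 130   # assumed 128 + SIGINT shell convention
    EARLY_EXIT_STATUS = 251         # assumed: run gave up early (too many failures)
    NO_TESTS_EXIT_STATUS = 254      # assumed: nothing to run / --test-list missing
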
(...skipping 43 matching lines...)
99 self._printer.write_update("Collecting tests ...") | 100 self._printer.write_update("Collecting tests ...") |
100 running_all_tests = False | 101 running_all_tests = False |
101 | 102 |
102 self._printer.write_update('Generating MANIFEST.json for web-platform-tests ...') | 103 self._printer.write_update('Generating MANIFEST.json for web-platform-tests ...') |
103 WPTManifest.ensure_manifest(self._port.host) | 104 WPTManifest.ensure_manifest(self._port.host) |
104 | 105 |
105 try: | 106 try: |
106 paths, all_test_names, running_all_tests = self._collect_tests(args) | 107 paths, all_test_names, running_all_tests = self._collect_tests(args) |
107 except IOError: | 108 except IOError: |
108 # This is raised if --test-list doesn't exist | 109 # This is raised if --test-list doesn't exist |
109 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) | 110 return test_run_results.RunDetails(exit_code=exit_codes.NO_TESTS_EXIT_STATUS) |
110 | 111 |
111 # Create a sorted list of test files so the subset chunk, | 112 # Create a sorted list of test files so the subset chunk, |
112 # if used, contains alphabetically consecutive tests. | 113 # if used, contains alphabetically consecutive tests. |
113 if self._options.order == 'natural': | 114 if self._options.order == 'natural': |
114 all_test_names.sort(key=self._port.test_key) | 115 all_test_names.sort(key=self._port.test_key) |
115 elif self._options.order == 'random': | 116 elif self._options.order == 'random': |
116 all_test_names.sort() | 117 all_test_names.sort() |
117 random.Random(self._options.seed).shuffle(all_test_names) | 118 random.Random(self._options.seed).shuffle(all_test_names) |
118 | 119 |
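The sort before the seeded shuffle matters: random.Random(seed).shuffle() only reproduces the same order if its input order is already deterministic, so the list is normalized first. A tiny self-contained illustration (test names made up):

    import random

    tests = ['c.html', 'a.html', 'b.html']  # hypothetical collection order
    tests.sort()                            # normalize so the shuffle input is stable
    random.Random(4).shuffle(tests)         # same seed => same order on every machine
    print(tests)
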
119 test_names, tests_in_other_chunks = self._finder.split_into_chunks(all_test_names) | 120 test_names, tests_in_other_chunks = self._finder.split_into_chunks(all_test_names) |
120 | 121 |
121 self._printer.write_update("Parsing expectations ...") | 122 self._printer.write_update("Parsing expectations ...") |
122 self._expectations = test_expectations.TestExpectations(self._port, test_names) | 123 self._expectations = test_expectations.TestExpectations(self._port, test_names) |
123 | 124 |
124 tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names) | 125 tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names) |
125 | 126 |
126 self._expectations.remove_tests(tests_in_other_chunks) | 127 self._expectations.remove_tests(tests_in_other_chunks) |
127 | 128 |
128 self._printer.print_found( | 129 self._printer.print_found( |
129 len(all_test_names), len(test_names), len(tests_to_run), | 130 len(all_test_names), len(test_names), len(tests_to_run), |
130 self._options.repeat_each, self._options.iterations) | 131 self._options.repeat_each, self._options.iterations) |
131 | 132 |
132 # Check to make sure we're not skipping every test. | 133 # Check to make sure we're not skipping every test. |
133 if not tests_to_run: | 134 if not tests_to_run: |
134 _log.critical('No tests to run.') | 135 _log.critical('No tests to run.') |
135 return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) | 136 return test_run_results.RunDetails(exit_code=exit_codes.NO_TESTS_EXIT_STATUS) |
136 | 137 |
137 exit_code = self._set_up_run(tests_to_run) | 138 exit_code = self._set_up_run(tests_to_run) |
138 if exit_code: | 139 if exit_code: |
139 return test_run_results.RunDetails(exit_code=exit_code) | 140 return test_run_results.RunDetails(exit_code=exit_code) |
140 | 141 |
141 # Don't retry failures if an explicit list of tests was passed in. | 142 # Don't retry failures if an explicit list of tests was passed in. |
142 if self._options.retry_failures is None: | 143 if self._options.retry_failures is None: |
143 should_retry_failures = len(paths) < len(test_names) | 144 should_retry_failures = len(paths) < len(test_names) |
144 else: | 145 else: |
145 should_retry_failures = self._options.retry_failures | 146 should_retry_failures = self._options.retry_failures |
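When --retry-failures is not passed explicitly, the code guesses: if the command-line paths expanded into more test names than there were paths, the user probably ran a directory or glob rather than a hand-picked list, so retrying flaky failures is reasonable. A sketch of the heuristic with made-up values:

    paths = ['fast/dom']                                  # one CLI argument
    test_names = ['fast/dom/a.html', 'fast/dom/b.html']   # it expanded to two tests
    should_retry_failures = len(paths) < len(test_names)  # True -> retry failures
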
(...skipping 51 matching lines...)
197 | 198 |
198 _log.debug("summarizing results") | 199 _log.debug("summarizing results") |
199 summarized_full_results = test_run_results.summarize_results( | 200 summarized_full_results = test_run_results.summarize_results( |
200 self._port, self._expectations, initial_results, all_retry_results, | 201 self._port, self._expectations, initial_results, all_retry_results, |
201 enabled_pixel_tests_in_retry) | 202 enabled_pixel_tests_in_retry) |
202 summarized_failing_results = test_run_results.summarize_results( | 203 summarized_failing_results = test_run_results.summarize_results( |
203 self._port, self._expectations, initial_results, all_retry_results, | 204 self._port, self._expectations, initial_results, all_retry_results, |
204 enabled_pixel_tests_in_retry, only_include_failing=True) | 205 enabled_pixel_tests_in_retry, only_include_failing=True) |
205 | 206 |
206 exit_code = summarized_failing_results['num_regressions'] | 207 exit_code = summarized_failing_results['num_regressions'] |
207 if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS: | 208 if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS: |
208 _log.warning('num regressions (%d) exceeds max exit status (%d)', | 209 _log.warning('num regressions (%d) exceeds max exit status (%d)', |
209 exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS) | 210 exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS) |
210 exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS | 211 exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS |
211 | 212 |
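The regression count doubles as the process exit status, and POSIX exit statuses fit in a single byte, so the count is presumably capped at MAX_FAILURES_EXIT_STATUS to keep large failure counts from wrapping or colliding with the special statuses above it. The warn-and-clamp above is equivalent to:

    exit_code = min(summarized_failing_results['num_regressions'],
                    exit_codes.MAX_FAILURES_EXIT_STATUS)
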
212 if not self._options.dry_run: | 213 if not self._options.dry_run: |
213 self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests) | 214 self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests) |
214 | 215 |
215 if self._options.write_full_results_to: | 216 if self._options.write_full_results_to: |
216 self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"), | 217 self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"), |
217 self._options.write_full_results_to) | 218 self._options.write_full_results_to) |
218 | 219 |
219 self._upload_json_files() | 220 self._upload_json_files() |
220 | 221 |
221 results_path = self._filesystem.join(self._results_directory, "results.html") | 222 results_path = self._filesystem.join(self._results_directory, "results.html") |
222 self._copy_results_html_file(results_path) | 223 self._copy_results_html_file(results_path) |
223 if initial_results.keyboard_interrupted: | 224 if initial_results.keyboard_interrupted: |
224 exit_code = test_run_results.INTERRUPTED_EXIT_STATUS | 225 exit_code = exit_codes.INTERRUPTED_EXIT_STATUS |
225 else: | 226 else: |
226 if initial_results.interrupted: | 227 if initial_results.interrupted: |
227 exit_code = test_run_results.EARLY_EXIT_STATUS | 228 exit_code = exit_codes.EARLY_EXIT_STATUS |
228 if self._options.show_results and ( | 229 if self._options.show_results and ( |
229 exit_code or (self._options.full_results_html and initial_results.total_failures)): | 230 exit_code or (self._options.full_results_html and initial_results.total_failures)): |
230 self._port.show_results_html_file(results_path) | 231 self._port.show_results_html_file(results_path) |
231 self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results) | 232 self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results) |
232 | 233 |
233 return test_run_results.RunDetails( | 234 return test_run_results.RunDetails( |
234 exit_code, summarized_full_results, summarized_failing_results, | 235 exit_code, summarized_full_results, summarized_failing_results, |
235 initial_results, all_retry_results, enabled_pixel_tests_in_retry) | 236 initial_results, all_retry_results, enabled_pixel_tests_in_retry) |
236 | 237 |
237 def _collect_tests(self, args): | 238 def _collect_tests(self, args): |
(...skipping 108 matching lines...)
346 self._clobber_old_results() | 347 self._clobber_old_results() |
347 elif self._filesystem.exists(self._results_directory): | 348 elif self._filesystem.exists(self._results_directory): |
348 self._limit_archived_results_count() | 349 self._limit_archived_results_count() |
349 # Rename the existing results folder for archiving. | 350 # Rename the existing results folder for archiving. |
350 self._rename_results_folder() | 351 self._rename_results_folder() |
351 | 352 |
352 # Create the output directory if it doesn't already exist. | 353 # Create the output directory if it doesn't already exist. |
353 self._port.host.filesystem.maybe_make_directory(self._results_directory) | 354 self._port.host.filesystem.maybe_make_directory(self._results_directory) |
354 | 355 |
355 self._port.setup_test_run() | 356 self._port.setup_test_run() |
356 return test_run_results.OK_EXIT_STATUS | 357 return exit_codes.OK_EXIT_STATUS |
357 | 358 |
358 def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, | 359 def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, |
359 num_workers, retry_attempt=0): | 360 num_workers, retry_attempt=0): |
360 | 361 |
361 test_inputs = [] | 362 test_inputs = [] |
362 for _ in xrange(iterations): | 363 for _ in xrange(iterations): |
363 for test in tests_to_run: | 364 for test in tests_to_run: |
364 for _ in xrange(repeat_each): | 365 for _ in xrange(repeat_each): |
365 test_inputs.append(self._test_input_for_file(test)) | 366 test_inputs.append(self._test_input_for_file(test)) |
366 return self._runner.run_tests(self._expectations, test_inputs, | 367 return self._runner.run_tests(self._expectations, test_inputs, |
(...skipping 178 matching lines...)
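In the test_inputs loop above (just before the skipped region), iterations is the outer loop and repeat_each the inner one, so each test's repeats run back-to-back and then the whole list starts over. A runnable sketch of the resulting schedule (the helper name is made up):

    def schedule(tests, repeat_each, iterations):
        # Mirrors the nesting in _run_tests: iterations outer, repeat_each inner.
        out = []
        for _ in range(iterations):
            for test in tests:
                out.extend([test] * repeat_each)
        return out

    assert schedule(['A', 'B'], repeat_each=2, iterations=2) == [
        'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
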
545 | 546 |
546 stats = {} | 547 stats = {} |
547 for result in initial_results.results_by_name.values(): | 548 for result in initial_results.results_by_name.values(): |
548 if result.type != test_expectations.SKIP: | 549 if result.type != test_expectations.SKIP: |
549 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int( | 550 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int( |
550 result.test_run_time * 1000), int(result.total_run_time * 1000))} | 551 result.test_run_time * 1000), int(result.total_run_time * 1000))} |
551 stats_trie = {} | 552 stats_trie = {} |
552 for name, value in stats.iteritems(): | 553 for name, value in stats.iteritems(): |
553 json_results_generator.add_path_to_trie(name, value, stats_trie) | 554 json_results_generator.add_path_to_trie(name, value, stats_trie) |
554 return stats_trie | 555 return stats_trie |
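The stats keys are slash-separated test paths, and add_path_to_trie nests them into a directory tree for the stats JSON. A hypothetical equivalent of that helper, consistent with its usage here (the real implementation lives in json_results_generator and may differ):

    def add_path_to_trie(path, value, trie):
        # 'fast/dom/a.html' -> {'fast': {'dom': {'a.html': value}}}
        if '/' not in path:
            trie[path] = value
            return
        directory, _, rest = path.partition('/')
        trie.setdefault(directory, {})
        add_path_to_trie(rest, value, trie[directory])
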