OLD | NEW |
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 36 matching lines...)
47 from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter | 47 from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter |
48 from webkitpy.layout_tests.layout_package import json_results_generator | 48 from webkitpy.layout_tests.layout_package import json_results_generator |
49 from webkitpy.layout_tests.models import test_expectations | 49 from webkitpy.layout_tests.models import test_expectations |
50 from webkitpy.layout_tests.models import test_failures | 50 from webkitpy.layout_tests.models import test_failures |
51 from webkitpy.layout_tests.models import test_run_results | 51 from webkitpy.layout_tests.models import test_run_results |
52 from webkitpy.layout_tests.models.test_input import TestInput | 52 from webkitpy.layout_tests.models.test_input import TestInput |
53 | 53 |
54 _log = logging.getLogger(__name__) | 54 _log = logging.getLogger(__name__) |
55 | 55 |
56 # Builder base URL where we have the archived test results. | 56 # Builder base URL where we have the archived test results. |
57 BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" | 57 BUILDER_BASE_URL = 'http://build.chromium.org/buildbot/layout_test_results/' |
58 | 58 |
59 TestExpectations = test_expectations.TestExpectations | 59 TestExpectations = test_expectations.TestExpectations |
60 | 60 |
61 | 61 |
| 62 class Manager(object): |
62 | 63 |
63 class Manager(object): | |
64 """A class for managing running a series of tests on a series of layout | 64 """A class for managing running a series of tests on a series of layout |
65 test files.""" | 65 test files.""" |
66 | 66 |
67 def __init__(self, port, options, printer): | 67 def __init__(self, port, options, printer): |
68 """Initialize test runner data structures. | 68 """Initialize test runner data structures. |
69 | 69 |
70 Args: | 70 Args: |
71 port: an object implementing port-specific | 71 port: an object implementing port-specific |
72 options: a dictionary of command line options | 72 options: a dictionary of command line options |
73 printer: a Printer object to record updates to. | 73 printer: a Printer object to record updates to. |
(...skipping 39 matching lines...)
113 return tests_to_run, tests_to_skip | 113 return tests_to_run, tests_to_skip |
114 | 114 |
115 # Create a sorted list of test files so the subset chunk, | 115 # Create a sorted list of test files so the subset chunk, |
116 # if used, contains alphabetically consecutive tests. | 116 # if used, contains alphabetically consecutive tests. |
117 if self._options.order == 'natural': | 117 if self._options.order == 'natural': |
118 tests_to_run.sort(key=self._port.test_key) | 118 tests_to_run.sort(key=self._port.test_key) |
119 elif self._options.order == 'random': | 119 elif self._options.order == 'random': |
120 random.shuffle(tests_to_run) | 120 random.shuffle(tests_to_run) |
121 elif self._options.order == 'random-seeded': | 121 elif self._options.order == 'random-seeded': |
122 rnd = random.Random() | 122 rnd = random.Random() |
123 rnd.seed(4) # http://xkcd.com/221/ | 123 rnd.seed(4) # http://xkcd.com/221/ |
124 rnd.shuffle(tests_to_run) | 124 rnd.shuffle(tests_to_run) |
125 | 125 |
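Reviewer note: the 'random-seeded' order exists so a shuffled run is reproducible; seeding a private random.Random instance means every bot produces the same permutation without disturbing global random state. A minimal standalone sketch (test names are made up):

    import random

    tests_to_run = ['fast/css/a.html', 'fast/dom/b.html', 'http/tests/c.html']

    # A seeded, dedicated Random instance always yields the same permutation.
    rnd = random.Random()
    rnd.seed(4)  # same fixed seed as in the code above
    rnd.shuffle(tests_to_run)
    print(tests_to_run)  # identical order on every invocation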
126         tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run) | 126         tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run) |
127 self._expectations.add_extra_skipped_tests(tests_in_other_chunks) | 127 self._expectations.add_extra_skipped_tests(tests_in_other_chunks) |
128 tests_to_skip.update(tests_in_other_chunks) | 128 tests_to_skip.update(tests_in_other_chunks) |
129 | 129 |
130 return tests_to_run, tests_to_skip | 130 return tests_to_run, tests_to_skip |
131 | 131 |
132 def _test_input_for_file(self, test_file): | 132 def _test_input_for_file(self, test_file): |
133 return TestInput(test_file, | 133 return TestInput(test_file, |
134                          self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms, | 134                          self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms, |
135                          self._test_requires_lock(test_file), | 135                          self._test_requires_lock(test_file), |
136                          should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file))) | 136                          should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file))) |
137 | 137 |
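Reviewer note: the wrapped TestInput call above only chooses between the regular and the slow per-test timeout. A sketch of that selection with hypothetical millisecond values:

    # Hypothetical option values, for illustration only.
    time_out_ms = 6000
    slow_time_out_ms = 30000

    def timeout_ms_for(test_is_slow):
        # Tests marked SLOW in the expectations get the larger budget.
        return slow_time_out_ms if test_is_slow else time_out_ms

    assert timeout_ms_for(True) == 30000
    assert timeout_ms_for(False) == 6000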
138 def _test_requires_lock(self, test_file): | 138 def _test_requires_lock(self, test_file): |
139 """Return True if the test needs to be locked when | 139 """Return True if the test needs to be locked when |
140 running multiple copies of NRWTs. Perf tests are locked | 140 running multiple copies of NRWTs. Perf tests are locked |
141 because heavy load caused by running other tests in parallel | 141 because heavy load caused by running other tests in parallel |
142 might cause some of them to timeout.""" | 142 might cause some of them to timeout.""" |
143 return self._is_http_test(test_file) or self._is_perf_test(test_file) | 143 return self._is_http_test(test_file) or self._is_perf_test(test_file) |
144 | 144 |
145 def _test_is_expected_missing(self, test_file): | 145 def _test_is_expected_missing(self, test_file): |
146 expectations = self._expectations.model().get_expectations(test_file) | 146 expectations = self._expectations.model().get_expectations(test_file) |
147         return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations | 147         return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations |
148 | 148 |
149 def _test_is_slow(self, test_file): | 149 def _test_is_slow(self, test_file): |
150         return test_expectations.SLOW in self._expectations.model().get_expectations(test_file) | 150         return test_expectations.SLOW in self._expectations.model().get_expectations(test_file) |
151 | 151 |
152 def needs_servers(self, test_names): | 152 def needs_servers(self, test_names): |
153         return any(self._test_requires_lock(test_name) for test_name in test_names) | 153         return any(self._test_requires_lock(test_name) for test_name in test_names) |
154 | 154 |
155 def _rename_results_folder(self): | 155 def _rename_results_folder(self): |
156 try: | 156 try: |
157             timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html")))) | 157             timestamp = time.strftime( |
158 except OSError, e: | 158 '%Y-%m-%d-%H-%M-%S', |
| 159 time.localtime( |
| 160 self._filesystem.mtime( |
| 161 self._filesystem.join( |
| 162 self._results_directory, |
| 163 'results.html')))) |
| 164 except OSError as e: |
159             # It might be possible that results.html was not generated in previous run, because the test | 165             # It might be possible that results.html was not generated in previous run, because the test |
160             # run was interrupted even before testing started. In those cases, don't archive the folder. | 166             # run was interrupted even before testing started. In those cases, don't archive the folder. |
161 # Simply override the current folder contents with new results. | 167 # Simply override the current folder contents with new results. |
162 import errno | 168 import errno |
163 if e.errno == errno.EEXIST: | 169 if e.errno == errno.EEXIST: |
164                 _log.warning("No results.html file found in previous run, skipping it.") | 170                 _log.warning('No results.html file found in previous run, skipping it.') |
165 return None | 171 return None |
166         archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp)) | 172         archived_name = ''.join((self._filesystem.basename(self._results_directory), '_', timestamp)) |
167         archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name) | 173         archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name) |
168 self._filesystem.move(self._results_directory, archived_path) | 174 self._filesystem.move(self._results_directory, archived_path) |
169 | 175 |
170 def _clobber_old_archived_results(self): | 176 def _clobber_old_archived_results(self): |
171         results_directory_path = self._filesystem.dirname(self._results_directory) | 177         results_directory_path = self._filesystem.dirname(self._results_directory) |
172 file_list = self._filesystem.listdir(results_directory_path) | 178 file_list = self._filesystem.listdir(results_directory_path) |
173 results_directories = [] | 179 results_directories = [] |
174 for dir in file_list: | 180 for dir in file_list: |
175 file_path = self._filesystem.join(results_directory_path, dir) | 181 file_path = self._filesystem.join(results_directory_path, dir) |
176 if self._filesystem.isdir(file_path): | 182 if self._filesystem.isdir(file_path): |
177 results_directories.append(file_path) | 183 results_directories.append(file_path) |
178 results_directories.sort(key=lambda x: self._filesystem.mtime(x)) | 184 results_directories.sort(key=lambda x: self._filesystem.mtime(x)) |
179         self._printer.write_update("Clobbering old archived results in %s" % results_directory_path) | 185         self._printer.write_update('Clobbering old archived results in %s' % results_directory_path) |
180 for dir in results_directories[:-self.ARCHIVED_RESULTS_LIMIT]: | 186 for dir in results_directories[:-self.ARCHIVED_RESULTS_LIMIT]: |
181 self._filesystem.rmtree(dir) | 187 self._filesystem.rmtree(dir) |
182 | 188 |
183 def _set_up_run(self, test_names): | 189 def _set_up_run(self, test_names): |
184 self._printer.write_update("Checking build ...") | 190 self._printer.write_update('Checking build ...') |
185 if self._options.build: | 191 if self._options.build: |
186             exit_code = self._port.check_build(self.needs_servers(test_names), self._printer) | 192             exit_code = self._port.check_build(self.needs_servers(test_names), self._printer) |
187 if exit_code: | 193 if exit_code: |
188 _log.error("Build check failed") | 194 _log.error('Build check failed') |
189 return exit_code | 195 return exit_code |
190 | 196 |
191 # This must be started before we check the system dependencies, | 197 # This must be started before we check the system dependencies, |
192 # since the helper may do things to make the setup correct. | 198 # since the helper may do things to make the setup correct. |
193 if self._options.pixel_tests: | 199 if self._options.pixel_tests: |
194 self._printer.write_update("Starting pixel test helper ...") | 200 self._printer.write_update('Starting pixel test helper ...') |
195 self._port.start_helper() | 201 self._port.start_helper() |
196 | 202 |
197 # Check that the system dependencies (themes, fonts, ...) are correct. | 203 # Check that the system dependencies (themes, fonts, ...) are correct. |
198 if not self._options.nocheck_sys_deps: | 204 if not self._options.nocheck_sys_deps: |
199 self._printer.write_update("Checking system dependencies ...") | 205 self._printer.write_update('Checking system dependencies ...') |
200             exit_code = self._port.check_sys_deps(self.needs_servers(test_names)) | 206             exit_code = self._port.check_sys_deps(self.needs_servers(test_names)) |
201 if exit_code: | 207 if exit_code: |
202 self._port.stop_helper() | 208 self._port.stop_helper() |
203 return exit_code | 209 return exit_code |
204 | 210 |
205         if self._options.enable_versioned_results and self._filesystem.exists(self._results_directory): | 211         if self._options.enable_versioned_results and self._filesystem.exists(self._results_directory): |
206             if self._options.clobber_old_results: | 212             if self._options.clobber_old_results: |
207                 _log.warning("Flag --enable_versioned_results overrides --clobber-old-results.") | 213                 _log.warning('Flag --enable_versioned_results overrides --clobber-old-results.') |
208 self._clobber_old_archived_results() | 214 self._clobber_old_archived_results() |
209 # Rename the existing results folder for archiving. | 215 # Rename the existing results folder for archiving. |
210 self._rename_results_folder() | 216 self._rename_results_folder() |
211 elif self._options.clobber_old_results: | 217 elif self._options.clobber_old_results: |
212 self._clobber_old_results() | 218 self._clobber_old_results() |
213 | 219 |
214 # Create the output directory if it doesn't already exist. | 220 # Create the output directory if it doesn't already exist. |
215 self._port.host.filesystem.maybe_make_directory(self._results_directory) | 221 self._port.host.filesystem.maybe_make_directory(self._results_directory) |
216 | 222 |
217 self._port.setup_test_run() | 223 self._port.setup_test_run() |
218 return test_run_results.OK_EXIT_STATUS | 224 return test_run_results.OK_EXIT_STATUS |
219 | 225 |
220 def run(self, args): | 226 def run(self, args): |
221 """Run the tests and return a RunDetails object with the results.""" | 227 """Run the tests and return a RunDetails object with the results.""" |
222 start_time = time.time() | 228 start_time = time.time() |
223 self._printer.write_update("Collecting tests ...") | 229 self._printer.write_update('Collecting tests ...') |
224 try: | 230 try: |
225 paths, test_names = self._collect_tests(args) | 231 paths, test_names = self._collect_tests(args) |
226 except IOError: | 232 except IOError: |
227 # This is raised if --test-list doesn't exist | 233 # This is raised if --test-list doesn't exist |
228             return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) | 234             return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) |
229 | 235 |
230 self._printer.write_update("Parsing expectations ...") | 236 self._printer.write_update('Parsing expectations ...') |
231         self._expectations = test_expectations.TestExpectations(self._port, test_names) | 237         self._expectations = test_expectations.TestExpectations(self._port, test_names) |
232 | 238 |
233 tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names) | 239 tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names) |
234         self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations) | 240         self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations) |
235 | 241 |
236 # Check to make sure we're not skipping every test. | 242 # Check to make sure we're not skipping every test. |
237 if not tests_to_run: | 243 if not tests_to_run: |
238 _log.critical('No tests to run.') | 244 _log.critical('No tests to run.') |
239             return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) | 245             return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS) |
240 | 246 |
241 exit_code = self._set_up_run(tests_to_run) | 247 exit_code = self._set_up_run(tests_to_run) |
242 if exit_code: | 248 if exit_code: |
243 return test_run_results.RunDetails(exit_code=exit_code) | 249 return test_run_results.RunDetails(exit_code=exit_code) |
244 | 250 |
245 # Don't retry failures if an explicit list of tests was passed in. | 251 # Don't retry failures if an explicit list of tests was passed in. |
246 if self._options.retry_failures is None: | 252 if self._options.retry_failures is None: |
247 should_retry_failures = len(paths) < len(test_names) | 253 should_retry_failures = len(paths) < len(test_names) |
248 else: | 254 else: |
249 should_retry_failures = self._options.retry_failures | 255 should_retry_failures = self._options.retry_failures |
250 | 256 |
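Reviewer note: when --retry-failures is not passed, the default is to retry only if the expanded test list is larger than the set of paths the user named, i.e. the user did not spell out an explicit list of tests. A sketch of that decision:

    def should_retry_failures(paths, test_names, retry_failures_option=None):
        # An explicit --retry-failures / --no-retry-failures always wins.
        if retry_failures_option is not None:
            return retry_failures_option
        # More tests than named paths means directories (or everything)
        # were requested, so retrying unexpected failures is worthwhile.
        return len(paths) < len(test_names)

    assert should_retry_failures(['fast/css'], ['fast/css/a.html', 'fast/css/b.html'])
    assert not should_retry_failures(['fast/css/a.html'], ['fast/css/a.html'])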
251 enabled_pixel_tests_in_retry = False | 257 enabled_pixel_tests_in_retry = False |
252 try: | 258 try: |
253 self._start_servers(tests_to_run) | 259 self._start_servers(tests_to_run) |
254 | 260 |
255             initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, | 261             initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, |
256                 self._port.num_workers(int(self._options.child_processes)), retrying=False) | 262                                               self._port.num_workers(int(self._options.child_processes)), retrying=False) |
257 | 263 |
258             # Don't retry failures when interrupted by user or failures limit exception. | 264             # Don't retry failures when interrupted by user or failures limit exception. |
259             should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted) | 265             should_retry_failures = should_retry_failures and not ( |
 | 266                 initial_results.interrupted or initial_results.keyboard_interrupted) |
260 | 267 |
261 tests_to_retry = self._tests_to_retry(initial_results) | 268 tests_to_retry = self._tests_to_retry(initial_results) |
262 if should_retry_failures and tests_to_retry: | 269 if should_retry_failures and tests_to_retry: |
263                 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed() | 270                 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed() |
264 | 271 |
265 _log.info('') | 272 _log.info('') |
266                 _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry)) | 273                 _log.info('Retrying %d unexpected failure(s) ...' % len(tests_to_retry)) |
267 _log.info('') | 274 _log.info('') |
268                 retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, | 275                 retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, |
269 num_workers=1, retrying=True) | 276 num_workers=1, retrying=True) |
270 | 277 |
271 if enabled_pixel_tests_in_retry: | 278 if enabled_pixel_tests_in_retry: |
272 self._options.pixel_tests = False | 279 self._options.pixel_tests = False |
273 else: | 280 else: |
274 retry_results = None | 281 retry_results = None |
275 finally: | 282 finally: |
276 self._stop_servers() | 283 self._stop_servers() |
277 self._clean_up_run() | 284 self._clean_up_run() |
278 | 285 |
279 # Some crash logs can take a long time to be written out so look | 286 # Some crash logs can take a long time to be written out so look |
280 # for new logs after the test run finishes. | 287 # for new logs after the test run finishes. |
281 self._printer.write_update("looking for new crash logs") | 288 self._printer.write_update('looking for new crash logs') |
282 self._look_for_new_crash_logs(initial_results, start_time) | 289 self._look_for_new_crash_logs(initial_results, start_time) |
283 if retry_results: | 290 if retry_results: |
284 self._look_for_new_crash_logs(retry_results, start_time) | 291 self._look_for_new_crash_logs(retry_results, start_time) |
285 | 292 |
286 _log.debug("summarizing results") | 293 _log.debug('summarizing results') |
287         summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry) | 294         summarized_full_results = test_run_results.summarize_results( |
288         summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True) | 295             self._port, |
| 296 self._expectations, |
| 297 initial_results, |
| 298 retry_results, |
| 299 enabled_pixel_tests_in_retry) |
| 300 summarized_failing_results = test_run_results.summarize_results( |
| 301 self._port, |
| 302 self._expectations, |
| 303 initial_results, |
| 304 retry_results, |
| 305 enabled_pixel_tests_in_retry, |
| 306 only_include_failing=True) |
289 | 307 |
290 exit_code = summarized_failing_results['num_regressions'] | 308 exit_code = summarized_failing_results['num_regressions'] |
291 if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS: | 309 if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS: |
292 _log.warning('num regressions (%d) exceeds max exit status (%d)' % | 310 _log.warning('num regressions (%d) exceeds max exit status (%d)' % |
293 (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)) | 311 (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)) |
294 exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS | 312 exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS |
295 | 313 |
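Reviewer note: the exit code carries the regression count but is clamped at MAX_FAILURES_EXIT_STATUS so it cannot collide with the reserved higher statuses. A sketch of the clamp (the constant's value here is illustrative):

    MAX_FAILURES_EXIT_STATUS = 101  # illustrative; the real value is defined in test_run_results

    def clamped_exit_code(num_regressions):
        # Report the failure count, but never exceed the cap.
        return min(num_regressions, MAX_FAILURES_EXIT_STATUS)

    assert clamped_exit_code(7) == 7
    assert clamped_exit_code(5000) == MAX_FAILURES_EXIT_STATUS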
296 if not self._options.dry_run: | 314 if not self._options.dry_run: |
297             self._write_json_files(summarized_full_results, summarized_failing_results, initial_results) | 315             self._write_json_files(summarized_full_results, summarized_failing_results, initial_results) |
298 | 316 |
299 if self._options.write_full_results_to: | 317 if self._options.write_full_results_to: |
300                 self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"), | 318                 self._filesystem.copyfile(self._filesystem.join(self._results_directory, 'full_results.json'), |
301 self._options.write_full_results_to) | 319 self._options.write_full_results_to) |
302 | 320 |
303 self._upload_json_files() | 321 self._upload_json_files() |
304 | 322 |
305             results_path = self._filesystem.join(self._results_directory, "results.html") | 323             results_path = self._filesystem.join(self._results_directory, 'results.html') |
306 self._copy_results_html_file(results_path) | 324 self._copy_results_html_file(results_path) |
307 if initial_results.keyboard_interrupted: | 325 if initial_results.keyboard_interrupted: |
308 exit_code = test_run_results.INTERRUPTED_EXIT_STATUS | 326 exit_code = test_run_results.INTERRUPTED_EXIT_STATUS |
309 else: | 327 else: |
310 if initial_results.interrupted: | 328 if initial_results.interrupted: |
311 exit_code = test_run_results.EARLY_EXIT_STATUS | 329 exit_code = test_run_results.EARLY_EXIT_STATUS |
312                 if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)): | 330                 if self._options.show_results and ( |
 | 331                         exit_code or (self._options.full_results_html and initial_results.total_failures)): |
313 self._port.show_results_html_file(results_path) | 332 self._port.show_results_html_file(results_path) |
314                 self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results) | 333                 self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results) |
315         return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry) | 334         return test_run_results.RunDetails( |
 | 335             exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry) |
316 | 336 |
317     def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying): | 337     def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying): |
318 | 338 |
319 test_inputs = [] | 339 test_inputs = [] |
320 for _ in xrange(iterations): | 340 for _ in xrange(iterations): |
321 for test in tests_to_run: | 341 for test in tests_to_run: |
322 for _ in xrange(repeat_each): | 342 for _ in xrange(repeat_each): |
323 test_inputs.append(self._test_input_for_file(test)) | 343 test_inputs.append(self._test_input_for_file(test)) |
324         return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying) | 344         return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying) |
325 | 345 |
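Reviewer note: _run_tests expands the test list so that --iterations repeats the whole suite and --repeat-each repeats each test back to back within an iteration. A sketch of the resulting ordering:

    tests_to_run = ['a.html', 'b.html']   # illustrative
    iterations, repeat_each = 2, 3

    test_inputs = [test
                   for _ in range(iterations)      # whole-suite passes
                   for test in tests_to_run
                   for _ in range(repeat_each)]    # back-to-back repeats
    assert len(test_inputs) == len(tests_to_run) * iterations * repeat_each
    print(test_inputs[:4])  # ['a.html', 'a.html', 'a.html', 'b.html']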
(...skipping 12 matching lines...)
338 if self._http_server_started: | 358 if self._http_server_started: |
339 self._printer.write_update('Stopping HTTP server ...') | 359 self._printer.write_update('Stopping HTTP server ...') |
340 self._http_server_started = False | 360 self._http_server_started = False |
341 self._port.stop_http_server() | 361 self._port.stop_http_server() |
342 if self._websockets_server_started: | 362 if self._websockets_server_started: |
343 self._printer.write_update('Stopping WebSocket server ...') | 363 self._printer.write_update('Stopping WebSocket server ...') |
344 self._websockets_server_started = False | 364 self._websockets_server_started = False |
345 self._port.stop_websocket_server() | 365 self._port.stop_websocket_server() |
346 | 366 |
347 def _clean_up_run(self): | 367 def _clean_up_run(self): |
348 _log.debug("Flushing stdout") | 368 _log.debug('Flushing stdout') |
349 sys.stdout.flush() | 369 sys.stdout.flush() |
350 _log.debug("Flushing stderr") | 370 _log.debug('Flushing stderr') |
351 sys.stderr.flush() | 371 sys.stderr.flush() |
352 _log.debug("Stopping helper") | 372 _log.debug('Stopping helper') |
353 self._port.stop_helper() | 373 self._port.stop_helper() |
354 _log.debug("Cleaning up port") | 374 _log.debug('Cleaning up port') |
355 self._port.clean_up_test_run() | 375 self._port.clean_up_test_run() |
356 | 376 |
357 def _force_pixel_tests_if_needed(self): | 377 def _force_pixel_tests_if_needed(self): |
358 if self._options.pixel_tests: | 378 if self._options.pixel_tests: |
359 return False | 379 return False |
360 | 380 |
361 _log.debug("Restarting helper") | 381 _log.debug('Restarting helper') |
362 self._port.stop_helper() | 382 self._port.stop_helper() |
363 self._options.pixel_tests = True | 383 self._options.pixel_tests = True |
364 self._port.start_helper() | 384 self._port.start_helper() |
365 | 385 |
366 return True | 386 return True |
367 | 387 |
368 def _look_for_new_crash_logs(self, run_results, start_time): | 388 def _look_for_new_crash_logs(self, run_results, start_time): |
369         """Since crash logs can take a long time to be written out if the system is | 389         """Since crash logs can take a long time to be written out if the system is |
370 under stress do a second pass at the end of the test run. | 390 under stress do a second pass at the end of the test run. |
371 | 391 |
(...skipping 19 matching lines...)
391         crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time) | 411         crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time) |
392 if crash_logs: | 412 if crash_logs: |
393 for test, crash_log in crash_logs.iteritems(): | 413 for test, crash_log in crash_logs.iteritems(): |
394                 writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test) | 414                 writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test) |
395 writer.write_crash_log(crash_log) | 415 writer.write_crash_log(crash_log) |
396 | 416 |
397 def _clobber_old_results(self): | 417 def _clobber_old_results(self): |
398 # Just clobber the actual test results directories since the other | 418 # Just clobber the actual test results directories since the other |
399 # files in the results directory are explicitly used for cross-run | 419 # files in the results directory are explicitly used for cross-run |
400 # tracking. | 420 # tracking. |
401 self._printer.write_update("Clobbering old results in %s" % | 421 self._printer.write_update('Clobbering old results in %s' % |
402 self._results_directory) | 422 self._results_directory) |
403 layout_tests_dir = self._port.layout_tests_dir() | 423 layout_tests_dir = self._port.layout_tests_dir() |
404 possible_dirs = self._port.test_dirs() | 424 possible_dirs = self._port.test_dirs() |
405 for dirname in possible_dirs: | 425 for dirname in possible_dirs: |
406             if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)): | 426             if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)): |
407                 self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname)) | 427                 self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname)) |
408 | 428 |
409 # Port specific clean-up. | 429 # Port specific clean-up. |
410 self._port.clobber_old_port_specific_results() | 430 self._port.clobber_old_port_specific_results() |
411 | 431 |
412 def _tests_to_retry(self, run_results): | 432 def _tests_to_retry(self, run_results): |
413         return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS] | 433         return [result.test_name for result in run_results.unexpected_results_by_name.values( |
 | 434         ) if result.type != test_expectations.PASS] |
414 | 435 |
415     def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results): | 436     def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results): |
416 _log.debug("Writing JSON files in %s." % self._results_directory) | 437 _log.debug('Writing JSON files in %s.' % self._results_directory) |
417 | 438 |
418 # FIXME: Upload stats.json to the server and delete times_ms. | 439 # FIXME: Upload stats.json to the server and delete times_ms. |
419         times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values()) | 440         times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values()) |
420         times_json_path = self._filesystem.join(self._results_directory, "times_ms.json") | 441         times_json_path = self._filesystem.join(self._results_directory, 'times_ms.json') |
421         json_results_generator.write_json(self._filesystem, times_trie, times_json_path) | 442         json_results_generator.write_json(self._filesystem, times_trie, times_json_path) |
422 | 443 |
423 stats_trie = self._stats_trie(initial_results) | 444 stats_trie = self._stats_trie(initial_results) |
424         stats_path = self._filesystem.join(self._results_directory, "stats.json") | 445         stats_path = self._filesystem.join(self._results_directory, 'stats.json') |
425 self._filesystem.write_text_file(stats_path, json.dumps(stats_trie)) | 446 self._filesystem.write_text_file(stats_path, json.dumps(stats_trie)) |
426 | 447 |
427         full_results_path = self._filesystem.join(self._results_directory, "full_results.json") | 448         full_results_path = self._filesystem.join(self._results_directory, 'full_results.json') |
428         json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path) | 449         json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path) |
429 | 450 |
430         full_results_path = self._filesystem.join(self._results_directory, "failing_results.json") | 451         full_results_path = self._filesystem.join(self._results_directory, 'failing_results.json') |
431         # We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that. | 452         # We write failing_results.json out as jsonp because we need to load it |
432         json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS") | 453         # from a file url for results.html and Chromium doesn't allow that. |
 | 454         json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback='ADD_RESULTS') |
433 | 455 |
434 _log.debug("Finished writing JSON files.") | 456 _log.debug('Finished writing JSON files.') |
435 | 457 |
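Reviewer note: failing_results.json is written as JSONP, i.e. the JSON payload wrapped in an ADD_RESULTS(...) call, so results.html can pull it in via a script tag from a file:// URL. A hedged sketch of the wrapping; the exact formatting is owned by json_results_generator.write_json:

    import json

    summarized_failing_results = {'num_regressions': 0}  # illustrative payload

    def as_jsonp(payload, callback):
        # JSONP: "callback(<json>);" can be loaded with <script src=...>.
        return '%s(%s);' % (callback, json.dumps(payload))

    print(as_jsonp(summarized_failing_results, 'ADD_RESULTS'))
    # ADD_RESULTS({"num_regressions": 0});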
436 def _upload_json_files(self): | 458 def _upload_json_files(self): |
437 if not self._options.test_results_server: | 459 if not self._options.test_results_server: |
438 return | 460 return |
439 | 461 |
440 if not self._options.master_name: | 462 if not self._options.master_name: |
441             _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.") | 463             _log.error('--test-results-server was set, but --master-name was not. Not uploading JSON files.') |
442 return | 464 return |
443 | 465 |
444         _log.debug("Uploading JSON files for builder: %s", self._options.builder_name) | 466         _log.debug('Uploading JSON files for builder: %s', self._options.builder_name) |
445 attrs = [("builder", self._options.builder_name), | 467 attrs = [('builder', self._options.builder_name), |
446 ("testtype", "layout-tests"), | 468 ('testtype', 'layout-tests'), |
447 ("master", self._options.master_name)] | 469 ('master', self._options.master_name)] |
448 | 470 |
449         files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]] | 471         files = [ |
| 472 (file, |
| 473 self._filesystem.join( |
| 474 self._results_directory, |
| 475 file)) for file in [ |
| 476 'failing_results.json', |
| 477 'full_results.json', |
| 478 'times_ms.json']] |
450 | 479 |
451 url = "http://%s/testfile/upload" % self._options.test_results_server | 480 url = 'http://%s/testfile/upload' % self._options.test_results_server |
452 # Set uploading timeout in case appengine server is having problems. | 481 # Set uploading timeout in case appengine server is having problems. |
453 # 120 seconds are more than enough to upload test results. | 482 # 120 seconds are more than enough to upload test results. |
454 uploader = FileUploader(url, 120) | 483 uploader = FileUploader(url, 120) |
455 try: | 484 try: |
456             response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs) | 485             response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs) |
457 if response: | 486 if response: |
458 if response.code == 200: | 487 if response.code == 200: |
459 _log.debug("JSON uploaded.") | 488 _log.debug('JSON uploaded.') |
460 else: | 489 else: |
461                     _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read())) | 490                     _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read())) |
462 else: | 491 else: |
463 _log.error("JSON upload failed; no response returned") | 492 _log.error('JSON upload failed; no response returned') |
464 except Exception, err: | 493 except Exception as err: |
465 _log.error("Upload failed: %s" % err) | 494 _log.error('Upload failed: %s' % err) |
466 | 495 |
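Reviewer note: the upload is a single multipart/form-data POST carrying the builder metadata and the three JSON files, with a 120-second timeout. A rough equivalent using the requests library, not the FileUploader class webkitpy actually uses; server name and attrs below are illustrative:

    import requests

    test_results_server = 'test-results.example.org'   # illustrative
    url = 'http://%s/testfile/upload' % test_results_server
    attrs = [('builder', 'WebKit Linux'),               # illustrative metadata
             ('testtype', 'layout-tests'),
             ('master', 'chromium.webkit')]
    names = ['failing_results.json', 'full_results.json', 'times_ms.json']

    try:
        files = [('file', (name, open(name, 'rb'))) for name in names]
        # One multipart POST; the generous timeout covers a slow appengine server.
        response = requests.post(url, data=attrs, files=files, timeout=120)
        print(response.status_code)
    except Exception as err:
        print('Upload failed: %s' % err)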
467 def _copy_results_html_file(self, destination_path): | 496 def _copy_results_html_file(self, destination_path): |
468         base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness') | 497         base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness') |
469 results_file = self._filesystem.join(base_dir, 'results.html') | 498 results_file = self._filesystem.join(base_dir, 'results.html') |
470         # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests, | 499         # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests, |
471 # so make sure it exists before we try to copy it. | 500 # so make sure it exists before we try to copy it. |
472 if self._filesystem.exists(results_file): | 501 if self._filesystem.exists(results_file): |
473 self._filesystem.copyfile(results_file, destination_path) | 502 self._filesystem.copyfile(results_file, destination_path) |
474 | 503 |
475 def _stats_trie(self, initial_results): | 504 def _stats_trie(self, initial_results): |
476 def _worker_number(worker_name): | 505 def _worker_number(worker_name): |
477 return int(worker_name.split('/')[1]) if worker_name else -1 | 506 return int(worker_name.split('/')[1]) if worker_name else -1 |
478 | 507 |
479 stats = {} | 508 stats = {} |
480 for result in initial_results.results_by_name.values(): | 509 for result in initial_results.results_by_name.values(): |
481 if result.type != test_expectations.SKIP: | 510 if result.type != test_expectations.SKIP: |
482                 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))} | 511                 stats[result.test_name] = {'results': (_worker_number(result.worker_name), |
 | 512                                                        result.test_number, |
 | 513                                                        result.pid, |
 | 514                                                        int(result.test_run_time * 1000), |
 | 515                                                        int(result.total_run_time * 1000))} |
483 stats_trie = {} | 516 stats_trie = {} |
484 for name, value in stats.iteritems(): | 517 for name, value in stats.iteritems(): |
485 json_results_generator.add_path_to_trie(name, value, stats_trie) | 518 json_results_generator.add_path_to_trie(name, value, stats_trie) |
486 return stats_trie | 519 return stats_trie |
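Reviewer note: the stats trie nests each test's tuple under its path components, which is the shape json_results_generator.add_path_to_trie builds. A self-contained sketch of that insertion (an illustrative reimplementation, not the real helper; values are made up):

    def add_path_to_trie(path, value, trie):
        # Recursively nest dicts per '/' component; the leaf stores the value.
        head, _, rest = path.partition('/')
        if not rest:
            trie[head] = value
            return
        trie.setdefault(head, {})
        add_path_to_trie(rest, value, trie[head])

    stats_trie = {}
    add_path_to_trie('fast/css/a.html', {'results': (0, 1, 4242, 12, 15)}, stats_trie)
    add_path_to_trie('fast/dom/b.html', {'results': (1, 2, 4243, 8, 9)}, stats_trie)
    print(stats_trie)
    # {'fast': {'css': {'a.html': {...}}, 'dom': {'b.html': {...}}}}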