OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Run layout tests using the test_shell. | 6 """Run layout tests using the test_shell. |
7 | 7 |
8 This is a port of the existing webkit test script run-webkit-tests. | 8 This is a port of the existing webkit test script run-webkit-tests. |
9 | 9 |
10 The TestRunner class runs a series of tests (TestType interface) against a set | 10 The TestRunner class runs a series of tests (TestType interface) against a set |
(...skipping 186 matching lines...) | |
197 except Exception, err: | 197 except Exception, err: |
198 if self._options.lint_test_files: | 198 if self._options.lint_test_files: |
199 print str(err) | 199 print str(err) |
200 else: | 200 else: |
201 raise err | 201 raise err |
202 | 202 |
203 def PrepareListsAndPrintOutput(self): | 203 def PrepareListsAndPrintOutput(self): |
204 """Create appropriate subsets of test lists and returns a ResultSummary | 204 """Create appropriate subsets of test lists and returns a ResultSummary |
205 object. Also prints expected test counts.""" | 205 object. Also prints expected test counts.""" |
206 | 206 |
207 result_summary = ResultSummary(self._expectations, self._test_files) | |
208 | |
209 # Remove skipped - both fixable and ignored - files from the | 207 # Remove skipped - both fixable and ignored - files from the |
210 # top-level list of files to test. | 208 # top-level list of files to test. |
211 skipped = set() | 209 num_all_test_files = len(self._test_files) |
212 if len(self._test_files) > 1 and not self._options.force: | 210 skipped = set() |
211 if num_all_test_files > 1 and not self._options.force: | |
213 skipped = self._expectations.GetTestsWithResultType( | 212 skipped = self._expectations.GetTestsWithResultType( |
214 test_expectations.SKIP) | 213 test_expectations.SKIP) |
ojan 2009/12/04 00:58:30: nit: indent should be 4 spaces here. | |
215 for test in skipped: | |
216 result_summary.Add(test, [], test_expectations.SKIP) | |
217 self._test_files -= skipped | 214 self._test_files -= skipped |
218 | 215 |
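The skip-filtering step on the new side is, in effect, a set difference against the tests that the expectations mark SKIP. A minimal sketch under that reading; remove_skipped and its arguments are illustrative stand-ins, not the real test_expectations API:

    # Sketch: drop skipped tests from the run set unless --force is given.
    def remove_skipped(test_files, skipped_from_expectations, force=False):
        if force or len(test_files) <= 1:
            return set(test_files), set()
        # Only count skips that are actually in the current run set.
        skipped = set(skipped_from_expectations) & set(test_files)
        return set(test_files) - skipped, skipped

    tests = set(['fast/a.html', 'fast/b.html', 'http/tests/c.html'])
    remaining, skipped = remove_skipped(tests, set(['fast/b.html']))
    assert remaining == set(['fast/a.html', 'http/tests/c.html'])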
219 if self._options.force: | |
220 logging.info('Skipped: 0 tests (--force)') | |
221 else: | |
222 logging.info('Skipped: %d tests' % len(skipped)) | |
223 logging.info('Skipped tests do not appear in any of the below numbers\n') | |
224 | |
225 # Create a sorted list of test files so the subset chunk, if used, contains | 216 # Create a sorted list of test files so the subset chunk, if used, contains |
226 # alphabetically consecutive tests. | 217 # alphabetically consecutive tests. |
227 self._test_files_list = list(self._test_files) | 218 self._test_files_list = list(self._test_files) |
228 if self._options.randomize_order: | 219 if self._options.randomize_order: |
229 random.shuffle(self._test_files_list) | 220 random.shuffle(self._test_files_list) |
230 else: | 221 else: |
231 self._test_files_list.sort(self.TestFilesSort) | 222 self._test_files_list.sort(self.TestFilesSort) |
232 | 223 |
233 # Chunking replaces self._expectations, which loses all the skipped test | |
234 # information. Keep the prechunk expectations for tracking number of | |
235 # skipped tests. | |
236 self.prechunk_expectations = self._expectations; | |
237 | |
238 # If the user specifies they just want to run a subset of the tests, | 224 # If the user specifies they just want to run a subset of the tests, |
239 # just grab a subset of the non-skipped tests. | 225 # just grab a subset of the non-skipped tests. |
240 if self._options.run_chunk or self._options.run_part: | 226 if self._options.run_chunk or self._options.run_part: |
241 chunk_value = self._options.run_chunk or self._options.run_part | 227 chunk_value = self._options.run_chunk or self._options.run_part |
242 test_files = self._test_files_list | 228 test_files = self._test_files_list |
243 try: | 229 try: |
244 (chunk_num, chunk_len) = chunk_value.split(":") | 230 (chunk_num, chunk_len) = chunk_value.split(":") |
245 chunk_num = int(chunk_num) | 231 chunk_num = int(chunk_num) |
246 assert(chunk_num >= 0) | 232 assert(chunk_num >= 0) |
247 test_size = int(chunk_len) | 233 test_size = int(chunk_len) |
(...skipping 23 matching lines...) | |
271 rounded_tests = num_tests + test_size - (num_tests % test_size) | 257 rounded_tests = num_tests + test_size - (num_tests % test_size) |
272 | 258 |
273 chunk_len = rounded_tests / test_size | 259 chunk_len = rounded_tests / test_size |
274 slice_start = chunk_len * (chunk_num - 1) | 260 slice_start = chunk_len * (chunk_num - 1) |
275 # It doesn't matter if we go over test_size. | 261 # It doesn't matter if we go over test_size. |
276 | 262 |
277 # Get the end offset of the slice. | 263 # Get the end offset of the slice. |
278 slice_end = min(num_tests, slice_start + chunk_len) | 264 slice_end = min(num_tests, slice_start + chunk_len) |
279 | 265 |
280 files = test_files[slice_start:slice_end] | 266 files = test_files[slice_start:slice_end] |
267 | |
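The rounding on both sides guarantees every part index maps to a valid slice even when the test count is not a multiple of the part count. Worked numbers for the visible arithmetic (values illustrative; // written for clarity, where the original relies on Python 2 integer division):

    num_tests = 53    # total non-skipped tests
    test_size = 10    # e.g. --run-part "2:10": part 2 of 10
    chunk_num = 2

    rounded_tests = num_tests + test_size - (num_tests % test_size)  # 60
    chunk_len = rounded_tests // test_size                           # 6 per part
    slice_start = chunk_len * (chunk_num - 1)                        # 6
    slice_end = min(num_tests, slice_start + chunk_len)              # 12
    assert (slice_start, slice_end) == (6, 12)  # part 2 runs tests[6:12]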
281 tests_run_msg = 'Run: %d tests (chunk slice [%d:%d] of %d)' % ( | 268 tests_run_msg = 'Run: %d tests (chunk slice [%d:%d] of %d)' % ( |
282 (slice_end - slice_start), slice_start, slice_end, num_tests) | 269 (slice_end - slice_start), slice_start, slice_end, num_tests) |
283 logging.info(tests_run_msg) | 270 logging.info(tests_run_msg) |
284 | 271 |
285 # If we reached the end and we don't have enough tests, we run some | 272 # If we reached the end and we don't have enough tests, we run some |
286 # from the beginning. | 273 # from the beginning. |
287 if self._options.run_chunk and (slice_end - slice_start < chunk_len): | 274 if self._options.run_chunk and (slice_end - slice_start < chunk_len): |
288 extra = 1 + chunk_len - (slice_end - slice_start) | 275 extra = 1 + chunk_len - (slice_end - slice_start) |
289 extra_msg = ' last chunk is partial, appending [0:%d]' % extra | 276 extra_msg = ' last chunk is partial, appending [0:%d]' % extra |
290 logging.info(extra_msg) | 277 logging.info(extra_msg) |
291 tests_run_msg += "\n" + extra_msg | 278 tests_run_msg += "\n" + extra_msg |
292 files.extend(test_files[0:extra]) | 279 files.extend(test_files[0:extra]) |
293 self._test_files_list = files | |
294 self._test_files = set(files) | |
295 | |
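When the final chunk comes up short, the code above tops it up from the front of the sorted list; note that it appends one test more than the shortfall (the "1 +" in extra). A small sketch preserving that behavior:

    # Sketch of the wrap-around top-up for a partial final chunk.
    def top_up_chunk(files, test_files, chunk_len, slice_start, slice_end):
        if slice_end - slice_start < chunk_len:
            # Mirrors the original: shortfall plus one extra test.
            extra = 1 + chunk_len - (slice_end - slice_start)
            files = files + test_files[0:extra]
        return files

    test_files = ['t%02d' % i for i in range(53)]
    files = top_up_chunk(test_files[48:53], test_files, 6, 48, 53)
    assert len(files) == 7  # 5 in the slice, plus extra of 2 from the front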
296 tests_run_filename = os.path.join(self._options.results_directory, | 280 tests_run_filename = os.path.join(self._options.results_directory, |
297 "tests_run.txt") | 281 "tests_run.txt") |
298 tests_run_file = open(tests_run_filename, "w") | 282 tests_run_file = open(tests_run_filename, "w") |
299 tests_run_file.write(tests_run_msg + "\n") | 283 tests_run_file.write(tests_run_msg + "\n") |
300 tests_run_file.close() | 284 tests_run_file.close() |
301 | 285 |
302 # update expectations so that the stats are calculated correctly | 286 # Update expectations so that the stats are calculated correctly. |
287 len_skip_chunk = int(len(files) * len(skipped) / | |
288 float(len(self._test_files))) | |
289 skip_chunk_list = list(skipped)[0:len_skip_chunk] | |
290 skip_chunk = set(skip_chunk_list) | |
291 self._test_files_list = files + skip_chunk_list | |
292 self._test_files = set(self._test_files_list) | |
293 | |
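The new skip_chunk computation carries a proportional share of the skipped tests into the chunk, so per-chunk stats mirror the full run. Worked numbers (illustrative):

    total_tests = 500   # len(self._test_files) before chunking
    num_skipped = 50    # len(skipped)
    chunk_files = 100   # len(files) selected for this chunk

    # Same truncating int() as new lines 287-288.
    len_skip_chunk = int(chunk_files * num_skipped / float(total_tests))
    assert len_skip_chunk == 10  # this chunk reports 10 of the skips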
303 self._expectations = self.ParseExpectations( | 294 self._expectations = self.ParseExpectations( |
304 path_utils.PlatformName(), options.target == 'Debug') | 295 path_utils.PlatformName(), options.target == 'Debug') |
296 | |
297 self._test_files = set(files) | |
ojan 2009/12/04 00:58:30: Maybe add some comments here? It's confusing that | |
298 self._test_files_list = files | |
305 else: | 299 else: |
300 skip_chunk = skipped | |
306 logging.info('Run: %d tests' % len(self._test_files)) | 301 logging.info('Run: %d tests' % len(self._test_files)) |
307 | 302 |
303 result_summary = ResultSummary(self._expectations, | |
304 self._test_files | skip_chunk) | |
308 self._PrintExpectedResultsOfType(result_summary, test_expectations.PASS, | 305 self._PrintExpectedResultsOfType(result_summary, test_expectations.PASS, |
309 "passes") | 306 "passes") |
310 self._PrintExpectedResultsOfType(result_summary, test_expectations.FAIL, | 307 self._PrintExpectedResultsOfType(result_summary, test_expectations.FAIL, |
311 "failures") | 308 "failures") |
312 self._PrintExpectedResultsOfType(result_summary, test_expectations.FLAKY, | 309 self._PrintExpectedResultsOfType(result_summary, test_expectations.FLAKY, |
313 "flaky") | 310 "flaky") |
314 self._PrintExpectedResultsOfType(result_summary, test_expectations.SKIP, | 311 self._PrintExpectedResultsOfType(result_summary, test_expectations.SKIP, |
315 "skipped") | 312 "skipped") |
313 | |
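_PrintExpectedResultsOfType is defined outside this hunk; from the call sites it presumably tallies the tests whose expectations include the given result type and logs the count. A hypothetical sketch only; result_summary.tests and the log format are assumptions, not the real implementation:

    import logging

    def print_expected_results_of_type(result_summary, result_type,
                                       description):
        # Assumes result_summary.tests maps test path -> set of expected
        # result types (an assumption; the real structure is not shown here).
        count = sum(1 for expected in result_summary.tests.values()
                    if result_type in expected)
        logging.info('Expect: %d %s' % (count, description))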
314 if self._options.force: | |
315 logging.info('Running all tests, including skips (--force)') | |
316 else: | |
317 for test in skip_chunk: | |
318 result_summary.Add(test, [], test_expectations.SKIP) | |
319 | |
316 return result_summary | 320 return result_summary |
317 | 321 |
318 def AddTestType(self, test_type): | 322 def AddTestType(self, test_type): |
319 """Add a TestType to the TestRunner.""" | 323 """Add a TestType to the TestRunner.""" |
320 self._test_types.append(test_type) | 324 self._test_types.append(test_type) |
321 | 325 |
322 # We sort the tests so that tests using the http server will run first. We | 326 # We sort the tests so that tests using the http server will run first. We |
323 # are seeing some flakiness, maybe related to apache getting swapped out, | 327 # are seeing some flakiness, maybe related to apache getting swapped out, |
324 # slow, or stuck after not serving requests for a while. | 328 # slow, or stuck after not serving requests for a while. |
325 def TestFilesSort(self, x, y): | 329 def TestFilesSort(self, x, y): |
(...skipping 1112 matching lines...) | |
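The body of TestFilesSort falls inside the elided span above; per its comment, it orders http tests before everything else. A minimal cmp-style comparator along those lines (a sketch, with 'http' in the path as an assumed marker, not the elided body):

    def test_files_sort(x, y):
        # cmp-style: http-backed tests sort first, then alphabetical.
        x_http = 'http' in x  # assumed marker for server-backed tests
        y_http = 'http' in y
        if x_http != y_http:
            return -1 if x_http else 1
        return (x > y) - (x < y)  # portable cmp() for the fallback

Under Python 2 this plugs directly into list.sort(test_files_sort), matching the sort call at new line 222; Python 3 would wrap it with functools.cmp_to_key.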
1438 "this script.")) | 1442 "this script.")) |
1439 option_parser.add_option("", "--find-baselines", action="store_true", | 1443 option_parser.add_option("", "--find-baselines", action="store_true", |
1440 default=False, | 1444 default=False, |
1441 help="Prints a table mapping tests to their " | 1445 help="Prints a table mapping tests to their " |
1442 "expected results") | 1446 "expected results") |
1443 option_parser.add_option("", "--experimental-fully-parallel", | 1447 option_parser.add_option("", "--experimental-fully-parallel", |
1444 action="store_true", default=False, | 1448 action="store_true", default=False, |
1445 help="run all tests in parallel") | 1449 help="run all tests in parallel") |
1446 options, args = option_parser.parse_args() | 1450 options, args = option_parser.parse_args() |
1447 main(options, args) | 1451 main(options, args) |