| OLD | NEW |
| 1 #!/bin/env python | 1 #!/bin/env python |
| 2 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Run layout tests using the test_shell. | 6 """Run layout tests using the test_shell. |
| 7 | 7 |
| 8 This is a port of the existing webkit test script run-webkit-tests. | 8 This is a port of the existing webkit test script run-webkit-tests. |
| 9 | 9 |
| 10 The TestRunner class runs a series of tests (TestType interface) against a set | 10 The TestRunner class runs a series of tests (TestType interface) against a set |
| (...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 70 Args: | 70 Args: |
| 71 options: a dictionary of command line options | 71 options: a dictionary of command line options |
| 72 paths: a list of paths to crawl looking for test files | 72 paths: a list of paths to crawl looking for test files |
| 73 """ | 73 """ |
| 74 self._options = options | 74 self._options = options |
| 75 | 75 |
| 76 self._http_server = http_server.Lighttpd(options.results_directory) | 76 self._http_server = http_server.Lighttpd(options.results_directory) |
| 77 # a list of TestType objects | 77 # a list of TestType objects |
| 78 self._test_types = [] | 78 self._test_types = [] |
| 79 | 79 |
| 80 # a set of test files | 80 # a set of test files, and the same tests as a list |
| 81 self._test_files = set() | 81 self._test_files = set() |
| 82 self._test_files_list = None |
| 82 self._file_dir = os.path.join(os.path.dirname(sys.argv[0]), TEST_FILE_DIR) | 83 self._file_dir = os.path.join(os.path.dirname(sys.argv[0]), TEST_FILE_DIR) |
| 83 self._file_dir = path_utils.GetAbsolutePath(self._file_dir) | 84 self._file_dir = path_utils.GetAbsolutePath(self._file_dir) |
| 84 | 85 |
| 85 if options.lint_test_files: | 86 if options.lint_test_files: |
| 86 # Creating the expecations for each platform/target pair does all the | 87 # Creating the expectations for each platform/target pair does all the |
| 87 # test list parsing and ensures it's correct syntax(e.g. no dupes). | 88 # test list parsing and ensures its syntax is correct (e.g. no dupes). |
| 88 self._ParseExpectations('win', is_debug_mode=True) | 89 self._ParseExpectations('win', is_debug_mode=True) |
| 89 self._ParseExpectations('win', is_debug_mode=False) | 90 self._ParseExpectations('win', is_debug_mode=False) |
| 90 self._ParseExpectations('mac', is_debug_mode=True) | 91 self._ParseExpectations('mac', is_debug_mode=True) |
| 91 self._ParseExpectations('mac', is_debug_mode=False) | 92 self._ParseExpectations('mac', is_debug_mode=False) |
| 92 self._ParseExpectations('linux', is_debug_mode=True) | 93 self._ParseExpectations('linux', is_debug_mode=True) |
| 93 self._ParseExpectations('linux', is_debug_mode=False) | 94 self._ParseExpectations('linux', is_debug_mode=False) |
| 94 else: | 95 else: |
| 95 self._GatherTestFiles(paths) | 96 self._GatherTestFiles(paths) |
| 96 self._expectations = self._ParseExpectations( | 97 self._expectations = self._ParseExpectations( |
| 97 platform_utils.GetTestListPlatformName().lower(), | 98 platform_utils.GetTestListPlatformName().lower(), |
| 98 options.target == 'Debug') | 99 options.target == 'Debug') |
| 99 self._GenerateExpecationsAndPrintOutput() | 100 self._PrepareListsAndPrintOutput() |
| 100 | 101 |
| 101 def __del__(self): | 102 def __del__(self): |
| 102 sys.stdout.flush() | 103 sys.stdout.flush() |
| 103 sys.stderr.flush() | 104 sys.stderr.flush() |
| 104 # Stop the http server. | 105 # Stop the http server. |
| 105 self._http_server.Stop() | 106 self._http_server.Stop() |
| 106 | 107 |
| 107 def _GatherTestFiles(self, paths): | 108 def _GatherTestFiles(self, paths): |
| 108 """Generate a set of test files and place them in self._test_files | 109 """Generate a set of test files and place them in self._test_files |
| 109 | 110 |
| (...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 155 return test_expectations.TestExpectations(test_files, | 156 return test_expectations.TestExpectations(test_files, |
| 156 self._file_dir, | 157 self._file_dir, |
| 157 platform, | 158 platform, |
| 158 is_debug_mode) | 159 is_debug_mode) |
| 159 except SyntaxError, err: | 160 except SyntaxError, err: |
| 160 if self._options.lint_test_files: | 161 if self._options.lint_test_files: |
| 161 print str(err) | 162 print str(err) |
| 162 else: | 163 else: |
| 163 raise err | 164 raise err |
| 164 | 165 |
| 165 def _GenerateExpecationsAndPrintOutput(self): | 166 def _PrepareListsAndPrintOutput(self): |
| 166 """Create appropriate subsets of self._tests_files in | 167 """Create appropriate subsets of test lists and print test counts. |
| 168 |
| 169 Create appropriate subsets of self._tests_files in |
| 167 self._ignored_failures, self._fixable_failures, and self._fixable_crashes. | 170 self._ignored_failures, self._fixable_failures, and self._fixable_crashes. |
| 171 Also remove skipped files from self._test_files, extract a subset of tests |
| 172 if desired, and create the sorted self._test_files_list. |
| 168 """ | 173 """ |
| 169 # Filter and sort out files from the skipped, ignored, and fixable file | 174 # Filter and sort out files from the skipped, ignored, and fixable file |
| 170 # lists. | 175 # lists. |
| 171 saved_test_files = set() | 176 saved_test_files = set() |
| 172 if len(self._test_files) == 1: | 177 if len(self._test_files) == 1: |
| 173 # If there's only one test file, we don't want to skip it, but we do want | 178 # If there's only one test file, we don't want to skip it, but we do want |
| 174 # to sort it. So we save it to add back to the list later. | 179 # to sort it. So we save it to add back to the list later. |
| 175 saved_test_files = self._test_files | 180 saved_test_files = self._test_files |
| 176 | 181 |
| 177 # Remove skipped - both fixable and ignored - files from the | 182 # Remove skipped - both fixable and ignored - files from the |
| 178 # top-level list of files to test. | 183 # top-level list of files to test. |
| 179 skipped = (self._expectations.GetFixableSkipped() | | 184 skipped = (self._expectations.GetFixableSkipped() | |
| 180 self._expectations.GetIgnoredSkipped()) | 185 self._expectations.GetIgnoredSkipped()) |
| 181 | 186 |
| 182 self._test_files -= skipped | 187 self._test_files -= skipped |
| 183 | 188 |
| 184 # If there was only one test file, run the test even if it was skipped. | 189 # If there was only one test file, run the test even if it was skipped. |
| 185 if len(saved_test_files): | 190 if len(saved_test_files): |
| 186 self._test_files = saved_test_files | 191 self._test_files = saved_test_files |
| 187 | 192 |
| 188 logging.info('Skipped: %d tests' % len(skipped)) | 193 logging.info('Skipped: %d tests' % len(skipped)) |
| 189 logging.info('Skipped tests do not appear in any of the below numbers\n') | 194 logging.info('Skipped tests do not appear in any of the below numbers\n') |
| 190 | 195 |
| 196 # Create a sorted list of test files so the subset chunk, if used, contains |
| 197 # alphabetically consecutive tests. |
| 198 self._test_files_list = list(self._test_files) |
| 199 if self._options.randomize_order: |
| 200 random.shuffle(self._test_files_list) |
| 201 else: |
| 202 self._test_files_list.sort(self.TestFilesSort) |
| 203 |
| 191 # If the user specifies they just want to run a subset chunk of the tests, | 204 # If the user specifies they just want to run a subset chunk of the tests, |
| 192 # just grab a subset of the non-skipped tests. | 205 # just grab a subset of the non-skipped tests. |
| 193 if self._options.run_chunk: | 206 if self._options.run_chunk: |
| 194 test_files = list(self._test_files) | 207 test_files = self._test_files_list |
| 195 try: | 208 try: |
| 196 (chunk_num, chunk_len) = self._options.run_chunk.split(":") | 209 (chunk_num, chunk_len) = self._options.run_chunk.split(":") |
| 197 chunk_num = int(chunk_num) | 210 chunk_num = int(chunk_num) |
| 198 assert(chunk_num >= 0) | 211 assert(chunk_num >= 0) |
| 199 chunk_len = int(chunk_len) | 212 chunk_len = int(chunk_len) |
| 200 assert(chunk_len > 0) | 213 assert(chunk_len > 0) |
| 201 except: | 214 except: |
| 202 logging.critical("invalid chunk '%s'" % self._options.run_chunk) | 215 logging.critical("invalid chunk '%s'" % self._options.run_chunk) |
| 203 sys.exit(1) | 216 sys.exit(1) |
| 204 num_tests = len(test_files) | 217 num_tests = len(test_files) |
| 205 slice_start = (chunk_num * chunk_len) % num_tests | 218 slice_start = (chunk_num * chunk_len) % num_tests |
| 206 slice_end = min(num_tests + 1, slice_start + chunk_len) | 219 slice_end = min(num_tests + 1, slice_start + chunk_len) |
| 207 files = test_files[slice_start:slice_end] | 220 files = test_files[slice_start:slice_end] |
| 208 logging.info('Run: %d tests (chunk slice [%d:%d] of %d)' % ( | 221 logging.info('Run: %d tests (chunk slice [%d:%d] of %d)' % ( |
| 209 chunk_len, slice_start, slice_end, num_tests)) | 222 chunk_len, slice_start, slice_end, num_tests)) |
| 210 if slice_end - slice_start < chunk_len: | 223 if slice_end - slice_start < chunk_len: |
| 211 extra = 1 + chunk_len - (slice_end - slice_start) | 224 extra = 1 + chunk_len - (slice_end - slice_start) |
| 212 logging.info(' last chunk is partial, appending [0:%d]' % extra) | 225 logging.info(' last chunk is partial, appending [0:%d]' % extra) |
| 213 files.extend(test_files[0:extra]) | 226 files.extend(test_files[0:extra]) |
| 227 self._test_files_list = files |
| 214 self._test_files = set(files) | 228 self._test_files = set(files) |
| 215 # update expectations so that the stats are calculated correctly | 229 # update expectations so that the stats are calculated correctly |
| 216 self._expectations = self._ParseExpectations( | 230 self._expectations = self._ParseExpectations( |
| 217 platform_utils.GetTestListPlatformName().lower(), | 231 platform_utils.GetTestListPlatformName().lower(), |
| 218 options.target == 'Debug') | 232 self._options.target == 'Debug') |
| 219 else: | 233 else: |
| 220 logging.info('Run: %d tests' % len(self._test_files)) | 234 logging.info('Run: %d tests' % len(self._test_files)) |
| 221 | 235 |
| 222 logging.info('Deferred: %d tests' % | 236 logging.info('Deferred: %d tests' % |
| 223 len(self._expectations.GetFixableDeferred())) | 237 len(self._expectations.GetFixableDeferred())) |
| (...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 288 if proc.wait() != 0: | 302 if proc.wait() != 0: |
| 289 logging.info("Aborting because system dependencies check failed.\n" | 303 logging.info("Aborting because system dependencies check failed.\n" |
| 290 "To override, invoke with --nocheck-sys-deps") | 304 "To override, invoke with --nocheck-sys-deps") |
| 291 sys.exit(1) | 305 sys.exit(1) |
| 292 | 306 |
| 293 logging.info("Starting tests") | 307 logging.info("Starting tests") |
| 294 | 308 |
| 295 # Create the output directory if it doesn't already exist. | 309 # Create the output directory if it doesn't already exist. |
| 296 google.path_utils.MaybeMakeDirectory(self._options.results_directory) | 310 google.path_utils.MaybeMakeDirectory(self._options.results_directory) |
| 297 | 311 |
| 298 test_files = list(self._test_files) | 312 test_files = self._test_files_list |
| 299 if self._options.randomize_order: | |
| 300 random.shuffle(test_files) | |
| 301 else: | |
| 302 test_files.sort(self.TestFilesSort) | |
| 303 | 313 |
| 304 # Create the thread safe queue of (test filenames, test URIs) tuples. Each | 314 # Create the thread safe queue of (test filenames, test URIs) tuples. Each |
| 305 # TestShellThread pulls values from this queue. | 315 # TestShellThread pulls values from this queue. |
| 306 filename_queue = Queue.Queue() | 316 filename_queue = Queue.Queue() |
| 307 for test_file in test_files: | 317 for test_file in test_files: |
| 308 filename_queue.put((test_file, path_utils.FilenameToUri(test_file))) | 318 filename_queue.put((test_file, path_utils.FilenameToUri(test_file))) |
| 309 | 319 |
| 310 # If we have http tests, the first one will be an http test. | 320 # If we have http tests, the first one will be an http test. |
| 311 if test_files and test_files[0].find(self.HTTP_SUBDIR) >= 0: | 321 if test_files and test_files[0].find(self.HTTP_SUBDIR) >= 0: |
| 312 self._http_server.Start() | 322 self._http_server.Start() |
| (...skipping 456 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 769 option_parser.add_option("", "--randomize-order", action="store_true", | 779 option_parser.add_option("", "--randomize-order", action="store_true", |
| 770 default=False, | 780 default=False, |
| 771 help=("Run tests in random order (useful for " | 781 help=("Run tests in random order (useful for " |
| 772 "tracking down corruption)")) | 782 "tracking down corruption)")) |
| 773 option_parser.add_option("", "--run-chunk", | 783 option_parser.add_option("", "--run-chunk", |
| 774 default=None, | 784 default=None, |
| 775 help=("Run a specified chunk (n:l), the nth of len l" | 785 help=("Run a specified chunk (n:l), the nth of len l" |
| 776 ", of the layout tests")) | 786 ", of the layout tests")) |
| 777 options, args = option_parser.parse_args() | 787 options, args = option_parser.parse_args() |
| 778 main(options, args) | 788 main(options, args) |
| OLD | NEW |