| OLD | NEW |
| (Empty) |
| 1 #!/usr/bin/env python | |
| 2 # Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. | |
| 3 # Use of this source code is governed by a BSD-style license that can be | |
| 4 # found in the LICENSE file. | |
| 5 | |
| 6 """Run layout tests using the test_shell. | |
| 7 | |
| 8 This is a port of the existing webkit test script run-webkit-tests. | |
| 9 | |
| 10 The TestRunner class runs a series of tests (TestType interface) against a set | |
| 11 of test files. If a test file fails a TestType, it returns a list of TestFailure | |
| 12 objects to the TestRunner. The TestRunner then aggregates the TestFailures to | |
| 13 create a final report. | |
| 14 | |
| 15 This script reads several files, if they exist in the test_lists subdirectory | |
| 16 next to this script itself. Each should contain a list of paths to individual | |
| 17 tests or entire subdirectories of tests, relative to the outermost test | |
| 18 directory. Entire lines starting with '//' (comments) will be ignored. | |
| 19 | |
| 20 For details of the files' contents and purposes, see test_lists/README. | |
| 21 """ | |
| 22 | |
| 23 import errno | |
| 24 import glob | |
| 25 import logging | |
| 26 import math | |
| 27 import optparse | |
| 28 import os | |
| 29 import Queue | |
| 30 import random | |
| 31 import re | |
| 32 import shutil | |
| 33 import subprocess | |
| 34 import sys | |
| 35 import time | |
| 36 import traceback | |
| 37 | |
| 38 from layout_package import apache_http_server | |
| 39 from layout_package import test_expectations | |
| 40 from layout_package import http_server | |
| 41 from layout_package import json_layout_results_generator | |
| 42 from layout_package import metered_stream | |
| 43 from layout_package import path_utils | |
| 44 from layout_package import platform_utils | |
| 45 from layout_package import test_failures | |
| 46 from layout_package import test_shell_thread | |
| 47 from layout_package import test_files | |
| 48 from layout_package import websocket_server | |
| 49 from test_types import fuzzy_image_diff | |
| 50 from test_types import image_diff | |
| 51 from test_types import test_type_base | |
| 52 from test_types import text_diff | |
| 53 | |
| 54 sys.path.append(path_utils.PathFromBase('third_party')) | |
| 55 import simplejson | |
| 56 | |
| 57 # Indicates that we want detailed progress updates in the output (prints | |
| 58 # directory-by-directory feedback). | |
| 59 LOG_DETAILED_PROGRESS = 'detailed-progress' | |
| 60 | |
| 61 # Log any unexpected results while running (instead of just at the end). | |
| 62 LOG_UNEXPECTED = 'unexpected' | |
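| # Annotation (not in the original file): these flags are looked for in the | |
| # value of --log, which holds comma-separated component names, e.g. | |
| # "--log detailed-progress,unexpected"; see UpdateSummary and | |
| # CreateLoggingWriter below. | |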
| 63 | |
| 64 # Builder base URL where we have the archived test results. | |
| 65 BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" | |
| 66 | |
| 67 TestExpectationsFile = test_expectations.TestExpectationsFile | |
| 68 | |
| 69 | |
| 70 class TestInfo: | |
| 71 """Groups information about a test for easy passing of data.""" | |
| 72 | |
| 73 def __init__(self, filename, timeout): | |
| 74 """Generates the URI and stores the filename and timeout for this test. | |
| 75 Args: | |
| 76 filename: Full path to the test. | |
| 77 timeout: Timeout for running the test in TestShell. | |
| 78 """ | |
| 79 self.filename = filename | |
| 80 self.uri = path_utils.FilenameToUri(filename) | |
| 81 self.timeout = timeout | |
| 82 expected_hash_file = path_utils.ExpectedFilename(filename, '.checksum') | |
| 83 try: | |
| 84 self.image_hash = open(expected_hash_file, "r").read() | |
| 85 except IOError, e: | |
| 86 if errno.ENOENT != e.errno: | |
| 87 raise | |
| 88 self.image_hash = None | |
| 89 | |
| 90 | |
| 91 class ResultSummary(object): | |
| 92 """A class for partitioning the test results we get into buckets. | |
| 93 | |
| 94 This class is basically a glorified struct and it's private to this file | |
| 95 so we don't bother with any information hiding.""" | |
| 96 | |
| 97 def __init__(self, expectations, test_files): | |
| 98 self.total = len(test_files) | |
| 99 self.remaining = self.total | |
| 100 self.expectations = expectations | |
| 101 self.expected = 0 | |
| 102 self.unexpected = 0 | |
| 103 self.tests_by_expectation = {} | |
| 104 self.tests_by_timeline = {} | |
| 105 self.results = {} | |
| 106 self.unexpected_results = {} | |
| 107 self.failures = {} | |
| 108 self.tests_by_expectation[test_expectations.SKIP] = set() | |
| 109 for expectation in TestExpectationsFile.EXPECTATIONS.values(): | |
| 110 self.tests_by_expectation[expectation] = set() | |
| 111 for timeline in TestExpectationsFile.TIMELINES.values(): | |
| 112 self.tests_by_timeline[timeline] = ( | |
| 113 expectations.GetTestsWithTimeline(timeline)) | |
| 114 | |
| 115 def Add(self, test, failures, result, expected): | |
| 116 """Add a result into the appropriate bin. | |
| 117 | |
| 118 Args: | |
| 119 test: test file name | |
| 120 failures: list of failure objects from test execution | |
| 121 result: result of test (PASS, IMAGE, etc.). | |
| 122 expected: whether the result was what we expected it to be. | |
| 123 """ | |
| 124 | |
| 125 self.tests_by_expectation[result].add(test) | |
| 126 self.results[test] = result | |
| 127 self.remaining -= 1 | |
| 128 if len(failures): | |
| 129 self.failures[test] = failures | |
| 130 if expected: | |
| 131 self.expected += 1 | |
| 132 else: | |
| 133 self.unexpected_results[test] = result | |
| 134 self.unexpected += 1 | |
| 135 | |
| 136 | |
| 137 class TestRunner: | |
| 138 """A class for managing running a series of tests on a series of layout | |
| 139 test files.""" | |
| 140 | |
| 141 HTTP_SUBDIR = os.sep.join(['', 'http', '']) | |
| 142 WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', '']) | |
| 143 | |
| 144 # The per-test timeout in milliseconds, if no --time-out-ms option was | |
| 145 # given to run_webkit_tests. This should correspond to the default timeout | |
| 146 # in test_shell.exe. | |
| 147 DEFAULT_TEST_TIMEOUT_MS = 6 * 1000 | |
| 148 | |
| 149 NUM_RETRY_ON_UNEXPECTED_FAILURE = 1 | |
| 150 | |
| 151 def __init__(self, options, meter): | |
| 152 """Initialize test runner data structures. | |
| 153 | |
| 154 Args: | |
| 155 options: command line options from optparse | |
| 156 meter: a MeteredStream object to record updates to. | |
| 157 """ | |
| 158 self._options = options | |
| 159 self._meter = meter | |
| 160 | |
| 161 if options.use_apache: | |
| 162 self._http_server = apache_http_server.LayoutTestApacheHttpd( | |
| 163 options.results_directory) | |
| 164 else: | |
| 165 self._http_server = http_server.Lighttpd(options.results_directory) | |
| 166 | |
| 167 self._websocket_server = websocket_server.PyWebSocket( | |
| 168 options.results_directory) | |
| 169 # disable wss server. need to install pyOpenSSL on buildbots. | |
| 170 # self._websocket_secure_server = websocket_server.PyWebSocket( | |
| 171 # options.results_directory, use_tls=True, port=9323) | |
| 172 | |
| 173 # a list of TestType objects | |
| 174 self._test_types = [] | |
| 175 | |
| 176 # a set of test files, and the same tests as a list | |
| 177 self._test_files = set() | |
| 178 self._test_files_list = None | |
| 179 self._file_dir = path_utils.GetAbsolutePath( | |
| 180 os.path.dirname(sys.argv[0])) | |
| 181 self._result_queue = Queue.Queue() | |
| 182 | |
| 183 # These are used for --log detailed-progress to track status by | |
| 184 # directory. | |
| 185 self._current_dir = None | |
| 186 self._current_progress_str = "" | |
| 187 self._current_test_number = 0 | |
| 188 | |
| 189 def __del__(self): | |
| 190 logging.debug("flushing stdout") | |
| 191 sys.stdout.flush() | |
| 192 logging.debug("flushing stderr") | |
| 193 sys.stderr.flush() | |
| 194 logging.debug("stopping http server") | |
| 195 # Stop the http server. | |
| 196 self._http_server.Stop() | |
| 197 # Stop the Web Socket / Web Socket Secure servers. | |
| 198 self._websocket_server.Stop() | |
| 199 # self._websocket_secure_server.Stop() | |
| 200 | |
| 201 def GatherFilePaths(self, paths): | |
| 202 """Find all the files to test. | |
| 203 | |
| 204 Args: | |
| 205 paths: a list of globs to use instead of the defaults.""" | |
| 206 self._test_files = test_files.GatherTestFiles(paths) | |
| 207 | |
| 208 def ParseExpectations(self, platform, is_debug_mode): | |
| 209 """Parse the expectations from the test_list files and return a data | |
| 210 structure holding them. Throws an error if the test_list files have | |
| 211 invalid syntax.""" | |
| 212 if self._options.lint_test_files: | |
| 213 test_files = None | |
| 214 else: | |
| 215 test_files = self._test_files | |
| 216 | |
| 217 try: | |
| 218 self._expectations = test_expectations.TestExpectations(test_files, | |
| 219 self._file_dir, platform, is_debug_mode, | |
| 220 self._options.lint_test_files) | |
| 221 return self._expectations | |
| 222 except Exception, err: | |
| 223 if self._options.lint_test_files: | |
| 224 print str(err) | |
| 225 else: | |
| 226 raise err | |
| 227 | |
| 228 def PrepareListsAndPrintOutput(self, write): | |
| 229 """Create appropriate subsets of test lists and returns a | |
| 230 ResultSummary object. Also prints expected test counts. | |
| 231 | |
| 232 Args: | |
| 233 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 234 sys.stdout.write. | |
| 235 """ | |
| 236 | |
| 237 # Remove skipped - both fixable and ignored - files from the | |
| 238 # top-level list of files to test. | |
| 239 num_all_test_files = len(self._test_files) | |
| 240 write("Found: %d tests" % (len(self._test_files))) | |
| 241 skipped = set() | |
| 242 if num_all_test_files > 1 and not self._options.force: | |
| 243 skipped = self._expectations.GetTestsWithResultType( | |
| 244 test_expectations.SKIP) | |
| 245 self._test_files -= skipped | |
| 246 | |
| 247 # Create a sorted list of test files so the subset chunk, | |
| 248 # if used, contains alphabetically consecutive tests. | |
| 249 self._test_files_list = list(self._test_files) | |
| 250 if self._options.randomize_order: | |
| 251 random.shuffle(self._test_files_list) | |
| 252 else: | |
| 253 self._test_files_list.sort() | |
| 254 | |
| 255 # If the user specifies they just want to run a subset of the tests, | |
| 256 # just grab a subset of the non-skipped tests. | |
| 257 if self._options.run_chunk or self._options.run_part: | |
| 258 chunk_value = self._options.run_chunk or self._options.run_part | |
| 259 test_files = self._test_files_list | |
| 260 try: | |
| 261 (chunk_num, chunk_len) = chunk_value.split(":") | |
| 262 chunk_num = int(chunk_num) | |
| 263 assert(chunk_num >= 0) | |
| 264 test_size = int(chunk_len) | |
| 265 assert(test_size > 0) | |
| 266 except: | |
| 267 logging.critical("invalid chunk '%s'" % chunk_value) | |
| 268 sys.exit(1) | |
| 269 | |
| 270 # Get the number of tests | |
| 271 num_tests = len(test_files) | |
| 272 | |
| 273 # Get the start offset of the slice. | |
| 274 if self._options.run_chunk: | |
| 275 chunk_len = test_size | |
| 276 # In this case chunk_num can be really large, so wrap the | |
| 277 # start of the slice around to fit within the current number of tests. | |
| 278 slice_start = (chunk_num * chunk_len) % num_tests | |
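| # Illustrative example (annotation, not in the original file): | |
| # --run-chunk=3:100 with 250 tests gives slice_start = 300 % 250 = 50, | |
| # so tests[50:150] are run. | |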
| 279 else: | |
| 280 # Validate the data. | |
| 281 assert(test_size <= num_tests) | |
| 282 assert(chunk_num <= test_size) | |
| 283 | |
| 284 # To compute chunk_len without skipping any tests, round | |
| 285 # num_tests up to the next value that divides evenly into | |
| 286 # test_size parts. | |
| 287 rounded_tests = num_tests | |
| 288 if rounded_tests % test_size != 0: | |
| 289 rounded_tests = (num_tests + test_size - | |
| 290 (num_tests % test_size)) | |
| 291 | |
| 292 chunk_len = rounded_tests / test_size | |
| 293 slice_start = chunk_len * (chunk_num - 1) | |
| 294 # It is fine if this runs past the end; the slice end is clamped below. | |
| 295 | |
| 296 # Get the end offset of the slice. | |
| 297 slice_end = min(num_tests, slice_start + chunk_len) | |
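| # Illustrative example (annotation, not in the original file): | |
| # --run-part=2:5 with 53 tests rounds 53 up to 55, so chunk_len = 11, | |
| # slice_start = 11 and slice_end = 22, i.e. tests[11:22] are run. | |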
| 298 | |
| 299 files = test_files[slice_start:slice_end] | |
| 300 | |
| 301 tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ( | |
| 302 (slice_end - slice_start), slice_start, slice_end, num_tests) | |
| 303 write(tests_run_msg) | |
| 304 | |
| 305 # If we reached the end and we don't have enough tests, we run some | |
| 306 # from the beginning. | |
| 307 if (self._options.run_chunk and | |
| 308 (slice_end - slice_start < chunk_len)): | |
| 309 extra = 1 + chunk_len - (slice_end - slice_start) | |
| 310 extra_msg = (' last chunk is partial, appending [0:%d]' % | |
| 311 extra) | |
| 312 write(extra_msg) | |
| 313 tests_run_msg += "\n" + extra_msg | |
| 314 files.extend(test_files[0:extra]) | |
| 315 tests_run_filename = os.path.join(self._options.results_directory, | |
| 316 "tests_run.txt") | |
| 317 tests_run_file = open(tests_run_filename, "w") | |
| 318 tests_run_file.write(tests_run_msg + "\n") | |
| 319 tests_run_file.close() | |
| 320 | |
| 321 len_skip_chunk = int(len(files) * len(skipped) / | |
| 322 float(len(self._test_files))) | |
| 323 skip_chunk_list = list(skipped)[0:len_skip_chunk] | |
| 324 skip_chunk = set(skip_chunk_list) | |
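| # Illustrative arithmetic (annotation, not in the original file): with 1000 | |
| # non-skipped tests, 100 skipped tests and a 200-test chunk, len_skip_chunk | |
| # is int(200 * 100 / 1000.0) = 20, so 20 skipped tests are added back below | |
| # purely so the expectation statistics stay proportional. | |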
| 325 | |
| 326 # Update expectations so that the stats are calculated correctly. | |
| 327 # We need to pass a list that includes the right # of skipped files | |
| 328 # to ParseExpectations so that ResultSummary() will get the correct | |
| 329 # stats. So, we add in the subset of skipped files, and then | |
| 330 # subtract them back out. | |
| 331 self._test_files_list = files + skip_chunk_list | |
| 332 self._test_files = set(self._test_files_list) | |
| 333 | |
| 334 self._expectations = self.ParseExpectations( | |
| 335 path_utils.PlatformName(), self._options.target == 'Debug') | |
| 336 | |
| 337 self._test_files = set(files) | |
| 338 self._test_files_list = files | |
| 339 else: | |
| 340 skip_chunk = skipped | |
| 341 | |
| 342 result_summary = ResultSummary(self._expectations, | |
| 343 self._test_files | skip_chunk) | |
| 344 self._PrintExpectedResultsOfType(write, result_summary, | |
| 345 test_expectations.PASS, "passes") | |
| 346 self._PrintExpectedResultsOfType(write, result_summary, | |
| 347 test_expectations.FAIL, "failures") | |
| 348 self._PrintExpectedResultsOfType(write, result_summary, | |
| 349 test_expectations.FLAKY, "flaky") | |
| 350 self._PrintExpectedResultsOfType(write, result_summary, | |
| 351 test_expectations.SKIP, "skipped") | |
| 352 | |
| 353 | |
| 354 if self._options.force: | |
| 355 write('Running all tests, including skips (--force)') | |
| 356 else: | |
| 357 # Note that we don't actually run the skipped tests (they were | |
| 358 # subtracted out of self._test_files, above), but we stub out the | |
| 359 # results here so the statistics can remain accurate. | |
| 360 for test in skip_chunk: | |
| 361 result_summary.Add(test, [], test_expectations.SKIP, | |
| 362 expected=True) | |
| 363 write("") | |
| 364 | |
| 365 return result_summary | |
| 366 | |
| 367 def AddTestType(self, test_type): | |
| 368 """Add a TestType to the TestRunner.""" | |
| 369 self._test_types.append(test_type) | |
| 370 | |
| 371 def _GetDirForTestFile(self, test_file): | |
| 372 """Returns the highest-level directory by which to shard the given | |
| 373 test file.""" | |
| 374 index = test_file.rfind(os.sep + 'LayoutTests' + os.sep) | |
| 375 | |
| 376 test_file = test_file[index + len('LayoutTests/'):] | |
| 377 test_file_parts = test_file.split(os.sep, 1) | |
| 378 directory = test_file_parts[0] | |
| 379 test_file = test_file_parts[1] | |
| 380 | |
| 381 # The http tests are very stable on mac/linux. | |
| 382 # TODO(ojan): Make the http server on Windows use Apache so we can | |
| 383 # shard the http tests there as well. Switching to Apache is | |
| 384 # what made them stable on linux/mac. | |
| 385 return_value = directory | |
| 386 while ((directory != 'http' or sys.platform in ('darwin', 'linux2')) | |
| 387 and test_file.find(os.sep) >= 0): | |
| 388 test_file_parts = test_file.split(os.sep, 1) | |
| 389 directory = test_file_parts[0] | |
| 390 return_value = os.path.join(return_value, directory) | |
| 391 test_file = test_file_parts[1] | |
| 392 | |
| 393 return return_value | |
| 394 | |
| 395 def _GetTestInfoForFile(self, test_file): | |
| 396 """Returns the appropriate TestInfo object for the file. Mostly this | |
| 397 is used for looking up the timeout value (in ms) to use for the given | |
| 398 test.""" | |
| 399 if self._expectations.HasModifier(test_file, test_expectations.SLOW): | |
| 400 return TestInfo(test_file, self._options.slow_time_out_ms) | |
| 401 return TestInfo(test_file, self._options.time_out_ms) | |
| 402 | |
| 403 def _GetTestFileQueue(self, test_files): | |
| 404 """Create the thread safe queue of lists of (test filenames, test URIs) | |
| 405 tuples. Each TestShellThread pulls a list from this queue and runs | |
| 406 those tests in order before grabbing the next available list. | |
| 407 | |
| 408 Shard the lists by directory. This helps ensure that tests that depend | |
| 409 on each other (aka bad tests!) continue to run together as most | |
| 410 cross-tests dependencies tend to occur within the same directory. | |
| 411 | |
| 412 Return: | |
| 413 The Queue of (directory, list of TestInfo objects) tuples. | |
| 414 """ | |
| 415 | |
| 416 if (self._options.experimental_fully_parallel or | |
| 417 self._IsSingleThreaded()): | |
| 418 filename_queue = Queue.Queue() | |
| 419 for test_file in test_files: | |
| 420 filename_queue.put(('.', [self._GetTestInfoForFile(test_file)])) | |
| 421 return filename_queue | |
| 422 | |
| 423 tests_by_dir = {} | |
| 424 for test_file in test_files: | |
| 425 directory = self._GetDirForTestFile(test_file) | |
| 426 tests_by_dir.setdefault(directory, []) | |
| 427 tests_by_dir[directory].append(self._GetTestInfoForFile(test_file)) | |
| 428 | |
| 429 # Sort by the number of tests in the dir so that the ones with the | |
| 430 # most tests get run first in order to maximize parallelization. | |
| 431 # Number of tests is a good enough, but not perfect, approximation | |
| 432 # of how long that set of tests will take to run. We can't just use | |
| 433 # a PriorityQueue until we move to Python 2.6. | |
| 434 test_lists = [] | |
| 435 http_tests = None | |
| 436 for directory in tests_by_dir: | |
| 437 test_list = tests_by_dir[directory] | |
| 438 # Keep the tests in alphabetical order. | |
| 439 # TODO: Remove once tests are fixed so they can be run in any | |
| 440 # order. | |
| 441 test_list.reverse() | |
| 442 test_list_tuple = (directory, test_list) | |
| 443 if directory == 'LayoutTests' + os.sep + 'http': | |
| 444 http_tests = test_list_tuple | |
| 445 else: | |
| 446 test_lists.append(test_list_tuple) | |
| 447 test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1]))) | |
| 448 | |
| 449 # Put the http tests first. There are only a couple hundred of them, | |
| 450 # but each http test takes a very long time to run, so sorting by the | |
| 451 # number of tests doesn't accurately capture how long they take to run. | |
| 452 if http_tests: | |
| 453 test_lists.insert(0, http_tests) | |
| 454 | |
| 455 filename_queue = Queue.Queue() | |
| 456 for item in test_lists: | |
| 457 filename_queue.put(item) | |
| 458 return filename_queue | |
| 459 | |
| 460 def _GetTestShellArgs(self, index): | |
| 461 """Returns the tuple of arguments for tests and for test_shell.""" | |
| 462 shell_args = [] | |
| 463 test_args = test_type_base.TestArguments() | |
| 464 if not self._options.no_pixel_tests: | |
| 465 png_path = os.path.join(self._options.results_directory, | |
| 466 "png_result%s.png" % index) | |
| 467 shell_args.append("--pixel-tests=" + png_path) | |
| 468 test_args.png_path = png_path | |
| 469 | |
| 470 test_args.new_baseline = self._options.new_baseline | |
| 471 | |
| 472 test_args.show_sources = self._options.sources | |
| 473 | |
| 474 if self._options.startup_dialog: | |
| 475 shell_args.append('--testshell-startup-dialog') | |
| 476 | |
| 477 if self._options.gp_fault_error_box: | |
| 478 shell_args.append('--gp-fault-error-box') | |
| 479 | |
| 480 return (test_args, shell_args) | |
| 481 | |
| 482 def _ContainsTests(self, subdir): | |
| 483 for test_file in self._test_files_list: | |
| 484 if test_file.find(subdir) >= 0: | |
| 485 return True | |
| 486 return False | |
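| # Annotation (not in the original file): this is a plain substring match, | |
| # e.g. _ContainsTests(self.HTTP_SUBDIR) is true when any selected test path | |
| # contains os.sep + 'http' + os.sep. | |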
| 487 | |
| 488 def _InstantiateTestShellThreads(self, test_shell_binary, test_files, | |
| 489 result_summary): | |
| 490 """Instantitates and starts the TestShellThread(s). | |
| 491 | |
| 492 Return: | |
| 493 The list of threads. | |
| 494 """ | |
| 495 test_shell_command = [test_shell_binary] | |
| 496 | |
| 497 if self._options.wrapper: | |
| 498 # This split() isn't really what we want -- it incorrectly will | |
| 499 # split quoted strings within the wrapper argument -- but in | |
| 500 # practice it shouldn't come up and the --help output warns | |
| 501 # about it anyway. | |
| 502 test_shell_command = (self._options.wrapper.split() + | |
| 503 test_shell_command) | |
| 504 | |
| 505 filename_queue = self._GetTestFileQueue(test_files) | |
| 506 | |
| 507 # Instantiate TestShellThreads and start them. | |
| 508 threads = [] | |
| 509 for i in xrange(int(self._options.num_test_shells)): | |
| 510 # Create separate TestTypes instances for each thread. | |
| 511 test_types = [] | |
| 512 for t in self._test_types: | |
| 513 test_types.append(t(self._options.platform, | |
| 514 self._options.results_directory)) | |
| 515 | |
| 516 test_args, shell_args = self._GetTestShellArgs(i) | |
| 517 thread = test_shell_thread.TestShellThread(filename_queue, | |
| 518 self._result_queue, | |
| 519 test_shell_command, | |
| 520 test_types, | |
| 521 test_args, | |
| 522 shell_args, | |
| 523 self._options) | |
| 524 if self._IsSingleThreaded(): | |
| 525 thread.RunInMainThread(self, result_summary) | |
| 526 else: | |
| 527 thread.start() | |
| 528 threads.append(thread) | |
| 529 | |
| 530 return threads | |
| 531 | |
| 532 def _StopLayoutTestHelper(self, proc): | |
| 533 """Stop the layout test helper and closes it down.""" | |
| 534 if proc: | |
| 535 logging.debug("Stopping layout test helper") | |
| 536 proc.stdin.write("x\n") | |
| 537 proc.stdin.close() | |
| 538 proc.wait() | |
| 539 | |
| 540 def _IsSingleThreaded(self): | |
| 541 """Returns whether we should run all the tests in the main thread.""" | |
| 542 return int(self._options.num_test_shells) == 1 | |
| 543 | |
| 544 def _RunTests(self, test_shell_binary, file_list, result_summary): | |
| 545 """Runs the tests in the file_list. | |
| 546 | |
| 547 Return: A tuple (thread_timings, test_timings, | |
| 548 individual_test_timings) | |
| 549 thread_timings is a list of dicts with the total runtime | |
| 550 of each thread with 'name', 'num_tests', 'total_time' properties | |
| 551 test_timings is a dict of timing stats for each sharded | |
| 552 subdirectory, keyed by directory name | |
| 553 individual_test_timings is a list of run times for each test | |
| 554 in the form {filename:filename, test_run_time:test_run_time} | |
| 555 Failures are recorded in result_summary, the summary object | |
| 556 passed in by the caller. | |
| 557 """ | |
| 558 threads = self._InstantiateTestShellThreads(test_shell_binary, | |
| 559 file_list, | |
| 560 result_summary) | |
| 561 | |
| 562 # Wait for the threads to finish and collect test failures. | |
| 563 failures = {} | |
| 564 test_timings = {} | |
| 565 individual_test_timings = [] | |
| 566 thread_timings = [] | |
| 567 try: | |
| 568 for thread in threads: | |
| 569 while thread.isAlive(): | |
| 570 # Let it timeout occasionally so it can notice a | |
| 571 # KeyboardInterrupt. Actually, the timeout doesn't | |
| 572 # really matter: apparently it suffices to not use | |
| 573 # an indefinite blocking join for it to | |
| 574 # be interruptible by KeyboardInterrupt. | |
| 575 thread.join(0.1) | |
| 576 self.UpdateSummary(result_summary) | |
| 577 thread_timings.append({'name': thread.getName(), | |
| 578 'num_tests': thread.GetNumTests(), | |
| 579 'total_time': thread.GetTotalTime()}) | |
| 580 test_timings.update(thread.GetDirectoryTimingStats()) | |
| 581 individual_test_timings.extend(thread.GetIndividualTestStats()) | |
| 582 except KeyboardInterrupt: | |
| 583 for thread in threads: | |
| 584 thread.Cancel() | |
| 585 self._StopLayoutTestHelper(self._layout_test_helper_proc) | |
| 586 raise | |
| 587 for thread in threads: | |
| 588 # Check whether a TestShellThread died before normal completion. | |
| 589 exception_info = thread.GetExceptionInfo() | |
| 590 if exception_info is not None: | |
| 591 # Re-raise the thread's exception here to make it clear that | |
| 592 # testing was aborted. Otherwise, the tests that did not run | |
| 593 # would be assumed to have passed. | |
| 594 raise exception_info[0], exception_info[1], exception_info[2] | |
| 595 | |
| 596 # Make sure we pick up any remaining tests. | |
| 597 self.UpdateSummary(result_summary) | |
| 598 return (thread_timings, test_timings, individual_test_timings) | |
| 599 | |
| 600 def Run(self, result_summary): | |
| 601 """Run all our tests on all our test files. | |
| 602 | |
| 603 For each test file, we run each test type. If there are any failures, | |
| 604 we collect them for reporting. | |
| 605 | |
| 606 Args: | |
| 607 result_summary: a summary object tracking the test results. | |
| 608 | |
| 609 Return: | |
| 610 We return nonzero if there are regressions compared to the last run. | |
| 611 """ | |
| 612 if not self._test_files: | |
| 613 return 0 | |
| 614 start_time = time.time() | |
| 615 test_shell_binary = path_utils.TestShellPath(self._options.target) | |
| 616 | |
| 617 # Start up any helper needed | |
| 618 self._layout_test_helper_proc = None | |
| 619 if not self._options.no_pixel_tests: | |
| 620 helper_path = path_utils.LayoutTestHelperPath(self._options.target) | |
| 621 if len(helper_path): | |
| 622 logging.debug("Starting layout helper %s" % helper_path) | |
| 623 self._layout_test_helper_proc = subprocess.Popen( | |
| 624 [helper_path], stdin=subprocess.PIPE, | |
| 625 stdout=subprocess.PIPE, stderr=None) | |
| 626 is_ready = self._layout_test_helper_proc.stdout.readline() | |
| 627 if not is_ready.startswith('ready'): | |
| 628 logging.error("layout_test_helper failed to be ready") | |
| 629 | |
| 630 # Check that the system dependencies (themes, fonts, ...) are correct. | |
| 631 if not self._options.nocheck_sys_deps: | |
| 632 proc = subprocess.Popen([test_shell_binary, | |
| 633 "--check-layout-test-sys-deps"]) | |
| 634 if proc.wait() != 0: | |
| 635 logging.info("Aborting because system dependencies check " | |
| 636 "failed.\n To override, invoke with " | |
| 637 "--nocheck-sys-deps") | |
| 638 sys.exit(1) | |
| 639 | |
| 640 if self._ContainsTests(self.HTTP_SUBDIR): | |
| 641 self._http_server.Start() | |
| 642 | |
| 643 if self._ContainsTests(self.WEBSOCKET_SUBDIR): | |
| 644 self._websocket_server.Start() | |
| 645 # self._websocket_secure_server.Start() | |
| 646 | |
| 647 thread_timings, test_timings, individual_test_timings = ( | |
| 648 self._RunTests(test_shell_binary, self._test_files_list, | |
| 649 result_summary)) | |
| 650 | |
| 651 # We exclude the crashes from the list of results to retry, because | |
| 652 # we want to treat even a potentially flaky crash as an error. | |
| 653 failures = self._GetFailures(result_summary, include_crashes=False) | |
| 654 retries = 0 | |
| 655 retry_summary = result_summary | |
| 656 while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and | |
| 657 len(failures)): | |
| 658 logging.debug("Retrying %d unexpected failure(s)" % len(failures)) | |
| 659 retries += 1 | |
| 660 retry_summary = ResultSummary(self._expectations, failures.keys()) | |
| 661 self._RunTests(test_shell_binary, failures.keys(), retry_summary) | |
| 662 failures = self._GetFailures(retry_summary, include_crashes=True) | |
| 663 | |
| 664 self._StopLayoutTestHelper(self._layout_test_helper_proc) | |
| 665 end_time = time.time() | |
| 666 | |
| 667 write = CreateLoggingWriter(self._options, 'timing') | |
| 668 self._PrintTimingStatistics(write, end_time - start_time, | |
| 669 thread_timings, test_timings, | |
| 670 individual_test_timings, | |
| 671 result_summary) | |
| 672 | |
| 673 self._meter.update("") | |
| 674 | |
| 675 if self._options.verbose: | |
| 676 # We write this block to stdout for compatibility with the | |
| 677 # buildbot log parser, which only looks at stdout, not stderr :( | |
| 678 write = lambda s: sys.stdout.write("%s\n" % s) | |
| 679 else: | |
| 680 write = CreateLoggingWriter(self._options, 'actual') | |
| 681 | |
| 682 self._PrintResultSummary(write, result_summary) | |
| 683 | |
| 684 sys.stdout.flush() | |
| 685 sys.stderr.flush() | |
| 686 | |
| 687 if (LOG_DETAILED_PROGRESS in self._options.log or | |
| 688 (LOG_UNEXPECTED in self._options.log and | |
| 689 result_summary.total != result_summary.expected)): | |
| 690 print | |
| 691 | |
| 692 # This summary data gets written to stdout regardless of log level | |
| 693 self._PrintOneLineSummary(result_summary.total, | |
| 694 result_summary.expected) | |
| 695 | |
| 696 unexpected_results = self._SummarizeUnexpectedResults(result_summary, | |
| 697 retry_summary) | |
| 698 self._PrintUnexpectedResults(unexpected_results) | |
| 699 | |
| 700 # Write the same data to log files. | |
| 701 self._WriteJSONFiles(unexpected_results, result_summary, | |
| 702 individual_test_timings) | |
| 703 | |
| 704 # Write the summary to disk (results.html) and maybe open the | |
| 705 # test_shell to this file. | |
| 706 wrote_results = self._WriteResultsHtmlFile(result_summary) | |
| 707 if not self._options.noshow_results and wrote_results: | |
| 708 self._ShowResultsHtmlFile() | |
| 709 | |
| 710 # Ignore flaky failures and unexpected passes so we don't turn the | |
| 711 # bot red for those. | |
| 712 return unexpected_results['num_regressions'] | |
| 713 | |
| 714 def UpdateSummary(self, result_summary): | |
| 715 """Update the summary while running tests.""" | |
| 716 while True: | |
| 717 try: | |
| 718 (test, fail_list) = self._result_queue.get_nowait() | |
| 719 result = test_failures.DetermineResultType(fail_list) | |
| 720 expected = self._expectations.MatchesAnExpectedResult(test, | |
| 721 result) | |
| 722 result_summary.Add(test, fail_list, result, expected) | |
| 723 if (LOG_DETAILED_PROGRESS in self._options.log and | |
| 724 (self._options.experimental_fully_parallel or | |
| 725 self._IsSingleThreaded())): | |
| 726 self._DisplayDetailedProgress(result_summary) | |
| 727 else: | |
| 728 if not expected and LOG_UNEXPECTED in self._options.log: | |
| 729 self._PrintUnexpectedTestResult(test, result) | |
| 730 self._DisplayOneLineProgress(result_summary) | |
| 731 except Queue.Empty: | |
| 732 return | |
| 733 | |
| 734 def _DisplayOneLineProgress(self, result_summary): | |
| 735 """Displays the progress through the test run.""" | |
| 736 self._meter.update("Testing: %d ran as expected, %d didn't, %d left" % | |
| 737 (result_summary.expected, result_summary.unexpected, | |
| 738 result_summary.remaining)) | |
| 739 | |
| 740 def _DisplayDetailedProgress(self, result_summary): | |
| 741 """Display detailed progress output where we print the directory name | |
| 742 and one dot for each completed test. This is triggered by | |
| 743 "--log detailed-progress".""" | |
| 744 if self._current_test_number == len(self._test_files_list): | |
| 745 return | |
| 746 | |
| 747 next_test = self._test_files_list[self._current_test_number] | |
| 748 next_dir = os.path.dirname(path_utils.RelativeTestFilename(next_test)) | |
| 749 if self._current_progress_str == "": | |
| 750 self._current_progress_str = "%s: " % (next_dir) | |
| 751 self._current_dir = next_dir | |
| 752 | |
| 753 while next_test in result_summary.results: | |
| 754 if next_dir != self._current_dir: | |
| 755 self._meter.write("%s\n" % (self._current_progress_str)) | |
| 756 self._current_progress_str = "%s: ." % (next_dir) | |
| 757 self._current_dir = next_dir | |
| 758 else: | |
| 759 self._current_progress_str += "." | |
| 760 | |
| 761 if (next_test in result_summary.unexpected_results and | |
| 762 LOG_UNEXPECTED in self._options.log): | |
| 763 result = result_summary.unexpected_results[next_test] | |
| 764 self._meter.write("%s\n" % self._current_progress_str) | |
| 765 self._PrintUnexpectedTestResult(next_test, result) | |
| 766 self._current_progress_str = "%s: " % self._current_dir | |
| 767 | |
| 768 self._current_test_number += 1 | |
| 769 if self._current_test_number == len(self._test_files_list): | |
| 770 break | |
| 771 | |
| 772 next_test = self._test_files_list[self._current_test_number] | |
| 773 next_dir = os.path.dirname( | |
| 774 path_utils.RelativeTestFilename(next_test)) | |
| 775 | |
| 776 if result_summary.remaining: | |
| 777 remain_str = " (%d)" % (result_summary.remaining) | |
| 778 self._meter.update("%s%s" % | |
| 779 (self._current_progress_str, remain_str)) | |
| 780 else: | |
| 781 self._meter.write("%s\n" % (self._current_progress_str)) | |
| 782 | |
| 783 def _GetFailures(self, result_summary, include_crashes): | |
| 784 """Filters a dict of results and returns only the failures. | |
| 785 | |
| 786 Args: | |
| 787 result_summary: the results of the test run | |
| 788 include_crashes: whether crashes are included in the output. | |
| 789 We use False when finding the list of failures to retry | |
| 790 to see if the results were flaky. Although the crashes may also be | |
| 791 flaky, we treat them as if they aren't so that they're not ignored. | |
| 792 Returns: | |
| 793 a dict of files -> results | |
| 794 """ | |
| 795 failed_results = {} | |
| 796 for test, result in result_summary.unexpected_results.iteritems(): | |
| 797 if (result == test_expectations.PASS or | |
| 798 (result == test_expectations.CRASH and not include_crashes)): | |
| 799 continue | |
| 800 failed_results[test] = result | |
| 801 | |
| 802 return failed_results | |
| 803 | |
| 804 def _SummarizeUnexpectedResults(self, result_summary, retry_summary): | |
| 805 """Summarize any unexpected results as a dict. | |
| 806 | |
| 807 TODO(dpranke): split this data structure into a separate class? | |
| 808 | |
| 809 Args: | |
| 810 result_summary: summary object from initial test runs | |
| 811 retry_summary: summary object from final test run of retried tests | |
| 812 Returns: | |
| 813 A dictionary containing a summary of the unexpected results from the | |
| 814 run, with the following fields: | |
| 815 'version': a version indicator (1 in this version) | |
| 816 'fixable': # of fixable tests (NOW - PASS) | |
| 817 'skipped': # of skipped tests (NOW & SKIPPED) | |
| 818 'num_regressions': # of non-flaky failures | |
| 819 'num_flaky': # of flaky failures | |
| 820 'num_passes': # of unexpected passes | |
| 821 'tests': a dict of tests -> {'expected': '...', 'actual': '...'} | |
| 822 """ | |
| 823 results = {} | |
| 824 results['version'] = 1 | |
| 825 | |
| 826 tbe = result_summary.tests_by_expectation | |
| 827 tbt = result_summary.tests_by_timeline | |
| 828 results['fixable'] = len(tbt[test_expectations.NOW] - | |
| 829 tbe[test_expectations.PASS]) | |
| 830 results['skipped'] = len(tbt[test_expectations.NOW] & | |
| 831 tbe[test_expectations.SKIP]) | |
| 832 | |
| 833 num_passes = 0 | |
| 834 num_flaky = 0 | |
| 835 num_regressions = 0 | |
| 836 keywords = {} | |
| 837 for k, v in TestExpectationsFile.EXPECTATIONS.iteritems(): | |
| 838 keywords[v] = k.upper() | |
| 839 | |
| 840 tests = {} | |
| 841 for filename, result in result_summary.unexpected_results.iteritems(): | |
| 842 # Note that if a test crashed in the original run, we ignore | |
| 843 # whether or not it crashed when we retried it (if we retried it), | |
| 844 # and always consider the result not flaky. | |
| 845 test = path_utils.RelativeTestFilename(filename) | |
| 846 expected = self._expectations.GetExpectationsString(filename) | |
| 847 actual = [keywords[result]] | |
| 848 | |
| 849 if result == test_expectations.PASS: | |
| 850 num_passes += 1 | |
| 851 elif result == test_expectations.CRASH: | |
| 852 num_regressions += 1 | |
| 853 else: | |
| 854 if filename not in retry_summary.unexpected_results: | |
| 855 actual.extend( | |
| 856 self._expectations.GetExpectationsString( | |
| 857 filename).split(" ")) | |
| 858 num_flaky += 1 | |
| 859 else: | |
| 860 retry_result = retry_summary.unexpected_results[filename] | |
| 861 if result != retry_result: | |
| 862 actual.append(keywords[retry_result]) | |
| 863 num_flaky += 1 | |
| 864 else: | |
| 865 num_regressions += 1 | |
| 866 | |
| 867 tests[test] = {} | |
| 868 tests[test]['expected'] = expected | |
| 869 tests[test]['actual'] = " ".join(actual) | |
| 870 | |
| 871 results['tests'] = tests | |
| 872 results['num_passes'] = num_passes | |
| 873 results['num_flaky'] = num_flaky | |
| 874 results['num_regressions'] = num_regressions | |
| 875 | |
| 876 return results | |
| 877 | |
| 878 def _WriteJSONFiles(self, unexpected_results, result_summary, | |
| 879 individual_test_timings): | |
| 880 """Writes the results of the test run as JSON files into the results | |
| 881 dir. | |
| 882 | |
| 883 There are three different files written into the results dir: | |
| 884 unexpected_results.json: A short list of any unexpected results. | |
| 885 This is used by the buildbots to display results. | |
| 886 expectations.json: This is used by the flakiness dashboard. | |
| 887 results.json: A full list of the results - used by the flakiness | |
| 888 dashboard and the aggregate results dashboard. | |
| 889 | |
| 890 Args: | |
| 891 unexpected_results: dict of unexpected results | |
| 892 result_summary: full summary object | |
| 893 individual_test_timings: list of test times (used by the flakiness | |
| 894 dashboard). | |
| 895 """ | |
| 896 logging.debug("Writing JSON files in %s." % | |
| 897 self._options.results_directory) | |
| 898 unexpected_file = open(os.path.join(self._options.results_directory, | |
| 899 "unexpected_results.json"), "w") | |
| 900 unexpected_file.write(simplejson.dumps(unexpected_results, | |
| 901 sort_keys=True, indent=2)) | |
| 902 unexpected_file.close() | |
| 903 | |
| 904 # Write a json file of the test_expectations.txt file for the layout | |
| 905 # tests dashboard. | |
| 906 expectations_file = open(os.path.join(self._options.results_directory, | |
| 907 "expectations.json"), "w") | |
| 908 expectations_json = \ | |
| 909 self._expectations.GetExpectationsJsonForAllPlatforms() | |
| 910 expectations_file.write("ADD_EXPECTATIONS(" + expectations_json + ");") | |
| 911 expectations_file.close() | |
| 912 | |
| 913 json_layout_results_generator.JSONLayoutResultsGenerator( | |
| 914 self._options.builder_name, self._options.build_name, | |
| 915 self._options.build_number, self._options.results_directory, | |
| 916 BUILDER_BASE_URL, individual_test_timings, | |
| 917 self._expectations, result_summary, self._test_files_list) | |
| 918 | |
| 919 logging.debug("Finished writing JSON files.") | |
| 920 | |
| 921 def _PrintExpectedResultsOfType(self, write, result_summary, result_type, | |
| 922 result_type_str): | |
| 923 """Print the number of the tests in a given result class. | |
| 924 | |
| 925 Args: | |
| 926 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 927 sys.stdout.write. | |
| 928 result_summary - the object containing all the results to report on | |
| 929 result_type - the particular result type to report in the summary. | |
| 930 result_type_str - a string description of the result_type. | |
| 931 """ | |
| 932 tests = self._expectations.GetTestsWithResultType(result_type) | |
| 933 now = result_summary.tests_by_timeline[test_expectations.NOW] | |
| 934 wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] | |
| 935 defer = result_summary.tests_by_timeline[test_expectations.DEFER] | |
| 936 | |
| 937 # We use a fancy format string in order to print the data out in a | |
| 938 # nicely-aligned table. | |
| 939 fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)" | |
| 940 % (self._NumDigits(now), self._NumDigits(defer), | |
| 941 self._NumDigits(wontfix))) | |
| 942 write(fmtstr % (len(tests), result_type_str, len(tests & now), | |
| 943 len(tests & defer), len(tests & wontfix))) | |
| 944 | |
| 945 def _NumDigits(self, num): | |
| 946 """Returns the number of digits needed to represent the length of a | |
| 947 sequence.""" | |
| 948 ndigits = 1 | |
| 949 if len(num): | |
| 950 ndigits = int(math.log10(len(num))) + 1 | |
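| # Illustrative arithmetic (annotation, not in the original file): a | |
| # 1234-element sequence needs int(math.log10(1234)) + 1 = 4 digits. | |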
| 951 return ndigits | |
| 952 | |
| 953 def _PrintTimingStatistics(self, write, total_time, thread_timings, | |
| 954 directory_test_timings, individual_test_timings, | |
| 955 result_summary): | |
| 956 """Record timing-specific information for the test run. | |
| 957 | |
| 958 Args: | |
| 959 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 960 sys.stdout.write. | |
| 961 total_time: total elapsed time (in seconds) for the test run | |
| 962 thread_timings: wall clock time each thread ran for | |
| 963 directory_test_timings: timing by directory | |
| 964 individual_test_timings: timing by file | |
| 965 result_summary: summary object for the test run | |
| 966 """ | |
| 967 write("Test timing:") | |
| 968 write(" %6.2f total testing time" % total_time) | |
| 969 write("") | |
| 970 write("Thread timing:") | |
| 971 cuml_time = 0 | |
| 972 for t in thread_timings: | |
| 973 write(" %10s: %5d tests, %6.2f secs" % | |
| 974 (t['name'], t['num_tests'], t['total_time'])) | |
| 975 cuml_time += t['total_time'] | |
| 976 write(" %6.2f cumulative, %6.2f optimal" % | |
| 977 (cuml_time, cuml_time / int(self._options.num_test_shells))) | |
| 978 write("") | |
| 979 | |
| 980 self._PrintAggregateTestStatistics(write, individual_test_timings) | |
| 981 self._PrintIndividualTestTimes(write, individual_test_timings, | |
| 982 result_summary) | |
| 983 self._PrintDirectoryTimings(write, directory_test_timings) | |
| 984 | |
| 985 def _PrintAggregateTestStatistics(self, write, individual_test_timings): | |
| 986 """Prints aggregate statistics (e.g. median, mean, etc.) for all tests. | |
| 987 Args: | |
| 988 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 989 sys.stdout.write. | |
| 990 individual_test_timings: List of test_shell_thread.TestStats for all | |
| 991 tests. | |
| 992 """ | |
| 993 test_types = individual_test_timings[0].time_for_diffs.keys() | |
| 994 times_for_test_shell = [] | |
| 995 times_for_diff_processing = [] | |
| 996 times_per_test_type = {} | |
| 997 for test_type in test_types: | |
| 998 times_per_test_type[test_type] = [] | |
| 999 | |
| 1000 for test_stats in individual_test_timings: | |
| 1001 times_for_test_shell.append(test_stats.test_run_time) | |
| 1002 times_for_diff_processing.append( | |
| 1003 test_stats.total_time_for_all_diffs) | |
| 1004 time_for_diffs = test_stats.time_for_diffs | |
| 1005 for test_type in test_types: | |
| 1006 times_per_test_type[test_type].append( | |
| 1007 time_for_diffs[test_type]) | |
| 1008 | |
| 1009 self._PrintStatisticsForTestTimings(write, | |
| 1010 "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell) | |
| 1011 self._PrintStatisticsForTestTimings(write, | |
| 1012 "PER TEST DIFF PROCESSING TIMES (seconds):", | |
| 1013 times_for_diff_processing) | |
| 1014 for test_type in test_types: | |
| 1015 self._PrintStatisticsForTestTimings(write, | |
| 1016 "PER TEST TIMES BY TEST TYPE: %s" % test_type, | |
| 1017 times_per_test_type[test_type]) | |
| 1018 | |
| 1019 def _PrintIndividualTestTimes(self, write, individual_test_timings, | |
| 1020 result_summary): | |
| 1021 """Prints the run times for slow, timeout and crash tests. | |
| 1022 Args: | |
| 1023 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 1024 sys.stdout.write. | |
| 1025 individual_test_timings: List of test_shell_thread.TestStats for all | |
| 1026 tests. | |
| 1027 result_summary: summary object for test run | |
| 1028 """ | |
| 1029 # Reverse-sort by the time spent in test_shell. | |
| 1030 individual_test_timings.sort(lambda a, b: | |
| 1031 cmp(b.test_run_time, a.test_run_time)) | |
| 1032 | |
| 1033 num_printed = 0 | |
| 1034 slow_tests = [] | |
| 1035 timeout_or_crash_tests = [] | |
| 1036 unexpected_slow_tests = [] | |
| 1037 for test_tuple in individual_test_timings: | |
| 1038 filename = test_tuple.filename | |
| 1039 is_timeout_crash_or_slow = False | |
| 1040 if self._expectations.HasModifier(filename, | |
| 1041 test_expectations.SLOW): | |
| 1042 is_timeout_crash_or_slow = True | |
| 1043 slow_tests.append(test_tuple) | |
| 1044 | |
| 1045 if filename in result_summary.failures: | |
| 1046 result = result_summary.results[filename] | |
| 1047 if (result == test_expectations.TIMEOUT or | |
| 1048 result == test_expectations.CRASH): | |
| 1049 is_timeout_crash_or_slow = True | |
| 1050 timeout_or_crash_tests.append(test_tuple) | |
| 1051 | |
| 1052 if (not is_timeout_crash_or_slow and | |
| 1053 num_printed < self._options.num_slow_tests_to_log): | |
| 1054 num_printed = num_printed + 1 | |
| 1055 unexpected_slow_tests.append(test_tuple) | |
| 1056 | |
| 1057 write("") | |
| 1058 self._PrintTestListTiming(write, "%s slowest tests that are not " | |
| 1059 "marked as SLOW and did not timeout/crash:" % | |
| 1060 self._options.num_slow_tests_to_log, unexpected_slow_tests) | |
| 1061 write("") | |
| 1062 self._PrintTestListTiming(write, "Tests marked as SLOW:", slow_tests) | |
| 1063 write("") | |
| 1064 self._PrintTestListTiming(write, "Tests that timed out or crashed:", | |
| 1065 timeout_or_crash_tests) | |
| 1066 write("") | |
| 1067 | |
| 1068 def _PrintTestListTiming(self, write, title, test_list): | |
| 1069 """Print timing info for each test. | |
| 1070 | |
| 1071 Args: | |
| 1072 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 1073 sys.stdout.write. | |
| 1074 title: section heading | |
| 1075 test_list: tests that fall in this section | |
| 1076 """ | |
| 1077 write(title) | |
| 1078 for test_tuple in test_list: | |
| 1079 filename = test_tuple.filename[len( | |
| 1080 path_utils.LayoutTestsDir()) + 1:] | |
| 1081 filename = filename.replace('\\', '/') | |
| 1082 test_run_time = round(test_tuple.test_run_time, 1) | |
| 1083 write(" %s took %s seconds" % (filename, test_run_time)) | |
| 1084 | |
| 1085 def _PrintDirectoryTimings(self, write, directory_test_timings): | |
| 1086 """Print timing info by directory for any directories that | |
| 1087 take > 10 seconds to run. | |
| 1088 | |
| 1089 Args: | |
| 1090 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 1091 sys.stdout.write. | |
| 1092 directory_test_timing: time info for each directory | |
| 1093 """ | |
| 1094 timings = [] | |
| 1095 for directory in directory_test_timings: | |
| 1096 num_tests, time_for_directory = directory_test_timings[directory] | |
| 1097 timings.append((round(time_for_directory, 1), directory, | |
| 1098 num_tests)) | |
| 1099 timings.sort() | |
| 1100 | |
| 1101 write("Time to process slowest subdirectories:") | |
| 1102 min_seconds_to_print = 10 | |
| 1103 for timing in timings: | |
| 1104 if timing[0] > min_seconds_to_print: | |
| 1105 write(" %s took %s seconds to run %s tests." % (timing[1], | |
| 1106 timing[0], timing[2])) | |
| 1107 write("") | |
| 1108 | |
| 1109 def _PrintStatisticsForTestTimings(self, write, title, timings): | |
| 1110 """Prints the median, mean and standard deviation of the values in | |
| 1111 timings. | |
| 1112 | |
| 1113 Args: | |
| 1114 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 1115 sys.stdout.write. | |
| 1116 title: Title for these timings. | |
| 1117 timings: A list of floats representing times. | |
| 1118 """ | |
| 1119 write(title) | |
| 1120 timings.sort() | |
| 1121 | |
| 1122 num_tests = len(timings) | |
| 1123 percentile90 = timings[int(.9 * num_tests)] | |
| 1124 percentile99 = timings[int(.99 * num_tests)] | |
| 1125 | |
| 1126 if num_tests % 2 == 1: | |
| 1127 median = timings[(num_tests - 1) / 2] | |
| 1128 else: | |
| 1129 lower = timings[num_tests / 2 - 1] | |
| 1130 upper = timings[num_tests / 2] | |
| 1131 median = (float(lower + upper)) / 2 | |
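| # Illustrative check (annotation, not in the original file): for the sorted | |
| # timings [1.0, 2.0, 10.0] the median is timings[1] = 2.0, and for | |
| # [1.0, 2.0, 3.0, 10.0] it is (2.0 + 3.0) / 2 = 2.5. | |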
| 1132 | |
| 1133 mean = sum(timings) / num_tests | |
| 1134 | |
| 1135 sum_of_deviations = sum(math.pow(t - mean, 2) | |
| 1136 for t in timings) | |
| 1137 | |
| 1138 std_deviation = math.sqrt(sum_of_deviations / num_tests) | |
| 1139 write(" Median: %6.3f" % median) | |
| 1140 write(" Mean: %6.3f" % mean) | |
| 1141 write(" 90th percentile: %6.3f" % percentile90) | |
| 1142 write(" 99th percentile: %6.3f" % percentile99) | |
| 1143 write(" Standard dev: %6.3f" % std_deviation) | |
| 1144 write("") | |
| 1145 | |
| 1146 def _PrintResultSummary(self, write, result_summary): | |
| 1147 """Print a short summary about how many tests passed. | |
| 1148 | |
| 1149 Args: | |
| 1150 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 1151 sys.stdout.write. | |
| 1152 result_summary: information to log | |
| 1153 """ | |
| 1154 failed = len(result_summary.failures) | |
| 1155 skipped = len( | |
| 1156 result_summary.tests_by_expectation[test_expectations.SKIP]) | |
| 1157 total = result_summary.total | |
| 1158 passed = total - failed - skipped | |
| 1159 pct_passed = 0.0 | |
| 1160 if total > 0: | |
| 1161 pct_passed = float(passed) * 100 / total | |
| 1162 | |
| 1163 write("") | |
| 1164 write("=> Results: %d/%d tests passed (%.1f%%)" % | |
| 1165 (passed, total, pct_passed)) | |
| 1166 write("") | |
| 1167 self._PrintResultSummaryEntry(write, result_summary, | |
| 1168 test_expectations.NOW, "Tests to be fixed for the current release") | |
| 1169 | |
| 1170 write("") | |
| 1171 self._PrintResultSummaryEntry(write, result_summary, | |
| 1172 test_expectations.DEFER, | |
| 1173 "Tests we'll fix in the future if they fail (DEFER)") | |
| 1174 | |
| 1175 write("") | |
| 1176 self._PrintResultSummaryEntry(write, result_summary, | |
| 1177 test_expectations.WONTFIX, | |
| 1178 "Tests that will only be fixed if they crash (WONTFIX)") | |
| 1179 | |
| 1180 def _PrintResultSummaryEntry(self, write, result_summary, timeline, | |
| 1181 heading): | |
| 1182 """Print a summary block of results for a particular timeline of test. | |
| 1183 | |
| 1184 Args: | |
| 1185 write: A callback to write info to (e.g., a LoggingWriter) or | |
| 1186 sys.stdout.write. | |
| 1187 result_summary: summary to print results for | |
| 1188 timeline: the timeline to print results for (NOW, WONTFIX, etc.) | |
| 1189 heading: a textual description of the timeline | |
| 1190 """ | |
| 1191 total = len(result_summary.tests_by_timeline[timeline]) | |
| 1192 not_passing = (total - | |
| 1193 len(result_summary.tests_by_expectation[test_expectations.PASS] & | |
| 1194 result_summary.tests_by_timeline[timeline])) | |
| 1195 write("=> %s (%d):" % (heading, not_passing)) | |
| 1196 | |
| 1197 for result in TestExpectationsFile.EXPECTATION_ORDER: | |
| 1198 if result == test_expectations.PASS: | |
| 1199 continue | |
| 1200 results = (result_summary.tests_by_expectation[result] & | |
| 1201 result_summary.tests_by_timeline[timeline]) | |
| 1202 desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result] | |
| 1203 if not_passing and len(results): | |
| 1204 pct = len(results) * 100.0 / not_passing | |
| 1205 write(" %5d %-24s (%4.1f%%)" % (len(results), | |
| 1206 desc[len(results) != 1], pct)) | |
| 1207 | |
| 1208 def _PrintOneLineSummary(self, total, expected): | |
| 1209 """Print a one-line summary of the test run to stdout. | |
| 1210 | |
| 1211 Args: | |
| 1212 total: total number of tests run | |
| 1213 expected: number of expected results | |
| 1214 """ | |
| 1215 unexpected = total - expected | |
| 1216 if unexpected == 0: | |
| 1217 print "All %d tests ran as expected." % expected | |
| 1218 elif expected == 1: | |
| 1219 print "1 test ran as expected, %d didn't:" % unexpected | |
| 1220 else: | |
| 1221 print "%d tests ran as expected, %d didn't:" % (expected, | |
| 1222 unexpected) | |
| 1223 | |
| 1224 def _PrintUnexpectedResults(self, unexpected_results): | |
| 1225 """Prints any unexpected results in a human-readable form to stdout.""" | |
| 1226 passes = {} | |
| 1227 flaky = {} | |
| 1228 regressions = {} | |
| 1229 | |
| 1230 if len(unexpected_results['tests']): | |
| 1231 print "" | |
| 1232 | |
| 1233 for test, results in unexpected_results['tests'].iteritems(): | |
| 1234 actual = results['actual'].split(" ") | |
| 1235 expected = results['expected'].split(" ") | |
| 1236 if actual == ['PASS']: | |
| 1237 if 'CRASH' in expected: | |
| 1238 _AddToDictOfLists(passes, 'Expected to crash, but passed', | |
| 1239 test) | |
| 1240 elif 'TIMEOUT' in expected: | |
| 1241 _AddToDictOfLists(passes, | |
| 1242 'Expected to timeout, but passed', test) | |
| 1243 else: | |
| 1244 _AddToDictOfLists(passes, 'Expected to fail, but passed', | |
| 1245 test) | |
| 1246 elif len(actual) > 1: | |
| 1247 # We group flaky tests by the first actual result we got. | |
| 1248 _AddToDictOfLists(flaky, actual[0], test) | |
| 1249 else: | |
| 1250 _AddToDictOfLists(regressions, results['actual'], test) | |
| 1251 | |
| 1252 if len(passes): | |
| 1253 for key, tests in passes.iteritems(): | |
| 1254 print "%s: (%d)" % (key, len(tests)) | |
| 1255 tests.sort() | |
| 1256 for test in tests: | |
| 1257 print " %s" % test | |
| 1258 print | |
| 1259 | |
| 1260 if len(flaky): | |
| 1261 descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS | |
| 1262 for key, tests in flaky.iteritems(): | |
| 1263 result = TestExpectationsFile.EXPECTATIONS[key.lower()] | |
| 1264 print "Unexpected flakiness: %s (%d)" % ( | |
| 1265 descriptions[result][1], len(tests)) | |
| 1266 tests.sort() | |
| 1267 | |
| 1268 for test in tests: | |
| 1269 result = unexpected_results['tests'][test] | |
| 1270 actual = result['actual'].split(" ") | |
| 1271 expected = result['expected'].split(" ") | |
| 1272 result = TestExpectationsFile.EXPECTATIONS[key.lower()] | |
| 1273 new_expectations_list = list(set(actual) | set(expected)) | |
| 1274 print " %s = %s" % (test, " ".join(new_expectations_list)) | |
| 1275 print | |
| 1276 | |
| 1277 if len(regressions): | |
| 1278 descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS | |
| 1279 for key, tests in regressions.iteritems(): | |
| 1280 result = TestExpectationsFile.EXPECTATIONS[key.lower()] | |
| 1281 print "Regressions: Unexpected %s : (%d)" % ( | |
| 1282 descriptions[result][1], len(tests)) | |
| 1283 tests.sort() | |
| 1284 for test in tests: | |
| 1285 print " %s = %s" % (test, key) | |
| 1286 print | |
| 1287 | |
| 1288 if len(unexpected_results['tests']) and self._options.verbose: | |
| 1289 print "-" * 78 | |
| 1290 | |
| 1291 def _PrintUnexpectedTestResult(self, test, result): | |
| 1292 """Prints one unexpected test result line.""" | |
| 1293 desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0] | |
| 1294 self._meter.write(" %s -> unexpected %s\n" % | |
| 1295 (path_utils.RelativeTestFilename(test), desc)) | |
| 1296 | |
| 1297 def _WriteResultsHtmlFile(self, result_summary): | |
| 1298 """Write results.html which is a summary of tests that failed. | |
| 1299 | |
| 1300 Args: | |
| 1301 result_summary: a summary of the results :) | |
| 1302 | |
| 1303 Returns: | |
| 1304 True if any results were written (since expected failures may be | |
| 1305 omitted) | |
| 1306 """ | |
| 1307 # test failures | |
| 1308 if self._options.full_results_html: | |
| 1309 test_files = result_summary.failures.keys() | |
| 1310 else: | |
| 1311 unexpected_failures = self._GetFailures(result_summary, | |
| 1312 include_crashes=True) | |
| 1313 test_files = unexpected_failures.keys() | |
| 1314 if not len(test_files): | |
| 1315 return False | |
| 1316 | |
| 1317 out_filename = os.path.join(self._options.results_directory, | |
| 1318 "results.html") | |
| 1319 out_file = open(out_filename, 'w') | |
| 1320 # header | |
| 1321 if self._options.full_results_html: | |
| 1322 h2 = "Test Failures" | |
| 1323 else: | |
| 1324 h2 = "Unexpected Test Failures" | |
| 1325 out_file.write("<html><head><title>Layout Test Results (%(time)s)" | |
| 1326 "</title></head><body><h2>%(h2)s (%(time)s)</h2>\n" | |
| 1327 % {'h2': h2, 'time': time.asctime()}) | |
| 1328 | |
| 1329 test_files.sort() | |
| 1330 for test_file in test_files: | |
| 1331 test_failures = result_summary.failures.get(test_file, []) | |
| 1332 out_file.write("<p><a href='%s'>%s</a><br />\n" | |
| 1333 % (path_utils.FilenameToUri(test_file), | |
| 1334 path_utils.RelativeTestFilename(test_file))) | |
| 1335 for failure in test_failures: | |
| 1336 out_file.write(" %s<br/>" | |
| 1337 % failure.ResultHtmlOutput( | |
| 1338 path_utils.RelativeTestFilename(test_file))) | |
| 1339 out_file.write("</p>\n") | |
| 1340 | |
| 1341 # footer | |
| 1342 out_file.write("</body></html>\n") | |
| 1343 return True | |
| 1344 | |
| 1345 def _ShowResultsHtmlFile(self): | |
| 1346 """Launches the test shell open to the results.html page.""" | |
| 1347 results_filename = os.path.join(self._options.results_directory, | |
| 1348 "results.html") | |
| 1349 subprocess.Popen([path_utils.TestShellPath(self._options.target), | |
| 1350 path_utils.FilenameToUri(results_filename)]) | |
| 1351 | |
| 1352 | |
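| # Appends value to the list stored under key, creating the list on first use. | |
| # Illustrative example (hypothetical keys and values, not executed here): | |
| #   d = {} | |
| #   _AddToDictOfLists(d, 'LayoutTests/fast', 'a.html') | |
| #   _AddToDictOfLists(d, 'LayoutTests/fast', 'b.html') | |
| #   # d == {'LayoutTests/fast': ['a.html', 'b.html']} | |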
| 1353 def _AddToDictOfLists(dict_of_lists, key, value): | |
| 1354   dict_of_lists.setdefault(key, []).append(value) | |
| 1355 | |
| 1356 | |
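| # Collects test paths from the files passed via --test-list: comments are | |
| # removed by test_expectations.StripComments() and empty lines are skipped. | |
| # Illustrative usage (hypothetical file name): | |
| #   paths = ReadTestFiles(['/tmp/extra_tests.txt']) | |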
| 1357 def ReadTestFiles(files): | |
| 1358 tests = [] | |
| 1359   for filename in files: | |
| 1360     for line in open(filename): | |
| 1361 line = test_expectations.StripComments(line) | |
| 1362 if line: | |
| 1363 tests.append(line) | |
| 1364 return tests | |
| 1365 | |
| 1366 | |
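| # Illustrative usage (mirrors the calls in main() below): | |
| #   write = CreateLoggingWriter(options, 'config') | |
| #   write("Using platform '%s'" % options.platform) | |
| # The message reaches logging.info() only with --verbose or when 'config' is | |
| # listed in --log; otherwise it is silently dropped. | |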
| 1367 def CreateLoggingWriter(options, log_option): | |
| 1368 """Returns a write() function that will write the string to logging.info() | |
| 1369   if log_option was specified in --log or if --verbose is true. Otherwise | |
| 1370   the message is dropped. | |
| 1371 | |
| 1372 Args: | |
| 1373 options: list of command line options from optparse | |
| 1374 log_option: option to match in options.log in order for the messages | |
| 1375 to be logged (e.g., 'actual' or 'expected') | |
| 1376 """ | |
| 1377 if options.verbose or log_option in options.log.split(","): | |
| 1378 return logging.info | |
| 1379   return lambda msg: 1 | |
| 1380 | |
| 1381 | |
| 1382 def main(options, args): | |
| 1383 """Run the tests. Will call sys.exit when complete. | |
| 1384 | |
| 1385 Args: | |
| 1386     options: command line options, as parsed by optparse | |
| 1387     args: a list of subdirectories or files to test | |
| 1388 """ | |
| 1389 | |
| 1390 if options.sources: | |
| 1391 options.verbose = True | |
| 1392 | |
| 1393 # Set up our logging format. | |
| 1394 meter = metered_stream.MeteredStream(options.verbose, sys.stderr) | |
| 1395 log_fmt = '%(message)s' | |
| 1396 log_datefmt = '%y%m%d %H:%M:%S' | |
| 1397 log_level = logging.INFO | |
| 1398 if options.verbose: | |
| 1399 log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s ' | |
| 1400 '%(message)s') | |
| 1401 log_level = logging.DEBUG | |
| 1402 logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt, | |
| 1403 stream=meter) | |
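|   # Routing logging through the MeteredStream presumably keeps regular log | |
|   # lines and the transient meter.update() progress messages on the same | |
|   # stderr stream. | |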
| 1404 | |
| 1405 if not options.target: | |
| 1406 if options.debug: | |
| 1407 options.target = "Debug" | |
| 1408 else: | |
| 1409 options.target = "Release" | |
| 1410 | |
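|   # Default to Apache on Mac ('darwin') and Linux ('linux2'); on other | |
|   # platforms the lighttpd-based server is used unless --use-apache is | |
|   # passed explicitly. | |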
| 1411 if not options.use_apache: | |
| 1412 options.use_apache = sys.platform in ('darwin', 'linux2') | |
| 1413 | |
| 1414 if options.results_directory.startswith("/"): | |
| 1415 # Assume it's an absolute path and normalize. | |
| 1416 options.results_directory = path_utils.GetAbsolutePath( | |
| 1417 options.results_directory) | |
| 1418 else: | |
| 1419 # If it's a relative path, make the output directory relative to | |
| 1420 # Debug or Release. | |
| 1421 basedir = path_utils.PathFromBase('webkit') | |
| 1422 options.results_directory = path_utils.GetAbsolutePath( | |
| 1423 os.path.join(basedir, options.target, options.results_directory)) | |
| 1424 | |
| 1425 if options.clobber_old_results: | |
| 1426 # Just clobber the actual test results directories since the other | |
| 1427 # files in the results directory are explicitly used for cross-run | |
| 1428 # tracking. | |
| 1429 path = os.path.join(options.results_directory, 'LayoutTests') | |
| 1430 if os.path.exists(path): | |
| 1431 shutil.rmtree(path) | |
| 1432 | |
| 1433 # Ensure platform is valid and force it to the form 'chromium-<platform>'. | |
| 1434 options.platform = path_utils.PlatformName(options.platform) | |
| 1435 | |
| 1436 if not options.num_test_shells: | |
| 1437 # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1. | |
| 1438 options.num_test_shells = platform_utils.GetNumCores() | |
| 1439 | |
| 1440 write = CreateLoggingWriter(options, 'config') | |
| 1441 write("Running %s test_shells in parallel" % options.num_test_shells) | |
| 1442 | |
| 1443 if not options.time_out_ms: | |
| 1444 if options.target == "Debug": | |
| 1445 options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS) | |
| 1446 else: | |
| 1447 options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS) | |
| 1448 | |
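|   # Slow tests get five times the regular timeout; presumably these are the | |
|   # tests marked as slow in the test expectations. | |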
| 1449 options.slow_time_out_ms = str(5 * int(options.time_out_ms)) | |
| 1450 write("Regular timeout: %s, slow test timeout: %s" % | |
| 1451 (options.time_out_ms, options.slow_time_out_ms)) | |
| 1452 | |
| 1453 # Include all tests if none are specified. | |
| 1454 new_args = [] | |
| 1455 for arg in args: | |
| 1456     if arg: | |
| 1457 new_args.append(arg) | |
| 1458 | |
| 1459 paths = new_args | |
| 1460 if not paths: | |
| 1461 paths = [] | |
| 1462 if options.test_list: | |
| 1463 paths += ReadTestFiles(options.test_list) | |
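|   # If no paths were given on the command line or via --test-list, the empty | |
|   # list makes GatherFilePaths() below fall back to the full layout test | |
|   # suite (per "Include all tests if none are specified." above). | |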
| 1464 | |
| 1465 # Create the output directory if it doesn't already exist. | |
| 1466 path_utils.MaybeMakeDirectory(options.results_directory) | |
| 1467 meter.update("Gathering files ...") | |
| 1468 | |
| 1469 test_runner = TestRunner(options, meter) | |
| 1470 test_runner.GatherFilePaths(paths) | |
| 1471 | |
| 1472 if options.lint_test_files: | |
| 1473     # Creating the expectations for each platform/target pair does all the | |
| 1474     # test list parsing and ensures the syntax is correct (e.g. no dupes). | |
| 1475 for platform in TestExpectationsFile.PLATFORMS: | |
| 1476 test_runner.ParseExpectations(platform, is_debug_mode=True) | |
| 1477 test_runner.ParseExpectations(platform, is_debug_mode=False) | |
| 1478 print ("If there are no fail messages, errors or exceptions, then the " | |
| 1479 "lint succeeded.") | |
| 1480 sys.exit(0) | |
| 1481 | |
| 1482 try: | |
| 1483 test_shell_binary_path = path_utils.TestShellPath(options.target) | |
| 1484 except path_utils.PathNotFound: | |
| 1485     print "\nERROR: test_shell could not be found. Be sure that you have" | |
| 1486     print "built it and that you are using the correct build. This script" | |
| 1487     print "uses the Release build by default. Use --debug to use the" | |
| 1488     print "Debug build.\n" | |
| 1489 sys.exit(1) | |
| 1490 | |
| 1491 write = CreateLoggingWriter(options, "config") | |
| 1492 write("Using platform '%s'" % options.platform) | |
| 1493 write("Placing test results in %s" % options.results_directory) | |
| 1494 if options.new_baseline: | |
| 1495 write("Placing new baselines in %s" % | |
| 1496 path_utils.ChromiumBaselinePath(options.platform)) | |
| 1497 write("Using %s build at %s" % (options.target, test_shell_binary_path)) | |
| 1498 if options.no_pixel_tests: | |
| 1499 write("Not running pixel tests") | |
| 1500 write("") | |
| 1501 | |
| 1502 meter.update("Parsing expectations ...") | |
| 1503 test_runner.ParseExpectations(options.platform, options.target == 'Debug') | |
| 1504 | |
| 1505 meter.update("Preparing tests ...") | |
| 1506 write = CreateLoggingWriter(options, "expected") | |
| 1507 result_summary = test_runner.PrepareListsAndPrintOutput(write) | |
| 1508 | |
| 1509 if 'cygwin' == sys.platform: | |
| 1510 logging.warn("#" * 40) | |
| 1511 logging.warn("# UNEXPECTED PYTHON VERSION") | |
| 1512 logging.warn("# This script should be run using the version of python") | |
| 1513 logging.warn("# in third_party/python_24/") | |
| 1514 logging.warn("#" * 40) | |
| 1515 sys.exit(1) | |
| 1516 | |
| 1517 # Delete the disk cache if any to ensure a clean test run. | |
| 1518 cachedir = os.path.split(test_shell_binary_path)[0] | |
| 1519 cachedir = os.path.join(cachedir, "cache") | |
| 1520 if os.path.exists(cachedir): | |
| 1521 shutil.rmtree(cachedir) | |
| 1522 | |
| 1523 test_runner.AddTestType(text_diff.TestTextDiff) | |
| 1524 if not options.no_pixel_tests: | |
| 1525 test_runner.AddTestType(image_diff.ImageDiff) | |
| 1526 if options.fuzzy_pixel_tests: | |
| 1527 test_runner.AddTestType(fuzzy_image_diff.FuzzyImageDiff) | |
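|   # Text diffs are always checked; pixel (image) diffs are skipped with | |
|   # --no-pixel-tests, and fuzzy image matching is added only when | |
|   # --fuzzy-pixel-tests is also given. | |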
| 1528 | |
| 1529 meter.update("Starting ...") | |
| 1530 has_new_failures = test_runner.Run(result_summary) | |
| 1531 | |
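|   # The exit status reflects whether there were unexpected results, so a | |
|   # zero status means the run matched expectations. | |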
| 1532 logging.debug("Exit status: %d" % has_new_failures) | |
| 1533 sys.exit(has_new_failures) | |
| 1534 | |
| 1535 if '__main__' == __name__: | |
| 1536 option_parser = optparse.OptionParser() | |
| 1537 option_parser.add_option("", "--no-pixel-tests", action="store_true", | |
| 1538 default=False, | |
| 1539 help="disable pixel-to-pixel PNG comparisons") | |
| 1540 option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true", | |
| 1541 default=False, | |
| 1542 help="Also use fuzzy matching to compare pixel " | |
| 1543 "test outputs.") | |
| 1544 option_parser.add_option("", "--results-directory", | |
| 1545 default="layout-test-results", | |
| 1546                            help="Output results directory, relative to" | |
| 1547                            " the Debug or Release directory") | |
| 1548 option_parser.add_option("", "--new-baseline", action="store_true", | |
| 1549 default=False, | |
| 1550 help="save all generated results as new baselines" | |
| 1551 " into the platform directory, overwriting " | |
| 1552 "whatever's already there.") | |
| 1553 option_parser.add_option("", "--noshow-results", action="store_true", | |
| 1554 default=False, help="don't launch the test_shell" | |
| 1555 " with results after the tests are done") | |
| 1556 option_parser.add_option("", "--full-results-html", action="store_true", | |
| 1557 default=False, help="show all failures in " | |
| 1558 "results.html, rather than only regressions") | |
| 1559 option_parser.add_option("", "--clobber-old-results", action="store_true", | |
| 1560 default=False, help="Clobbers test results from " | |
| 1561 "previous runs.") | |
| 1562 option_parser.add_option("", "--lint-test-files", action="store_true", | |
| 1563 default=False, help="Makes sure the test files " | |
| 1564 "parse for all configurations. Does not run any " | |
| 1565 "tests.") | |
| 1566 option_parser.add_option("", "--force", action="store_true", | |
| 1567 default=False, | |
| 1568 help="Run all tests, even those marked SKIP " | |
| 1569 "in the test list") | |
| 1570 option_parser.add_option("", "--num-test-shells", | |
| 1571                            help="Number of test_shells to run in parallel.") | |
| 1572 option_parser.add_option("", "--use-apache", action="store_true", | |
| 1573 default=False, | |
| 1574 help="Whether to use apache instead of lighttpd.") | |
| 1575 option_parser.add_option("", "--time-out-ms", default=None, | |
| 1576 help="Set the timeout for each test") | |
| 1577 option_parser.add_option("", "--run-singly", action="store_true", | |
| 1578 default=False, | |
| 1579 help="run a separate test_shell for each test") | |
| 1580 option_parser.add_option("", "--debug", action="store_true", default=False, | |
| 1581 help="use the debug binary instead of the release" | |
| 1582 " binary") | |
| 1583 option_parser.add_option("", "--num-slow-tests-to-log", default=50, | |
| 1584 help="Number of slow tests whose timings " | |
| 1585 "to print.") | |
| 1586 option_parser.add_option("", "--platform", | |
| 1587 help="Override the platform for expected results") | |
| 1588 option_parser.add_option("", "--target", default="", | |
| 1589 help="Set the build target configuration " | |
| 1590 "(overrides --debug)") | |
| 1591 option_parser.add_option("", "--log", action="store", | |
| 1592 default="detailed-progress,unexpected", | |
| 1593 help="log various types of data. The param should" | |
| 1594 " be a comma-separated list of values from: " | |
| 1595 "actual,config," + LOG_DETAILED_PROGRESS + | |
| 1596 ",expected,timing," + LOG_UNEXPECTED + " " | |
| 1597 "(defaults to " + | |
| 1598 "--log detailed-progress,unexpected)") | |
| 1599 option_parser.add_option("-v", "--verbose", action="store_true", | |
| 1600 default=False, help="include debug-level logging") | |
| 1601 option_parser.add_option("", "--sources", action="store_true", | |
| 1602 help="show expected result file path for each " | |
| 1603 "test (implies --verbose)") | |
| 1604 option_parser.add_option("", "--startup-dialog", action="store_true", | |
| 1605 default=False, | |
| 1606 help="create a dialog on test_shell.exe startup") | |
| 1607 option_parser.add_option("", "--gp-fault-error-box", action="store_true", | |
| 1608 default=False, | |
| 1609 help="enable Windows GP fault error box") | |
| 1610 option_parser.add_option("", "--wrapper", | |
| 1611 help="wrapper command to insert before " | |
| 1612 "invocations of test_shell; option is split " | |
| 1613 "on whitespace before running. (Example: " | |
| 1614 "--wrapper='valgrind --smc-check=all')") | |
| 1615 option_parser.add_option("", "--test-list", action="append", | |
| 1616 help="read list of tests to run from file", | |
| 1617 metavar="FILE") | |
| 1618 option_parser.add_option("", "--nocheck-sys-deps", action="store_true", | |
| 1619 default=False, | |
| 1620 help="Don't check the system dependencies " | |
| 1621 "(themes)") | |
| 1622 option_parser.add_option("", "--randomize-order", action="store_true", | |
| 1623 default=False, | |
| 1624 help=("Run tests in random order (useful for " | |
| 1625 "tracking down corruption)")) | |
| 1626 option_parser.add_option("", "--run-chunk", | |
| 1627 default=None, | |
| 1628                            help=("Run a specified chunk (n:l): the " | |
| 1629                            "nth chunk of length l of the layout tests")) | |
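|   # Illustrative reading of the n:l format: --run-chunk=0:100 would run the | |
|   # first 100 tests and --run-chunk=1:100 the next 100 (the exact slicing is | |
|   # handled elsewhere in this script). | |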
| 1630 option_parser.add_option("", "--run-part", | |
| 1631 default=None, | |
| 1632 help=("Run a specified part (n:m), the nth of m" | |
| 1633 " parts, of the layout tests")) | |
| 1634 option_parser.add_option("", "--batch-size", | |
| 1635 default=None, | |
| 1636                            help=("Run the tests in batches of n; after " | |
| 1637 "every n tests, the test shell is " | |
| 1638 "relaunched.")) | |
| 1639 option_parser.add_option("", "--builder-name", | |
| 1640 default="DUMMY_BUILDER_NAME", | |
| 1641 help=("The name of the builder shown on the " | |
| 1642 "waterfall running this script e.g. " | |
| 1643 "WebKit.")) | |
| 1644 option_parser.add_option("", "--build-name", | |
| 1645 default="DUMMY_BUILD_NAME", | |
| 1646 help=("The name of the builder used in its path, " | |
| 1647 "e.g. webkit-rel.")) | |
| 1648 option_parser.add_option("", "--build-number", | |
| 1649 default="DUMMY_BUILD_NUMBER", | |
| 1650                            help=("The build number of the builder running " | |
| 1651 "this script.")) | |
| 1652 option_parser.add_option("", "--experimental-fully-parallel", | |
| 1653 action="store_true", default=False, | |
| 1654 help="run all tests in parallel") | |
| 1655 | |
| 1656 options, args = option_parser.parse_args() | |
| 1657 main(options, args) | |