| OLD | NEW |
| 1 #!/bin/env python | 1 #!/bin/env python |
| 2 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Run layout tests using the test_shell. | 6 """Run layout tests using the test_shell. |
| 7 | 7 |
| 8 This is a port of the existing webkit test script run-webkit-tests. | 8 This is a port of the existing webkit test script run-webkit-tests. |
| 9 | 9 |
| 10 The TestRunner class runs a series of tests (TestType interface) against a set | 10 The TestRunner class runs a series of tests (TestType interface) against a set |
| (...skipping 30 matching lines...) |
| 41 from layout_package import path_utils | 41 from layout_package import path_utils |
| 42 from layout_package import platform_utils | 42 from layout_package import platform_utils |
| 43 from layout_package import test_failures | 43 from layout_package import test_failures |
| 44 from layout_package import test_shell_thread | 44 from layout_package import test_shell_thread |
| 45 from test_types import fuzzy_image_diff | 45 from test_types import fuzzy_image_diff |
| 46 from test_types import image_diff | 46 from test_types import image_diff |
| 47 from test_types import test_type_base | 47 from test_types import test_type_base |
| 48 from test_types import text_diff | 48 from test_types import text_diff |
| 49 from test_types import simplified_text_diff | 49 from test_types import simplified_text_diff |
| 50 | 50 |
| 51 class TestInfo: |
| 52 """Groups information about a test for easy passing of data.""" |
| 53 def __init__(self, filename, timeout): |
| 54 """Generates the URI and stores the filename and timeout for this test. |
| 55 Args: |
| 56 filename: Full path to the test. |
| 57 timeout: Timeout for running the test in TestShell. |
| 58 """ |
| 59 self.filename = filename |
| 60 self.uri = path_utils.FilenameToUri(filename) |
| 61 self.timeout = timeout |
| 62 |
| 51 | 63 |
| 52 class TestRunner: | 64 class TestRunner: |
| 53 """A class for managing running a series of tests on a series of test | 65 """A class for managing running a series of tests on a series of test |
| 54 files.""" | 66 files.""" |
| 55 | 67 |
| 56 # When collecting test cases, we include any file with these extensions. | 68 # When collecting test cases, we include any file with these extensions. |
| 57 _supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', | 69 _supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', |
| 58 '.php', '.svg']) | 70 '.php', '.svg']) |
| 59 # When collecting test cases, skip these directories | 71 # When collecting test cases, skip these directories |
| 60 _skipped_directories = set(['.svn', '_svn', 'resources']) | 72 _skipped_directories = set(['.svn', '_svn', 'resources']) |
| (...skipping 271 matching lines...) |
| 332 def _GetTestFileQueue(self, test_files): | 344 def _GetTestFileQueue(self, test_files): |
| 333 """Create the thread safe queue of lists of (test filenames, test URIs) | 345 """Create the thread safe queue of lists of (test filenames, test URIs) |
| 334 tuples. Each TestShellThread pulls a list from this queue and runs those | 346 tuples. Each TestShellThread pulls a list from this queue and runs those |
| 335 tests in order before grabbing the next available list. | 347 tests in order before grabbing the next available list. |
| 336 | 348 |
| 337 Shard the lists by directory. This helps ensure that tests that depend | 349 Shard the lists by directory. This helps ensure that tests that depend |
| 338 on each other (aka bad tests!) continue to run together as most | 350 on each other (aka bad tests!) continue to run together as most |
| 339 cross-test dependencies tend to occur within the same directory. | 351 cross-test dependencies tend to occur within the same directory. |
| 340 | 352 |
| 341 Return: | 353 Return: |
| 342 The Queue of lists of (test file, test uri) tuples. | 354 The Queue of lists of TestInfo objects. |
| 343 """ | 355 """ |
| 344 tests_by_dir = {} | 356 tests_by_dir = {} |
| 345 for test_file in test_files: | 357 for test_file in test_files: |
| 346 directory = self._GetDirForTestFile(test_file) | 358 directory = self._GetDirForTestFile(test_file) |
| 347 if directory not in tests_by_dir: | 359 if directory not in tests_by_dir: |
| 348 tests_by_dir[directory] = [] | 360 tests_by_dir[directory] = [] |
| 349 tests_by_dir[directory].append((test_file, | 361 |
| 350 path_utils.FilenameToUri(test_file))) | 362 if self._expectations.HasModifier(test_file, test_expectations.SLOW): |
| 363 timeout = 10 * int(self._options.time_out_ms) |
| 364 else: |
| 365 timeout = self._options.time_out_ms |
| 366 |
| 367 tests_by_dir[directory].append(TestInfo(test_file, timeout)) |
| 351 | 368 |
| 352 # Sort by the number of tests in the dir so that the ones with the most | 369 # Sort by the number of tests in the dir so that the ones with the most |
| 353 # tests get run first in order to maximize parallelization. Number of tests | 370 # tests get run first in order to maximize parallelization. Number of tests |
| 354 # is a good enough, but not perfect, approximation of how long that set of | 371 # is a good enough, but not perfect, approximation of how long that set of |
| 355 # tests will take to run. We can't just use a PriorityQueue until we move | 372 # tests will take to run. We can't just use a PriorityQueue until we move |
| 356 # to Python 2.6. | 373 # to Python 2.6. |
| 357 test_lists = [] | 374 test_lists = [] |
| 358 http_tests = None | 375 http_tests = None |
| 359 for directory in tests_by_dir: | 376 for directory in tests_by_dir: |
| 360 test_list = tests_by_dir[directory] | 377 test_list = tests_by_dir[directory] |
| (...skipping 30 matching lines...) |
| 391 | 408 |
| 392 test_args.new_baseline = self._options.new_baseline | 409 test_args.new_baseline = self._options.new_baseline |
| 393 test_args.show_sources = self._options.sources | 410 test_args.show_sources = self._options.sources |
| 394 | 411 |
| 395 if self._options.startup_dialog: | 412 if self._options.startup_dialog: |
| 396 shell_args.append('--testshell-startup-dialog') | 413 shell_args.append('--testshell-startup-dialog') |
| 397 | 414 |
| 398 if self._options.gp_fault_error_box: | 415 if self._options.gp_fault_error_box: |
| 399 shell_args.append('--gp-fault-error-box') | 416 shell_args.append('--gp-fault-error-box') |
| 400 | 417 |
| 401 # larger timeout if page heap is enabled. | |
| 402 if self._options.time_out_ms: | |
| 403 shell_args.append('--time-out-ms=' + self._options.time_out_ms) | |
| 404 | |
| 405 return (test_args, shell_args) | 418 return (test_args, shell_args) |
| 406 | 419 |
| 407 def _InstantiateTestShellThreads(self, test_shell_binary): | 420 def _InstantiateTestShellThreads(self, test_shell_binary): |
| 408 """Instantitates and starts the TestShellThread(s). | 421 """Instantitates and starts the TestShellThread(s). |
| 409 | 422 |
| 410 Return: | 423 Return: |
| 411 The list of threads. | 424 The list of threads. |
| 412 """ | 425 """ |
| 413 test_shell_command = [test_shell_binary] | 426 test_shell_command = [test_shell_binary] |
| 414 | 427 |
| (...skipping 57 matching lines...) |
| 472 sys.exit(1) | 485 sys.exit(1) |
| 473 | 486 |
| 474 logging.info("Starting tests") | 487 logging.info("Starting tests") |
| 475 | 488 |
| 476 # Create the output directory if it doesn't already exist. | 489 # Create the output directory if it doesn't already exist. |
| 477 google.path_utils.MaybeMakeDirectory(self._options.results_directory) | 490 google.path_utils.MaybeMakeDirectory(self._options.results_directory) |
| 478 | 491 |
| 479 threads = self._InstantiateTestShellThreads(test_shell_binary) | 492 threads = self._InstantiateTestShellThreads(test_shell_binary) |
| 480 | 493 |
| 481 # Wait for the threads to finish and collect test failures. | 494 # Wait for the threads to finish and collect test failures. |
| 482 test_failures = {} | 495 failures = {} |
| 483 test_timings = {} | 496 test_timings = {} |
| 484 individual_test_timings = [] | 497 individual_test_timings = [] |
| 485 try: | 498 try: |
| 486 for thread in threads: | 499 for thread in threads: |
| 487 while thread.isAlive(): | 500 while thread.isAlive(): |
| 488 # Let it timeout occasionally so it can notice a KeyboardInterrupt | 501 # Let it timeout occasionally so it can notice a KeyboardInterrupt |
| 489 # Actually, the timeout doesn't really matter: apparently it | 502 # Actually, the timeout doesn't really matter: apparently it |
| 490 # suffices to not use an indefinite blocking join for it to | 503 # suffices to not use an indefinite blocking join for it to |
| 491 # be interruptible by KeyboardInterrupt. | 504 # be interruptible by KeyboardInterrupt. |
| 492 thread.join(1.0) | 505 thread.join(1.0) |
| 493 test_failures.update(thread.GetFailures()) | 506 failures.update(thread.GetFailures()) |
| 494 test_timings.update(thread.GetDirectoryTimingStats()) | 507 test_timings.update(thread.GetDirectoryTimingStats()) |
| 495 individual_test_timings.extend(thread.GetIndividualTestStats()) | 508 individual_test_timings.extend(thread.GetIndividualTestStats()) |
| 496 except KeyboardInterrupt: | 509 except KeyboardInterrupt: |
| 497 for thread in threads: | 510 for thread in threads: |
| 498 thread.Cancel() | 511 thread.Cancel() |
| 499 raise | 512 raise |
| 500 for thread in threads: | 513 for thread in threads: |
| 501 # Check whether a TestShellThread died before normal completion. | 514 # Check whether a TestShellThread died before normal completion. |
| 502 exception_info = thread.GetExceptionInfo() | 515 exception_info = thread.GetExceptionInfo() |
| 503 if exception_info is not None: | 516 if exception_info is not None: |
| 504 # Re-raise the thread's exception here to make it clear that | 517 # Re-raise the thread's exception here to make it clear that |
| 505 # testing was aborted. Otherwise, the tests that did not run | 518 # testing was aborted. Otherwise, the tests that did not run |
| 506 # would be assumed to have passed. | 519 # would be assumed to have passed. |
| 507 raise exception_info[0], exception_info[1], exception_info[2] | 520 raise exception_info[0], exception_info[1], exception_info[2] |
| 508 | 521 |
| 509 print | 522 print |
| 510 end_time = time.time() | 523 end_time = time.time() |
| 511 logging.info("%f total testing time" % (end_time - start_time)) | 524 logging.info("%f total testing time" % (end_time - start_time)) |
| 512 | 525 |
| 513 print | 526 print |
| 514 self._PrintTimingStatistics(test_timings, individual_test_timings) | 527 self._PrintTimingStatistics(test_timings, individual_test_timings, failures) |
| 515 | 528 |
| 516 print "-" * 78 | 529 print "-" * 78 |
| 517 | 530 |
| 518 # Tests are done running. Compare failures with expected failures. | 531 # Tests are done running. Compare failures with expected failures. |
| 519 regressions = self._CompareFailures(test_failures) | 532 regressions = self._CompareFailures(failures) |
| 520 | 533 |
| 521 print "-" * 78 | 534 print "-" * 78 |
| 522 | 535 |
| 523 # Write summaries to stdout. | 536 # Write summaries to stdout. |
| 524 self._PrintResults(test_failures, sys.stdout) | 537 self._PrintResults(failures, sys.stdout) |
| 525 | 538 |
| 526 # Write the same data to a log file. | 539 # Write the same data to a log file. |
| 527 out_filename = os.path.join(self._options.results_directory, "score.txt") | 540 out_filename = os.path.join(self._options.results_directory, "score.txt") |
| 528 output_file = open(out_filename, "w") | 541 output_file = open(out_filename, "w") |
| 529 self._PrintResults(test_failures, output_file) | 542 self._PrintResults(failures, output_file) |
| 530 output_file.close() | 543 output_file.close() |
| 531 | 544 |
| 532 # Write the summary to disk (results.html) and maybe open the test_shell | 545 # Write the summary to disk (results.html) and maybe open the test_shell |
| 533 # to this file. | 546 # to this file. |
| 534 wrote_results = self._WriteResultsHtmlFile(test_failures, regressions) | 547 wrote_results = self._WriteResultsHtmlFile(failures, regressions) |
| 535 if not self._options.noshow_results and wrote_results: | 548 if not self._options.noshow_results and wrote_results: |
| 536 self._ShowResultsHtmlFile() | 549 self._ShowResultsHtmlFile() |
| 537 | 550 |
| 538 sys.stdout.flush() | 551 sys.stdout.flush() |
| 539 sys.stderr.flush() | 552 sys.stderr.flush() |
| 540 return len(regressions) | 553 return len(regressions) |
| 541 | 554 |
| 542 def _PrintTimingStatistics(self, directory_test_timings, | 555 def _PrintTimingStatistics(self, directory_test_timings, |
| 543 individual_test_timings): | 556 individual_test_timings, failures): |
| 557 self._PrintAggregateTestStatistics(individual_test_timings) |
| 558 self._PrintIndividualTestTimes(individual_test_timings, failures) |
| 559 self._PrintDirectoryTimings(directory_test_timings) |
| 560 |
| 561 def _PrintAggregateTestStatistics(self, individual_test_timings): |
| 562 """Prints aggregate statistics (e.g. median, mean, etc.) for all tests. |
| 563 Args: |
| 564 individual_test_timings: List of test_shell_thread.TestStats for all |
| 565 tests. |
| 566 """ |
| 544 test_types = individual_test_timings[0].time_for_diffs.keys() | 567 test_types = individual_test_timings[0].time_for_diffs.keys() |
| 545 times_for_test_shell = [] | 568 times_for_test_shell = [] |
| 546 times_for_diff_processing = [] | 569 times_for_diff_processing = [] |
| 547 times_per_test_type = {} | 570 times_per_test_type = {} |
| 548 for test_type in test_types: | 571 for test_type in test_types: |
| 549 times_per_test_type[test_type] = [] | 572 times_per_test_type[test_type] = [] |
| 550 | 573 |
| 551 for test_stats in individual_test_timings: | 574 for test_stats in individual_test_timings: |
| 552 times_for_test_shell.append(test_stats.test_run_time) | 575 times_for_test_shell.append(test_stats.test_run_time) |
| 553 times_for_diff_processing.append(test_stats.total_time_for_all_diffs) | 576 times_for_diff_processing.append(test_stats.total_time_for_all_diffs) |
| 554 time_for_diffs = test_stats.time_for_diffs | 577 time_for_diffs = test_stats.time_for_diffs |
| 555 for test_type in test_types: | 578 for test_type in test_types: |
| 556 times_per_test_type[test_type].append(time_for_diffs[test_type]) | 579 times_per_test_type[test_type].append(time_for_diffs[test_type]) |
| 557 | 580 |
| 558 logging.debug("PER TEST TIME IN TESTSHELL (seconds):") | 581 self._PrintStatisticsForTestTimings( |
| 559 self._PrintStatisticsForTestTimings(times_for_test_shell) | 582 "PER TEST TIME IN TESTSHELL (seconds):", |
| 560 logging.debug("PER TEST DIFF PROCESSING TIMES (seconds):") | 583 times_for_test_shell) |
| 561 self._PrintStatisticsForTestTimings(times_for_diff_processing) | 584 self._PrintStatisticsForTestTimings( |
| 585 "PER TEST DIFF PROCESSING TIMES (seconds):", |
| 586 times_for_diff_processing) |
| 562 for test_type in test_types: | 587 for test_type in test_types: |
| 563 logging.debug("TEST TYPE: %s" % test_type) | 588 self._PrintStatisticsForTestTimings( |
| 564 self._PrintStatisticsForTestTimings(times_per_test_type[test_type]) | 589 "PER TEST TIMES BY TEST TYPE: %s" % test_type, |
| 590 times_per_test_type[test_type]) |
| 565 | 591 |
| 592 def _PrintIndividualTestTimes(self, individual_test_timings, failures): |
| 593 """Prints the run times for slow, timeout and crash tests. |
| 594 Args: |
| 595 individual_test_timings: List of test_shell_thread.TestStats for all |
| 596 tests. |
| 597 failures: Dictionary mapping test filenames to list of test_failures. |
| 598 """ |
| 566 # Reverse-sort by the time spent in test_shell. | 599 # Reverse-sort by the time spent in test_shell. |
| 567 individual_test_timings.sort(lambda a, b: | 600 individual_test_timings.sort(lambda a, b: |
| 568 cmp(b.test_run_time, a.test_run_time)) | 601 cmp(b.test_run_time, a.test_run_time)) |
| 569 slowests_tests = ( | |
| 570 individual_test_timings[:self._options.num_slow_tests_to_log] ) | |
| 571 | 602 |
| 572 logging.debug("%s slowest tests:" % self._options.num_slow_tests_to_log) | 603 num_printed = 0 |
| 573 for test in slowests_tests: | 604 slow_tests = [] |
| 574 logging.debug("%s took %s seconds" % (test.filename, | 605 timeout_or_crash_tests = [] |
| 575 round(test.test_run_time, 1))) | 606 unexpected_slow_tests = [] |
| 607 for test_tuple in individual_test_timings: |
| 608 filename = test_tuple.filename |
| 609 is_timeout_crash_or_slow = False |
| 610 if self._expectations.HasModifier(filename, test_expectations.SLOW): |
| 611 is_timeout_crash_or_slow = True |
| 612 slow_tests.append(test_tuple) |
| 613 |
| 614 if filename in failures: |
| 615 for failure in failures[filename]: |
| 616 if (failure.__class__ == test_failures.FailureTimeout or |
| 617 failure.__class__ == test_failures.FailureCrash): |
| 618 is_timeout_crash_or_slow = True |
| 619 timeout_or_crash_tests.append(test_tuple) |
| 620 break |
| 621 |
| 622 if (not is_timeout_crash_or_slow and |
| 623 num_printed < self._options.num_slow_tests_to_log): |
| 624 num_printed = num_printed + 1 |
| 625 unexpected_slow_tests.append(test_tuple) |
| 576 | 626 |
| 577 print | 627 print |
| 628 self._PrintTestListTiming("%s slowest tests that are not marked as SLOW " |
| 629 "and did not timeout/crash:" % self._options.num_slow_tests_to_log, |
| 630 unexpected_slow_tests) |
| 631 print |
| 632 self._PrintTestListTiming("Tests marked as SLOW:", slow_tests) |
| 633 print |
| 634 self._PrintTestListTiming("Tests that timed out or crashed:", |
| 635 timeout_or_crash_tests) |
| 636 print |
| 578 | 637 |
| 638 def _PrintTestListTiming(self, title, test_list): |
| 639 logging.debug(title) |
| 640 for test_tuple in test_list: |
| 641 filename = test_tuple.filename[len(path_utils.LayoutDataDir()) + 1:] |
| 642 filename = filename.replace('\\', '/') |
| 643 test_run_time = round(test_tuple.test_run_time, 1) |
| 644 logging.debug("%s took %s seconds" % (filename, test_run_time)) |
| 645 |
| 646 def _PrintDirectoryTimings(self, directory_test_timings): |
| 579 timings = [] | 647 timings = [] |
| 580 for directory in directory_test_timings: | 648 for directory in directory_test_timings: |
| 581 num_tests, time_for_directory = directory_test_timings[directory] | 649 num_tests, time_for_directory = directory_test_timings[directory] |
| 582 timings.append((round(time_for_directory, 1), directory, num_tests)) | 650 timings.append((round(time_for_directory, 1), directory, num_tests)) |
| 583 timings.sort() | 651 timings.sort() |
| 584 | 652 |
| 585 logging.debug("Time to process each subdirectory:") | 653 logging.debug("Time to process each subdirectory:") |
| 586 for timing in timings: | 654 for timing in timings: |
| 587 logging.debug("%s took %s seconds to run %s tests." % \ | 655 logging.debug("%s took %s seconds to run %s tests." % \ |
| 588 (timing[1], timing[0], timing[2])) | 656 (timing[1], timing[0], timing[2])) |
| 589 | 657 |
| 590 def _PrintStatisticsForTestTimings(self, timings): | 658 def _PrintStatisticsForTestTimings(self, title, timings): |
| 591 """Prints the median, mean and standard deviation of the values in timings. | 659 """Prints the median, mean and standard deviation of the values in timings. |
| 592 Args: | 660 Args: |
| 661 title: Title for these timings. |
| 593 timings: A list of floats representing times. | 662 timings: A list of floats representing times. |
| 594 """ | 663 """ |
| 664 logging.debug(title) |
| 665 timings.sort() |
| 666 |
| 595 num_tests = len(timings) | 667 num_tests = len(timings) |
| 668 percentile90 = timings[int(.9 * num_tests)] |
| 669 percentile99 = timings[int(.99 * num_tests)] |
| 670 |
| 596 if num_tests % 2 == 1: | 671 if num_tests % 2 == 1: |
| 597 median = timings[((num_tests - 1) / 2) - 1] | 672 median = timings[(num_tests - 1) / 2] |
| 598 else: | 673 else: |
| 599 lower = timings[num_tests / 2 - 1] | 674 lower = timings[num_tests / 2 - 1] |
| 600 upper = timings[num_tests / 2] | 675 upper = timings[num_tests / 2] |
| 601 median = (float(lower + upper)) / 2 | 676 median = (float(lower + upper)) / 2 |
| 602 | 677 |
| 603 mean = sum(timings) / num_tests | 678 mean = sum(timings) / num_tests |
| 604 | 679 sum_of_deviations = 0 |
| 605 for time in timings: | 680 for time in timings: |
| 606 sum_of_deviations = math.pow(time - mean, 2) | 681 sum_of_deviations += math.pow(time - mean, 2) |
| 607 | 682 |
| 608 std_deviation = math.sqrt(sum_of_deviations / num_tests) | 683 std_deviation = math.sqrt(sum_of_deviations / num_tests) |
| 609 logging.debug(("Median: %s, Mean %s, Standard deviation: %s\n" % | 684 logging.debug(("Median: %s, Mean: %s, 90th percentile: %s, " |
| 610 (median, mean, std_deviation))) | 685 "99th percentile: %s, Standard deviation: %s\n" % ( |
| 686 median, mean, percentile90, percentile99, std_deviation))) |
| 611 | 687 |
| 612 def _PrintResults(self, test_failures, output): | 688 def _PrintResults(self, failures, output): |
| 613 """Print a short summary to stdout about how many tests passed. | 689 """Print a short summary to stdout about how many tests passed. |
| 614 | 690 |
| 615 Args: | 691 Args: |
| 616 test_failures is a dictionary mapping the test filename to a list of | 692 failures is a dictionary mapping the test filename to a list of |
| 617 TestFailure objects if the test failed | 693 TestFailure objects if the test failed |
| 618 | 694 |
| 619 output is the file descriptor to write the results to. For example, | 695 output is the file descriptor to write the results to. For example, |
| 620 sys.stdout. | 696 sys.stdout. |
| 621 """ | 697 """ |
| 622 | 698 |
| 623 failure_counts = {} | 699 failure_counts = {} |
| 624 deferred_counts = {} | 700 deferred_counts = {} |
| 625 fixable_counts = {} | 701 fixable_counts = {} |
| 626 non_ignored_counts = {} | 702 non_ignored_counts = {} |
| 627 fixable_failures = set() | 703 fixable_failures = set() |
| 628 deferred_failures = set() | 704 deferred_failures = set() |
| 629 non_ignored_failures = set() | 705 non_ignored_failures = set() |
| 630 | 706 |
| 631 # Aggregate failures in a dictionary (TestFailure -> frequency), | 707 # Aggregate failures in a dictionary (TestFailure -> frequency), |
| 632 # with known (fixable and ignored) failures separated out. | 708 # with known (fixable and ignored) failures separated out. |
| 633 def AddFailure(dictionary, key): | 709 def AddFailure(dictionary, key): |
| 634 if key in dictionary: | 710 if key in dictionary: |
| 635 dictionary[key] += 1 | 711 dictionary[key] += 1 |
| 636 else: | 712 else: |
| 637 dictionary[key] = 1 | 713 dictionary[key] = 1 |
| 638 | 714 |
| 639 for test, failures in test_failures.iteritems(): | 715 for test, failure_list in failures.iteritems(): |
| 640 for failure in failures: | 716 for failure in failure_list: |
| 641 AddFailure(failure_counts, failure.__class__) | 717 AddFailure(failure_counts, failure.__class__) |
| 642 if self._expectations.IsDeferred(test): | 718 if self._expectations.IsDeferred(test): |
| 643 AddFailure(deferred_counts, failure.__class__) | 719 AddFailure(deferred_counts, failure.__class__) |
| 644 deferred_failures.add(test) | 720 deferred_failures.add(test) |
| 645 else: | 721 else: |
| 646 if self._expectations.IsFixable(test): | 722 if self._expectations.IsFixable(test): |
| 647 AddFailure(fixable_counts, failure.__class__) | 723 AddFailure(fixable_counts, failure.__class__) |
| 648 fixable_failures.add(test) | 724 fixable_failures.add(test) |
| 649 if not self._expectations.IsIgnored(test): | 725 if not self._expectations.IsIgnored(test): |
| (...skipping 25 matching lines...) |
| 675 self._expectations.GetDeferred(), | 751 self._expectations.GetDeferred(), |
| 676 deferred_failures, | 752 deferred_failures, |
| 677 deferred_counts, | 753 deferred_counts, |
| 678 self._expectations.GetDeferredSkipped(), | 754 self._expectations.GetDeferredSkipped(), |
| 679 output) | 755 output) |
| 680 | 756 |
| 681 # Print breakdown of all tests including all skipped tests. | 757 # Print breakdown of all tests including all skipped tests. |
| 682 skipped |= self._expectations.GetWontFixSkipped() | 758 skipped |= self._expectations.GetWontFixSkipped() |
| 683 self._PrintResultSummary("=> All tests", | 759 self._PrintResultSummary("=> All tests", |
| 684 self._test_files, | 760 self._test_files, |
| 685 test_failures, | 761 failures, |
| 686 failure_counts, | 762 failure_counts, |
| 687 skipped, | 763 skipped, |
| 688 output) | 764 output) |
| 689 print | 765 print |
| 690 | 766 |
| 691 def _PrintResultSummary(self, heading, all, failed, failure_counts, skipped, | 767 def _PrintResultSummary(self, heading, all, failed, failure_counts, skipped, |
| 692 output): | 768 output): |
| 693 """Print a summary block of results for a particular category of test. | 769 """Print a summary block of results for a particular category of test. |
| 694 | 770 |
| 695 Args: | 771 Args: |
| (...skipping 18 matching lines...) |
| 714 | 790 |
| 715 def _PrintResultLine(self, count, total, message, output): | 791 def _PrintResultLine(self, count, total, message, output): |
| 716 if count == 0: return | 792 if count == 0: return |
| 717 output.write( | 793 output.write( |
| 718 ("%(count)d test case%(plural)s (%(percent).1f%%) %(message)s\n" % | 794 ("%(count)d test case%(plural)s (%(percent).1f%%) %(message)s\n" % |
| 719 { 'count' : count, | 795 { 'count' : count, |
| 720 'plural' : ('s', '')[count == 1], | 796 'plural' : ('s', '')[count == 1], |
| 721 'percent' : float(count) * 100 / total, | 797 'percent' : float(count) * 100 / total, |
| 722 'message' : message })) | 798 'message' : message })) |
| 723 | 799 |
| 724 def _CompareFailures(self, test_failures): | 800 def _CompareFailures(self, failures): |
| 725 """Determine how the test failures from this test run differ from the | 801 """Determine how the test failures from this test run differ from the |
| 726 previous test run and print results to stdout and a file. | 802 previous test run and print results to stdout and a file. |
| 727 | 803 |
| 728 Args: | 804 Args: |
| 729 test_failures is a dictionary mapping the test filename to a list of | 805 failures is a dictionary mapping the test filename to a list of |
| 730 TestFailure objects if the test failed | 806 TestFailure objects if the test failed |
| 731 | 807 |
| 732 Return: | 808 Return: |
| 733 A set of regressions (unexpected failures, hangs, or crashes) | 809 A set of regressions (unexpected failures, hangs, or crashes) |
| 734 """ | 810 """ |
| 735 cf = compare_failures.CompareFailures(self._test_files, | 811 cf = compare_failures.CompareFailures(self._test_files, |
| 736 test_failures, | 812 failures, |
| 737 self._expectations) | 813 self._expectations) |
| 738 | 814 |
| 739 if not self._options.nocompare_failures: | 815 if not self._options.nocompare_failures: |
| 740 cf.PrintRegressions(sys.stdout) | 816 cf.PrintRegressions(sys.stdout) |
| 741 | 817 |
| 742 out_filename = os.path.join(self._options.results_directory, | 818 out_filename = os.path.join(self._options.results_directory, |
| 743 "regressions.txt") | 819 "regressions.txt") |
| 744 output_file = open(out_filename, "w") | 820 output_file = open(out_filename, "w") |
| 745 cf.PrintRegressions(output_file) | 821 cf.PrintRegressions(output_file) |
| 746 output_file.close() | 822 output_file.close() |
| 747 | 823 |
| 748 return cf.GetRegressions() | 824 return cf.GetRegressions() |
| 749 | 825 |
| 750 def _WriteResultsHtmlFile(self, test_failures, regressions): | 826 def _WriteResultsHtmlFile(self, failures, regressions): |
| 751 """Write results.html which is a summary of tests that failed. | 827 """Write results.html which is a summary of tests that failed. |
| 752 | 828 |
| 753 Args: | 829 Args: |
| 754 test_failures: a dictionary mapping the test filename to a list of | 830 failures: a dictionary mapping the test filename to a list of |
| 755 TestFailure objects if the test failed | 831 TestFailure objects if the test failed |
| 756 regressions: a set of test filenames that regressed | 832 regressions: a set of test filenames that regressed |
| 757 | 833 |
| 758 Returns: | 834 Returns: |
| 759 True if any results were written (since expected failures may be omitted) | 835 True if any results were written (since expected failures may be omitted) |
| 760 """ | 836 """ |
| 761 # test failures | 837 # test failures |
| 762 if self._options.full_results_html: | 838 if self._options.full_results_html: |
| 763 test_files = test_failures.keys() | 839 test_files = failures.keys() |
| 764 else: | 840 else: |
| 765 test_files = list(regressions) | 841 test_files = list(regressions) |
| 766 if not len(test_files): | 842 if not len(test_files): |
| 767 return False | 843 return False |
| 768 | 844 |
| 769 out_filename = os.path.join(self._options.results_directory, | 845 out_filename = os.path.join(self._options.results_directory, |
| 770 "results.html") | 846 "results.html") |
| 771 out_file = open(out_filename, 'w') | 847 out_file = open(out_filename, 'w') |
| 772 # header | 848 # header |
| 773 if self._options.full_results_html: | 849 if self._options.full_results_html: |
| 774 h2 = "Test Failures" | 850 h2 = "Test Failures" |
| 775 else: | 851 else: |
| 776 h2 = "Unexpected Test Failures" | 852 h2 = "Unexpected Test Failures" |
| 777 out_file.write("<html><head><title>Layout Test Results (%(time)s)</title>" | 853 out_file.write("<html><head><title>Layout Test Results (%(time)s)</title>" |
| 778 "</head><body><h2>%(h2)s (%(time)s)</h2>\n" | 854 "</head><body><h2>%(h2)s (%(time)s)</h2>\n" |
| 779 % {'h2': h2, 'time': time.asctime()}) | 855 % {'h2': h2, 'time': time.asctime()}) |
| 780 | 856 |
| 781 test_files.sort() | 857 test_files.sort() |
| 782 for test_file in test_files: | 858 for test_file in test_files: |
| 783 if test_file in test_failures: failures = test_failures[test_file] | 859 if test_file in failures: failure_list = failures[test_file] |
| 784 else: failures = [] # unexpected passes | 860 else: failure_list = [] # unexpected passes |
| 785 out_file.write("<p><a href='%s'>%s</a><br />\n" | 861 out_file.write("<p><a href='%s'>%s</a><br />\n" |
| 786 % (path_utils.FilenameToUri(test_file), | 862 % (path_utils.FilenameToUri(test_file), |
| 787 path_utils.RelativeTestFilename(test_file))) | 863 path_utils.RelativeTestFilename(test_file))) |
| 788 for failure in failures: | 864 for failure in failure_list: |
| 789 out_file.write(" %s<br/>" | 865 out_file.write(" %s<br/>" |
| 790 % failure.ResultHtmlOutput( | 866 % failure.ResultHtmlOutput( |
| 791 path_utils.RelativeTestFilename(test_file))) | 867 path_utils.RelativeTestFilename(test_file))) |
| 792 out_file.write("</p>\n") | 868 out_file.write("</p>\n") |
| 793 | 869 |
| (...skipping 180 matching lines...) |
| 974 default=False, | 1050 default=False, |
| 975 help="Run all tests, even those marked SKIP in the " | 1051 help="Run all tests, even those marked SKIP in the " |
| 976 "test list") | 1052 "test list") |
| 977 option_parser.add_option("", "--nocompare-failures", action="store_true", | 1053 option_parser.add_option("", "--nocompare-failures", action="store_true", |
| 978 default=False, | 1054 default=False, |
| 979 help="Disable comparison to the last test run. " | 1055 help="Disable comparison to the last test run. " |
| 980 "When enabled, show stats on how many tests " | 1056 "When enabled, show stats on how many tests " |
| 981 "newly pass or fail.") | 1057 "newly pass or fail.") |
| 982 option_parser.add_option("", "--num-test-shells", | 1058 option_parser.add_option("", "--num-test-shells", |
| 983 help="Number of testshells to run in parallel.") | 1059 help="Number of testshells to run in parallel.") |
| 984 option_parser.add_option("", "--time-out-ms", | 1060 option_parser.add_option("", "--time-out-ms", default=None, |
| 985 default=None, | |
| 986 help="Set the timeout for each test") | 1061 help="Set the timeout for each test") |
| 987 option_parser.add_option("", "--run-singly", action="store_true", | 1062 option_parser.add_option("", "--run-singly", action="store_true", |
| 988 default=False, | 1063 default=False, |
| 989 help="run a separate test_shell for each test") | 1064 help="run a separate test_shell for each test") |
| 990 option_parser.add_option("", "--debug", action="store_true", default=False, | 1065 option_parser.add_option("", "--debug", action="store_true", default=False, |
| 991 help="use the debug binary instead of the release " | 1066 help="use the debug binary instead of the release " |
| 992 "binary") | 1067 "binary") |
| 993 option_parser.add_option("", "--num-slow-tests-to-log", default=50, | 1068 option_parser.add_option("", "--num-slow-tests-to-log", default=50, |
| 994 help="Number of slow tests whose timings to print.") | 1069 help="Number of slow tests whose timings to print.") |
| 995 option_parser.add_option("", "--platform", | 1070 option_parser.add_option("", "--platform", |
| (...skipping 35 matching lines...) |
| 1031 option_parser.add_option("", "--run-part", | 1106 option_parser.add_option("", "--run-part", |
| 1032 default=None, | 1107 default=None, |
| 1033 help=("Run a specified part (n:m), the nth of m" | 1108 help=("Run a specified part (n:m), the nth of m" |
| 1034 " parts, of the layout tests")) | 1109 " parts, of the layout tests")) |
| 1035 option_parser.add_option("", "--batch-size", | 1110 option_parser.add_option("", "--batch-size", |
| 1036 default=None, | 1111 default=None, |
| 1037 help=("Run a the tests in batches (n), after every " | 1112 help=("Run a the tests in batches (n), after every " |
| 1038 "n tests, the test shell is relaunched.")) | 1113 "n tests, the test shell is relaunched.")) |
| 1039 options, args = option_parser.parse_args() | 1114 options, args = option_parser.parse_args() |
| 1040 main(options, args) | 1115 main(options, args) |
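
The core plumbing change above is that `_GetTestFileQueue` now queues lists of `TestInfo` objects, each carrying its own timeout (ten times the normal `--time-out-ms` when the test is marked SLOW in the expectations), instead of bare (filename, URI) tuples. Below is a minimal standalone sketch of that grouping, under stated assumptions: `is_slow()`, `dir_for()` and `make_test_info()` are placeholder helpers standing in for the expectations, path_utils and TestInfo calls in the real patch, timeouts are coerced to int in both branches for simplicity, and the special ordering of http tests is omitted.

```python
# Sketch only: placeholder helpers, not the real layout_package APIs.
import Queue  # Python 2 name of the standard queue module


def build_test_queue(test_files, time_out_ms, is_slow, dir_for, make_test_info):
  """Groups tests by directory and queues the largest groups first."""
  tests_by_dir = {}
  for test_file in test_files:
    # Tests marked SLOW in the expectations get ten times the normal timeout.
    if is_slow(test_file):
      timeout = 10 * int(time_out_ms)
    else:
      timeout = int(time_out_ms)
    tests_by_dir.setdefault(dir_for(test_file), []).append(
        make_test_info(test_file, timeout))

  # Directories with the most tests go first to maximize parallelization.
  test_lists = sorted(tests_by_dir.values(), key=len, reverse=True)

  filename_queue = Queue.Queue()
  for test_list in test_lists:
    filename_queue.put(test_list)
  return filename_queue
```

Each TestShellThread then pulls one list at a time, so tests in the same directory keep running together on the same shell.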
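
The reworked `_PrintStatisticsForTestTimings` now reports nearest-rank 90th and 99th percentiles alongside the median, mean and standard deviation. A small self-contained sketch of that computation on a plain list of per-test times follows; the function name and the sample numbers are illustrative only, and the squared deviations are accumulated over every sample before taking the square root.

```python
# Sketch only: mirrors the aggregate timing statistics logged by the patch.
import math


def summarize_timings(timings):
  """Returns (median, mean, 90th pct, 99th pct, std deviation) for a
  non-empty list of per-test times, sorting the list in place."""
  timings.sort()
  num_tests = len(timings)

  # Nearest-rank style percentiles over the sorted list.
  percentile90 = timings[int(.9 * num_tests)]
  percentile99 = timings[int(.99 * num_tests)]

  if num_tests % 2 == 1:
    median = timings[(num_tests - 1) / 2]          # middle element
  else:
    lower = timings[num_tests / 2 - 1]
    upper = timings[num_tests / 2]
    median = float(lower + upper) / 2

  mean = sum(timings) / num_tests

  # Accumulate squared deviations over every sample, then take the root.
  sum_of_deviations = 0.0
  for time in timings:
    sum_of_deviations += math.pow(time - mean, 2)
  std_deviation = math.sqrt(sum_of_deviations / num_tests)

  return median, mean, percentile90, percentile99, std_deviation

# Example: summarize_timings([0.2, 0.4, 0.4, 0.9, 3.1])
# -> (0.4, 1.0, 3.1, 3.1, ~1.08)
```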