Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(28)

Side by Side Diff: webkit/tools/layout_tests/run_webkit_tests.py

Issue 63127: Print times for individual tests that take longer than 1 second to run. (Closed)
Patch Set: Made us spit out top 50 slowest tests Created 11 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « webkit/tools/layout_tests/layout_package/test_shell_thread.py ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/bin/env python 1 #!/bin/env python
2 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Run layout tests using the test_shell. 6 """Run layout tests using the test_shell.
7 7
8 This is a port of the existing webkit test script run-webkit-tests. 8 This is a port of the existing webkit test script run-webkit-tests.
9 9
10 The TestRunner class runs a series of tests (TestType interface) against a set 10 The TestRunner class runs a series of tests (TestType interface) against a set
(...skipping 462 matching lines...) Expand 10 before | Expand all | Expand 10 after
473 logging.info("Starting tests") 473 logging.info("Starting tests")
474 474
475 # Create the output directory if it doesn't already exist. 475 # Create the output directory if it doesn't already exist.
476 google.path_utils.MaybeMakeDirectory(self._options.results_directory) 476 google.path_utils.MaybeMakeDirectory(self._options.results_directory)
477 477
478 threads = self._InstantiateTestShellThreads(test_shell_binary) 478 threads = self._InstantiateTestShellThreads(test_shell_binary)
479 479
480 # Wait for the threads to finish and collect test failures. 480 # Wait for the threads to finish and collect test failures.
481 test_failures = {} 481 test_failures = {}
482 test_timings = {} 482 test_timings = {}
483 individual_test_timings = []
483 try: 484 try:
484 for thread in threads: 485 for thread in threads:
485 while thread.isAlive(): 486 while thread.isAlive():
486 # Let it timeout occasionally so it can notice a KeyboardInterrupt 487 # Let it timeout occasionally so it can notice a KeyboardInterrupt
487 # Actually, the timeout doesn't really matter: apparently it 488 # Actually, the timeout doesn't really matter: apparently it
488 # suffices to not use an indefinite blocking join for it to 489 # suffices to not use an indefinite blocking join for it to
489 # be interruptible by KeyboardInterrupt. 490 # be interruptible by KeyboardInterrupt.
490 thread.join(1.0) 491 thread.join(1.0)
491 test_failures.update(thread.GetFailures()) 492 test_failures.update(thread.GetFailures())
492 test_timings.update(thread.GetTimingStats()) 493 test_timings.update(thread.GetTimingStats())
494 individual_test_timings.extend(thread.GetIndividualTestTimingStats())
493 except KeyboardInterrupt: 495 except KeyboardInterrupt:
494 for thread in threads: 496 for thread in threads:
495 thread.Cancel() 497 thread.Cancel()
496 raise 498 raise
497 for thread in threads: 499 for thread in threads:
498 # Check whether a TestShellThread died before normal completion. 500 # Check whether a TestShellThread died before normal completion.
499 exception_info = thread.GetExceptionInfo() 501 exception_info = thread.GetExceptionInfo()
500 if exception_info is not None: 502 if exception_info is not None:
501 # Re-raise the thread's exception here to make it clear that 503 # Re-raise the thread's exception here to make it clear that
502 # testing was aborted. Otherwise, the tests that did not run 504 # testing was aborted. Otherwise, the tests that did not run
503 # would be assumed to have passed. 505 # would be assumed to have passed.
504 raise exception_info[0], exception_info[1], exception_info[2] 506 raise exception_info[0], exception_info[1], exception_info[2]
505 507
506 print 508 print
507 end_time = time.time() 509 end_time = time.time()
508 logging.info("%f total testing time" % (end_time - start_time)) 510 logging.info("%f total testing time" % (end_time - start_time))
509 511
510 self._PrintTimingsForRuns(test_timings) 512 print
513 self._PrintTimingStatistics(test_timings, individual_test_timings)
511 514
512 print "-" * 78 515 print "-" * 78
513 516
514 # Tests are done running. Compare failures with expected failures. 517 # Tests are done running. Compare failures with expected failures.
515 regressions = self._CompareFailures(test_failures) 518 regressions = self._CompareFailures(test_failures)
516 519
517 print "-" * 78 520 print "-" * 78
518 521
519 # Write summaries to stdout. 522 # Write summaries to stdout.
520 self._PrintResults(test_failures, sys.stdout) 523 self._PrintResults(test_failures, sys.stdout)
521 524
522 # Write the same data to a log file. 525 # Write the same data to a log file.
523 out_filename = os.path.join(self._options.results_directory, "score.txt") 526 out_filename = os.path.join(self._options.results_directory, "score.txt")
524 output_file = open(out_filename, "w") 527 output_file = open(out_filename, "w")
525 self._PrintResults(test_failures, output_file) 528 self._PrintResults(test_failures, output_file)
526 output_file.close() 529 output_file.close()
527 530
528 # Write the summary to disk (results.html) and maybe open the test_shell 531 # Write the summary to disk (results.html) and maybe open the test_shell
529 # to this file. 532 # to this file.
530 wrote_results = self._WriteResultsHtmlFile(test_failures, regressions) 533 wrote_results = self._WriteResultsHtmlFile(test_failures, regressions)
531 if not self._options.noshow_results and wrote_results: 534 if not self._options.noshow_results and wrote_results:
532 self._ShowResultsHtmlFile() 535 self._ShowResultsHtmlFile()
533 536
534 sys.stdout.flush() 537 sys.stdout.flush()
535 sys.stderr.flush() 538 sys.stderr.flush()
536 return len(regressions) 539 return len(regressions)
537 540
538 def _PrintTimingsForRuns(self, test_timings): 541 def _PrintTimingStatistics(self, test_timings, individual_test_timings):
542 # Don't need to do any processing here for non-debug logging.
543 if logging.getLogger().getEffectiveLevel() > 10:
544 return
545
546 logging.debug("%s slowest tests:" % self._options.num_slow_tests_to_log)
547
548 individual_test_timings.sort(reverse=True)
549 slowests_tests = \
tony 2009/04/08 19:33:19 Nit: Normally in python code we use () to get impl
550 individual_test_timings[:self._options.num_slow_tests_to_log]
551
552 for test in slowests_tests:
553 logging.debug("%s took %s seconds" % (test[1], round(test[0], 1)))
554
555 print
556
539 timings = [] 557 timings = []
540 for directory in test_timings: 558 for directory in test_timings:
541 num_tests, time = test_timings[directory] 559 num_tests, time = test_timings[directory]
542 timings.append((round(time, 1), directory, num_tests)) 560 timings.append((round(time, 1), directory, num_tests))
543 timings.sort() 561 timings.sort()
544 562
 545     logging.debug("Time to process each subdirectory:") 563     logging.debug("Time to process each subdirectory:")
546 for timing in timings: 564 for timing in timings:
547 logging.debug("%s took %s seconds to run %s tests." % \ 565 logging.debug("%s took %s seconds to run %s tests." % \
548 (timing[1], timing[0], timing[2])) 566 (timing[1], timing[0], timing[2]))
(...skipping 376 matching lines...) Expand 10 before | Expand all | Expand 10 after
925 help="Number of testshells to run in parallel.") 943 help="Number of testshells to run in parallel.")
926 option_parser.add_option("", "--time-out-ms", 944 option_parser.add_option("", "--time-out-ms",
927 default=None, 945 default=None,
928 help="Set the timeout for each test") 946 help="Set the timeout for each test")
929 option_parser.add_option("", "--run-singly", action="store_true", 947 option_parser.add_option("", "--run-singly", action="store_true",
930 default=False, 948 default=False,
931 help="run a separate test_shell for each test") 949 help="run a separate test_shell for each test")
932 option_parser.add_option("", "--debug", action="store_true", default=False, 950 option_parser.add_option("", "--debug", action="store_true", default=False,
933 help="use the debug binary instead of the release " 951 help="use the debug binary instead of the release "
934 "binary") 952 "binary")
953 option_parser.add_option("", "--num-slow-tests-to-log", default=50,
954 help="Number of slow tests whose timings to print.")
935 option_parser.add_option("", "--platform", 955 option_parser.add_option("", "--platform",
936 help="Override the platform for expected results") 956 help="Override the platform for expected results")
937 option_parser.add_option("", "--target", default="", 957 option_parser.add_option("", "--target", default="",
938 help="Set the build target configuration (overrides" 958 help="Set the build target configuration (overrides"
939 " --debug)") 959 " --debug)")
940 # TODO(pamg): Support multiple levels of verbosity, and remove --sources. 960 # TODO(pamg): Support multiple levels of verbosity, and remove --sources.
941 option_parser.add_option("-v", "--verbose", action="store_true", 961 option_parser.add_option("-v", "--verbose", action="store_true",
942 default=False, help="include debug-level logging") 962 default=False, help="include debug-level logging")
943 option_parser.add_option("", "--sources", action="store_true", 963 option_parser.add_option("", "--sources", action="store_true",
944 help="show expected result file path for each test " 964 help="show expected result file path for each test "
(...skipping 26 matching lines...) Expand all
971 option_parser.add_option("", "--run-part", 991 option_parser.add_option("", "--run-part",
972 default=None, 992 default=None,
973 help=("Run a specified part (n:m), the nth of m" 993 help=("Run a specified part (n:m), the nth of m"
974 " parts, of the layout tests")) 994 " parts, of the layout tests"))
975 option_parser.add_option("", "--batch-size", 995 option_parser.add_option("", "--batch-size",
976 default=None, 996 default=None,
 977                            help=("Run the tests in batches (n), after every " 997                            help=("Run the tests in batches (n), after every "
978 "n tests, the test shell is relaunched.")) 998 "n tests, the test shell is relaunched."))
979 options, args = option_parser.parse_args() 999 options, args = option_parser.parse_args()
980 main(options, args) 1000 main(options, args)
OLDNEW
« no previous file with comments | « webkit/tools/layout_tests/layout_package/test_shell_thread.py ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698