| OLD | NEW |
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """A tool to run a chrome test executable, used by the buildbot slaves. | 6 """A tool to run a chrome test executable, used by the buildbot slaves. |
| 7 | 7 |
| 8 When this is run, the current directory (cwd) should be the outer build | 8 When this is run, the current directory (cwd) should be the outer build |
| 9 directory (e.g., chrome-release/build/). | 9 directory (e.g., chrome-release/build/). |
| 10 | 10 |
| (...skipping 24 matching lines...) Expand all Loading... |
| 35 # Because of this dependency on a chromium checkout, we need to disable some | 35 # Because of this dependency on a chromium checkout, we need to disable some |
| 36 # pylint checks. | 36 # pylint checks. |
| 37 # pylint: disable=E0611 | 37 # pylint: disable=E0611 |
| 38 # pylint: disable=E1101 | 38 # pylint: disable=E1101 |
| 39 from common import chromium_utils | 39 from common import chromium_utils |
| 40 from common import gtest_utils | 40 from common import gtest_utils |
| 41 import config | 41 import config |
| 42 from slave import crash_utils | 42 from slave import crash_utils |
| 43 from slave import gtest_slave_utils | 43 from slave import gtest_slave_utils |
| 44 from slave import process_log_utils | 44 from slave import process_log_utils |
| 45 from slave import results_dashboard |
| 45 from slave import slave_utils | 46 from slave import slave_utils |
| 46 from slave import xvfb | 47 from slave import xvfb |
| 47 from slave.gtest.json_results_generator import GetSvnRevision | 48 from slave.gtest.json_results_generator import GetSvnRevision |
| 48 | 49 |
| 49 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) | 50 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) |
| 50 | 51 |
| 51 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' | 52 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' |
| 52 | 53 |
| 53 DEST_DIR = 'gtest_results' | 54 DEST_DIR = 'gtest_results' |
| 54 | 55 |
| (...skipping 285 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 340 factory_properties=options.factory_properties, | 341 factory_properties=options.factory_properties, |
| 341 webkit_revision=webkit_revision) | 342 webkit_revision=webkit_revision) |
| 342 | 343 |
| 343 if options.annotate and options.generate_json_file: | 344 if options.annotate and options.generate_json_file: |
| 344 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) | 345 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) |
| 345 | 346 |
| 346 return tracker_obj | 347 return tracker_obj |
| 347 | 348 |
| 348 | 349 |
| 349 def annotate(test_name, result, results_tracker, full_name=False, | 350 def annotate(test_name, result, results_tracker, full_name=False, |
| 350 perf_dashboard_id=None): | 351 perf_dashboard_id=None, results_url=None, system=None): |
| 351 """Given a test result and tracker, update the waterfall with test results.""" | 352 """Given a test result and tracker, update the waterfall with test results.""" |
| 352 get_text_result = process_log_utils.SUCCESS | 353 get_text_result = process_log_utils.SUCCESS |
| 353 | 354 |
| 354 for failure in sorted(results_tracker.FailedTests()): | 355 for failure in sorted(results_tracker.FailedTests()): |
| 355 if full_name: | 356 if full_name: |
| 356 testabbr = re.sub(r'[^\w\.\-]', '_', failure) | 357 testabbr = re.sub(r'[^\w\.\-]', '_', failure) |
| 357 else: | 358 else: |
| 358 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1]) | 359 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1]) |
| 359 slave_utils.WriteLogLines(testabbr, | 360 slave_utils.WriteLogLines(testabbr, |
| 360 results_tracker.FailureDescription(failure)) | 361 results_tracker.FailureDescription(failure)) |
| (...skipping 30 matching lines...) Expand all Loading... |
| 391 | 392 |
| 392 if hasattr(results_tracker, 'PerformanceLogs'): | 393 if hasattr(results_tracker, 'PerformanceLogs'): |
| 393 if not perf_dashboard_id: | 394 if not perf_dashboard_id: |
| 394 print 'runtest.py error: perf step specified but', | 395 print 'runtest.py error: perf step specified but', |
| 395 print 'no test_id in factory_properties!' | 396 print 'no test_id in factory_properties!' |
| 396 print '@@@STEP_EXCEPTION@@@' | 397 print '@@@STEP_EXCEPTION@@@' |
| 397 return | 398 return |
| 398 for logname, log in results_tracker.PerformanceLogs().iteritems(): | 399 for logname, log in results_tracker.PerformanceLogs().iteritems(): |
| 399 lines = [str(l).rstrip() for l in log] | 400 lines = [str(l).rstrip() for l in log] |
| 400 slave_utils.WriteLogLines(logname, lines, perf=perf_dashboard_id) | 401 slave_utils.WriteLogLines(logname, lines, perf=perf_dashboard_id) |
| 402 if results_url: |
| 403 results_dashboard.SendResults( |
| 404 logname, lines, system, test_name, results_url) |
| 401 | 405 |
| 402 | 406 |
| 403 def get_build_dir_and_exe_path_mac(options, target_dir, exe_name): | 407 def get_build_dir_and_exe_path_mac(options, target_dir, exe_name): |
| 404 """Returns a tuple of the build dir and path to the executable in the | 408 """Returns a tuple of the build dir and path to the executable in the |
| 405 specified target directory. | 409 specified target directory. |
| 406 | 410 |
| 407 Args: | 411 Args: |
| 408 target_dir: the target directory where the executable should be found | 412 target_dir: the target directory where the executable should be found |
| 409 (e.g. 'Debug' or 'Release-iphonesimulator'). | 413 (e.g. 'Debug' or 'Release-iphonesimulator'). |
| 410 exe_name: the name of the executable file in the target directory. | 414 exe_name: the name of the executable file in the target directory. |
| (...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 535 if http_server: | 539 if http_server: |
| 536 http_server.StopServer() | 540 http_server.StopServer() |
| 537 | 541 |
| 538 if options.generate_json_file: | 542 if options.generate_json_file: |
| 539 _GenerateJSONForTestResults(options, results_tracker) | 543 _GenerateJSONForTestResults(options, results_tracker) |
| 540 | 544 |
| 541 if options.annotate: | 545 if options.annotate: |
| 542 annotate(options.test_type, result, results_tracker, | 546 annotate(options.test_type, result, results_tracker, |
| 543 options.factory_properties.get('full_test_name'), | 547 options.factory_properties.get('full_test_name'), |
| 544 perf_dashboard_id=options.factory_properties.get( | 548 perf_dashboard_id=options.factory_properties.get( |
| 545 'test_name')) | 549 'test_name'), |
| 550 results_url=options.results_url, |
| 551 system=options.factory_properties.get('perf_id')) |
| 546 | 552 |
| 547 return result | 553 return result |
| 548 | 554 |
| 549 | 555 |
| 550 def main_ios(options, args): | 556 def main_ios(options, args): |
| 551 if len(args) < 1: | 557 if len(args) < 1: |
| 552 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) | 558 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) |
| 553 | 559 |
| 554 def kill_simulator(): | 560 def kill_simulator(): |
| 555 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator']) | 561 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator']) |
| (...skipping 225 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 781 if options.xvfb: | 787 if options.xvfb: |
| 782 xvfb.StopVirtualX(slave_name) | 788 xvfb.StopVirtualX(slave_name) |
| 783 | 789 |
| 784 if options.generate_json_file: | 790 if options.generate_json_file: |
| 785 _GenerateJSONForTestResults(options, results_tracker) | 791 _GenerateJSONForTestResults(options, results_tracker) |
| 786 | 792 |
| 787 if options.annotate: | 793 if options.annotate: |
| 788 annotate(options.test_type, result, results_tracker, | 794 annotate(options.test_type, result, results_tracker, |
| 789 options.factory_properties.get('full_test_name'), | 795 options.factory_properties.get('full_test_name'), |
| 790 perf_dashboard_id=options.factory_properties.get( | 796 perf_dashboard_id=options.factory_properties.get( |
| 791 'test_name')) | 797 'test_name'), |
| 798 results_url=options.results_url, |
| 799 system=options.factory_properties.get('perf_id')) |
| 792 | 800 |
| 793 return result | 801 return result |
| 794 | 802 |
| 795 | 803 |
| 796 def main_win(options, args): | 804 def main_win(options, args): |
| 797 """Using the target build configuration, run the executable given in the | 805 """Using the target build configuration, run the executable given in the |
| 798 first non-option argument, passing any following arguments to that | 806 first non-option argument, passing any following arguments to that |
| 799 executable. | 807 executable. |
| 800 """ | 808 """ |
| 801 if len(args) < 1: | 809 if len(args) < 1: |
| (...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 855 if options.enable_pageheap: | 863 if options.enable_pageheap: |
| 856 slave_utils.SetPageHeap(build_dir, 'chrome.exe', False) | 864 slave_utils.SetPageHeap(build_dir, 'chrome.exe', False) |
| 857 | 865 |
| 858 if options.generate_json_file: | 866 if options.generate_json_file: |
| 859 _GenerateJSONForTestResults(options, results_tracker) | 867 _GenerateJSONForTestResults(options, results_tracker) |
| 860 | 868 |
| 861 if options.annotate: | 869 if options.annotate: |
| 862 annotate(options.test_type, result, results_tracker, | 870 annotate(options.test_type, result, results_tracker, |
| 863 options.factory_properties.get('full_test_name'), | 871 options.factory_properties.get('full_test_name'), |
| 864 perf_dashboard_id=options.factory_properties.get( | 872 perf_dashboard_id=options.factory_properties.get( |
| 865 'test_name')) | 873 'test_name'), |
| 874 results_url=options.results_url, |
| 875 system=options.factory_properties.get('perf_id')) |
| 866 | 876 |
| 867 return result | 877 return result |
| 868 | 878 |
| 869 | 879 |
| 870 def main(): | 880 def main(): |
| 871 import platform | 881 import platform |
| 872 | 882 |
| 873 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', | 883 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', |
| 874 'third_party', 'xvfb', platform.architecture()[0]) | 884 'third_party', 'xvfb', platform.architecture()[0]) |
| 875 | 885 |
| (...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 958 option_parser.add_option('', '--test-type', default='', | 968 option_parser.add_option('', '--test-type', default='', |
| 959 help='The test name that identifies the test, ' | 969 help='The test name that identifies the test, ' |
| 960 'e.g. \'unit-tests\'') | 970 'e.g. \'unit-tests\'') |
| 961 option_parser.add_option('', '--test-results-server', default='', | 971 option_parser.add_option('', '--test-results-server', default='', |
| 962 help='The test results server to upload the ' | 972 help='The test results server to upload the ' |
| 963 'results.') | 973 'results.') |
| 964 option_parser.add_option('', '--annotate', default='', | 974 option_parser.add_option('', '--annotate', default='', |
| 965 help='Annotate output when run as a buildstep. ' | 975 help='Annotate output when run as a buildstep. ' |
| 966 'Specify which type of test to parse, available' | 976 'Specify which type of test to parse, available' |
| 967 ' types listed with --annotate=list.') | 977 ' types listed with --annotate=list.') |
| 978 option_parser.add_option('', '--results-url', default='', |
| 979 help='The URL of the perf dashboard to upload ' |
| 980 'results to.') |
| 968 chromium_utils.AddPropertiesOptions(option_parser) | 981 chromium_utils.AddPropertiesOptions(option_parser) |
| 969 options, args = option_parser.parse_args() | 982 options, args = option_parser.parse_args() |
| 970 | 983 |
| 971 options.test_type = options.test_type or options.factory_properties.get( | 984 options.test_type = options.test_type or options.factory_properties.get( |
| 972 'step_name') | 985 'step_name') |
| 973 | 986 |
| 974 if options.run_shell_script and options.run_python_script: | 987 if options.run_shell_script and options.run_python_script: |
| 975 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' | 988 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' |
| 976 'not both.') | 989 'not both.') |
| 977 return 1 | 990 return 1 |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1038 '%d new files were left in %s: Fix the tests to clean up themselves.' | 1051 '%d new files were left in %s: Fix the tests to clean up themselves.' |
| 1039 ) % ((new_temp_files - temp_files), tempfile.gettempdir()) | 1052 ) % ((new_temp_files - temp_files), tempfile.gettempdir()) |
| 1040 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all | 1053 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all |
| 1041 # the remaining cases before. | 1054 # the remaining cases before. |
| 1042 #result = 1 | 1055 #result = 1 |
| 1043 return result | 1056 return result |
| 1044 | 1057 |
| 1045 | 1058 |
| 1046 if '__main__' == __name__: | 1059 if '__main__' == __name__: |
| 1047 sys.exit(main()) | 1060 sys.exit(main()) |
| OLD | NEW |