| OLD | NEW |
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """A tool to run a chrome test executable, used by the buildbot slaves. | 6 """A tool to run a chrome test executable, used by the buildbot slaves. |
| 7 | 7 |
| 8 When this is run, the current directory (cwd) should be the outer build | 8 When this is run, the current directory (cwd) should be the outer build |
| 9 directory (e.g., chrome-release/build/). | 9 directory (e.g., chrome-release/build/). |
| 10 | 10 |
| (...skipping 24 matching lines...) Expand all Loading... |
| 35 # Because of this dependency on a chromium checkout, we need to disable some | 35 # Because of this dependency on a chromium checkout, we need to disable some |
| 36 # pylint checks. | 36 # pylint checks. |
| 37 # pylint: disable=E0611 | 37 # pylint: disable=E0611 |
| 38 # pylint: disable=E1101 | 38 # pylint: disable=E1101 |
| 39 from common import chromium_utils | 39 from common import chromium_utils |
| 40 from common import gtest_utils | 40 from common import gtest_utils |
| 41 import config | 41 import config |
| 42 from slave import crash_utils | 42 from slave import crash_utils |
| 43 from slave import gtest_slave_utils | 43 from slave import gtest_slave_utils |
| 44 from slave import process_log_utils | 44 from slave import process_log_utils |
| 45 from slave import results_dashboard |
| 45 from slave import slave_utils | 46 from slave import slave_utils |
| 46 from slave import xvfb | 47 from slave import xvfb |
| 47 from slave.gtest.json_results_generator import GetSvnRevision | 48 from slave.gtest.json_results_generator import GetSvnRevision |
| 48 | 49 |
| 49 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) | 50 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) |
| 50 | 51 |
| 51 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' | 52 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' |
| 52 | 53 |
| 53 DEST_DIR = 'gtest_results' | 54 DEST_DIR = 'gtest_results' |
| 54 | 55 |
| (...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 339 build_property=options.build_properties, | 340 build_property=options.build_properties, |
| 340 factory_properties=options.factory_properties, | 341 factory_properties=options.factory_properties, |
| 341 webkit_revision=webkit_revision) | 342 webkit_revision=webkit_revision) |
| 342 | 343 |
| 343 if options.annotate and options.generate_json_file: | 344 if options.annotate and options.generate_json_file: |
| 344 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) | 345 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) |
| 345 | 346 |
| 346 return tracker_obj | 347 return tracker_obj |
| 347 | 348 |
| 348 | 349 |
def send_results_to_dashboard(results_tracker, system, test, url, build_dir):
  """Sends each performance log collected by the tracker to the perf dashboard.

  Args:
    results_tracker: Log-processor object exposing PerformanceLogs(), a
        dict-like mapping of log name to an iterable of log lines
        (project type — assumed interface, confirm against process_log_utils).
    system: Perf-dashboard system/bot identifier (the 'perf_id' factory
        property at the call sites in this file).
    test: Test type/name used to label the results (options.test_type).
    url: URI of the perf dashboard to upload results to (--results-url).
    build_dir: Build directory, forwarded to results_dashboard.SendResults.
  """
  for logname, log in results_tracker.PerformanceLogs().iteritems():
    # Normalize each entry to a string with trailing whitespace stripped
    # before upload.
    lines = [str(l).rstrip() for l in log]
    results_dashboard.SendResults(logname, lines, system, test, url, build_dir)
| 354 |
| 355 |
| 349 def annotate(test_name, result, results_tracker, full_name=False, | 356 def annotate(test_name, result, results_tracker, full_name=False, |
| 350 perf_dashboard_id=None): | 357 perf_dashboard_id=None): |
| 351 """Given a test result and tracker, update the waterfall with test results.""" | 358 """Given a test result and tracker, update the waterfall with test results.""" |
| 352 get_text_result = process_log_utils.SUCCESS | 359 get_text_result = process_log_utils.SUCCESS |
| 353 | 360 |
| 354 for failure in sorted(results_tracker.FailedTests()): | 361 for failure in sorted(results_tracker.FailedTests()): |
| 355 if full_name: | 362 if full_name: |
| 356 testabbr = re.sub(r'[^\w\.\-]', '_', failure) | 363 testabbr = re.sub(r'[^\w\.\-]', '_', failure) |
| 357 else: | 364 else: |
| 358 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1]) | 365 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1]) |
| (...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 576 | 583 |
| 577 if options.generate_json_file: | 584 if options.generate_json_file: |
| 578 _GenerateJSONForTestResults(options, results_tracker) | 585 _GenerateJSONForTestResults(options, results_tracker) |
| 579 | 586 |
| 580 if options.annotate: | 587 if options.annotate: |
| 581 annotate(options.test_type, result, results_tracker, | 588 annotate(options.test_type, result, results_tracker, |
| 582 options.factory_properties.get('full_test_name'), | 589 options.factory_properties.get('full_test_name'), |
| 583 perf_dashboard_id=options.factory_properties.get( | 590 perf_dashboard_id=options.factory_properties.get( |
| 584 'test_name')) | 591 'test_name')) |
| 585 | 592 |
| 593 if options.results_url: |
| 594 send_results_to_dashboard( |
| 595 results_tracker, options.factory_properties.get('perf_id'), |
| 596 options.test_type, options.results_url, options.build_dir) |
| 597 |
| 586 return result | 598 return result |
| 587 | 599 |
| 588 | 600 |
| 589 def main_ios(options, args): | 601 def main_ios(options, args): |
| 590 if len(args) < 1: | 602 if len(args) < 1: |
| 591 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) | 603 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) |
| 592 | 604 |
| 593 def kill_simulator(): | 605 def kill_simulator(): |
| 594 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator']) | 606 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator']) |
| 595 | 607 |
| (...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 806 | 818 |
| 807 if options.generate_json_file: | 819 if options.generate_json_file: |
| 808 _GenerateJSONForTestResults(options, results_tracker) | 820 _GenerateJSONForTestResults(options, results_tracker) |
| 809 | 821 |
| 810 if options.annotate: | 822 if options.annotate: |
| 811 annotate(options.test_type, result, results_tracker, | 823 annotate(options.test_type, result, results_tracker, |
| 812 options.factory_properties.get('full_test_name'), | 824 options.factory_properties.get('full_test_name'), |
| 813 perf_dashboard_id=options.factory_properties.get( | 825 perf_dashboard_id=options.factory_properties.get( |
| 814 'test_name')) | 826 'test_name')) |
| 815 | 827 |
| 828 if options.results_url: |
| 829 send_results_to_dashboard( |
| 830 results_tracker, options.factory_properties.get('perf_id'), |
| 831 options.test_type, options.results_url, options.build_dir) |
| 832 |
| 816 return result | 833 return result |
| 817 | 834 |
| 818 | 835 |
| 819 def main_win(options, args): | 836 def main_win(options, args): |
| 820 """Using the target build configuration, run the executable given in the | 837 """Using the target build configuration, run the executable given in the |
| 821 first non-option argument, passing any following arguments to that | 838 first non-option argument, passing any following arguments to that |
| 822 executable. | 839 executable. |
| 823 """ | 840 """ |
| 824 if len(args) < 1: | 841 if len(args) < 1: |
| 825 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) | 842 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 880 | 897 |
| 881 if options.generate_json_file: | 898 if options.generate_json_file: |
| 882 _GenerateJSONForTestResults(options, results_tracker) | 899 _GenerateJSONForTestResults(options, results_tracker) |
| 883 | 900 |
| 884 if options.annotate: | 901 if options.annotate: |
| 885 annotate(options.test_type, result, results_tracker, | 902 annotate(options.test_type, result, results_tracker, |
| 886 options.factory_properties.get('full_test_name'), | 903 options.factory_properties.get('full_test_name'), |
| 887 perf_dashboard_id=options.factory_properties.get( | 904 perf_dashboard_id=options.factory_properties.get( |
| 888 'test_name')) | 905 'test_name')) |
| 889 | 906 |
| 907 if options.results_url: |
| 908 send_results_to_dashboard( |
| 909 results_tracker, options.factory_properties.get('perf_id'), |
| 910 options.test_type, options.results_url, options.build_dir) |
| 911 |
| 890 return result | 912 return result |
| 891 | 913 |
| 892 | 914 |
| 893 def main(): | 915 def main(): |
| 894 import platform | 916 import platform |
| 895 | 917 |
| 896 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', | 918 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', |
| 897 'third_party', 'xvfb', platform.architecture()[0]) | 919 'third_party', 'xvfb', platform.architecture()[0]) |
| 898 | 920 |
| 899 # Initialize logging. | 921 # Initialize logging. |
| (...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 989 'Specify which type of test to parse, available' | 1011 'Specify which type of test to parse, available' |
| 990 ' types listed with --annotate=list.') | 1012 ' types listed with --annotate=list.') |
| 991 option_parser.add_option('', '--parse-input', default='', | 1013 option_parser.add_option('', '--parse-input', default='', |
| 992 help='When combined with --annotate, reads test ' | 1014 help='When combined with --annotate, reads test ' |
| 993 'from a file instead of executing a test ' | 1015 'from a file instead of executing a test ' |
| 994 'binary. Use - for stdin.') | 1016 'binary. Use - for stdin.') |
| 995 option_parser.add_option('', '--parse-result', default=0, | 1017 option_parser.add_option('', '--parse-result', default=0, |
| 996 help='Sets the return value of the simulated ' | 1018 help='Sets the return value of the simulated ' |
| 997 'executable under test. Only has meaning when ' | 1019 'executable under test. Only has meaning when ' |
| 998 '--parse-input is used.') | 1020 '--parse-input is used.') |
| 1021 option_parser.add_option('', '--results-url', default='', |
| 1022 help='The URI of the perf dashboard to upload ' |
| 1023 'results to.') |
| 999 chromium_utils.AddPropertiesOptions(option_parser) | 1024 chromium_utils.AddPropertiesOptions(option_parser) |
| 1000 options, args = option_parser.parse_args() | 1025 options, args = option_parser.parse_args() |
| 1001 | 1026 |
| 1002 options.test_type = options.test_type or options.factory_properties.get( | 1027 options.test_type = options.test_type or options.factory_properties.get( |
| 1003 'step_name') | 1028 'step_name') |
| 1004 | 1029 |
| 1005 if options.run_shell_script and options.run_python_script: | 1030 if options.run_shell_script and options.run_python_script: |
| 1006 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' | 1031 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' |
| 1007 'not both.') | 1032 'not both.') |
| 1008 return 1 | 1033 return 1 |
| (...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1071 '%d new files were left in %s: Fix the tests to clean up themselves.' | 1096 '%d new files were left in %s: Fix the tests to clean up themselves.' |
| 1072 ) % ((new_temp_files - temp_files), tempfile.gettempdir()) | 1097 ) % ((new_temp_files - temp_files), tempfile.gettempdir()) |
| 1073 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all | 1098 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all |
| 1074 # the remaining cases before. | 1099 # the remaining cases before. |
| 1075 #result = 1 | 1100 #result = 1 |
| 1076 return result | 1101 return result |
| 1077 | 1102 |
| 1078 | 1103 |
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
  sys.exit(main())
| OLD | NEW |