OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """A tool to run a chrome test executable, used by the buildbot slaves. | 6 """A tool to run a chrome test executable, used by the buildbot slaves. |
7 | 7 |
8 When this is run, the current directory (cwd) should be the outer build | 8 When this is run, the current directory (cwd) should be the outer build |
9 directory (e.g., chrome-release/build/). | 9 directory (e.g., chrome-release/build/). |
10 | 10 |
(...skipping 24 matching lines...) Expand all Loading... |
35 # Because of this dependency on a chromium checkout, we need to disable some | 35 # Because of this dependency on a chromium checkout, we need to disable some |
36 # pylint checks. | 36 # pylint checks. |
37 # pylint: disable=E0611 | 37 # pylint: disable=E0611 |
38 # pylint: disable=E1101 | 38 # pylint: disable=E1101 |
39 from common import chromium_utils | 39 from common import chromium_utils |
40 from common import gtest_utils | 40 from common import gtest_utils |
41 import config | 41 import config |
42 from slave import crash_utils | 42 from slave import crash_utils |
43 from slave import gtest_slave_utils | 43 from slave import gtest_slave_utils |
44 from slave import process_log_utils | 44 from slave import process_log_utils |
| 45 from slave import results_dashboard |
45 from slave import slave_utils | 46 from slave import slave_utils |
46 from slave import xvfb | 47 from slave import xvfb |
47 from slave.gtest.json_results_generator import GetSvnRevision | 48 from slave.gtest.json_results_generator import GetSvnRevision |
48 | 49 |
49 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) | 50 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) |
50 | 51 |
51 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' | 52 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' |
52 | 53 |
53 DEST_DIR = 'gtest_results' | 54 DEST_DIR = 'gtest_results' |
54 | 55 |
(...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
339 build_property=options.build_properties, | 340 build_property=options.build_properties, |
340 factory_properties=options.factory_properties, | 341 factory_properties=options.factory_properties, |
341 webkit_revision=webkit_revision) | 342 webkit_revision=webkit_revision) |
342 | 343 |
343 if options.annotate and options.generate_json_file: | 344 if options.annotate and options.generate_json_file: |
344 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) | 345 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) |
345 | 346 |
346 return tracker_obj | 347 return tracker_obj |
347 | 348 |
348 | 349 |
def send_results_to_dashboard(
    results_tracker, master, system, test, url, stdio_url):
  """Uploads every performance log held by the tracker to the perf dashboard.

  Args:
    results_tracker: Log tracker exposing PerformanceLogs(), a mapping from
        log name to an iterable of log lines.
    master: Buildbot master name, forwarded to the dashboard.
    system: Perf system/bot identifier (the factory 'perf_id').
    test: Test (step) name these results belong to.
    url: Results dashboard URL to post to.
    stdio_url: Link to the build step's stdio log, or None.
  """
  performance_logs = results_tracker.PerformanceLogs()
  for logname, log in performance_logs.iteritems():
    stripped_lines = []
    for line in log:
      stripped_lines.append(str(line).rstrip())
    results_dashboard.SendResults(
        logname, stripped_lines, master, system, test, url, stdio_url)
| 357 |
349 def annotate(test_name, result, results_tracker, full_name=False, | 358 def annotate(test_name, result, results_tracker, full_name=False, |
350 perf_dashboard_id=None): | 359 perf_dashboard_id=None): |
351 """Given a test result and tracker, update the waterfall with test results.""" | 360 """Given a test result and tracker, update the waterfall with test results.""" |
352 get_text_result = process_log_utils.SUCCESS | 361 get_text_result = process_log_utils.SUCCESS |
353 | 362 |
354 for failure in sorted(results_tracker.FailedTests()): | 363 for failure in sorted(results_tracker.FailedTests()): |
355 if full_name: | 364 if full_name: |
356 testabbr = re.sub(r'[^\w\.\-]', '_', failure) | 365 testabbr = re.sub(r'[^\w\.\-]', '_', failure) |
357 else: | 366 else: |
358 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1]) | 367 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1]) |
(...skipping 225 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
584 | 593 |
585 if options.generate_json_file: | 594 if options.generate_json_file: |
586 _GenerateJSONForTestResults(options, results_tracker) | 595 _GenerateJSONForTestResults(options, results_tracker) |
587 | 596 |
588 if options.annotate: | 597 if options.annotate: |
589 annotate(options.test_type, result, results_tracker, | 598 annotate(options.test_type, result, results_tracker, |
590 options.factory_properties.get('full_test_name'), | 599 options.factory_properties.get('full_test_name'), |
591 perf_dashboard_id=options.factory_properties.get( | 600 perf_dashboard_id=options.factory_properties.get( |
592 'test_name')) | 601 'test_name')) |
593 | 602 |
| 603 if options.results_url: |
| 604 send_results_to_dashboard( |
| 605 results_tracker, options.factory_properties.get('master'), |
| 606 options.factory_properties.get('perf_id'), options.test_type, |
| 607 options.results_url, options.factory_properties.get('stdio_url', None)) |
| 608 |
594 return result | 609 return result |
595 | 610 |
596 | 611 |
597 def main_ios(options, args): | 612 def main_ios(options, args): |
598 if len(args) < 1: | 613 if len(args) < 1: |
599 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) | 614 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) |
600 | 615 |
601 def kill_simulator(): | 616 def kill_simulator(): |
602 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator']) | 617 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator']) |
603 | 618 |
(...skipping 226 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
830 | 845 |
831 if options.generate_json_file: | 846 if options.generate_json_file: |
832 _GenerateJSONForTestResults(options, results_tracker) | 847 _GenerateJSONForTestResults(options, results_tracker) |
833 | 848 |
834 if options.annotate: | 849 if options.annotate: |
835 annotate(options.test_type, result, results_tracker, | 850 annotate(options.test_type, result, results_tracker, |
836 options.factory_properties.get('full_test_name'), | 851 options.factory_properties.get('full_test_name'), |
837 perf_dashboard_id=options.factory_properties.get( | 852 perf_dashboard_id=options.factory_properties.get( |
838 'test_name')) | 853 'test_name')) |
839 | 854 |
| 855 if options.results_url: |
| 856 send_results_to_dashboard( |
| 857 results_tracker, options.factory_properties.get('master_name'), |
| 858 options.factory_properties.get('perf_id'), options.test_type, |
| 859 options.results_url, options.factory_properties.get('stdio_url', None)) |
| 860 |
840 return result | 861 return result |
841 | 862 |
842 | 863 |
843 def main_win(options, args): | 864 def main_win(options, args): |
844 """Using the target build configuration, run the executable given in the | 865 """Using the target build configuration, run the executable given in the |
845 first non-option argument, passing any following arguments to that | 866 first non-option argument, passing any following arguments to that |
846 executable. | 867 executable. |
847 """ | 868 """ |
848 if len(args) < 1: | 869 if len(args) < 1: |
849 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) | 870 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
904 | 925 |
905 if options.generate_json_file: | 926 if options.generate_json_file: |
906 _GenerateJSONForTestResults(options, results_tracker) | 927 _GenerateJSONForTestResults(options, results_tracker) |
907 | 928 |
908 if options.annotate: | 929 if options.annotate: |
909 annotate(options.test_type, result, results_tracker, | 930 annotate(options.test_type, result, results_tracker, |
910 options.factory_properties.get('full_test_name'), | 931 options.factory_properties.get('full_test_name'), |
911 perf_dashboard_id=options.factory_properties.get( | 932 perf_dashboard_id=options.factory_properties.get( |
912 'test_name')) | 933 'test_name')) |
913 | 934 |
| 935 if options.results_url: |
| 936 send_results_to_dashboard( |
| 937 results_tracker, options.factory_properties.get('master'), |
| 938 options.factory_properties.get('perf_id'), options.test_type, |
| 939 options.results_url, options.factory_properties.get('stdio_url', None)) |
| 940 |
914 return result | 941 return result |
915 | 942 |
916 | 943 |
917 def main(): | 944 def main(): |
918 import platform | 945 import platform |
919 | 946 |
920 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', | 947 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', |
921 'third_party', 'xvfb', platform.architecture()[0]) | 948 'third_party', 'xvfb', platform.architecture()[0]) |
922 | 949 |
923 # Initialize logging. | 950 # Initialize logging. |
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1013 'Specify which type of test to parse, available' | 1040 'Specify which type of test to parse, available' |
1014 ' types listed with --annotate=list.') | 1041 ' types listed with --annotate=list.') |
1015 option_parser.add_option('', '--parse-input', default='', | 1042 option_parser.add_option('', '--parse-input', default='', |
1016 help='When combined with --annotate, reads test ' | 1043 help='When combined with --annotate, reads test ' |
1017 'from a file instead of executing a test ' | 1044 'from a file instead of executing a test ' |
1018 'binary. Use - for stdin.') | 1045 'binary. Use - for stdin.') |
1019 option_parser.add_option('', '--parse-result', default=0, | 1046 option_parser.add_option('', '--parse-result', default=0, |
1020 help='Sets the return value of the simulated ' | 1047 help='Sets the return value of the simulated ' |
1021 'executable under test. Only has meaning when ' | 1048 'executable under test. Only has meaning when ' |
1022 '--parse-input is used.') | 1049 '--parse-input is used.') |
| 1050 option_parser.add_option('', '--results-url', default='', |
| 1051 help='The URI of the perf dashboard to upload ' |
| 1052 'results to.') |
1023 chromium_utils.AddPropertiesOptions(option_parser) | 1053 chromium_utils.AddPropertiesOptions(option_parser) |
1024 options, args = option_parser.parse_args() | 1054 options, args = option_parser.parse_args() |
1025 | 1055 |
1026 options.test_type = options.test_type or options.factory_properties.get( | 1056 options.test_type = options.test_type or options.factory_properties.get( |
1027 'step_name') | 1057 'step_name') |
1028 | 1058 |
1029 if options.run_shell_script and options.run_python_script: | 1059 if options.run_shell_script and options.run_python_script: |
1030 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' | 1060 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' |
1031 'not both.') | 1061 'not both.') |
1032 return 1 | 1062 return 1 |
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1095 '%d new files were left in %s: Fix the tests to clean up themselves.' | 1125 '%d new files were left in %s: Fix the tests to clean up themselves.' |
1096 ) % ((new_temp_files - temp_files), tempfile.gettempdir()) | 1126 ) % ((new_temp_files - temp_files), tempfile.gettempdir()) |
1097 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all | 1127 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all |
1098 # the remaining cases before. | 1128 # the remaining cases before. |
1099 #result = 1 | 1129 #result = 1 |
1100 return result | 1130 return result |
1101 | 1131 |
1102 | 1132 |
1103 if '__main__' == __name__: | 1133 if '__main__' == __name__: |
1104 sys.exit(main()) | 1134 sys.exit(main()) |
OLD | NEW |