Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(582)

Side by Side Diff: scripts/slave/runtest.py

Issue 12317053: Sends test results to new perf dashboard (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Ready for review Created 7 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """A tool to run a chrome test executable, used by the buildbot slaves. 6 """A tool to run a chrome test executable, used by the buildbot slaves.
7 7
8 When this is run, the current directory (cwd) should be the outer build 8 When this is run, the current directory (cwd) should be the outer build
9 directory (e.g., chrome-release/build/). 9 directory (e.g., chrome-release/build/).
10 10
(...skipping 24 matching lines...) Expand all
35 # Because of this dependency on a chromium checkout, we need to disable some 35 # Because of this dependency on a chromium checkout, we need to disable some
36 # pylint checks. 36 # pylint checks.
37 # pylint: disable=E0611 37 # pylint: disable=E0611
38 # pylint: disable=E1101 38 # pylint: disable=E1101
39 from common import chromium_utils 39 from common import chromium_utils
40 from common import gtest_utils 40 from common import gtest_utils
41 import config 41 import config
42 from slave import crash_utils 42 from slave import crash_utils
43 from slave import gtest_slave_utils 43 from slave import gtest_slave_utils
44 from slave import process_log_utils 44 from slave import process_log_utils
45 from slave import results_dashboard
45 from slave import slave_utils 46 from slave import slave_utils
46 from slave import xvfb 47 from slave import xvfb
47 from slave.gtest.json_results_generator import GetSvnRevision 48 from slave.gtest.json_results_generator import GetSvnRevision
48 49
49 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) 50 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0])
50 51
51 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' 52 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
52 53
53 DEST_DIR = 'gtest_results' 54 DEST_DIR = 'gtest_results'
54 55
(...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after
339 build_property=options.build_properties, 340 build_property=options.build_properties,
340 factory_properties=options.factory_properties, 341 factory_properties=options.factory_properties,
341 webkit_revision=webkit_revision) 342 webkit_revision=webkit_revision)
342 343
343 if options.annotate and options.generate_json_file: 344 if options.annotate and options.generate_json_file:
344 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) 345 tracker_obj.ProcessLine(_GetMasterString(_GetMaster()))
345 346
346 return tracker_obj 347 return tracker_obj
347 348
348 349
def send_results_to_dashboard(results_tracker, system, test, url, build_dir):
  """Uploads all performance logs held by a results tracker to the dashboard.

  Args:
    results_tracker: Log processor whose PerformanceLogs() returns a dict
        mapping log name to an iterable of result lines.
    system: Identifier of the perf system/builder (the 'perf_id' factory
        property at the call sites in this file).
    test: Test (step) name the results belong to.
    url: Base URL of the perf dashboard to send results to.
    build_dir: Path to the build directory; passed through to the sender
        (presumably used to look up revision info — verify in
        results_dashboard).
  """
  performance_logs = results_tracker.PerformanceLogs()
  for log_name, log_lines in performance_logs.iteritems():
    # Normalize each entry to a string with trailing whitespace removed
    # before handing the whole log off to the dashboard sender.
    cleaned_lines = [str(entry).rstrip() for entry in log_lines]
    results_dashboard.SendResults(
        log_name, cleaned_lines, system, test, url, build_dir)
355
349 def annotate(test_name, result, results_tracker, full_name=False, 356 def annotate(test_name, result, results_tracker, full_name=False,
350 perf_dashboard_id=None): 357 perf_dashboard_id=None):
351 """Given a test result and tracker, update the waterfall with test results.""" 358 """Given a test result and tracker, update the waterfall with test results."""
352 get_text_result = process_log_utils.SUCCESS 359 get_text_result = process_log_utils.SUCCESS
353 360
354 for failure in sorted(results_tracker.FailedTests()): 361 for failure in sorted(results_tracker.FailedTests()):
355 if full_name: 362 if full_name:
356 testabbr = re.sub(r'[^\w\.\-]', '_', failure) 363 testabbr = re.sub(r'[^\w\.\-]', '_', failure)
357 else: 364 else:
358 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1]) 365 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1])
(...skipping 225 matching lines...) Expand 10 before | Expand all | Expand 10 after
584 591
585 if options.generate_json_file: 592 if options.generate_json_file:
586 _GenerateJSONForTestResults(options, results_tracker) 593 _GenerateJSONForTestResults(options, results_tracker)
587 594
588 if options.annotate: 595 if options.annotate:
589 annotate(options.test_type, result, results_tracker, 596 annotate(options.test_type, result, results_tracker,
590 options.factory_properties.get('full_test_name'), 597 options.factory_properties.get('full_test_name'),
591 perf_dashboard_id=options.factory_properties.get( 598 perf_dashboard_id=options.factory_properties.get(
592 'test_name')) 599 'test_name'))
593 600
601 if options.results_url:
602 send_results_to_dashboard(
603 results_tracker, options.factory_properties.get('perf_id'),
604 options.test_type, options.results_url, options.build_dir)
605
594 return result 606 return result
595 607
596 608
597 def main_ios(options, args): 609 def main_ios(options, args):
598 if len(args) < 1: 610 if len(args) < 1:
599 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) 611 raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
600 612
601 def kill_simulator(): 613 def kill_simulator():
602 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator']) 614 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator'])
603 615
(...skipping 226 matching lines...) Expand 10 before | Expand all | Expand 10 after
830 842
831 if options.generate_json_file: 843 if options.generate_json_file:
832 _GenerateJSONForTestResults(options, results_tracker) 844 _GenerateJSONForTestResults(options, results_tracker)
833 845
834 if options.annotate: 846 if options.annotate:
835 annotate(options.test_type, result, results_tracker, 847 annotate(options.test_type, result, results_tracker,
836 options.factory_properties.get('full_test_name'), 848 options.factory_properties.get('full_test_name'),
837 perf_dashboard_id=options.factory_properties.get( 849 perf_dashboard_id=options.factory_properties.get(
838 'test_name')) 850 'test_name'))
839 851
852 if options.results_url:
853 send_results_to_dashboard(
854 results_tracker, options.factory_properties.get('perf_id'),
855 options.test_type, options.results_url, options.build_dir)
856
840 return result 857 return result
841 858
842 859
843 def main_win(options, args): 860 def main_win(options, args):
844 """Using the target build configuration, run the executable given in the 861 """Using the target build configuration, run the executable given in the
845 first non-option argument, passing any following arguments to that 862 first non-option argument, passing any following arguments to that
846 executable. 863 executable.
847 """ 864 """
848 if len(args) < 1: 865 if len(args) < 1:
849 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) 866 raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
904 921
905 if options.generate_json_file: 922 if options.generate_json_file:
906 _GenerateJSONForTestResults(options, results_tracker) 923 _GenerateJSONForTestResults(options, results_tracker)
907 924
908 if options.annotate: 925 if options.annotate:
909 annotate(options.test_type, result, results_tracker, 926 annotate(options.test_type, result, results_tracker,
910 options.factory_properties.get('full_test_name'), 927 options.factory_properties.get('full_test_name'),
911 perf_dashboard_id=options.factory_properties.get( 928 perf_dashboard_id=options.factory_properties.get(
912 'test_name')) 929 'test_name'))
913 930
931 if options.results_url:
932 send_results_to_dashboard(
933 results_tracker, options.factory_properties.get('perf_id'),
934 options.test_type, options.results_url, options.build_dir)
935
914 return result 936 return result
915 937
916 938
917 def main(): 939 def main():
918 import platform 940 import platform
919 941
920 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', 942 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..',
921 'third_party', 'xvfb', platform.architecture()[0]) 943 'third_party', 'xvfb', platform.architecture()[0])
922 944
923 # Initialize logging. 945 # Initialize logging.
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after
1013 'Specify which type of test to parse, available' 1035 'Specify which type of test to parse, available'
1014 ' types listed with --annotate=list.') 1036 ' types listed with --annotate=list.')
1015 option_parser.add_option('', '--parse-input', default='', 1037 option_parser.add_option('', '--parse-input', default='',
1016 help='When combined with --annotate, reads test ' 1038 help='When combined with --annotate, reads test '
1017 'from a file instead of executing a test ' 1039 'from a file instead of executing a test '
1018 'binary. Use - for stdin.') 1040 'binary. Use - for stdin.')
1019 option_parser.add_option('', '--parse-result', default=0, 1041 option_parser.add_option('', '--parse-result', default=0,
1020 help='Sets the return value of the simulated ' 1042 help='Sets the return value of the simulated '
1021 'executable under test. Only has meaning when ' 1043 'executable under test. Only has meaning when '
1022 '--parse-input is used.') 1044 '--parse-input is used.')
1045 option_parser.add_option('', '--results-url', default='',
1046 help='The URI of the perf dashboard to upload '
1047 'results to.')
1023 chromium_utils.AddPropertiesOptions(option_parser) 1048 chromium_utils.AddPropertiesOptions(option_parser)
1024 options, args = option_parser.parse_args() 1049 options, args = option_parser.parse_args()
1025 1050
1026 options.test_type = options.test_type or options.factory_properties.get( 1051 options.test_type = options.test_type or options.factory_properties.get(
1027 'step_name') 1052 'step_name')
1028 1053
1029 if options.run_shell_script and options.run_python_script: 1054 if options.run_shell_script and options.run_python_script:
1030 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' 1055 sys.stderr.write('Use either --run-shell-script OR --run-python-script, '
1031 'not both.') 1056 'not both.')
1032 return 1 1057 return 1
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
1095 '%d new files were left in %s: Fix the tests to clean up themselves.' 1120 '%d new files were left in %s: Fix the tests to clean up themselves.'
1096 ) % ((new_temp_files - temp_files), tempfile.gettempdir()) 1121 ) % ((new_temp_files - temp_files), tempfile.gettempdir())
1097 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all 1122 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all
1098 # the remaining cases before. 1123 # the remaining cases before.
1099 #result = 1 1124 #result = 1
1100 return result 1125 return result
1101 1126
1102 1127
1103 if '__main__' == __name__: 1128 if '__main__' == __name__:
1104 sys.exit(main()) 1129 sys.exit(main())
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698