Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(617)

Side by Side Diff: scripts/slave/runtest.py

Issue 545803002: Update buildbots to parse new telemetry JSON format. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Addressed review comments Created 6 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « scripts/slave/results_dashboard.py ('k') | scripts/slave/telemetry.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """A tool used to run a Chrome test executable and process the output. 6 """A tool used to run a Chrome test executable and process the output.
7 7
8 This script is used by the buildbot slaves. It must be run from the outer 8 This script is used by the buildbot slaves. It must be run from the outer
9 build directory, e.g. chrome-release/build/. 9 build directory, e.g. chrome-release/build/.
10 10
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
47 # TODO(crbug.com/403564). We almost certainly shouldn't be importing this. 47 # TODO(crbug.com/403564). We almost certainly shouldn't be importing this.
48 import config 48 import config
49 49
50 from slave import annotation_utils 50 from slave import annotation_utils
51 from slave import build_directory 51 from slave import build_directory
52 from slave import crash_utils 52 from slave import crash_utils
53 from slave import gtest_slave_utils 53 from slave import gtest_slave_utils
54 from slave import performance_log_processor 54 from slave import performance_log_processor
55 from slave import results_dashboard 55 from slave import results_dashboard
56 from slave import slave_utils 56 from slave import slave_utils
57 from slave import telemetry_utils
57 from slave import xvfb 58 from slave import xvfb
58 59
59 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) 60 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0])
60 61
61 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' 62 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
62 63
63 # Directory to write JSON for test results into. 64 # Directory to write JSON for test results into.
64 DEST_DIR = 'gtest_results' 65 DEST_DIR = 'gtest_results'
65 66
66 # Names of httpd configuration file under different platforms. 67 # Names of httpd configuration file under different platforms.
(...skipping 474 matching lines...) Expand 10 before | Expand all | Expand 10 after
541 shouldlist = selection and selection == 'list' 542 shouldlist = selection and selection == 'list'
542 if shouldlist: 543 if shouldlist:
543 print 544 print
544 print 'Available log processors:' 545 print 'Available log processors:'
545 for p in LOG_PROCESSOR_CLASSES: 546 for p in LOG_PROCESSOR_CLASSES:
546 print ' ', p, LOG_PROCESSOR_CLASSES[p].__name__ 547 print ' ', p, LOG_PROCESSOR_CLASSES[p].__name__
547 548
548 return shouldlist 549 return shouldlist
549 550
550 551
551 def _SelectLogProcessor(options): 552 def _SelectLogProcessor(options, test_exe):
552 """Returns a log processor class based on the command line options. 553 """Returns a log processor class based on the command line options.
553 554
554 Args: 555 Args:
555 options: Command-line options (from OptionParser). 556 options: Command-line options (from OptionParser).
 557 test_exe: Name of the test to execute.
556 558
557 Returns: 559 Returns:
558 A log processor class, or None. 560 A log processor class, or None.
559 """ 561 """
560 if _UsingGtestJson(options): 562 if _UsingGtestJson(options):
561 return gtest_utils.GTestJSONParser 563 return gtest_utils.GTestJSONParser
562 564
565 if test_exe and test_exe.endswith('telemetry.py'):
566 return telemetry_utils.TelemetryResultsTracker
567
563 if options.annotate: 568 if options.annotate:
564 if options.annotate in LOG_PROCESSOR_CLASSES: 569 if options.annotate in LOG_PROCESSOR_CLASSES:
565 if options.generate_json_file and options.annotate != 'gtest': 570 if options.generate_json_file and options.annotate != 'gtest':
566 raise NotImplementedError('"%s" doesn\'t make sense with ' 571 raise NotImplementedError('"%s" doesn\'t make sense with '
567 'options.generate_json_file.') 572 'options.generate_json_file.')
568 else: 573 else:
569 return LOG_PROCESSOR_CLASSES[options.annotate] 574 return LOG_PROCESSOR_CLASSES[options.annotate]
570 else: 575 else:
571 raise KeyError('"%s" is not a valid GTest parser!' % options.annotate) 576 raise KeyError('"%s" is not a valid GTest parser!' % options.annotate)
572 elif options.generate_json_file: 577 elif options.generate_json_file:
573 return LOG_PROCESSOR_CLASSES['gtest'] 578 return LOG_PROCESSOR_CLASSES['gtest']
574 579
575 return None 580 return None
576 581
577 582
578 def _GetCommitPos(build_properties): 583 def _GetCommitPos(build_properties):
 579 """Extracts the commit position from the build properties, if it's there.""" 584 """Extracts the commit position from the build properties, if it's there."""
580 if 'got_revision_cp' not in build_properties: 585 if 'got_revision_cp' not in build_properties:
581 return None 586 return None
582 commit_pos = build_properties['got_revision_cp'] 587 commit_pos = build_properties['got_revision_cp']
583 return int(re.search(r'{#(\d+)}', commit_pos).group(1)) 588 return int(re.search(r'{#(\d+)}', commit_pos).group(1))
584 589
585 590
591 def _GetMainRevision(options):
592 build_dir = os.path.abspath(options.build_dir)
593 commit_pos_num = _GetCommitPos(options.build_properties)
594 if commit_pos_num is not None:
595 revision = commit_pos_num
596 elif options.revision:
597 revision = options.revision
598 else:
599 revision = _GetRevision(os.path.dirname(build_dir))
600 return revision
601
602
603 def _GetBlinkRevision(options):
604 build_dir = os.path.abspath(options.build_dir)
605
606 if options.webkit_revision:
607 webkit_revision = options.webkit_revision
608 else:
609 try:
610 webkit_dir = chromium_utils.FindUpward(
611 build_dir, 'third_party', 'WebKit', 'Source')
612 webkit_revision = _GetRevision(webkit_dir)
613 except Exception:
614 webkit_revision = None
615 return webkit_revision
616
617
618 def _GetTelemetryRevisions(options):
619 """Fills in the same revisions fields that process_log_utils does."""
620
621 versions = {}
622 versions['rev'] = _GetMainRevision(options)
623 versions['webkit_rev'] = _GetBlinkRevision(options)
624 versions['webrtc_rev'] = options.build_properties.get('got_webrtc_revision')
625 versions['v8_rev'] = options.build_properties.get('got_v8_revision')
626 versions['ver'] = options.build_properties.get('version')
627 versions['git_revision'] = options.build_properties.get('git_revision')
628 return versions
629
630
586 def _CreateLogProcessor(log_processor_class, options): 631 def _CreateLogProcessor(log_processor_class, options):
587 """Creates a log processor instance. 632 """Creates a log processor instance.
588 633
589 Args: 634 Args:
590 log_processor_class: A subclass of PerformanceLogProcessor or similar class. 635 log_processor_class: A subclass of PerformanceLogProcessor or similar class.
591 options: Command-line options (from OptionParser). 636 options: Command-line options (from OptionParser).
592 637
593 Returns: 638 Returns:
594 An instance of a log processor class, or None. 639 An instance of a log processor class, or None.
595 """ 640 """
596 if not log_processor_class: 641 if not log_processor_class:
597 return None 642 return None
598 643
599 if log_processor_class.__name__ in ('GTestLogParser',): 644 if log_processor_class.__name__ in ('GTestLogParser',
645 'TelemetryResultsTracker'):
600 tracker_obj = log_processor_class() 646 tracker_obj = log_processor_class()
601 elif log_processor_class.__name__ in ('GTestJSONParser',): 647 elif log_processor_class.__name__ in ('GTestJSONParser',):
602 tracker_obj = log_processor_class( 648 tracker_obj = log_processor_class(
603 options.build_properties.get('mastername')) 649 options.build_properties.get('mastername'))
604 else: 650 else:
605 build_dir = os.path.abspath(options.build_dir) 651 webkit_revision = _GetBlinkRevision(options) or 'undefined'
606 652 revision = _GetMainRevision(options) or 'undefined'
607 if options.webkit_revision:
608 webkit_revision = options.webkit_revision
609 else:
610 try:
611 webkit_dir = chromium_utils.FindUpward(
612 build_dir, 'third_party', 'WebKit', 'Source')
613 webkit_revision = _GetRevision(webkit_dir)
614 except Exception:
615 webkit_revision = 'undefined'
616
617 commit_pos_num = _GetCommitPos(options.build_properties)
618 if commit_pos_num is not None:
619 revision = commit_pos_num
620 elif options.revision:
621 revision = options.revision
622 else:
623 revision = _GetRevision(os.path.dirname(build_dir))
624 653
625 tracker_obj = log_processor_class( 654 tracker_obj = log_processor_class(
626 revision=revision, 655 revision=revision,
627 build_properties=options.build_properties, 656 build_properties=options.build_properties,
628 factory_properties=options.factory_properties, 657 factory_properties=options.factory_properties,
629 webkit_revision=webkit_revision) 658 webkit_revision=webkit_revision)
630 659
631 if options.annotate and options.generate_json_file: 660 if options.annotate and options.generate_json_file:
632 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) 661 tracker_obj.ProcessLine(_GetMasterString(_GetMaster()))
633 662
(...skipping 14 matching lines...) Expand all
648 supplemental_columns = {} 677 supplemental_columns = {}
649 supplemental_columns_file = os.path.join(build_dir, 678 supplemental_columns_file = os.path.join(build_dir,
650 results_dashboard.CACHE_DIR, 679 results_dashboard.CACHE_DIR,
651 supplemental_colummns_file_name) 680 supplemental_colummns_file_name)
652 if os.path.exists(supplemental_columns_file): 681 if os.path.exists(supplemental_columns_file):
653 with file(supplemental_columns_file, 'r') as f: 682 with file(supplemental_columns_file, 'r') as f:
654 supplemental_columns = json.loads(f.read()) 683 supplemental_columns = json.loads(f.read())
655 return supplemental_columns 684 return supplemental_columns
656 685
657 686
658 def _SendResultsToDashboard(log_processor, system, test, url, build_dir, 687 def _ResultsDashboardDict(options):
659 mastername, buildername, buildnumber, 688 """Generates a dict of info needed by the results dashboard.
660 supplemental_columns_file, extra_columns=None): 689
690 Args:
691 options: Program arguments.
692
693 Returns:
694 dict containing data the dashboard needs.
695 """
696 build_dir = os.path.abspath(options.build_dir)
697 supplemental_columns = _GetSupplementalColumns(
698 build_dir, options.supplemental_columns_file)
699 extra_columns = options.perf_config
700 if extra_columns:
701 supplemental_columns.update(extra_columns)
702 fields = {
703 'system': _GetPerfID(options),
704 'test': options.test_type,
705 'url': options.results_url,
706 'mastername': options.build_properties.get('mastername'),
707 'buildername': options.build_properties.get('buildername'),
708 'buildnumber': options.build_properties.get('buildnumber'),
709 'build_dir': build_dir,
710 'supplemental_columns': supplemental_columns,
711 'revisions': _GetTelemetryRevisions(options),
712 }
713 return fields
714
715
716 def _SendResultsToDashboard(log_processor, args):
661 """Sends results from a log processor instance to the dashboard. 717 """Sends results from a log processor instance to the dashboard.
662 718
663 Args: 719 Args:
664 log_processor: An instance of a log processor class, which has been used to 720 log_processor: An instance of a log processor class, which has been used to
665 process the test output, so it contains the test results. 721 process the test output, so it contains the test results.
666 system: A string such as 'linux-release', which comes from perf_id. 722 args: Dict of additional args to send to results_dashboard.
667 test: Test "suite" name string.
668 url: Dashboard URL.
669 build_dir: Build dir name (used for cache file by results_dashboard).
670 mastername: Buildbot master name, e.g. 'chromium.perf'.
671 WARNING! This is incorrectly called "masterid" in some parts of the
672 dashboard code.
673 buildername: Builder name, e.g. 'Linux QA Perf (1)'
674 buildnumber: Build number (as a string).
675 supplemental_columns_file: Filename for JSON supplemental columns file.
676 extra_columns: A dict of extra values to add to the supplemental columns
677 dict.
678 """ 723 """
679 if system is None: 724 if args['system'] is None:
680 # perf_id not specified in factory properties. 725 # perf_id not specified in factory properties.
681 print 'Error: No system name (perf_id) specified when sending to dashboard.' 726 print 'Error: No system name (perf_id) specified when sending to dashboard.'
682 return 727 return
683 supplemental_columns = _GetSupplementalColumns(
684 build_dir, supplemental_columns_file)
685 if extra_columns:
686 supplemental_columns.update(extra_columns)
687 728
688 charts = _GetDataFromLogProcessor(log_processor) 729 results = []
689 points = results_dashboard.MakeListOfPoints( 730 if log_processor.IsChartJson():
690 charts, system, test, mastername, buildername, buildnumber, 731 results = [results_dashboard.MakeDashboardJsonV1(
691 supplemental_columns) 732 log_processor.ChartJson(),
692 results_dashboard.SendResults(points, url, build_dir) 733 args['revisions'], args['system'], args['mastername'],
734 args['buildername'], args['buildnumber'],
735 args['supplemental_columns'], False)]
736 ref_json = log_processor.RefJson()
737 if ref_json:
738 results.append(results_dashboard.MakeDashboardJsonV1(
739 ref_json, args['revisions'], args['system'], args['mastername'],
740 args['buildername'], args['buildnumber'],
741 args['supplemental_columns'], True))
742 log_processor.Cleanup()
743 else:
744 charts = _GetDataFromLogProcessor(log_processor)
745 results = [results_dashboard.MakeListOfPoints(
746 charts, args['system'], args['test'], args['mastername'],
747 args['buildername'], args['buildnumber'], args['supplemental_columns'])]
748 for result in results:
749 results_dashboard.SendResults(result, args['url'], args['build_dir'])
693 750
694 751
695 def _GetDataFromLogProcessor(log_processor): 752 def _GetDataFromLogProcessor(log_processor):
696 """Returns a mapping of chart names to chart data. 753 """Returns a mapping of chart names to chart data.
697 754
698 Args: 755 Args:
699 log_processor: A log processor (aka results tracker) object. 756 log_processor: A log processor (aka results tracker) object.
700 757
701 Returns: 758 Returns:
702 A dictionary mapping chart name to lists of chart data. 759 A dictionary mapping chart name to lists of chart data.
(...skipping 247 matching lines...) Expand 10 before | Expand all | Expand 10 after
950 through the specified annotation parser (aka log processor). 1007 through the specified annotation parser (aka log processor).
951 """ 1008 """
952 if not options.annotate: 1009 if not options.annotate:
953 raise chromium_utils.MissingArgument('--parse-input doesn\'t make sense ' 1010 raise chromium_utils.MissingArgument('--parse-input doesn\'t make sense '
954 'without --annotate.') 1011 'without --annotate.')
955 1012
956 # If --annotate=list was passed, list the log processor classes and exit. 1013 # If --annotate=list was passed, list the log processor classes and exit.
957 if _ListLogProcessors(options.annotate): 1014 if _ListLogProcessors(options.annotate):
958 return 0 1015 return 0
959 1016
960 log_processor_class = _SelectLogProcessor(options) 1017 log_processor_class = _SelectLogProcessor(options, None)
961 log_processor = _CreateLogProcessor(log_processor_class, options) 1018 log_processor = _CreateLogProcessor(log_processor_class, options)
962 1019
963 if options.generate_json_file: 1020 if options.generate_json_file:
964 if os.path.exists(options.test_output_xml): 1021 if os.path.exists(options.test_output_xml):
965 # remove the old XML output file. 1022 # remove the old XML output file.
966 os.remove(options.test_output_xml) 1023 os.remove(options.test_output_xml)
967 1024
968 if options.parse_input == '-': 1025 if options.parse_input == '-':
969 f = sys.stdin 1026 f = sys.stdin
970 else: 1027 else:
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
1017 command = [sys.executable, test_exe] 1074 command = [sys.executable, test_exe]
1018 else: 1075 else:
1019 command = [test_exe_path] 1076 command = [test_exe_path]
1020 if options.annotate == 'gtest': 1077 if options.annotate == 'gtest':
1021 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode']) 1078 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode'])
1022 command.extend(args[1:]) 1079 command.extend(args[1:])
1023 1080
1024 # If --annotate=list was passed, list the log processor classes and exit. 1081 # If --annotate=list was passed, list the log processor classes and exit.
1025 if _ListLogProcessors(options.annotate): 1082 if _ListLogProcessors(options.annotate):
1026 return 0 1083 return 0
1027 log_processor_class = _SelectLogProcessor(options) 1084 log_processor_class = _SelectLogProcessor(options, test_exe)
1028 log_processor = _CreateLogProcessor(log_processor_class, options) 1085 log_processor = _CreateLogProcessor(log_processor_class, options)
1086 if hasattr(log_processor, 'IsChartJson') and log_processor.IsChartJson():
1087 command.extend(log_processor.GetArguments())
1029 1088
1030 if options.generate_json_file: 1089 if options.generate_json_file:
1031 if os.path.exists(options.test_output_xml): 1090 if os.path.exists(options.test_output_xml):
1032 # remove the old XML output file. 1091 # remove the old XML output file.
1033 os.remove(options.test_output_xml) 1092 os.remove(options.test_output_xml)
1034 1093
1035 try: 1094 try:
1036 http_server = None 1095 http_server = None
1037 if options.document_root: 1096 if options.document_root:
1038 http_server = _StartHttpServer('mac', build_dir=build_dir, 1097 http_server = _StartHttpServer('mac', build_dir=build_dir,
(...skipping 28 matching lines...) Expand all
1067 if not _GenerateJSONForTestResults(options, log_processor): 1126 if not _GenerateJSONForTestResults(options, log_processor):
1068 return 1 1127 return 1
1069 1128
1070 if options.annotate: 1129 if options.annotate:
1071 annotation_utils.annotate( 1130 annotation_utils.annotate(
1072 options.test_type, result, log_processor, 1131 options.test_type, result, log_processor,
1073 options.factory_properties.get('full_test_name'), 1132 options.factory_properties.get('full_test_name'),
1074 perf_dashboard_id=options.perf_dashboard_id) 1133 perf_dashboard_id=options.perf_dashboard_id)
1075 1134
1076 if options.results_url: 1135 if options.results_url:
1077 _SendResultsToDashboard( 1136 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options))
1078 log_processor, _GetPerfID(options),
1079 options.test_type, options.results_url, options.build_dir,
1080 options.build_properties.get('mastername'),
1081 options.build_properties.get('buildername'),
1082 options.build_properties.get('buildnumber'),
1083 options.supplemental_columns_file,
1084 options.perf_config)
1085 1137
1086 return result 1138 return result
1087 1139
1088 1140
1089 def _MainIOS(options, args, extra_env): 1141 def _MainIOS(options, args, extra_env):
1090 """Runs the test on iOS.""" 1142 """Runs the test on iOS."""
1091 if len(args) < 1: 1143 if len(args) < 1:
1092 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) 1144 raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
1093 1145
1094 def kill_simulator(): 1146 def kill_simulator():
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after
1270 command = [sys.executable, test_exe] 1322 command = [sys.executable, test_exe]
1271 else: 1323 else:
1272 command = [test_exe_path] 1324 command = [test_exe_path]
1273 if options.annotate == 'gtest': 1325 if options.annotate == 'gtest':
1274 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode']) 1326 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode'])
1275 command.extend(args[1:]) 1327 command.extend(args[1:])
1276 1328
1277 # If --annotate=list was passed, list the log processor classes and exit. 1329 # If --annotate=list was passed, list the log processor classes and exit.
1278 if _ListLogProcessors(options.annotate): 1330 if _ListLogProcessors(options.annotate):
1279 return 0 1331 return 0
1280 log_processor_class = _SelectLogProcessor(options) 1332 log_processor_class = _SelectLogProcessor(options, test_exe)
1281 log_processor = _CreateLogProcessor(log_processor_class, options) 1333 log_processor = _CreateLogProcessor(log_processor_class, options)
1334 if hasattr(log_processor, 'IsChartJson') and log_processor.IsChartJson():
1335 command.extend(log_processor.GetArguments())
1282 1336
1283 if options.generate_json_file: 1337 if options.generate_json_file:
1284 if os.path.exists(options.test_output_xml): 1338 if os.path.exists(options.test_output_xml):
1285 # remove the old XML output file. 1339 # remove the old XML output file.
1286 os.remove(options.test_output_xml) 1340 os.remove(options.test_output_xml)
1287 1341
1288 try: 1342 try:
1289 start_xvfb = False 1343 start_xvfb = False
1290 http_server = None 1344 http_server = None
1291 json_file_name = None 1345 json_file_name = None
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
1343 if not _GenerateJSONForTestResults(options, log_processor): 1397 if not _GenerateJSONForTestResults(options, log_processor):
1344 return 1 1398 return 1
1345 1399
1346 if options.annotate: 1400 if options.annotate:
1347 annotation_utils.annotate( 1401 annotation_utils.annotate(
1348 options.test_type, result, log_processor, 1402 options.test_type, result, log_processor,
1349 options.factory_properties.get('full_test_name'), 1403 options.factory_properties.get('full_test_name'),
1350 perf_dashboard_id=options.perf_dashboard_id) 1404 perf_dashboard_id=options.perf_dashboard_id)
1351 1405
1352 if options.results_url: 1406 if options.results_url:
1353 _SendResultsToDashboard( 1407 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options))
1354 log_processor, _GetPerfID(options),
1355 options.test_type, options.results_url, options.build_dir,
1356 options.build_properties.get('mastername'),
1357 options.build_properties.get('buildername'),
1358 options.build_properties.get('buildnumber'),
1359 options.supplemental_columns_file,
1360 options.perf_config)
1361 1408
1362 return result 1409 return result
1363 1410
1364 1411
1365 def _MainWin(options, args, extra_env): 1412 def _MainWin(options, args, extra_env):
1366 """Runs tests on windows. 1413 """Runs tests on windows.
1367 1414
1368 Using the target build configuration, run the executable given in the 1415 Using the target build configuration, run the executable given in the
1369 first non-option argument, passing any following arguments to that 1416 first non-option argument, passing any following arguments to that
1370 executable. 1417 executable.
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
1419 '--'] + command 1466 '--'] + command
1420 command.extend(args[1:]) 1467 command.extend(args[1:])
1421 1468
1422 # Nuke anything that appears to be stale chrome items in the temporary 1469 # Nuke anything that appears to be stale chrome items in the temporary
1423 # directory from previous test runs (i.e.- from crashes or unittest leaks). 1470 # directory from previous test runs (i.e.- from crashes or unittest leaks).
1424 slave_utils.RemoveChromeTemporaryFiles() 1471 slave_utils.RemoveChromeTemporaryFiles()
1425 1472
1426 # If --annotate=list was passed, list the log processor classes and exit. 1473 # If --annotate=list was passed, list the log processor classes and exit.
1427 if _ListLogProcessors(options.annotate): 1474 if _ListLogProcessors(options.annotate):
1428 return 0 1475 return 0
1429 log_processor_class = _SelectLogProcessor(options) 1476 log_processor_class = _SelectLogProcessor(options, test_exe)
1430 log_processor = _CreateLogProcessor(log_processor_class, options) 1477 log_processor = _CreateLogProcessor(log_processor_class, options)
1478 if hasattr(log_processor, 'IsChartJson') and log_processor.IsChartJson():
1479 command.extend(log_processor.GetArguments())
1431 1480
1432 if options.generate_json_file: 1481 if options.generate_json_file:
1433 if os.path.exists(options.test_output_xml): 1482 if os.path.exists(options.test_output_xml):
1434 # remove the old XML output file. 1483 # remove the old XML output file.
1435 os.remove(options.test_output_xml) 1484 os.remove(options.test_output_xml)
1436 1485
1437 try: 1486 try:
1438 http_server = None 1487 http_server = None
1439 if options.document_root: 1488 if options.document_root:
1440 http_server = _StartHttpServer('win', build_dir=build_dir, 1489 http_server = _StartHttpServer('win', build_dir=build_dir,
(...skipping 24 matching lines...) Expand all
1465 if not _GenerateJSONForTestResults(options, log_processor): 1514 if not _GenerateJSONForTestResults(options, log_processor):
1466 return 1 1515 return 1
1467 1516
1468 if options.annotate: 1517 if options.annotate:
1469 annotation_utils.annotate( 1518 annotation_utils.annotate(
1470 options.test_type, result, log_processor, 1519 options.test_type, result, log_processor,
1471 options.factory_properties.get('full_test_name'), 1520 options.factory_properties.get('full_test_name'),
1472 perf_dashboard_id=options.perf_dashboard_id) 1521 perf_dashboard_id=options.perf_dashboard_id)
1473 1522
1474 if options.results_url: 1523 if options.results_url:
1475 _SendResultsToDashboard( 1524 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options))
1476 log_processor, _GetPerfID(options),
1477 options.test_type, options.results_url, options.build_dir,
1478 options.build_properties.get('mastername'),
1479 options.build_properties.get('buildername'),
1480 options.build_properties.get('buildnumber'),
1481 options.supplemental_columns_file,
1482 options.perf_config)
1483 1525
1484 return result 1526 return result
1485 1527
1486 1528
1487 def _MainAndroid(options, args, extra_env): 1529 def _MainAndroid(options, args, extra_env):
1488 """Runs tests on android. 1530 """Runs tests on android.
1489 1531
1490 Running GTest-based tests on android is different than on Linux as it requires 1532 Running GTest-based tests on android is different than on Linux as it requires
1491 src/build/android/test_runner.py to deploy and communicate with the device. 1533 src/build/android/test_runner.py to deploy and communicate with the device.
1492 Python scripts are the same as with Linux. 1534 Python scripts are the same as with Linux.
1493 1535
1494 Args: 1536 Args:
1495 options: Command-line options for this invocation of runtest.py. 1537 options: Command-line options for this invocation of runtest.py.
1496 args: Command and arguments for the test. 1538 args: Command and arguments for the test.
1497 extra_env: A dictionary of extra environment variables to set. 1539 extra_env: A dictionary of extra environment variables to set.
1498 1540
1499 Returns: 1541 Returns:
1500 Exit status code. 1542 Exit status code.
1501 """ 1543 """
1502 if options.run_python_script: 1544 if options.run_python_script:
1503 return _MainLinux(options, args, extra_env) 1545 return _MainLinux(options, args, extra_env)
1504 1546
1505 if len(args) < 1: 1547 if len(args) < 1:
1506 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) 1548 raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
1507 1549
1508 if _ListLogProcessors(options.annotate): 1550 if _ListLogProcessors(options.annotate):
1509 return 0 1551 return 0
1510 log_processor_class = _SelectLogProcessor(options) 1552 log_processor_class = _SelectLogProcessor(options, args[0])
1511 log_processor = _CreateLogProcessor(log_processor_class, options) 1553 log_processor = _CreateLogProcessor(log_processor_class, options)
1512 1554
1513 if options.generate_json_file: 1555 if options.generate_json_file:
1514 if os.path.exists(options.test_output_xml): 1556 if os.path.exists(options.test_output_xml):
1515 # remove the old XML output file. 1557 # remove the old XML output file.
1516 os.remove(options.test_output_xml) 1558 os.remove(options.test_output_xml)
1517 1559
1518 # Assume it's a gtest apk, so use the android harness. 1560 # Assume it's a gtest apk, so use the android harness.
1519 test_suite = args[0] 1561 test_suite = args[0]
1520 run_test_target_option = '--release' 1562 run_test_target_option = '--release'
1521 if options.target == 'Debug': 1563 if options.target == 'Debug':
1522 run_test_target_option = '--debug' 1564 run_test_target_option = '--debug'
1523 command = ['src/build/android/test_runner.py', 'gtest', 1565 command = ['src/build/android/test_runner.py', 'gtest',
1524 run_test_target_option, '-s', test_suite] 1566 run_test_target_option, '-s', test_suite]
1525 result = _RunGTestCommand(command, extra_env, log_processor=log_processor) 1567 result = _RunGTestCommand(command, extra_env, log_processor=log_processor)
1526 1568
1527 if options.generate_json_file: 1569 if options.generate_json_file:
1528 if not _GenerateJSONForTestResults(options, log_processor): 1570 if not _GenerateJSONForTestResults(options, log_processor):
1529 return 1 1571 return 1
1530 1572
1531 if options.annotate: 1573 if options.annotate:
1532 annotation_utils.annotate( 1574 annotation_utils.annotate(
1533 options.test_type, result, log_processor, 1575 options.test_type, result, log_processor,
1534 options.factory_properties.get('full_test_name'), 1576 options.factory_properties.get('full_test_name'),
1535 perf_dashboard_id=options.perf_dashboard_id) 1577 perf_dashboard_id=options.perf_dashboard_id)
1536 1578
1537 if options.results_url: 1579 if options.results_url:
1538 _SendResultsToDashboard( 1580 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options))
1539 log_processor, _GetPerfID(options),
1540 options.test_type, options.results_url, options.build_dir,
1541 options.build_properties.get('mastername'),
1542 options.build_properties.get('buildername'),
1543 options.build_properties.get('buildnumber'),
1544 options.supplemental_columns_file,
1545 options.perf_config)
1546 1581
1547 return result 1582 return result
1548 1583
1549 1584
1550 def main(): 1585 def main():
1551 """Entry point for runtest.py. 1586 """Entry point for runtest.py.
1552 1587
1553 This function: 1588 This function:
1554 (1) Sets up the command-line options. 1589 (1) Sets up the command-line options.
1555 (2) Sets environment variables based on those options. 1590 (2) Sets environment variables based on those options.
(...skipping 387 matching lines...) Expand 10 before | Expand all | Expand 10 after
1943 finally: 1978 finally:
1944 if did_launch_dbus: 1979 if did_launch_dbus:
1945 # It looks like the command line argument --exit-with-session 1980 # It looks like the command line argument --exit-with-session
1946 # isn't working to clean up the spawned dbus-daemon. Kill it 1981 # isn't working to clean up the spawned dbus-daemon. Kill it
1947 # manually. 1982 # manually.
1948 _ShutdownDBus() 1983 _ShutdownDBus()
1949 1984
1950 1985
1951 if '__main__' == __name__: 1986 if '__main__' == __name__:
1952 sys.exit(main()) 1987 sys.exit(main())
OLDNEW
« no previous file with comments | « scripts/slave/results_dashboard.py ('k') | scripts/slave/telemetry.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698